Posted to commits@hbase.apache.org by ec...@apache.org on 2014/10/10 18:52:42 UTC
[01/38] HBASE-12197 Move rest to its own module
Repository: hbase
Updated Branches:
refs/heads/0.98 386f36db8 -> 876617bd3
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java
deleted file mode 100644
index 0f852ca..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import java.util.Iterator;
-
-import org.apache.hadoop.hbase.SmallTests;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.util.Base64;
-import org.apache.hadoop.hbase.util.Bytes;
-
-import org.junit.experimental.categories.Category;
-
-@Category(SmallTests.class)
-public class TestStorageClusterStatusModel extends TestModelBase<StorageClusterStatusModel> {
-
- public TestStorageClusterStatusModel() throws Exception {
- super(StorageClusterStatusModel.class);
-
- AS_XML =
- "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>" +
- "<ClusterStatus averageLoad=\"1.0\" regions=\"2\" requests=\"0\">" +
- "<DeadNodes/><LiveNodes>" +
- "<Node heapSizeMB=\"128\" maxHeapSizeMB=\"1024\" name=\"test1\" requests=\"0\" startCode=\"1245219839331\">" +
- "<Region currentCompactedKVs=\"1\" memstoreSizeMB=\"0\" name=\"aGJhc2U6cm9vdCwsMA==\" readRequestsCount=\"1\" " +
- "rootIndexSizeKB=\"1\" storefileIndexSizeMB=\"0\" storefileSizeMB=\"0\" storefiles=\"1\" stores=\"1\" " +
- "totalCompactingKVs=\"1\" totalStaticBloomSizeKB=\"1\" totalStaticIndexSizeKB=\"1\" writeRequestsCount=\"2\"/>" +
- "</Node>" +
- "<Node heapSizeMB=\"512\" maxHeapSizeMB=\"1024\" name=\"test2\" requests=\"0\" startCode=\"1245239331198\">" +
- "<Region currentCompactedKVs=\"1\" memstoreSizeMB=\"0\" name=\"aGJhc2U6bWV0YSwsMTI0NjAwMDA0MzcyNA==\" " +
- "readRequestsCount=\"1\" rootIndexSizeKB=\"1\" storefileIndexSizeMB=\"0\" storefileSizeMB=\"0\" " +
- "storefiles=\"1\" stores=\"1\" totalCompactingKVs=\"1\" totalStaticBloomSizeKB=\"1\" " +
- "totalStaticIndexSizeKB=\"1\" writeRequestsCount=\"2\"/></Node></LiveNodes></ClusterStatus>";
-
- AS_PB =
- "Cj8KBXRlc3QxEOO6i+eeJBgAIIABKIAIMicKDWhiYXNlOnJvb3QsLDAQARgBIAAoADAAOAFAAkgB" +
- "UAFYAWABaAEKSwoFdGVzdDIQ/pKx8J4kGAAggAQogAgyMwoZaGJhc2U6bWV0YSwsMTI0NjAwMDA0" +
- "MzcyNBABGAEgACgAMAA4AUACSAFQAVgBYAFoARgCIAApAAAAAAAA8D8=";
-
-
-    //Using Jackson for JSON will break backward compatibility for this
-    //representation, but the original output was already broken: it printed
-    //only one Node element, so the format itself was invalid.
- AS_JSON =
- "{\"regions\":2,\"requests\":0,\"averageLoad\":1.0,\"LiveNodes\":[{\"name\":\"test1\"," +
- "\"Region\":[{\"name\":\"aGJhc2U6cm9vdCwsMA==\",\"stores\":1,\"storefiles\":1," +
- "\"storefileSizeMB\":0,\"memstoreSizeMB\":0,\"storefileIndexSizeMB\":0," +
- "\"readRequestsCount\":1,\"writeRequestsCount\":2,\"rootIndexSizeKB\":1," +
- "\"totalStaticIndexSizeKB\":1,\"totalStaticBloomSizeKB\":1,\"totalCompactingKVs\":1," +
- "\"currentCompactedKVs\":1}],\"requests\":0,\"startCode\":1245219839331," +
- "\"heapSizeMB\":128,\"maxHeapSizeMB\":1024},{\"name\":\"test2\"," +
- "\"Region\":[{\"name\":\"aGJhc2U6bWV0YSwsMTI0NjAwMDA0MzcyNA==\",\"stores\":1," +
- "\"storefiles\":1,\"storefileSizeMB\":0,\"memstoreSizeMB\":0,\"storefileIndexSizeMB\":0," +
- "\"readRequestsCount\":1,\"writeRequestsCount\":2,\"rootIndexSizeKB\":1," +
- "\"totalStaticIndexSizeKB\":1,\"totalStaticBloomSizeKB\":1,\"totalCompactingKVs\":1," +
- "\"currentCompactedKVs\":1}],\"requests\":0,\"startCode\":1245239331198," +
- "\"heapSizeMB\":512,\"maxHeapSizeMB\":1024}],\"DeadNodes\":[]}";
- }
-
- protected StorageClusterStatusModel buildTestModel() {
- StorageClusterStatusModel model = new StorageClusterStatusModel();
- model.setRegions(2);
- model.setRequests(0);
- model.setAverageLoad(1.0);
- model.addLiveNode("test1", 1245219839331L, 128, 1024)
- .addRegion(Bytes.toBytes("hbase:root,,0"), 1, 1, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1);
- model.addLiveNode("test2", 1245239331198L, 512, 1024)
- .addRegion(Bytes.toBytes(TableName.META_TABLE_NAME+",,1246000043724"),1, 1, 0, 0, 0,
- 1, 2, 1, 1, 1, 1, 1);
- return model;
- }
-
- protected void checkModel(StorageClusterStatusModel model) {
- assertEquals(model.getRegions(), 2);
- assertEquals(model.getRequests(), 0);
- assertEquals(model.getAverageLoad(), 1.0);
- Iterator<StorageClusterStatusModel.Node> nodes =
- model.getLiveNodes().iterator();
- StorageClusterStatusModel.Node node = nodes.next();
- assertEquals(node.getName(), "test1");
- assertEquals(node.getStartCode(), 1245219839331L);
- assertEquals(node.getHeapSizeMB(), 128);
- assertEquals(node.getMaxHeapSizeMB(), 1024);
- Iterator<StorageClusterStatusModel.Node.Region> regions =
- node.getRegions().iterator();
- StorageClusterStatusModel.Node.Region region = regions.next();
- assertTrue(Bytes.toString(region.getName()).equals(
- "hbase:root,,0"));
- assertEquals(region.getStores(), 1);
- assertEquals(region.getStorefiles(), 1);
- assertEquals(region.getStorefileSizeMB(), 0);
- assertEquals(region.getMemstoreSizeMB(), 0);
- assertEquals(region.getStorefileIndexSizeMB(), 0);
- assertEquals(region.getReadRequestsCount(), 1);
- assertEquals(region.getWriteRequestsCount(), 2);
- assertEquals(region.getRootIndexSizeKB(), 1);
- assertEquals(region.getTotalStaticIndexSizeKB(), 1);
- assertEquals(region.getTotalStaticBloomSizeKB(), 1);
- assertEquals(region.getTotalCompactingKVs(), 1);
- assertEquals(region.getCurrentCompactedKVs(), 1);
- assertFalse(regions.hasNext());
- node = nodes.next();
- assertEquals(node.getName(), "test2");
- assertEquals(node.getStartCode(), 1245239331198L);
- assertEquals(node.getHeapSizeMB(), 512);
- assertEquals(node.getMaxHeapSizeMB(), 1024);
- regions = node.getRegions().iterator();
- region = regions.next();
- assertEquals(Bytes.toString(region.getName()),
- TableName.META_TABLE_NAME+",,1246000043724");
- assertEquals(region.getStores(), 1);
- assertEquals(region.getStorefiles(), 1);
- assertEquals(region.getStorefileSizeMB(), 0);
- assertEquals(region.getMemstoreSizeMB(), 0);
- assertEquals(region.getStorefileIndexSizeMB(), 0);
- assertEquals(region.getReadRequestsCount(), 1);
- assertEquals(region.getWriteRequestsCount(), 2);
- assertEquals(region.getRootIndexSizeKB(), 1);
- assertEquals(region.getTotalStaticIndexSizeKB(), 1);
- assertEquals(region.getTotalStaticBloomSizeKB(), 1);
- assertEquals(region.getTotalCompactingKVs(), 1);
- assertEquals(region.getCurrentCompactedKVs(), 1);
-
- assertFalse(regions.hasNext());
- assertFalse(nodes.hasNext());
- }
-}
-
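An aside on the fixtures above: the Region name attributes in the ClusterStatus XML and JSON are Base64-encoded region names. A minimal standalone sketch of the correspondence, using the JDK's java.util.Base64 rather than the org.apache.hadoop.hbase.util.Base64 helper the test imports:

import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class DecodeRegionNames {
  public static void main(String[] args) {
    Base64.Decoder dec = Base64.getDecoder();
    // Decodes to "hbase:root,,0", the name the test builds with Bytes.toBytes.
    System.out.println(
        new String(dec.decode("aGJhc2U6cm9vdCwsMA=="), StandardCharsets.UTF_8));
    // Decodes to "hbase:meta,,1246000043724", i.e. TableName.META_TABLE_NAME
    // plus the ",,1246000043724" suffix used in buildTestModel().
    System.out.println(
        new String(dec.decode("aGJhc2U6bWV0YSwsMTI0NjAwMDA0MzcyNA=="),
            StandardCharsets.UTF_8));
  }
}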
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java
deleted file mode 100644
index bd4fa1f..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import java.io.StringReader;
-import java.io.StringWriter;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-
-import junit.framework.TestCase;
-import org.apache.hadoop.hbase.SmallTests;
-import org.junit.experimental.categories.Category;
-
-@Category(SmallTests.class)
-public class TestStorageClusterVersionModel extends TestModelBase<StorageClusterVersionModel> {
- private static final String VERSION = "0.0.1-testing";
-
- public TestStorageClusterVersionModel() throws Exception {
- super(StorageClusterVersionModel.class);
- AS_XML =
- "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>"+
- "<ClusterVersion>" + VERSION + "</ClusterVersion>";
-
- AS_JSON = "\"0.0.1-testing\"";
- }
-
- protected StorageClusterVersionModel buildTestModel() {
- StorageClusterVersionModel model = new StorageClusterVersionModel();
- model.setVersion(VERSION);
- return model;
- }
-
- protected void checkModel(StorageClusterVersionModel model) {
- assertEquals(model.getVersion(), VERSION);
- }
-
- @Override
- public void testFromPB() throws Exception {
-    //ignore: this model has no protobuf representation to test
- }
-}
-
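The storage cluster version round-trips as bare element text (<ClusterVersion>0.0.1-testing</ClusterVersion>) in XML and as a bare JSON string. A hypothetical stand-in class (not the actual StorageClusterVersionModel) is enough to sketch the JAXB mapping the fixture implies:

import java.io.StringReader;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlValue;

public class ClusterVersionRoundTrip {
  // Hypothetical model: the version string is the element's text content.
  @XmlRootElement(name = "ClusterVersion")
  public static class ClusterVersion {
    @XmlValue
    public String version;
  }

  public static void main(String[] args) throws Exception {
    String xml = "<ClusterVersion>0.0.1-testing</ClusterVersion>";
    ClusterVersion cv = (ClusterVersion) JAXBContext.newInstance(ClusterVersion.class)
        .createUnmarshaller().unmarshal(new StringReader(xml));
    System.out.println(cv.version); // prints: 0.0.1-testing
  }
}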
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java
deleted file mode 100644
index dadb9ad..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import java.io.IOException;
-import java.io.StringReader;
-import java.io.StringWriter;
-import java.util.Iterator;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-
-import org.apache.hadoop.hbase.SmallTests;
-import org.apache.hadoop.hbase.util.Base64;
-import org.apache.hadoop.hbase.util.Bytes;
-
-import junit.framework.TestCase;
-import org.junit.experimental.categories.Category;
-
-@Category(SmallTests.class)
-public class TestTableInfoModel extends TestModelBase<TableInfoModel> {
- private static final String TABLE = "testtable";
- private static final byte[] START_KEY = Bytes.toBytes("abracadbra");
- private static final byte[] END_KEY = Bytes.toBytes("zzyzx");
- private static final long ID = 8731042424L;
- private static final String LOCATION = "testhost:9876";
-
- public TestTableInfoModel() throws Exception {
- super(TableInfoModel.class);
- AS_XML =
- "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><TableInfo " +
- "name=\"testtable\"><Region endKey=\"enp5eng=\" id=\"8731042424\" " +
- "location=\"testhost:9876\" " +
- "name=\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\" " +
- "startKey=\"YWJyYWNhZGJyYQ==\"/></TableInfo>";
-
- AS_PB =
- "Cgl0ZXN0dGFibGUSSQofdGVzdHRhYmxlLGFicmFjYWRicmEsODczMTA0MjQyNBIKYWJyYWNhZGJy" +
- "YRoFenp5engg+MSkwyAqDXRlc3Rob3N0Ojk4NzY=";
-
- AS_JSON =
- "{\"name\":\"testtable\",\"Region\":[{\"endKey\":\"enp5eng=\",\"id\":8731042424," +
- "\"location\":\"testhost:9876\",\"" +
- "name\":\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\",\"" +
- "startKey\":\"YWJyYWNhZGJyYQ==\"}]}";
- }
-
- protected TableInfoModel buildTestModel() {
- TableInfoModel model = new TableInfoModel();
- model.setName(TABLE);
- model.add(new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION));
- return model;
- }
-
- protected void checkModel(TableInfoModel model) {
- assertEquals(model.getName(), TABLE);
- Iterator<TableRegionModel> regions = model.getRegions().iterator();
- TableRegionModel region = regions.next();
- assertTrue(Bytes.equals(region.getStartKey(), START_KEY));
- assertTrue(Bytes.equals(region.getEndKey(), END_KEY));
- assertEquals(region.getId(), ID);
- assertEquals(region.getLocation(), LOCATION);
- assertFalse(regions.hasNext());
- }
-
- public void testBuildModel() throws Exception {
- checkModel(buildTestModel());
- }
-
- public void testFromXML() throws Exception {
- checkModel(fromXML(AS_XML));
- }
-
- public void testFromPB() throws Exception {
- checkModel(fromPB(AS_PB));
- }
-
-}
-
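The startKey and endKey attributes in the TableInfo fixtures are Base64 as well; decoding them recovers the plain keys the test declares as constants:

import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class DecodeRegionKeys {
  public static void main(String[] args) {
    Base64.Decoder dec = Base64.getDecoder();
    // startKey "YWJyYWNhZGJyYQ==" decodes to "abracadbra" (START_KEY above).
    System.out.println(new String(dec.decode("YWJyYWNhZGJyYQ=="), StandardCharsets.UTF_8));
    // endKey "enp5eng=" decodes to "zzyzx" (END_KEY above).
    System.out.println(new String(dec.decode("enp5eng="), StandardCharsets.UTF_8));
  }
}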
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java
deleted file mode 100644
index 4cb9194..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import java.io.IOException;
-import java.io.StringReader;
-import java.io.StringWriter;
-import java.util.Iterator;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-
-import org.apache.hadoop.hbase.SmallTests;
-import org.apache.hadoop.hbase.util.Base64;
-
-import junit.framework.TestCase;
-import org.junit.experimental.categories.Category;
-
-@Category(SmallTests.class)
-public class TestTableListModel extends TestModelBase<TableListModel> {
- private static final String TABLE1 = "table1";
- private static final String TABLE2 = "table2";
- private static final String TABLE3 = "table3";
-
- public TestTableListModel() throws Exception {
- super(TableListModel.class);
- AS_XML =
- "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><TableList><table " +
- "name=\"table1\"/><table name=\"table2\"/><table name=\"table3\"/></TableList>";
-
- AS_PB = "CgZ0YWJsZTEKBnRhYmxlMgoGdGFibGUz";
-
- AS_JSON =
- "{\"table\":[{\"name\":\"table1\"},{\"name\":\"table2\"},{\"name\":\"table3\"}]}";
- }
-
- protected TableListModel buildTestModel() {
- TableListModel model = new TableListModel();
- model.add(new TableModel(TABLE1));
- model.add(new TableModel(TABLE2));
- model.add(new TableModel(TABLE3));
- return model;
- }
-
- protected void checkModel(TableListModel model) {
- Iterator<TableModel> tables = model.getTables().iterator();
- TableModel table = tables.next();
- assertEquals(table.getName(), TABLE1);
- table = tables.next();
- assertEquals(table.getName(), TABLE2);
- table = tables.next();
- assertEquals(table.getName(), TABLE3);
- assertFalse(tables.hasNext());
- }
-}
-
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java
deleted file mode 100644
index 5c4b1a9..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import java.io.StringReader;
-import java.io.StringWriter;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-
-import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.util.Bytes;
-
-import junit.framework.TestCase;
-import org.junit.experimental.categories.Category;
-
-@Category(SmallTests.class)
-public class TestTableRegionModel extends TestModelBase<TableRegionModel> {
- private static final String TABLE = "testtable";
- private static final byte[] START_KEY = Bytes.toBytes("abracadbra");
- private static final byte[] END_KEY = Bytes.toBytes("zzyzx");
- private static final long ID = 8731042424L;
- private static final String LOCATION = "testhost:9876";
-
- public TestTableRegionModel() throws Exception {
- super(TableRegionModel.class);
-
- AS_XML =
- "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><Region endKey=\"enp5eng=\" " +
- "id=\"8731042424\" location=\"testhost:9876\" " +
- "name=\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\" " +
- "startKey=\"YWJyYWNhZGJyYQ==\"/>";
-
- AS_JSON =
- "{\"endKey\":\"enp5eng=\",\"id\":8731042424,\"location\":\"testhost:9876\"," +
- "\"name\":\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\",\"" +
- "startKey\":\"YWJyYWNhZGJyYQ==\"}";
- }
-
- protected TableRegionModel buildTestModel() {
- TableRegionModel model =
- new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION);
- return model;
- }
-
- protected void checkModel(TableRegionModel model) {
- assertTrue(Bytes.equals(model.getStartKey(), START_KEY));
- assertTrue(Bytes.equals(model.getEndKey(), END_KEY));
- assertEquals(model.getId(), ID);
- assertEquals(model.getLocation(), LOCATION);
- assertEquals(model.getName(),
- TABLE + "," + Bytes.toString(START_KEY) + "," + Long.toString(ID) +
- ".ad9860f031282c46ed431d7af8f94aca.");
- }
-
- public void testGetName() {
- TableRegionModel model = buildTestModel();
- String modelName = model.getName();
- HRegionInfo hri = new HRegionInfo(TableName.valueOf(TABLE),
- START_KEY, END_KEY, false, ID);
- assertEquals(modelName, hri.getRegionNameAsString());
- }
-
- public void testSetName() {
- TableRegionModel model = buildTestModel();
- String name = model.getName();
- model.setName(name);
- assertEquals(name, model.getName());
- }
-
- @Override
- public void testFromPB() throws Exception {
-    //ignore: this model has no protobuf representation to test
- }
-}
-
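The trailing ".ad9860f031282c46ed431d7af8f94aca." segment that testGetName() expects is the encoded region name. A sketch of its derivation, under the assumption that HRegionInfo computes it as the MD5 hex digest of the leading "table,startKey,id" bytes:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

public class EncodedRegionName {
  public static void main(String[] args) throws Exception {
    // Assumption: the digest input is exactly the region name prefix below.
    byte[] prefix = "testtable,abracadbra,8731042424".getBytes(StandardCharsets.UTF_8);
    byte[] md5 = MessageDigest.getInstance("MD5").digest(prefix);
    StringBuilder hex = new StringBuilder();
    for (byte b : md5) {
      hex.append(String.format("%02x", b)); // negative bytes format unsigned
    }
    // The fixture expects: ad9860f031282c46ed431d7af8f94aca
    System.out.println(hex);
  }
}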
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java
deleted file mode 100644
index b725f7b..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import java.io.IOException;
-import java.io.StringReader;
-import java.io.StringWriter;
-import java.util.Iterator;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-
-import org.apache.hadoop.hbase.SmallTests;
-import org.apache.hadoop.hbase.util.Base64;
-
-import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
-
-import junit.framework.TestCase;
-import org.junit.experimental.categories.Category;
-
-@Category(SmallTests.class)
-public class TestTableSchemaModel extends TestModelBase<TableSchemaModel> {
-
- public static final String TABLE_NAME = "testTable";
- private static final boolean IS_META = false;
- private static final boolean IS_ROOT = false;
- private static final boolean READONLY = false;
-
- TestColumnSchemaModel testColumnSchemaModel;
-
- private JAXBContext context;
-
- public TestTableSchemaModel() throws Exception {
- super(TableSchemaModel.class);
- testColumnSchemaModel = new TestColumnSchemaModel();
-
- AS_XML =
- "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>" +
- "<TableSchema name=\"testTable\" IS_META=\"false\" IS_ROOT=\"false\" READONLY=\"false\">" +
- "<ColumnSchema name=\"testcolumn\" BLOCKSIZE=\"16384\" BLOOMFILTER=\"NONE\" " +
- "BLOCKCACHE=\"true\" COMPRESSION=\"GZ\" VERSIONS=\"1\" TTL=\"86400\" IN_MEMORY=\"false\"/>" +
- "</TableSchema>";
-
- AS_PB =
- "Cgl0ZXN0VGFibGUSEAoHSVNfTUVUQRIFZmFsc2USEAoHSVNfUk9PVBIFZmFsc2USEQoIUkVBRE9O" +
- "TFkSBWZhbHNlGpcBCgp0ZXN0Y29sdW1uEhIKCUJMT0NLU0laRRIFMTYzODQSEwoLQkxPT01GSUxU" +
- "RVISBE5PTkUSEgoKQkxPQ0tDQUNIRRIEdHJ1ZRIRCgtDT01QUkVTU0lPThICR1oSDQoIVkVSU0lP" +
- "TlMSATESDAoDVFRMEgU4NjQwMBISCglJTl9NRU1PUlkSBWZhbHNlGICjBSABKgJHWigA";
-
- AS_JSON =
- "{\"name\":\"testTable\",\"IS_META\":\"false\",\"IS_ROOT\":\"false\"," +
- "\"READONLY\":\"false\",\"ColumnSchema\":[{\"name\":\"testcolumn\"," +
- "\"BLOCKSIZE\":\"16384\",\"BLOOMFILTER\":\"NONE\",\"BLOCKCACHE\":\"true\"," +
- "\"COMPRESSION\":\"GZ\",\"VERSIONS\":\"1\",\"TTL\":\"86400\",\"IN_MEMORY\":\"false\"}]}";
- }
-
- protected TableSchemaModel buildTestModel() {
- return buildTestModel(TABLE_NAME);
- }
-
- public TableSchemaModel buildTestModel(String name) {
- TableSchemaModel model = new TableSchemaModel();
- model.setName(name);
- model.__setIsMeta(IS_META);
- model.__setIsRoot(IS_ROOT);
- model.__setReadOnly(READONLY);
- model.addColumnFamily(testColumnSchemaModel.buildTestModel());
- return model;
- }
-
- protected void checkModel(TableSchemaModel model) {
- checkModel(model, TABLE_NAME);
- }
-
- public void checkModel(TableSchemaModel model, String tableName) {
- assertEquals(model.getName(), tableName);
- assertEquals(model.__getIsMeta(), IS_META);
- assertEquals(model.__getIsRoot(), IS_ROOT);
- assertEquals(model.__getReadOnly(), READONLY);
- Iterator<ColumnSchemaModel> families = model.getColumns().iterator();
- assertTrue(families.hasNext());
- ColumnSchemaModel family = families.next();
- testColumnSchemaModel.checkModel(family);
- assertFalse(families.hasNext());
- }
-
- public void testBuildModel() throws Exception {
- checkModel(buildTestModel());
- }
-
- public void testFromXML() throws Exception {
- checkModel(fromXML(AS_XML));
- }
-
- public void testFromPB() throws Exception {
- checkModel(fromPB(AS_PB));
- }
-
-}
-
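The constructor comment in TestStorageClusterStatusModel noted that these JSON fixtures are produced via Jackson. A short sketch of inspecting the schema JSON above, written against jackson-databind 2.x (an assumption; the 0.98-era code used the older org.codehaus.jackson packages):

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class ReadSchemaJson {
  public static void main(String[] args) throws Exception {
    String json =
        "{\"name\":\"testTable\",\"IS_META\":\"false\",\"IS_ROOT\":\"false\","
        + "\"READONLY\":\"false\",\"ColumnSchema\":[{\"name\":\"testcolumn\","
        + "\"BLOCKSIZE\":\"16384\",\"BLOOMFILTER\":\"NONE\",\"BLOCKCACHE\":\"true\","
        + "\"COMPRESSION\":\"GZ\",\"VERSIONS\":\"1\",\"TTL\":\"86400\","
        + "\"IN_MEMORY\":\"false\"}]}";
    JsonNode root = new ObjectMapper().readTree(json);
    System.out.println(root.get("name").asText());          // testTable
    JsonNode family = root.get("ColumnSchema").get(0);
    System.out.println(family.get("COMPRESSION").asText()); // GZ
  }
}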
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java
deleted file mode 100644
index 553bb35..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import java.io.IOException;
-import java.io.StringReader;
-import java.io.StringWriter;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-
-import org.apache.hadoop.hbase.SmallTests;
-import org.apache.hadoop.hbase.util.Base64;
-
-import junit.framework.TestCase;
-import org.junit.experimental.categories.Category;
-
-@Category(SmallTests.class)
-public class TestVersionModel extends TestModelBase<VersionModel> {
- private static final String REST_VERSION = "0.0.1";
- private static final String OS_VERSION =
- "Linux 2.6.18-128.1.6.el5.centos.plusxen amd64";
- private static final String JVM_VERSION =
- "Sun Microsystems Inc. 1.6.0_13-11.3-b02";
- private static final String JETTY_VERSION = "6.1.14";
- private static final String JERSEY_VERSION = "1.1.0-ea";
-
- public TestVersionModel() throws Exception {
- super(VersionModel.class);
- AS_XML =
- "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><Version JVM=\"Sun " +
- "Microsystems Inc. 1.6.0_13-11.3-b02\" Jersey=\"1.1.0-ea\" " +
- "OS=\"Linux 2.6.18-128.1.6.el5.centos.plusxen amd64\" REST=\"0.0.1\" Server=\"6.1.14\"/>";
-
- AS_PB =
- "CgUwLjAuMRInU3VuIE1pY3Jvc3lzdGVtcyBJbmMuIDEuNi4wXzEzLTExLjMtYjAyGi1MaW51eCAy" +
- "LjYuMTgtMTI4LjEuNi5lbDUuY2VudG9zLnBsdXN4ZW4gYW1kNjQiBjYuMS4xNCoIMS4xLjAtZWE=";
-
- AS_JSON =
- "{\"JVM\":\"Sun Microsystems Inc. 1.6.0_13-11.3-b02\",\"Jersey\":\"1.1.0-ea\"," +
- "\"OS\":\"Linux 2.6.18-128.1.6.el5.centos.plusxen amd64\",\"" +
- "REST\":\"0.0.1\",\"Server\":\"6.1.14\"}";
- }
-
- protected VersionModel buildTestModel() {
- VersionModel model = new VersionModel();
- model.setRESTVersion(REST_VERSION);
- model.setOSVersion(OS_VERSION);
- model.setJVMVersion(JVM_VERSION);
- model.setServerVersion(JETTY_VERSION);
- model.setJerseyVersion(JERSEY_VERSION);
- return model;
- }
-
- protected void checkModel(VersionModel model) {
- assertEquals(model.getRESTVersion(), REST_VERSION);
- assertEquals(model.getOSVersion(), OS_VERSION);
- assertEquals(model.getJVMVersion(), JVM_VERSION);
- assertEquals(model.getServerVersion(), JETTY_VERSION);
- assertEquals(model.getJerseyVersion(), JERSEY_VERSION);
- }
-}
-
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 5f63c0b..c5734a3 100644
--- a/pom.xml
+++ b/pom.xml
@@ -51,6 +51,7 @@
<modules>
<module>hbase-server</module>
<module>hbase-thrift</module>
+ <module>hbase-rest</module>
<module>hbase-shell</module>
<module>hbase-protocol</module>
<module>hbase-client</module>
@@ -1094,12 +1095,24 @@
<type>test-jar</type>
<scope>test</scope>
</dependency>
- <dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-testing-util</artifactId>
- <version>${project.version}</version>
- <scope>test</scope>
- </dependency>
+ <dependency>
+ <artifactId>hbase-rest</artifactId>
+ <groupId>org.apache.hbase</groupId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <artifactId>hbase-rest</artifactId>
+ <groupId>org.apache.hbase</groupId>
+ <version>${project.version}</version>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-testing-util</artifactId>
+ <version>${project.version}</version>
+ <scope>test</scope>
+ </dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-prefix-tree</artifactId>
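With hbase-rest now declared in the parent pom (presumably within its dependencyManagement section, given the hunk above), a consuming module can reference the new artifact without repeating the version. A sketch of the consumer-side declarations this enables:

<!-- In a downstream module's pom.xml; versions are inherited from the
     entries added to the parent pom above. -->
<dependency>
  <groupId>org.apache.hbase</groupId>
  <artifactId>hbase-rest</artifactId>
</dependency>
<dependency>
  <groupId>org.apache.hbase</groupId>
  <artifactId>hbase-rest</artifactId>
  <type>test-jar</type>
  <scope>test</scope>
</dependency>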
[04/38] HBASE-12197 Move rest to its own module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java
deleted file mode 100644
index c14f3e2..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java
+++ /dev/null
@@ -1,585 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.StringWriter;
-import java.net.URLEncoder;
-import java.util.List;
-
-import javax.xml.bind.JAXBException;
-
-import org.apache.commons.httpclient.Header;
-import org.apache.hadoop.hbase.CompatibilityFactory;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.MediumTests;
-import org.apache.hadoop.hbase.rest.client.Response;
-import org.apache.hadoop.hbase.rest.model.CellModel;
-import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.test.MetricsAssertHelper;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(MediumTests.class)
-public class TestGetAndPutResource extends RowResourceBase {
-
- private static final MetricsAssertHelper METRICS_ASSERT =
- CompatibilityFactory.getInstance(MetricsAssertHelper.class);
-
- @Test
- public void testForbidden() throws IOException, JAXBException {
- conf.set("hbase.rest.readonly", "true");
-
- Response response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
- assertEquals(response.getCode(), 403);
- response = putValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
- assertEquals(response.getCode(), 403);
- response = checkAndPutValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1, VALUE_2);
- assertEquals(response.getCode(), 403);
- response = checkAndPutValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1, VALUE_2);
- assertEquals(response.getCode(), 403);
- response = deleteValue(TABLE, ROW_1, COLUMN_1);
- assertEquals(response.getCode(), 403);
- response = checkAndDeletePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
- assertEquals(response.getCode(), 403);
- response = deleteRow(TABLE, ROW_1);
- assertEquals(response.getCode(), 403);
-
- conf.set("hbase.rest.readonly", "false");
-
- response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
- assertEquals(response.getCode(), 200);
- response = putValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
- assertEquals(response.getCode(), 200);
- response = checkAndPutValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1, VALUE_2);
- assertEquals(response.getCode(), 200);
- response = checkAndPutValuePB(TABLE, ROW_1, COLUMN_1, VALUE_2, VALUE_3);
- assertEquals(response.getCode(), 200);
- response = deleteValue(TABLE, ROW_1, COLUMN_1);
- assertEquals(response.getCode(), 200);
- response = deleteRow(TABLE, ROW_1);
- assertEquals(response.getCode(), 200);
- }
-
- @Test
- public void testSingleCellGetPutXML() throws IOException, JAXBException {
- Response response = getValueXML(TABLE, ROW_1, COLUMN_1);
- assertEquals(response.getCode(), 404);
-
- response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
- assertEquals(response.getCode(), 200);
- checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
- response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_2);
- assertEquals(response.getCode(), 200);
- checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_2);
- response = checkAndPutValueXML(TABLE, ROW_1, COLUMN_1, VALUE_2, VALUE_3);
- assertEquals(response.getCode(), 200);
- checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_3);
- response = checkAndDeleteXML(TABLE, ROW_1, COLUMN_1, VALUE_3);
- assertEquals(response.getCode(), 200);
-
- response = deleteRow(TABLE, ROW_1);
- assertEquals(response.getCode(), 200);
- }
-
- @Test
- public void testSingleCellGetPutPB() throws IOException, JAXBException {
- Response response = getValuePB(TABLE, ROW_1, COLUMN_1);
- assertEquals(response.getCode(), 404);
-
- response = putValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
- assertEquals(response.getCode(), 200);
- checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
- response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_2);
- assertEquals(response.getCode(), 200);
- checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_2);
-
- response = checkAndPutValuePB(TABLE, ROW_1, COLUMN_1, VALUE_2, VALUE_3);
- assertEquals(response.getCode(), 200);
- checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_3);
- response = checkAndPutValueXML(TABLE, ROW_1, COLUMN_1, VALUE_3, VALUE_4);
- assertEquals(response.getCode(), 200);
- checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_4);
-
- response = deleteRow(TABLE, ROW_1);
- assertEquals(response.getCode(), 200);
- }
-
- @Test
- public void testSingleCellGetPutBinary() throws IOException {
- final String path = "/" + TABLE + "/" + ROW_3 + "/" + COLUMN_1;
- final byte[] body = Bytes.toBytes(VALUE_3);
- Response response = client.put(path, Constants.MIMETYPE_BINARY, body);
- assertEquals(response.getCode(), 200);
- Thread.yield();
-
- response = client.get(path, Constants.MIMETYPE_BINARY);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_BINARY, response.getHeader("content-type"));
- assertTrue(Bytes.equals(response.getBody(), body));
- boolean foundTimestampHeader = false;
- for (Header header: response.getHeaders()) {
- if (header.getName().equals("X-Timestamp")) {
- foundTimestampHeader = true;
- break;
- }
- }
- assertTrue(foundTimestampHeader);
-
- response = deleteRow(TABLE, ROW_3);
- assertEquals(response.getCode(), 200);
- }
-
- @Test
- public void testSingleCellGetJSON() throws IOException, JAXBException {
- final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1;
- Response response = client.put(path, Constants.MIMETYPE_BINARY,
- Bytes.toBytes(VALUE_4));
- assertEquals(response.getCode(), 200);
- Thread.yield();
- response = client.get(path, Constants.MIMETYPE_JSON);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
- response = deleteRow(TABLE, ROW_4);
- assertEquals(response.getCode(), 200);
- }
-
- @Test
- public void testLatestCellGetJSON() throws IOException, JAXBException {
- final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1;
- CellSetModel cellSetModel = new CellSetModel();
- RowModel rowModel = new RowModel(ROW_4);
- CellModel cellOne = new CellModel(Bytes.toBytes(COLUMN_1), 1L,
- Bytes.toBytes(VALUE_1));
- CellModel cellTwo = new CellModel(Bytes.toBytes(COLUMN_1), 2L,
- Bytes.toBytes(VALUE_2));
- rowModel.addCell(cellOne);
- rowModel.addCell(cellTwo);
- cellSetModel.addRow(rowModel);
- String jsonString = jsonMapper.writeValueAsString(cellSetModel);
- Response response = client.put(path, Constants.MIMETYPE_JSON,
- Bytes.toBytes(jsonString));
- assertEquals(response.getCode(), 200);
- Thread.yield();
- response = client.get(path, Constants.MIMETYPE_JSON);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
- CellSetModel cellSet = jsonMapper.readValue(response.getBody(), CellSetModel.class);
- assertTrue(cellSet.getRows().size() == 1);
- assertTrue(cellSet.getRows().get(0).getCells().size() == 1);
- CellModel cell = cellSet.getRows().get(0).getCells().get(0);
- assertEquals(VALUE_2 , Bytes.toString(cell.getValue()));
- assertEquals(2L , cell.getTimestamp());
- response = deleteRow(TABLE, ROW_4);
- assertEquals(response.getCode(), 200);
- }
-
- @Test
- public void testURLEncodedKey() throws IOException, JAXBException {
- String urlKey = "http://example.com/foo";
- StringBuilder path = new StringBuilder();
- path.append('/');
- path.append(TABLE);
- path.append('/');
- path.append(URLEncoder.encode(urlKey, HConstants.UTF8_ENCODING));
- path.append('/');
- path.append(COLUMN_1);
- Response response;
- response = putValueXML(path.toString(), TABLE, urlKey, COLUMN_1,
- VALUE_1);
- assertEquals(response.getCode(), 200);
- checkValueXML(path.toString(), TABLE, urlKey, COLUMN_1, VALUE_1);
- }
-
- @Test
- public void testNoSuchCF() throws IOException, JAXBException {
- final String goodPath = "/" + TABLE + "/" + ROW_1 + "/" + CFA+":";
- final String badPath = "/" + TABLE + "/" + ROW_1 + "/" + "BAD";
- Response response = client.post(goodPath, Constants.MIMETYPE_BINARY,
- Bytes.toBytes(VALUE_1));
- assertEquals(response.getCode(), 200);
- assertEquals(client.get(goodPath, Constants.MIMETYPE_BINARY).getCode(),
- 200);
- assertEquals(client.get(badPath, Constants.MIMETYPE_BINARY).getCode(),
- 404);
- assertEquals(client.get(goodPath, Constants.MIMETYPE_BINARY).getCode(),
- 200);
- }
-
- @Test
- public void testMultiCellGetPutXML() throws IOException, JAXBException {
- String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row
-
- CellSetModel cellSetModel = new CellSetModel();
- RowModel rowModel = new RowModel(ROW_1);
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
- Bytes.toBytes(VALUE_1)));
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
- Bytes.toBytes(VALUE_2)));
- cellSetModel.addRow(rowModel);
- rowModel = new RowModel(ROW_2);
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
- Bytes.toBytes(VALUE_3)));
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
- Bytes.toBytes(VALUE_4)));
- cellSetModel.addRow(rowModel);
- StringWriter writer = new StringWriter();
- xmlMarshaller.marshal(cellSetModel, writer);
- Response response = client.put(path, Constants.MIMETYPE_XML,
- Bytes.toBytes(writer.toString()));
- Thread.yield();
-
- // make sure the fake row was not actually created
- response = client.get(path, Constants.MIMETYPE_XML);
- assertEquals(response.getCode(), 404);
-
- // check that all of the values were created
- checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
- checkValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2);
- checkValueXML(TABLE, ROW_2, COLUMN_1, VALUE_3);
- checkValueXML(TABLE, ROW_2, COLUMN_2, VALUE_4);
-
- response = deleteRow(TABLE, ROW_1);
- assertEquals(response.getCode(), 200);
- response = deleteRow(TABLE, ROW_2);
- assertEquals(response.getCode(), 200);
- }
-
- @Test
- public void testMultiCellGetPutPB() throws IOException {
- String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row
-
- CellSetModel cellSetModel = new CellSetModel();
- RowModel rowModel = new RowModel(ROW_1);
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
- Bytes.toBytes(VALUE_1)));
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
- Bytes.toBytes(VALUE_2)));
- cellSetModel.addRow(rowModel);
- rowModel = new RowModel(ROW_2);
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
- Bytes.toBytes(VALUE_3)));
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
- Bytes.toBytes(VALUE_4)));
- cellSetModel.addRow(rowModel);
- Response response = client.put(path, Constants.MIMETYPE_PROTOBUF,
- cellSetModel.createProtobufOutput());
- Thread.yield();
-
- // make sure the fake row was not actually created
- response = client.get(path, Constants.MIMETYPE_PROTOBUF);
- assertEquals(response.getCode(), 404);
-
- // check that all of the values were created
- checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
- checkValuePB(TABLE, ROW_1, COLUMN_2, VALUE_2);
- checkValuePB(TABLE, ROW_2, COLUMN_1, VALUE_3);
- checkValuePB(TABLE, ROW_2, COLUMN_2, VALUE_4);
-
- response = deleteRow(TABLE, ROW_1);
- assertEquals(response.getCode(), 200);
- response = deleteRow(TABLE, ROW_2);
- assertEquals(response.getCode(), 200);
- }
-
- @Test
- public void testStartEndRowGetPutXML() throws IOException, JAXBException {
- String[] rows = { ROW_1, ROW_2, ROW_3 };
- String[] values = { VALUE_1, VALUE_2, VALUE_3 };
- Response response = null;
- for (int i = 0; i < rows.length; i++) {
- response = putValueXML(TABLE, rows[i], COLUMN_1, values[i]);
- assertEquals(200, response.getCode());
- checkValueXML(TABLE, rows[i], COLUMN_1, values[i]);
- }
- response = getValueXML(TABLE, rows[0], rows[2], COLUMN_1);
- assertEquals(200, response.getCode());
- CellSetModel cellSet = (CellSetModel)
- xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
- assertEquals(2, cellSet.getRows().size());
- for (int i = 0; i < cellSet.getRows().size()-1; i++) {
- RowModel rowModel = cellSet.getRows().get(i);
- for (CellModel cell: rowModel.getCells()) {
- assertEquals(COLUMN_1, Bytes.toString(cell.getColumn()));
- assertEquals(values[i], Bytes.toString(cell.getValue()));
- }
- }
- for (String row : rows) {
- response = deleteRow(TABLE, row);
- assertEquals(200, response.getCode());
- }
- }
-
- @Test
- public void testInvalidCheckParam() throws IOException, JAXBException {
- CellSetModel cellSetModel = new CellSetModel();
- RowModel rowModel = new RowModel(ROW_1);
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
- Bytes.toBytes(VALUE_1)));
- cellSetModel.addRow(rowModel);
- StringWriter writer = new StringWriter();
- xmlMarshaller.marshal(cellSetModel, writer);
-
- final String path = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1 + "?check=blah";
-
- Response response = client.put(path, Constants.MIMETYPE_XML,
- Bytes.toBytes(writer.toString()));
- assertEquals(response.getCode(), 400);
- }
-
- @Test
- public void testInvalidColumnPut() throws IOException, JAXBException {
- String dummyColumn = "doesnot:exist";
- CellSetModel cellSetModel = new CellSetModel();
- RowModel rowModel = new RowModel(ROW_1);
- rowModel.addCell(new CellModel(Bytes.toBytes(dummyColumn),
- Bytes.toBytes(VALUE_1)));
- cellSetModel.addRow(rowModel);
- StringWriter writer = new StringWriter();
- xmlMarshaller.marshal(cellSetModel, writer);
-
- final String path = "/" + TABLE + "/" + ROW_1 + "/" + dummyColumn;
-
- Response response = client.put(path, Constants.MIMETYPE_XML,
- Bytes.toBytes(writer.toString()));
- assertEquals(response.getCode(), 404);
- }
-
- @Test
- public void testMultiCellGetJson() throws IOException, JAXBException {
- String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row
-
- CellSetModel cellSetModel = new CellSetModel();
- RowModel rowModel = new RowModel(ROW_1);
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
- Bytes.toBytes(VALUE_1)));
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
- Bytes.toBytes(VALUE_2)));
- cellSetModel.addRow(rowModel);
- rowModel = new RowModel(ROW_2);
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
- Bytes.toBytes(VALUE_3)));
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
- Bytes.toBytes(VALUE_4)));
- cellSetModel.addRow(rowModel);
- String jsonString = jsonMapper.writeValueAsString(cellSetModel);
-
- Response response = client.put(path, Constants.MIMETYPE_JSON,
- Bytes.toBytes(jsonString));
- Thread.yield();
-
- // make sure the fake row was not actually created
- response = client.get(path, Constants.MIMETYPE_JSON);
- assertEquals(response.getCode(), 404);
-
- // check that all of the values were created
- checkValueJSON(TABLE, ROW_1, COLUMN_1, VALUE_1);
- checkValueJSON(TABLE, ROW_1, COLUMN_2, VALUE_2);
- checkValueJSON(TABLE, ROW_2, COLUMN_1, VALUE_3);
- checkValueJSON(TABLE, ROW_2, COLUMN_2, VALUE_4);
-
- response = deleteRow(TABLE, ROW_1);
- assertEquals(response.getCode(), 200);
- response = deleteRow(TABLE, ROW_2);
- assertEquals(response.getCode(), 200);
- }
-
- @Test
- public void testMetrics() throws IOException, JAXBException {
- final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1;
- Response response = client.put(path, Constants.MIMETYPE_BINARY,
- Bytes.toBytes(VALUE_4));
- assertEquals(response.getCode(), 200);
- Thread.yield();
- response = client.get(path, Constants.MIMETYPE_JSON);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
- response = deleteRow(TABLE, ROW_4);
- assertEquals(response.getCode(), 200);
-
- UserProvider userProvider = UserProvider.instantiate(conf);
- METRICS_ASSERT.assertCounterGt("requests", 2l,
- RESTServlet.getInstance(conf, userProvider).getMetrics().getSource());
-
- METRICS_ASSERT.assertCounterGt("successfulGet", 0l,
- RESTServlet.getInstance(conf, userProvider).getMetrics().getSource());
-
- METRICS_ASSERT.assertCounterGt("successfulPut", 0l,
- RESTServlet.getInstance(conf, userProvider).getMetrics().getSource());
-
- METRICS_ASSERT.assertCounterGt("successfulDelete", 0l,
- RESTServlet.getInstance(conf, userProvider).getMetrics().getSource());
- }
-
- @Test
- public void testMultiColumnGetXML() throws Exception {
- String path = "/" + TABLE + "/fakerow";
- CellSetModel cellSetModel = new CellSetModel();
- RowModel rowModel = new RowModel(ROW_1);
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1)));
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2)));
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_3), Bytes.toBytes(VALUE_2)));
- cellSetModel.addRow(rowModel);
- StringWriter writer = new StringWriter();
- xmlMarshaller.marshal(cellSetModel, writer);
-
- Response response = client.put(path, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString()));
- Thread.yield();
-
- // make sure the fake row was not actually created
- response = client.get(path, Constants.MIMETYPE_XML);
- assertEquals(response.getCode(), 404);
-
- // Try getting all the column values at once.
- path = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1 + "," + COLUMN_2 + "," + COLUMN_3;
- response = client.get(path, Constants.MIMETYPE_XML);
- assertEquals(200, response.getCode());
- CellSetModel cellSet = (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response
- .getBody()));
- assertTrue(cellSet.getRows().size() == 1);
- assertTrue(cellSet.getRows().get(0).getCells().size() == 3);
- List<CellModel> cells = cellSet.getRows().get(0).getCells();
-
- assertTrue(containsCellModel(cells, COLUMN_1, VALUE_1));
- assertTrue(containsCellModel(cells, COLUMN_2, VALUE_2));
- assertTrue(containsCellModel(cells, COLUMN_3, VALUE_2));
- response = deleteRow(TABLE, ROW_1);
- assertEquals(response.getCode(), 200);
- }
-
- private boolean containsCellModel(List<CellModel> cells, String column, String value) {
- boolean contains = false;
- for (CellModel cell : cells) {
- if (Bytes.toString(cell.getColumn()).equals(column)
- && Bytes.toString(cell.getValue()).equals(value)) {
- contains = true;
- return contains;
- }
- }
- return contains;
- }
-
- @Test
- public void testSuffixGlobbingXMLWithNewScanner() throws IOException, JAXBException {
- String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row
-
- CellSetModel cellSetModel = new CellSetModel();
- RowModel rowModel = new RowModel(ROW_1);
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
- Bytes.toBytes(VALUE_1)));
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
- Bytes.toBytes(VALUE_2)));
- cellSetModel.addRow(rowModel);
- rowModel = new RowModel(ROW_2);
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
- Bytes.toBytes(VALUE_3)));
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
- Bytes.toBytes(VALUE_4)));
- cellSetModel.addRow(rowModel);
- StringWriter writer = new StringWriter();
- xmlMarshaller.marshal(cellSetModel, writer);
- Response response = client.put(path, Constants.MIMETYPE_XML,
- Bytes.toBytes(writer.toString()));
- Thread.yield();
-
- // make sure the fake row was not actually created
- response = client.get(path, Constants.MIMETYPE_XML);
- assertEquals(response.getCode(), 404);
-
- // check that all of the values were created
- StringBuilder query = new StringBuilder();
- query.append('/');
- query.append(TABLE);
- query.append('/');
- query.append("testrow*");
- response = client.get(query.toString(), Constants.MIMETYPE_XML);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
- CellSetModel cellSet = (CellSetModel)
- xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
- assertTrue(cellSet.getRows().size() == 2);
-
- response = deleteRow(TABLE, ROW_1);
- assertEquals(response.getCode(), 200);
- response = deleteRow(TABLE, ROW_2);
- assertEquals(response.getCode(), 200);
- }
-
- @Test
- public void testSuffixGlobbingXML() throws IOException, JAXBException {
- String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row
-
- CellSetModel cellSetModel = new CellSetModel();
- RowModel rowModel = new RowModel(ROW_1);
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
- Bytes.toBytes(VALUE_1)));
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
- Bytes.toBytes(VALUE_2)));
- cellSetModel.addRow(rowModel);
- rowModel = new RowModel(ROW_2);
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
- Bytes.toBytes(VALUE_3)));
- rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
- Bytes.toBytes(VALUE_4)));
- cellSetModel.addRow(rowModel);
- StringWriter writer = new StringWriter();
- xmlMarshaller.marshal(cellSetModel, writer);
- Response response = client.put(path, Constants.MIMETYPE_XML,
- Bytes.toBytes(writer.toString()));
- Thread.yield();
-
- // make sure the fake row was not actually created
- response = client.get(path, Constants.MIMETYPE_XML);
- assertEquals(response.getCode(), 404);
-
- // check that all of the values were created
- StringBuilder query = new StringBuilder();
- query.append('/');
- query.append(TABLE);
- query.append('/');
- query.append("testrow*");
- query.append('/');
- query.append(COLUMN_1);
- response = client.get(query.toString(), Constants.MIMETYPE_XML);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
- CellSetModel cellSet = (CellSetModel)
- xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
- List<RowModel> rows = cellSet.getRows();
- assertTrue(rows.size() == 2);
- for (RowModel row : rows) {
- assertTrue(row.getCells().size() == 1);
- assertEquals(COLUMN_1, Bytes.toString(row.getCells().get(0).getColumn()));
- }
- response = deleteRow(TABLE, ROW_1);
- assertEquals(response.getCode(), 200);
- response = deleteRow(TABLE, ROW_2);
- assertEquals(response.getCode(), 200);
- }
-}
-
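The resource tests above drive the REST gateway through the bundled Java client. A condensed sketch of that access pattern, assuming a gateway already running on localhost:8080 and an existing table "t" with column family "a" (the classes are the same ones the deleted test imports from org.apache.hadoop.hbase.rest.client):

import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;
import org.apache.hadoop.hbase.util.Bytes;

public class RestPutGetSketch {
  public static void main(String[] args) throws Exception {
    Client client = new Client(new Cluster().add("localhost", 8080));
    String path = "/t/row1/a:1";        // /table/row/family:qualifier
    // Raw binary PUT of a single cell value.
    Response put = client.put(path, Constants.MIMETYPE_BINARY, Bytes.toBytes("value1"));
    System.out.println(put.getCode());  // 200 on success
    // Read the same cell back as raw bytes.
    Response get = client.get(path, Constants.MIMETYPE_BINARY);
    System.out.println(Bytes.toString(get.getBody()));
  }
}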
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java
deleted file mode 100644
index 23da0ec..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.util.zip.GZIPInputStream;
-import java.util.zip.GZIPOutputStream;
-
-import org.apache.commons.httpclient.Header;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MediumTests;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.rest.client.Client;
-import org.apache.hadoop.hbase.rest.client.Cluster;
-import org.apache.hadoop.hbase.rest.client.Response;
-import org.apache.hadoop.hbase.rest.filter.GzipFilter;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(MediumTests.class)
-public class TestGzipFilter {
- private static final String TABLE = "TestGzipFilter";
- private static final String CFA = "a";
- private static final String COLUMN_1 = CFA + ":1";
- private static final String COLUMN_2 = CFA + ":2";
- private static final String ROW_1 = "testrow1";
- private static final byte[] VALUE_1 = Bytes.toBytes("testvalue1");
-
- private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
- private static final HBaseRESTTestingUtility REST_TEST_UTIL =
- new HBaseRESTTestingUtility();
- private static Client client;
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- TEST_UTIL.startMiniCluster();
- REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
- client = new Client(new Cluster().add("localhost",
- REST_TEST_UTIL.getServletPort()));
- HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
- if (admin.tableExists(TABLE)) {
- return;
- }
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
- htd.addFamily(new HColumnDescriptor(CFA));
- admin.createTable(htd);
- }
-
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- REST_TEST_UTIL.shutdownServletContainer();
- TEST_UTIL.shutdownMiniCluster();
- }
-
- @Test
- public void testGzipFilter() throws Exception {
- String path = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1;
-
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- GZIPOutputStream os = new GZIPOutputStream(bos);
- os.write(VALUE_1);
- os.close();
- byte[] value_1_gzip = bos.toByteArray();
-
- // input side filter
-
- Header[] headers = new Header[2];
- headers[0] = new Header("Content-Type", Constants.MIMETYPE_BINARY);
- headers[1] = new Header("Content-Encoding", "gzip");
- Response response = client.put(path, headers, value_1_gzip);
- assertEquals(response.getCode(), 200);
-
- HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
- Get get = new Get(Bytes.toBytes(ROW_1));
- get.addColumn(Bytes.toBytes(CFA), Bytes.toBytes("1"));
- Result result = table.get(get);
- byte[] value = result.getValue(Bytes.toBytes(CFA), Bytes.toBytes("1"));
- assertNotNull(value);
- assertTrue(Bytes.equals(value, VALUE_1));
-
- // output side filter
-
- headers[0] = new Header("Accept", Constants.MIMETYPE_BINARY);
- headers[1] = new Header("Accept-Encoding", "gzip");
- response = client.get(path, headers);
- assertEquals(response.getCode(), 200);
- ByteArrayInputStream bis = new ByteArrayInputStream(response.getBody());
- GZIPInputStream is = new GZIPInputStream(bis);
- value = new byte[VALUE_1.length];
- // readFully() loops internally; a bare is.read() may return fewer bytes than requested
- new DataInputStream(is).readFully(value);
- assertTrue(Bytes.equals(value, VALUE_1));
- is.close();
- table.close();
-
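- // also verify scanner create/next status codes while the client advertises gzip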
- testScannerResultCodes();
- }
-
- @Test
- public void testErrorNotGzipped() throws Exception {
- Header[] headers = new Header[2];
- headers[0] = new Header("Accept", Constants.MIMETYPE_BINARY);
- headers[1] = new Header("Accept-Encoding", "gzip");
- Response response = client.get("/" + TABLE + "/" + ROW_1 + "/" + COLUMN_2, headers);
- assertEquals(response.getCode(), 404);
- String contentEncoding = response.getHeader("Content-Encoding");
- assertTrue(contentEncoding == null || !contentEncoding.contains("gzip"));
- response = client.get("/" + TABLE, headers);
- assertEquals(response.getCode(), 405);
- contentEncoding = response.getHeader("Content-Encoding");
- assertTrue(contentEncoding == null || !contentEncoding.contains("gzip"));
- }
-
- void testScannerResultCodes() throws Exception {
- Header[] headers = new Header[3];
- headers[0] = new Header("Content-Type", Constants.MIMETYPE_XML);
- headers[1] = new Header("Accept", Constants.MIMETYPE_JSON);
- headers[2] = new Header("Accept-Encoding", "gzip");
- Response response = client.post("/" + TABLE + "/scanner", headers,
- "<Scanner/>".getBytes());
- assertEquals(response.getCode(), 201);
- String scannerUrl = response.getLocation();
- assertNotNull(scannerUrl);
- response = client.get(scannerUrl);
- assertEquals(response.getCode(), 200);
- response = client.get(scannerUrl);
- assertEquals(response.getCode(), 204);
- }
-
-}
-
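TestGzipFilter above compresses the request body by hand and decompresses the response the same way. A standalone sketch of that round trip, assuming only java.util.zip; it reads in a loop until EOF, since a single read() call may legally return fewer bytes than requested:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

public class GzipRoundTrip {
  public static void main(String[] args) throws Exception {
    byte[] original = "testvalue1".getBytes("UTF-8");

    // Compress, as the test does before PUTting with Content-Encoding: gzip.
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    GZIPOutputStream gz = new GZIPOutputStream(bos);
    gz.write(original);
    gz.close();

    // Decompress fully, looping until EOF.
    GZIPInputStream in = new GZIPInputStream(
        new ByteArrayInputStream(bos.toByteArray()));
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    byte[] buf = new byte[256];
    for (int n; (n = in.read(buf)) != -1; ) {
      out.write(buf, 0, n);
    }
    in.close();

    System.out.println(new String(out.toByteArray(), "UTF-8"));  // testvalue1
  }
}
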
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java
deleted file mode 100644
index 0c999b8..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java
+++ /dev/null
@@ -1,181 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.rest.client.Client;
-import org.apache.hadoop.hbase.rest.client.Cluster;
-import org.apache.hadoop.hbase.rest.client.Response;
-import org.apache.hadoop.hbase.rest.model.CellModel;
-import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
-import org.apache.hadoop.hbase.rest.provider.JacksonProvider;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import javax.ws.rs.core.MediaType;
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Marshaller;
-import javax.xml.bind.Unmarshaller;
-import java.io.IOException;
-
-import static org.junit.Assert.assertEquals;
-
-
-@Category(MediumTests.class)
-public class TestMultiRowResource {
-
- private static final String TABLE = "TestRowResource";
- private static final String CFA = "a";
- private static final String CFB = "b";
- private static final String COLUMN_1 = CFA + ":1";
- private static final String COLUMN_2 = CFB + ":2";
- private static final String ROW_1 = "testrow5";
- private static final String VALUE_1 = "testvalue5";
- private static final String ROW_2 = "testrow6";
- private static final String VALUE_2 = "testvalue6";
-
-
- private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
- private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility();
-
- private static Client client;
- private static JAXBContext context;
- private static Marshaller marshaller;
- private static Unmarshaller unmarshaller;
- private static Configuration conf;
-
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- conf = TEST_UTIL.getConfiguration();
- TEST_UTIL.startMiniCluster();
- REST_TEST_UTIL.startServletContainer(conf);
- context = JAXBContext.newInstance(
- CellModel.class,
- CellSetModel.class,
- RowModel.class);
- marshaller = context.createMarshaller();
- unmarshaller = context.createUnmarshaller();
- client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
- HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
- if (admin.tableExists(TABLE)) {
- return;
- }
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
- htd.addFamily(new HColumnDescriptor(CFA));
- htd.addFamily(new HColumnDescriptor(CFB));
- admin.createTable(htd);
- }
-
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- REST_TEST_UTIL.shutdownServletContainer();
- TEST_UTIL.shutdownMiniCluster();
- }
-
-
- @Test
- public void testMultiCellGetJSON() throws IOException, JAXBException {
- String row_5_url = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1;
- String row_6_url = "/" + TABLE + "/" + ROW_2 + "/" + COLUMN_2;
-
-
- StringBuilder path = new StringBuilder();
- path.append("/");
- path.append(TABLE);
- path.append("/multiget/?row=");
- path.append(ROW_1);
- path.append("&row=");
- path.append(ROW_2);
-
- client.post(row_5_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_1));
- client.post(row_6_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_2));
-
-
- Response response = client.get(path.toString(), Constants.MIMETYPE_JSON);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
-
- client.delete(row_5_url);
- client.delete(row_6_url);
-
- }
-
- @Test
- public void testMultiCellGetXML() throws IOException, JAXBException {
- String row_5_url = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1;
- String row_6_url = "/" + TABLE + "/" + ROW_2 + "/" + COLUMN_2;
-
-
- StringBuilder path = new StringBuilder();
- path.append("/");
- path.append(TABLE);
- path.append("/multiget/?row=");
- path.append(ROW_1);
- path.append("&row=");
- path.append(ROW_2);
-
- client.post(row_5_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_1));
- client.post(row_6_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_2));
-
-
- Response response = client.get(path.toString(), Constants.MIMETYPE_XML);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
-
- client.delete(row_5_url);
- client.delete(row_6_url);
-
- }
-
- @Test
- public void testMultiCellGetJSONNotFound() throws IOException, JAXBException {
- String row_5_url = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1;
-
- StringBuilder path = new StringBuilder();
- path.append("/");
- path.append(TABLE);
- path.append("/multiget/?row=");
- path.append(ROW_1);
- path.append("&row=");
- path.append(ROW_2);
-
- client.post(row_5_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_1));
- Response response = client.get(path.toString(), Constants.MIMETYPE_JSON);
- assertEquals(response.getCode(), 200);
- ObjectMapper mapper = new JacksonProvider().locateMapper(CellSetModel.class,
- MediaType.APPLICATION_JSON_TYPE);
- CellSetModel cellSet = (CellSetModel) mapper.readValue(response.getBody(), CellSetModel.class);
- assertEquals(1, cellSet.getRows().size());
- assertEquals(ROW_1, Bytes.toString(cellSet.getRows().get(0).getKey()));
- assertEquals(VALUE_1, Bytes.toString(cellSet.getRows().get(0).getCells().get(0).getValue()));
- client.delete(row_5_url);
- }
-
-}
-
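The multiget tests above assemble the query string by hand. A small sketch of the same path construction; the URLEncoder step is an addition for row keys containing reserved characters, not something the original tests do:

import java.net.URLEncoder;

public class MultigetPathSketch {
  static String multigetPath(String table, String... rows) throws Exception {
    StringBuilder path = new StringBuilder("/").append(table).append("/multiget/?");
    for (int i = 0; i < rows.length; i++) {
      if (i > 0) {
        path.append('&');
      }
      path.append("row=").append(URLEncoder.encode(rows[i], "UTF-8"));
    }
    return path.toString();
  }

  public static void main(String[] args) throws Exception {
    // Mirrors the tests: /TestRowResource/multiget/?row=testrow5&row=testrow6
    System.out.println(multigetPath("TestRowResource", "testrow5", "testrow6"));
  }
}
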
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestResourceFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestResourceFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestResourceFilter.java
deleted file mode 100644
index 70d425c..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestResourceFilter.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.rest;
-
-import static org.junit.Assert.assertEquals;
-
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.MediumTests;
-import org.apache.hadoop.hbase.rest.client.Client;
-import org.apache.hadoop.hbase.rest.client.Cluster;
-import org.apache.hadoop.hbase.rest.client.Response;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(MediumTests.class)
-public class TestResourceFilter {
-
- private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
- private static final HBaseRESTTestingUtility REST_TEST_UTIL =
- new HBaseRESTTestingUtility();
- private static Client client;
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- TEST_UTIL.getConfiguration().set(Constants.FILTER_CLASSES, DummyFilter.class.getName());
- TEST_UTIL.startMiniCluster();
- REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
- client = new Client(new Cluster().add("localhost",
- REST_TEST_UTIL.getServletPort()));
- }
-
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- REST_TEST_UTIL.shutdownServletContainer();
- TEST_UTIL.shutdownMiniCluster();
- }
-
- @Test
- public void testFilter() throws Exception {
- String path = "/status/cluster";
- Response response = client.get(path);
- assertEquals(404, response.getCode());
- }
-}
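DummyFilter's source is not part of this diff; the test only relies on it turning the request into a 404. A hypothetical servlet filter with the behavior testFilter() expects might look like:

import java.io.IOException;

import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletResponse;

public class RejectEverythingFilter implements Filter {
  public void init(FilterConfig config) throws ServletException {
  }

  public void doFilter(ServletRequest request, ServletResponse response,
      FilterChain chain) throws IOException, ServletException {
    // Short-circuit the chain and answer 404, which is what the
    // assertion in testFilter() checks for.
    ((HttpServletResponse) response).sendError(HttpServletResponse.SC_NOT_FOUND);
  }

  public void destroy() {
  }
}
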
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java
deleted file mode 100644
index 84aa994..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java
+++ /dev/null
@@ -1,354 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.StringWriter;
-import java.util.Iterator;
-import java.util.Random;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Marshaller;
-import javax.xml.bind.Unmarshaller;
-
-import org.apache.commons.httpclient.Header;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.rest.client.Client;
-import org.apache.hadoop.hbase.rest.client.Cluster;
-import org.apache.hadoop.hbase.rest.client.Response;
-import org.apache.hadoop.hbase.rest.model.CellModel;
-import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
-import org.apache.hadoop.hbase.rest.model.ScannerModel;
-import org.apache.hadoop.hbase.util.Bytes;
-
-import static org.junit.Assert.*;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(MediumTests.class)
-public class TestScannerResource {
- private static final String TABLE = "TestScannerResource";
- private static final String NONEXISTENT_TABLE = "ThisTableDoesNotExist";
- private static final String CFA = "a";
- private static final String CFB = "b";
- private static final String COLUMN_1 = CFA + ":1";
- private static final String COLUMN_2 = CFB + ":2";
-
- private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
- private static final HBaseRESTTestingUtility REST_TEST_UTIL =
- new HBaseRESTTestingUtility();
- private static Client client;
- private static JAXBContext context;
- private static Marshaller marshaller;
- private static Unmarshaller unmarshaller;
- private static int expectedRows1;
- private static int expectedRows2;
- private static Configuration conf;
-
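- /**
- * Writes the given column at a random subset of all three-letter row keys;
- * prob is the per-key insert probability.
- * @return the number of rows written
- */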
- static int insertData(Configuration conf, String tableName, String column, double prob)
- throws IOException {
- Random rng = new Random();
- int count = 0;
- HTable table = new HTable(conf, tableName);
- byte[] k = new byte[3];
- byte [][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(column));
- for (byte b1 = 'a'; b1 < 'z'; b1++) {
- for (byte b2 = 'a'; b2 < 'z'; b2++) {
- for (byte b3 = 'a'; b3 < 'z'; b3++) {
- if (rng.nextDouble() < prob) {
- k[0] = b1;
- k[1] = b2;
- k[2] = b3;
- Put put = new Put(k);
- put.setDurability(Durability.SKIP_WAL);
- put.add(famAndQf[0], famAndQf[1], k);
- table.put(put);
- count++;
- }
- }
- }
- }
- table.flushCommits();
- table.close();
- return count;
- }
-
- static int countCellSet(CellSetModel model) {
- int count = 0;
- Iterator<RowModel> rows = model.getRows().iterator();
- while (rows.hasNext()) {
- RowModel row = rows.next();
- Iterator<CellModel> cells = row.getCells().iterator();
- while (cells.hasNext()) {
- cells.next();
- count++;
- }
- }
- return count;
- }
-
- private static int fullTableScan(ScannerModel model) throws IOException {
- model.setBatch(100);
- Response response = client.put("/" + TABLE + "/scanner",
- Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
- assertEquals(response.getCode(), 201);
- String scannerURI = response.getLocation();
- assertNotNull(scannerURI);
- int count = 0;
- while (true) {
- response = client.get(scannerURI, Constants.MIMETYPE_PROTOBUF);
- assertTrue(response.getCode() == 200 || response.getCode() == 204);
- if (response.getCode() == 200) {
- assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
- CellSetModel cellSet = new CellSetModel();
- cellSet.getObjectFromMessage(response.getBody());
- Iterator<RowModel> rows = cellSet.getRows().iterator();
- while (rows.hasNext()) {
- RowModel row = rows.next();
- Iterator<CellModel> cells = row.getCells().iterator();
- while (cells.hasNext()) {
- cells.next();
- count++;
- }
- }
- } else {
- break;
- }
- }
- // delete the scanner
- response = client.delete(scannerURI);
- assertEquals(response.getCode(), 200);
- return count;
- }
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- conf = TEST_UTIL.getConfiguration();
- TEST_UTIL.startMiniCluster();
- REST_TEST_UTIL.startServletContainer(conf);
- client = new Client(new Cluster().add("localhost",
- REST_TEST_UTIL.getServletPort()));
- context = JAXBContext.newInstance(
- CellModel.class,
- CellSetModel.class,
- RowModel.class,
- ScannerModel.class);
- marshaller = context.createMarshaller();
- unmarshaller = context.createUnmarshaller();
- HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
- if (admin.tableExists(TABLE)) {
- return;
- }
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
- htd.addFamily(new HColumnDescriptor(CFA));
- htd.addFamily(new HColumnDescriptor(CFB));
- admin.createTable(htd);
- expectedRows1 = insertData(TEST_UTIL.getConfiguration(), TABLE, COLUMN_1, 1.0);
- expectedRows2 = insertData(TEST_UTIL.getConfiguration(), TABLE, COLUMN_2, 0.5);
- }
-
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- REST_TEST_UTIL.shutdownServletContainer();
- TEST_UTIL.shutdownMiniCluster();
- }
-
- @Test
- public void testSimpleScannerXML() throws IOException, JAXBException {
- final int BATCH_SIZE = 5;
- // new scanner
- ScannerModel model = new ScannerModel();
- model.setBatch(BATCH_SIZE);
- model.addColumn(Bytes.toBytes(COLUMN_1));
- StringWriter writer = new StringWriter();
- marshaller.marshal(model, writer);
- byte[] body = Bytes.toBytes(writer.toString());
-
- // test put operation is forbidden in read-only mode
- conf.set("hbase.rest.readonly", "true");
- Response response = client.put("/" + TABLE + "/scanner",
- Constants.MIMETYPE_XML, body);
- assertEquals(response.getCode(), 403);
- String scannerURI = response.getLocation();
- assertNull(scannerURI);
-
- // recall previous put operation with read-only off
- conf.set("hbase.rest.readonly", "false");
- response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML,
- body);
- assertEquals(response.getCode(), 201);
- scannerURI = response.getLocation();
- assertNotNull(scannerURI);
-
- // get a cell set
- response = client.get(scannerURI, Constants.MIMETYPE_XML);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
- CellSetModel cellSet = (CellSetModel)
- unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
- // confirm batch size conformance
- assertEquals(countCellSet(cellSet), BATCH_SIZE);
-
- // test delete scanner operation is forbidden in read-only mode
- conf.set("hbase.rest.readonly", "true");
- response = client.delete(scannerURI);
- assertEquals(response.getCode(), 403);
-
- // recall previous delete scanner operation with read-only off
- conf.set("hbase.rest.readonly", "false");
- response = client.delete(scannerURI);
- assertEquals(response.getCode(), 200);
- }
-
- @Test
- public void testSimpleScannerPB() throws IOException {
- final int BATCH_SIZE = 10;
- // new scanner
- ScannerModel model = new ScannerModel();
- model.setBatch(BATCH_SIZE);
- model.addColumn(Bytes.toBytes(COLUMN_1));
-
- // test put operation is forbidden in read-only mode
- conf.set("hbase.rest.readonly", "true");
- Response response = client.put("/" + TABLE + "/scanner",
- Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
- assertEquals(response.getCode(), 403);
- String scannerURI = response.getLocation();
- assertNull(scannerURI);
-
- // recall previous put operation with read-only off
- conf.set("hbase.rest.readonly", "false");
- response = client.put("/" + TABLE + "/scanner",
- Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
- assertEquals(response.getCode(), 201);
- scannerURI = response.getLocation();
- assertNotNull(scannerURI);
-
- // get a cell set
- response = client.get(scannerURI, Constants.MIMETYPE_PROTOBUF);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
- CellSetModel cellSet = new CellSetModel();
- cellSet.getObjectFromMessage(response.getBody());
- // confirm batch size conformance
- assertEquals(countCellSet(cellSet), BATCH_SIZE);
-
- // test delete scanner operation is forbidden in read-only mode
- conf.set("hbase.rest.readonly", "true");
- response = client.delete(scannerURI);
- assertEquals(response.getCode(), 403);
-
- // recall previous delete scanner operation with read-only off
- conf.set("hbase.rest.readonly", "false");
- response = client.delete(scannerURI);
- assertEquals(response.getCode(), 200);
- }
-
- @Test
- public void testSimpleScannerBinary() throws IOException {
- // new scanner
- ScannerModel model = new ScannerModel();
- model.setBatch(1);
- model.addColumn(Bytes.toBytes(COLUMN_1));
-
- // test put operation is forbidden in read-only mode
- conf.set("hbase.rest.readonly", "true");
- Response response = client.put("/" + TABLE + "/scanner",
- Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
- assertEquals(response.getCode(), 403);
- String scannerURI = response.getLocation();
- assertNull(scannerURI);
-
- // recall previous put operation with read-only off
- conf.set("hbase.rest.readonly", "false");
- response = client.put("/" + TABLE + "/scanner",
- Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
- assertEquals(response.getCode(), 201);
- scannerURI = response.getLocation();
- assertNotNull(scannerURI);
-
- // get a cell
- response = client.get(scannerURI, Constants.MIMETYPE_BINARY);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_BINARY, response.getHeader("content-type"));
- // verify that data was returned
- assertTrue(response.getBody().length > 0);
- // verify that the expected X-headers are present
- boolean foundRowHeader = false, foundColumnHeader = false,
- foundTimestampHeader = false;
- for (Header header: response.getHeaders()) {
- if (header.getName().equals("X-Row")) {
- foundRowHeader = true;
- } else if (header.getName().equals("X-Column")) {
- foundColumnHeader = true;
- } else if (header.getName().equals("X-Timestamp")) {
- foundTimestampHeader = true;
- }
- }
- assertTrue(foundRowHeader);
- assertTrue(foundColumnHeader);
- assertTrue(foundTimestampHeader);
-
- // test delete scanner operation is forbidden in read-only mode
- conf.set("hbase.rest.readonly", "true");
- response = client.delete(scannerURI);
- assertEquals(response.getCode(), 403);
-
- // recall previous delete scanner operation with read-only off
- conf.set("hbase.rest.readonly", "false");
- response = client.delete(scannerURI);
- assertEquals(response.getCode(), 200);
- }
-
- @Test
- public void testFullTableScan() throws IOException {
- ScannerModel model = new ScannerModel();
- model.addColumn(Bytes.toBytes(COLUMN_1));
- assertEquals(fullTableScan(model), expectedRows1);
-
- model = new ScannerModel();
- model.addColumn(Bytes.toBytes(COLUMN_2));
- assertEquals(fullTableScan(model), expectedRows2);
- }
-
- @Test
- public void testTableDoesNotExist() throws IOException, JAXBException {
- ScannerModel model = new ScannerModel();
- StringWriter writer = new StringWriter();
- marshaller.marshal(model, writer);
- byte[] body = Bytes.toBytes(writer.toString());
- Response response = client.put("/" + NONEXISTENT_TABLE +
- "/scanner", Constants.MIMETYPE_XML, body);
- assertEquals(response.getCode(), 404);
- }
-
-}
-
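Taken together, the scanner tests above exercise a three-step REST protocol: create, drain, release. A condensed sketch of that lifecycle using only the client calls that appear in this file; the table name and batch size are illustrative:

import java.io.IOException;

import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Response;
import org.apache.hadoop.hbase.rest.model.CellSetModel;
import org.apache.hadoop.hbase.rest.model.RowModel;
import org.apache.hadoop.hbase.rest.model.ScannerModel;

public class ScannerLifecycle {
  /** Creates, drains, and deletes a scanner; returns the number of cells seen. */
  static int scanAll(Client client, String table) throws IOException {
    ScannerModel model = new ScannerModel();
    model.setBatch(100);

    // 1. Create: PUT /<table>/scanner answers 201 Created plus a Location
    // header naming the new scanner resource (403 when hbase.rest.readonly is set).
    Response response = client.put("/" + table + "/scanner",
        Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
    String scannerURI = response.getLocation();

    // 2. Drain: each GET returns 200 with a batch of cells until the
    // scanner is exhausted, then 204 No Content.
    int cells = 0;
    while (true) {
      response = client.get(scannerURI, Constants.MIMETYPE_PROTOBUF);
      if (response.getCode() != 200) {
        break;
      }
      CellSetModel cellSet = new CellSetModel();
      cellSet.getObjectFromMessage(response.getBody());
      for (RowModel row : cellSet.getRows()) {
        cells += row.getCells().size();
      }
    }

    // 3. Release: DELETE the scanner URI; the tests expect 200 here.
    client.delete(scannerURI);
    return cells;
  }
}
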
[30/38] HBASE-12197 Move rest to its own module
Posted by ec...@apache.org.
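The generated Scanner class below tracks which optional fields were explicitly set with a single int bitfield, so hasBatch() can distinguish "unset" from "set to the default value". A stripped-down sketch of that pattern; the names here are illustrative, not taken from the generated code:

public class PresenceBits {
  private int bitField0_;
  private int batch_;

  public void setBatch(int value) {
    bitField0_ |= 0x00000001;  // mark bit 0: batch is now present
    batch_ = value;
  }

  public boolean hasBatch() {
    return (bitField0_ & 0x00000001) == 0x00000001;
  }

  public static void main(String[] args) {
    PresenceBits m = new PresenceBits();
    System.out.println(m.hasBatch());  // false: unset, distinct from batch == 0
    m.setBatch(0);
    System.out.println(m.hasBatch());  // true: set, even though the value is 0
  }
}
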
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ScannerMessage.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ScannerMessage.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ScannerMessage.java
new file mode 100644
index 0000000..ef9d1b9
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ScannerMessage.java
@@ -0,0 +1,1578 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: ScannerMessage.proto
+
+package org.apache.hadoop.hbase.rest.protobuf.generated;
+
+public final class ScannerMessage {
+ private ScannerMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface ScannerOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional bytes startRow = 1;
+ /**
+ * <code>optional bytes startRow = 1;</code>
+ */
+ boolean hasStartRow();
+ /**
+ * <code>optional bytes startRow = 1;</code>
+ */
+ com.google.protobuf.ByteString getStartRow();
+
+ // optional bytes endRow = 2;
+ /**
+ * <code>optional bytes endRow = 2;</code>
+ */
+ boolean hasEndRow();
+ /**
+ * <code>optional bytes endRow = 2;</code>
+ */
+ com.google.protobuf.ByteString getEndRow();
+
+ // repeated bytes columns = 3;
+ /**
+ * <code>repeated bytes columns = 3;</code>
+ */
+ java.util.List<com.google.protobuf.ByteString> getColumnsList();
+ /**
+ * <code>repeated bytes columns = 3;</code>
+ */
+ int getColumnsCount();
+ /**
+ * <code>repeated bytes columns = 3;</code>
+ */
+ com.google.protobuf.ByteString getColumns(int index);
+
+ // optional int32 batch = 4;
+ /**
+ * <code>optional int32 batch = 4;</code>
+ */
+ boolean hasBatch();
+ /**
+ * <code>optional int32 batch = 4;</code>
+ */
+ int getBatch();
+
+ // optional int64 startTime = 5;
+ /**
+ * <code>optional int64 startTime = 5;</code>
+ */
+ boolean hasStartTime();
+ /**
+ * <code>optional int64 startTime = 5;</code>
+ */
+ long getStartTime();
+
+ // optional int64 endTime = 6;
+ /**
+ * <code>optional int64 endTime = 6;</code>
+ */
+ boolean hasEndTime();
+ /**
+ * <code>optional int64 endTime = 6;</code>
+ */
+ long getEndTime();
+
+ // optional int32 maxVersions = 7;
+ /**
+ * <code>optional int32 maxVersions = 7;</code>
+ */
+ boolean hasMaxVersions();
+ /**
+ * <code>optional int32 maxVersions = 7;</code>
+ */
+ int getMaxVersions();
+
+ // optional string filter = 8;
+ /**
+ * <code>optional string filter = 8;</code>
+ */
+ boolean hasFilter();
+ /**
+ * <code>optional string filter = 8;</code>
+ */
+ java.lang.String getFilter();
+ /**
+ * <code>optional string filter = 8;</code>
+ */
+ com.google.protobuf.ByteString
+ getFilterBytes();
+
+ // optional int32 caching = 9;
+ /**
+ * <code>optional int32 caching = 9;</code>
+ *
+ * <pre>
+ * specifies REST scanner caching
+ * </pre>
+ */
+ boolean hasCaching();
+ /**
+ * <code>optional int32 caching = 9;</code>
+ *
+ * <pre>
+ * specifies REST scanner caching
+ * </pre>
+ */
+ int getCaching();
+
+ // repeated string labels = 10;
+ /**
+ * <code>repeated string labels = 10;</code>
+ */
+ java.util.List<java.lang.String>
+ getLabelsList();
+ /**
+ * <code>repeated string labels = 10;</code>
+ */
+ int getLabelsCount();
+ /**
+ * <code>repeated string labels = 10;</code>
+ */
+ java.lang.String getLabels(int index);
+ /**
+ * <code>repeated string labels = 10;</code>
+ */
+ com.google.protobuf.ByteString
+ getLabelsBytes(int index);
+
+ // optional bool cacheBlocks = 11;
+ /**
+ * <code>optional bool cacheBlocks = 11;</code>
+ *
+ * <pre>
+ * server side block caching hint
+ * </pre>
+ */
+ boolean hasCacheBlocks();
+ /**
+ * <code>optional bool cacheBlocks = 11;</code>
+ *
+ * <pre>
+ * server side block caching hint
+ * </pre>
+ */
+ boolean getCacheBlocks();
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.Scanner}
+ */
+ public static final class Scanner extends
+ com.google.protobuf.GeneratedMessage
+ implements ScannerOrBuilder {
+ // Use Scanner.newBuilder() to construct.
+ private Scanner(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Scanner(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Scanner defaultInstance;
+ public static Scanner getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Scanner getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Scanner(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ startRow_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ endRow_ = input.readBytes();
+ break;
+ }
+ case 26: {
+ if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ columns_ = new java.util.ArrayList<com.google.protobuf.ByteString>();
+ mutable_bitField0_ |= 0x00000004;
+ }
+ columns_.add(input.readBytes());
+ break;
+ }
+ case 32: {
+ bitField0_ |= 0x00000004;
+ batch_ = input.readInt32();
+ break;
+ }
+ case 40: {
+ bitField0_ |= 0x00000008;
+ startTime_ = input.readInt64();
+ break;
+ }
+ case 48: {
+ bitField0_ |= 0x00000010;
+ endTime_ = input.readInt64();
+ break;
+ }
+ case 56: {
+ bitField0_ |= 0x00000020;
+ maxVersions_ = input.readInt32();
+ break;
+ }
+ case 66: {
+ bitField0_ |= 0x00000040;
+ filter_ = input.readBytes();
+ break;
+ }
+ case 72: {
+ bitField0_ |= 0x00000080;
+ caching_ = input.readInt32();
+ break;
+ }
+ case 82: {
+ if (!((mutable_bitField0_ & 0x00000200) == 0x00000200)) {
+ labels_ = new com.google.protobuf.LazyStringArrayList();
+ mutable_bitField0_ |= 0x00000200;
+ }
+ labels_.add(input.readBytes());
+ break;
+ }
+ case 88: {
+ bitField0_ |= 0x00000100;
+ cacheBlocks_ = input.readBool();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ columns_ = java.util.Collections.unmodifiableList(columns_);
+ }
+ if (((mutable_bitField0_ & 0x00000200) == 0x00000200)) {
+ labels_ = new com.google.protobuf.UnmodifiableLazyStringList(labels_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.class, org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Scanner> PARSER =
+ new com.google.protobuf.AbstractParser<Scanner>() {
+ public Scanner parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Scanner(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Scanner> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // optional bytes startRow = 1;
+ public static final int STARTROW_FIELD_NUMBER = 1;
+ private com.google.protobuf.ByteString startRow_;
+ /**
+ * <code>optional bytes startRow = 1;</code>
+ */
+ public boolean hasStartRow() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional bytes startRow = 1;</code>
+ */
+ public com.google.protobuf.ByteString getStartRow() {
+ return startRow_;
+ }
+
+ // optional bytes endRow = 2;
+ public static final int ENDROW_FIELD_NUMBER = 2;
+ private com.google.protobuf.ByteString endRow_;
+ /**
+ * <code>optional bytes endRow = 2;</code>
+ */
+ public boolean hasEndRow() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional bytes endRow = 2;</code>
+ */
+ public com.google.protobuf.ByteString getEndRow() {
+ return endRow_;
+ }
+
+ // repeated bytes columns = 3;
+ public static final int COLUMNS_FIELD_NUMBER = 3;
+ private java.util.List<com.google.protobuf.ByteString> columns_;
+ /**
+ * <code>repeated bytes columns = 3;</code>
+ */
+ public java.util.List<com.google.protobuf.ByteString>
+ getColumnsList() {
+ return columns_;
+ }
+ /**
+ * <code>repeated bytes columns = 3;</code>
+ */
+ public int getColumnsCount() {
+ return columns_.size();
+ }
+ /**
+ * <code>repeated bytes columns = 3;</code>
+ */
+ public com.google.protobuf.ByteString getColumns(int index) {
+ return columns_.get(index);
+ }
+
+ // optional int32 batch = 4;
+ public static final int BATCH_FIELD_NUMBER = 4;
+ private int batch_;
+ /**
+ * <code>optional int32 batch = 4;</code>
+ */
+ public boolean hasBatch() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional int32 batch = 4;</code>
+ */
+ public int getBatch() {
+ return batch_;
+ }
+
+ // optional int64 startTime = 5;
+ public static final int STARTTIME_FIELD_NUMBER = 5;
+ private long startTime_;
+ /**
+ * <code>optional int64 startTime = 5;</code>
+ */
+ public boolean hasStartTime() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional int64 startTime = 5;</code>
+ */
+ public long getStartTime() {
+ return startTime_;
+ }
+
+ // optional int64 endTime = 6;
+ public static final int ENDTIME_FIELD_NUMBER = 6;
+ private long endTime_;
+ /**
+ * <code>optional int64 endTime = 6;</code>
+ */
+ public boolean hasEndTime() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * <code>optional int64 endTime = 6;</code>
+ */
+ public long getEndTime() {
+ return endTime_;
+ }
+
+ // optional int32 maxVersions = 7;
+ public static final int MAXVERSIONS_FIELD_NUMBER = 7;
+ private int maxVersions_;
+ /**
+ * <code>optional int32 maxVersions = 7;</code>
+ */
+ public boolean hasMaxVersions() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * <code>optional int32 maxVersions = 7;</code>
+ */
+ public int getMaxVersions() {
+ return maxVersions_;
+ }
+
+ // optional string filter = 8;
+ public static final int FILTER_FIELD_NUMBER = 8;
+ private java.lang.Object filter_;
+ /**
+ * <code>optional string filter = 8;</code>
+ */
+ public boolean hasFilter() {
+ return ((bitField0_ & 0x00000040) == 0x00000040);
+ }
+ /**
+ * <code>optional string filter = 8;</code>
+ */
+ public java.lang.String getFilter() {
+ java.lang.Object ref = filter_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ filter_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>optional string filter = 8;</code>
+ */
+ public com.google.protobuf.ByteString
+ getFilterBytes() {
+ java.lang.Object ref = filter_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ filter_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional int32 caching = 9;
+ public static final int CACHING_FIELD_NUMBER = 9;
+ private int caching_;
+ /**
+ * <code>optional int32 caching = 9;</code>
+ *
+ * <pre>
+ * specifies REST scanner caching
+ * </pre>
+ */
+ public boolean hasCaching() {
+ return ((bitField0_ & 0x00000080) == 0x00000080);
+ }
+ /**
+ * <code>optional int32 caching = 9;</code>
+ *
+ * <pre>
+ * specifies REST scanner caching
+ * </pre>
+ */
+ public int getCaching() {
+ return caching_;
+ }
+
+ // repeated string labels = 10;
+ public static final int LABELS_FIELD_NUMBER = 10;
+ private com.google.protobuf.LazyStringList labels_;
+ /**
+ * <code>repeated string labels = 10;</code>
+ */
+ public java.util.List<java.lang.String>
+ getLabelsList() {
+ return labels_;
+ }
+ /**
+ * <code>repeated string labels = 10;</code>
+ */
+ public int getLabelsCount() {
+ return labels_.size();
+ }
+ /**
+ * <code>repeated string labels = 10;</code>
+ */
+ public java.lang.String getLabels(int index) {
+ return labels_.get(index);
+ }
+ /**
+ * <code>repeated string labels = 10;</code>
+ */
+ public com.google.protobuf.ByteString
+ getLabelsBytes(int index) {
+ return labels_.getByteString(index);
+ }
+
+ // optional bool cacheBlocks = 11;
+ public static final int CACHEBLOCKS_FIELD_NUMBER = 11;
+ private boolean cacheBlocks_;
+ /**
+ * <code>optional bool cacheBlocks = 11;</code>
+ *
+ * <pre>
+ * server side block caching hint
+ * </pre>
+ */
+ public boolean hasCacheBlocks() {
+ return ((bitField0_ & 0x00000100) == 0x00000100);
+ }
+ /**
+ * <code>optional bool cacheBlocks = 11;</code>
+ *
+ * <pre>
+ * server side block caching hint
+ * </pre>
+ */
+ public boolean getCacheBlocks() {
+ return cacheBlocks_;
+ }
+
+ private void initFields() {
+ startRow_ = com.google.protobuf.ByteString.EMPTY;
+ endRow_ = com.google.protobuf.ByteString.EMPTY;
+ columns_ = java.util.Collections.emptyList();
+ batch_ = 0;
+ startTime_ = 0L;
+ endTime_ = 0L;
+ maxVersions_ = 0;
+ filter_ = "";
+ caching_ = 0;
+ labels_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ cacheBlocks_ = false;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, startRow_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, endRow_);
+ }
+ for (int i = 0; i < columns_.size(); i++) {
+ output.writeBytes(3, columns_.get(i));
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeInt32(4, batch_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeInt64(5, startTime_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ output.writeInt64(6, endTime_);
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ output.writeInt32(7, maxVersions_);
+ }
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ output.writeBytes(8, getFilterBytes());
+ }
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
+ output.writeInt32(9, caching_);
+ }
+ for (int i = 0; i < labels_.size(); i++) {
+ output.writeBytes(10, labels_.getByteString(i));
+ }
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ output.writeBool(11, cacheBlocks_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, startRow_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, endRow_);
+ }
+ {
+ int dataSize = 0;
+ for (int i = 0; i < columns_.size(); i++) {
+ dataSize += com.google.protobuf.CodedOutputStream
+ .computeBytesSizeNoTag(columns_.get(i));
+ }
+ size += dataSize;
+ size += 1 * getColumnsList().size();
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(4, batch_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(5, startTime_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(6, endTime_);
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(7, maxVersions_);
+ }
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(8, getFilterBytes());
+ }
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(9, caching_);
+ }
+ {
+ int dataSize = 0;
+ for (int i = 0; i < labels_.size(); i++) {
+ dataSize += com.google.protobuf.CodedOutputStream
+ .computeBytesSizeNoTag(labels_.getByteString(i));
+ }
+ size += dataSize;
+ size += 1 * getLabelsList().size();
+ }
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(11, cacheBlocks_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.Scanner}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.ScannerOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.class, org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ startRow_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ endRow_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ columns_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ batch_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ startTime_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000010);
+ endTime_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000020);
+ maxVersions_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000040);
+ filter_ = "";
+ bitField0_ = (bitField0_ & ~0x00000080);
+ caching_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000100);
+ labels_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000200);
+ cacheBlocks_ = false;
+ bitField0_ = (bitField0_ & ~0x00000400);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner build() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner buildPartial() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner result = new org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.startRow_ = startRow_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.endRow_ = endRow_;
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ columns_ = java.util.Collections.unmodifiableList(columns_);
+ bitField0_ = (bitField0_ & ~0x00000004);
+ }
+ result.columns_ = columns_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.batch_ = batch_;
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.startTime_ = startTime_;
+ if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+ to_bitField0_ |= 0x00000010;
+ }
+ result.endTime_ = endTime_;
+ if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
+ to_bitField0_ |= 0x00000020;
+ }
+ result.maxVersions_ = maxVersions_;
+ if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
+ to_bitField0_ |= 0x00000040;
+ }
+ result.filter_ = filter_;
+ if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
+ to_bitField0_ |= 0x00000080;
+ }
+ result.caching_ = caching_;
+ if (((bitField0_ & 0x00000200) == 0x00000200)) {
+ labels_ = new com.google.protobuf.UnmodifiableLazyStringList(
+ labels_);
+ bitField0_ = (bitField0_ & ~0x00000200);
+ }
+ result.labels_ = labels_;
+ if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
+ to_bitField0_ |= 0x00000100;
+ }
+ result.cacheBlocks_ = cacheBlocks_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner) {
+ return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner other) {
+ if (other == org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.getDefaultInstance()) return this;
+ if (other.hasStartRow()) {
+ setStartRow(other.getStartRow());
+ }
+ if (other.hasEndRow()) {
+ setEndRow(other.getEndRow());
+ }
+ if (!other.columns_.isEmpty()) {
+ if (columns_.isEmpty()) {
+ columns_ = other.columns_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ ensureColumnsIsMutable();
+ columns_.addAll(other.columns_);
+ }
+ onChanged();
+ }
+ if (other.hasBatch()) {
+ setBatch(other.getBatch());
+ }
+ if (other.hasStartTime()) {
+ setStartTime(other.getStartTime());
+ }
+ if (other.hasEndTime()) {
+ setEndTime(other.getEndTime());
+ }
+ if (other.hasMaxVersions()) {
+ setMaxVersions(other.getMaxVersions());
+ }
+ if (other.hasFilter()) {
+ bitField0_ |= 0x00000080;
+ filter_ = other.filter_;
+ onChanged();
+ }
+ if (other.hasCaching()) {
+ setCaching(other.getCaching());
+ }
+ if (!other.labels_.isEmpty()) {
+ if (labels_.isEmpty()) {
+ labels_ = other.labels_;
+ bitField0_ = (bitField0_ & ~0x00000200);
+ } else {
+ ensureLabelsIsMutable();
+ labels_.addAll(other.labels_);
+ }
+ onChanged();
+ }
+ if (other.hasCacheBlocks()) {
+ setCacheBlocks(other.getCacheBlocks());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // optional bytes startRow = 1;
+ private com.google.protobuf.ByteString startRow_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * <code>optional bytes startRow = 1;</code>
+ */
+ public boolean hasStartRow() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional bytes startRow = 1;</code>
+ */
+ public com.google.protobuf.ByteString getStartRow() {
+ return startRow_;
+ }
+ /**
+ * <code>optional bytes startRow = 1;</code>
+ */
+ public Builder setStartRow(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ startRow_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional bytes startRow = 1;</code>
+ */
+ public Builder clearStartRow() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ startRow_ = getDefaultInstance().getStartRow();
+ onChanged();
+ return this;
+ }
+
+ // optional bytes endRow = 2;
+ private com.google.protobuf.ByteString endRow_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * <code>optional bytes endRow = 2;</code>
+ */
+ public boolean hasEndRow() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional bytes endRow = 2;</code>
+ */
+ public com.google.protobuf.ByteString getEndRow() {
+ return endRow_;
+ }
+ /**
+ * <code>optional bytes endRow = 2;</code>
+ */
+ public Builder setEndRow(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ endRow_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional bytes endRow = 2;</code>
+ */
+ public Builder clearEndRow() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ endRow_ = getDefaultInstance().getEndRow();
+ onChanged();
+ return this;
+ }
+
+ // repeated bytes columns = 3;
+ private java.util.List<com.google.protobuf.ByteString> columns_ = java.util.Collections.emptyList();
+ private void ensureColumnsIsMutable() {
+ if (!((bitField0_ & 0x00000004) == 0x00000004)) {
+ columns_ = new java.util.ArrayList<com.google.protobuf.ByteString>(columns_);
+ bitField0_ |= 0x00000004;
+ }
+ }
+ /**
+ * <code>repeated bytes columns = 3;</code>
+ */
+ public java.util.List<com.google.protobuf.ByteString>
+ getColumnsList() {
+ return java.util.Collections.unmodifiableList(columns_);
+ }
+ /**
+ * <code>repeated bytes columns = 3;</code>
+ */
+ public int getColumnsCount() {
+ return columns_.size();
+ }
+ /**
+ * <code>repeated bytes columns = 3;</code>
+ */
+ public com.google.protobuf.ByteString getColumns(int index) {
+ return columns_.get(index);
+ }
+ /**
+ * <code>repeated bytes columns = 3;</code>
+ */
+ public Builder setColumns(
+ int index, com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureColumnsIsMutable();
+ columns_.set(index, value);
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>repeated bytes columns = 3;</code>
+ */
+ public Builder addColumns(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureColumnsIsMutable();
+ columns_.add(value);
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>repeated bytes columns = 3;</code>
+ */
+ public Builder addAllColumns(
+ java.lang.Iterable<? extends com.google.protobuf.ByteString> values) {
+ ensureColumnsIsMutable();
+ super.addAll(values, columns_);
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>repeated bytes columns = 3;</code>
+ */
+ public Builder clearColumns() {
+ columns_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ onChanged();
+ return this;
+ }
+
+ // optional int32 batch = 4;
+ private int batch_ ;
+ /**
+ * <code>optional int32 batch = 4;</code>
+ */
+ public boolean hasBatch() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional int32 batch = 4;</code>
+ */
+ public int getBatch() {
+ return batch_;
+ }
+ /**
+ * <code>optional int32 batch = 4;</code>
+ */
+ public Builder setBatch(int value) {
+ bitField0_ |= 0x00000008;
+ batch_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int32 batch = 4;</code>
+ */
+ public Builder clearBatch() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ batch_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // optional int64 startTime = 5;
+ private long startTime_ ;
+ /**
+ * <code>optional int64 startTime = 5;</code>
+ */
+ public boolean hasStartTime() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * <code>optional int64 startTime = 5;</code>
+ */
+ public long getStartTime() {
+ return startTime_;
+ }
+ /**
+ * <code>optional int64 startTime = 5;</code>
+ */
+ public Builder setStartTime(long value) {
+ bitField0_ |= 0x00000010;
+ startTime_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int64 startTime = 5;</code>
+ */
+ public Builder clearStartTime() {
+ bitField0_ = (bitField0_ & ~0x00000010);
+ startTime_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional int64 endTime = 6;
+ private long endTime_ ;
+ /**
+ * <code>optional int64 endTime = 6;</code>
+ */
+ public boolean hasEndTime() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * <code>optional int64 endTime = 6;</code>
+ */
+ public long getEndTime() {
+ return endTime_;
+ }
+ /**
+ * <code>optional int64 endTime = 6;</code>
+ */
+ public Builder setEndTime(long value) {
+ bitField0_ |= 0x00000020;
+ endTime_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int64 endTime = 6;</code>
+ */
+ public Builder clearEndTime() {
+ bitField0_ = (bitField0_ & ~0x00000020);
+ endTime_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional int32 maxVersions = 7;
+ private int maxVersions_ ;
+ /**
+ * <code>optional int32 maxVersions = 7;</code>
+ */
+ public boolean hasMaxVersions() {
+ return ((bitField0_ & 0x00000040) == 0x00000040);
+ }
+ /**
+ * <code>optional int32 maxVersions = 7;</code>
+ */
+ public int getMaxVersions() {
+ return maxVersions_;
+ }
+ /**
+ * <code>optional int32 maxVersions = 7;</code>
+ */
+ public Builder setMaxVersions(int value) {
+ bitField0_ |= 0x00000040;
+ maxVersions_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int32 maxVersions = 7;</code>
+ */
+ public Builder clearMaxVersions() {
+ bitField0_ = (bitField0_ & ~0x00000040);
+ maxVersions_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // optional string filter = 8;
+ private java.lang.Object filter_ = "";
+ /**
+ * <code>optional string filter = 8;</code>
+ */
+ public boolean hasFilter() {
+ return ((bitField0_ & 0x00000080) == 0x00000080);
+ }
+ /**
+ * <code>optional string filter = 8;</code>
+ */
+ public java.lang.String getFilter() {
+ java.lang.Object ref = filter_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ filter_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>optional string filter = 8;</code>
+ */
+ public com.google.protobuf.ByteString
+ getFilterBytes() {
+ java.lang.Object ref = filter_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ filter_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>optional string filter = 8;</code>
+ */
+ public Builder setFilter(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000080;
+ filter_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string filter = 8;</code>
+ */
+ public Builder clearFilter() {
+ bitField0_ = (bitField0_ & ~0x00000080);
+ filter_ = getDefaultInstance().getFilter();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string filter = 8;</code>
+ */
+ public Builder setFilterBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000080;
+ filter_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional int32 caching = 9;
+ private int caching_ ;
+ /**
+ * <code>optional int32 caching = 9;</code>
+ *
+ * <pre>
+ * specifies REST scanner caching
+ * </pre>
+ */
+ public boolean hasCaching() {
+ return ((bitField0_ & 0x00000100) == 0x00000100);
+ }
+ /**
+ * <code>optional int32 caching = 9;</code>
+ *
+ * <pre>
+ * specifies REST scanner caching
+ * </pre>
+ */
+ public int getCaching() {
+ return caching_;
+ }
+ /**
+ * <code>optional int32 caching = 9;</code>
+ *
+ * <pre>
+ * specifies REST scanner caching
+ * </pre>
+ */
+ public Builder setCaching(int value) {
+ bitField0_ |= 0x00000100;
+ caching_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int32 caching = 9;</code>
+ *
+ * <pre>
+ * specifies REST scanner caching
+ * </pre>
+ */
+ public Builder clearCaching() {
+ bitField0_ = (bitField0_ & ~0x00000100);
+ caching_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // repeated string labels = 10;
+ private com.google.protobuf.LazyStringList labels_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ private void ensureLabelsIsMutable() {
+ if (!((bitField0_ & 0x00000200) == 0x00000200)) {
+ labels_ = new com.google.protobuf.LazyStringArrayList(labels_);
+ bitField0_ |= 0x00000200;
+ }
+ }
+ /**
+ * <code>repeated string labels = 10;</code>
+ */
+ public java.util.List<java.lang.String>
+ getLabelsList() {
+ return java.util.Collections.unmodifiableList(labels_);
+ }
+ /**
+ * <code>repeated string labels = 10;</code>
+ */
+ public int getLabelsCount() {
+ return labels_.size();
+ }
+ /**
+ * <code>repeated string labels = 10;</code>
+ */
+ public java.lang.String getLabels(int index) {
+ return labels_.get(index);
+ }
+ /**
+ * <code>repeated string labels = 10;</code>
+ */
+ public com.google.protobuf.ByteString
+ getLabelsBytes(int index) {
+ return labels_.getByteString(index);
+ }
+ /**
+ * <code>repeated string labels = 10;</code>
+ */
+ public Builder setLabels(
+ int index, java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureLabelsIsMutable();
+ labels_.set(index, value);
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>repeated string labels = 10;</code>
+ */
+ public Builder addLabels(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureLabelsIsMutable();
+ labels_.add(value);
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>repeated string labels = 10;</code>
+ */
+ public Builder addAllLabels(
+ java.lang.Iterable<java.lang.String> values) {
+ ensureLabelsIsMutable();
+ super.addAll(values, labels_);
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>repeated string labels = 10;</code>
+ */
+ public Builder clearLabels() {
+ labels_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000200);
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>repeated string labels = 10;</code>
+ */
+ public Builder addLabelsBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureLabelsIsMutable();
+ labels_.add(value);
+ onChanged();
+ return this;
+ }
+
+ // optional bool cacheBlocks = 11;
+ private boolean cacheBlocks_ ;
+ /**
+ * <code>optional bool cacheBlocks = 11;</code>
+ *
+ * <pre>
+ * server side block caching hint
+ * </pre>
+ */
+ public boolean hasCacheBlocks() {
+ return ((bitField0_ & 0x00000400) == 0x00000400);
+ }
+ /**
+ * <code>optional bool cacheBlocks = 11;</code>
+ *
+ * <pre>
+ * server side block caching hint
+ * </pre>
+ */
+ public boolean getCacheBlocks() {
+ return cacheBlocks_;
+ }
+ /**
+ * <code>optional bool cacheBlocks = 11;</code>
+ *
+ * <pre>
+ * server side block caching hint
+ * </pre>
+ */
+ public Builder setCacheBlocks(boolean value) {
+ bitField0_ |= 0x00000400;
+ cacheBlocks_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional bool cacheBlocks = 11;</code>
+ *
+ * <pre>
+ * server side block caching hint
+ * </pre>
+ */
+ public Builder clearCacheBlocks() {
+ bitField0_ = (bitField0_ & ~0x00000400);
+ cacheBlocks_ = false;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.Scanner)
+ }
+
+ static {
+ defaultInstance = new Scanner(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.Scanner)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\024ScannerMessage.proto\022/org.apache.hadoo" +
+ "p.hbase.rest.protobuf.generated\"\312\001\n\007Scan" +
+ "ner\022\020\n\010startRow\030\001 \001(\014\022\016\n\006endRow\030\002 \001(\014\022\017\n" +
+ "\007columns\030\003 \003(\014\022\r\n\005batch\030\004 \001(\005\022\021\n\tstartTi" +
+ "me\030\005 \001(\003\022\017\n\007endTime\030\006 \001(\003\022\023\n\013maxVersions" +
+ "\030\007 \001(\005\022\016\n\006filter\030\010 \001(\t\022\017\n\007caching\030\t \001(\005\022" +
+ "\016\n\006labels\030\n \003(\t\022\023\n\013cacheBlocks\030\013 \001(\010"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_descriptor,
+ new java.lang.String[] { "StartRow", "EndRow", "Columns", "Batch", "StartTime", "EndTime", "MaxVersions", "Filter", "Caching", "Labels", "CacheBlocks", });
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
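For orientation, a minimal sketch of how calling code drives the generated
Scanner builder above; the row keys, column, and field values are invented,
and only the standard generated toByteArray()/parseFrom() pair is assumed.

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner;

public class ScannerMessageSketch {
  public static void main(String[] args) throws Exception {
    // Every Scanner field is optional; only fields that are set get a
    // presence bit in bitField0_ and are written to the wire.
    Scanner scanner = Scanner.newBuilder()
        .setStartRow(ByteString.copyFromUtf8("row-0000"))  // hypothetical start key
        .setEndRow(ByteString.copyFromUtf8("row-9999"))    // hypothetical end key
        .addColumns(ByteString.copyFromUtf8("cf:qual"))    // repeated bytes columns = 3
        .setBatch(100)
        .setCaching(1000)        // REST scanner caching, field 9
        .setCacheBlocks(false)   // server side block caching hint, field 11
        .build();

    // Round-trip through the wire format; unset fields stay unset.
    Scanner copy = Scanner.parseFrom(scanner.toByteArray());
    System.out.println(copy.getBatch());        // 100
    System.out.println(copy.hasMaxVersions());  // false
  }
}

Builder.mergeFrom(other), shown above, follows the same presence rules:
scalar fields are copied only when the source reports hasXxx(), while the
repeated columns and labels fields are concatenated rather than replaced.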
[06/38] HBASE-12197 Move rest to its own module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/VersionMessage.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/VersionMessage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/VersionMessage.java
deleted file mode 100644
index 590b0d3..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/VersionMessage.java
+++ /dev/null
@@ -1,1147 +0,0 @@
-// Generated by the protocol buffer compiler. DO NOT EDIT!
-// source: VersionMessage.proto
-
-package org.apache.hadoop.hbase.rest.protobuf.generated;
-
-public final class VersionMessage {
- private VersionMessage() {}
- public static void registerAllExtensions(
- com.google.protobuf.ExtensionRegistry registry) {
- }
- public interface VersionOrBuilder
- extends com.google.protobuf.MessageOrBuilder {
-
- // optional string restVersion = 1;
- /**
- * <code>optional string restVersion = 1;</code>
- */
- boolean hasRestVersion();
- /**
- * <code>optional string restVersion = 1;</code>
- */
- java.lang.String getRestVersion();
- /**
- * <code>optional string restVersion = 1;</code>
- */
- com.google.protobuf.ByteString
- getRestVersionBytes();
-
- // optional string jvmVersion = 2;
- /**
- * <code>optional string jvmVersion = 2;</code>
- */
- boolean hasJvmVersion();
- /**
- * <code>optional string jvmVersion = 2;</code>
- */
- java.lang.String getJvmVersion();
- /**
- * <code>optional string jvmVersion = 2;</code>
- */
- com.google.protobuf.ByteString
- getJvmVersionBytes();
-
- // optional string osVersion = 3;
- /**
- * <code>optional string osVersion = 3;</code>
- */
- boolean hasOsVersion();
- /**
- * <code>optional string osVersion = 3;</code>
- */
- java.lang.String getOsVersion();
- /**
- * <code>optional string osVersion = 3;</code>
- */
- com.google.protobuf.ByteString
- getOsVersionBytes();
-
- // optional string serverVersion = 4;
- /**
- * <code>optional string serverVersion = 4;</code>
- */
- boolean hasServerVersion();
- /**
- * <code>optional string serverVersion = 4;</code>
- */
- java.lang.String getServerVersion();
- /**
- * <code>optional string serverVersion = 4;</code>
- */
- com.google.protobuf.ByteString
- getServerVersionBytes();
-
- // optional string jerseyVersion = 5;
- /**
- * <code>optional string jerseyVersion = 5;</code>
- */
- boolean hasJerseyVersion();
- /**
- * <code>optional string jerseyVersion = 5;</code>
- */
- java.lang.String getJerseyVersion();
- /**
- * <code>optional string jerseyVersion = 5;</code>
- */
- com.google.protobuf.ByteString
- getJerseyVersionBytes();
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.Version}
- */
- public static final class Version extends
- com.google.protobuf.GeneratedMessage
- implements VersionOrBuilder {
- // Use Version.newBuilder() to construct.
- private Version(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private Version(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final Version defaultInstance;
- public static Version getDefaultInstance() {
- return defaultInstance;
- }
-
- public Version getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private Version(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 10: {
- bitField0_ |= 0x00000001;
- restVersion_ = input.readBytes();
- break;
- }
- case 18: {
- bitField0_ |= 0x00000002;
- jvmVersion_ = input.readBytes();
- break;
- }
- case 26: {
- bitField0_ |= 0x00000004;
- osVersion_ = input.readBytes();
- break;
- }
- case 34: {
- bitField0_ |= 0x00000008;
- serverVersion_ = input.readBytes();
- break;
- }
- case 42: {
- bitField0_ |= 0x00000010;
- jerseyVersion_ = input.readBytes();
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.class, org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.Builder.class);
- }
-
- public static com.google.protobuf.Parser<Version> PARSER =
- new com.google.protobuf.AbstractParser<Version>() {
- public Version parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new Version(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<Version> getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- // optional string restVersion = 1;
- public static final int RESTVERSION_FIELD_NUMBER = 1;
- private java.lang.Object restVersion_;
- /**
- * <code>optional string restVersion = 1;</code>
- */
- public boolean hasRestVersion() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional string restVersion = 1;</code>
- */
- public java.lang.String getRestVersion() {
- java.lang.Object ref = restVersion_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- restVersion_ = s;
- }
- return s;
- }
- }
- /**
- * <code>optional string restVersion = 1;</code>
- */
- public com.google.protobuf.ByteString
- getRestVersionBytes() {
- java.lang.Object ref = restVersion_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- restVersion_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- // optional string jvmVersion = 2;
- public static final int JVMVERSION_FIELD_NUMBER = 2;
- private java.lang.Object jvmVersion_;
- /**
- * <code>optional string jvmVersion = 2;</code>
- */
- public boolean hasJvmVersion() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional string jvmVersion = 2;</code>
- */
- public java.lang.String getJvmVersion() {
- java.lang.Object ref = jvmVersion_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- jvmVersion_ = s;
- }
- return s;
- }
- }
- /**
- * <code>optional string jvmVersion = 2;</code>
- */
- public com.google.protobuf.ByteString
- getJvmVersionBytes() {
- java.lang.Object ref = jvmVersion_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- jvmVersion_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- // optional string osVersion = 3;
- public static final int OSVERSION_FIELD_NUMBER = 3;
- private java.lang.Object osVersion_;
- /**
- * <code>optional string osVersion = 3;</code>
- */
- public boolean hasOsVersion() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
- }
- /**
- * <code>optional string osVersion = 3;</code>
- */
- public java.lang.String getOsVersion() {
- java.lang.Object ref = osVersion_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- osVersion_ = s;
- }
- return s;
- }
- }
- /**
- * <code>optional string osVersion = 3;</code>
- */
- public com.google.protobuf.ByteString
- getOsVersionBytes() {
- java.lang.Object ref = osVersion_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- osVersion_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- // optional string serverVersion = 4;
- public static final int SERVERVERSION_FIELD_NUMBER = 4;
- private java.lang.Object serverVersion_;
- /**
- * <code>optional string serverVersion = 4;</code>
- */
- public boolean hasServerVersion() {
- return ((bitField0_ & 0x00000008) == 0x00000008);
- }
- /**
- * <code>optional string serverVersion = 4;</code>
- */
- public java.lang.String getServerVersion() {
- java.lang.Object ref = serverVersion_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- serverVersion_ = s;
- }
- return s;
- }
- }
- /**
- * <code>optional string serverVersion = 4;</code>
- */
- public com.google.protobuf.ByteString
- getServerVersionBytes() {
- java.lang.Object ref = serverVersion_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- serverVersion_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- // optional string jerseyVersion = 5;
- public static final int JERSEYVERSION_FIELD_NUMBER = 5;
- private java.lang.Object jerseyVersion_;
- /**
- * <code>optional string jerseyVersion = 5;</code>
- */
- public boolean hasJerseyVersion() {
- return ((bitField0_ & 0x00000010) == 0x00000010);
- }
- /**
- * <code>optional string jerseyVersion = 5;</code>
- */
- public java.lang.String getJerseyVersion() {
- java.lang.Object ref = jerseyVersion_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- jerseyVersion_ = s;
- }
- return s;
- }
- }
- /**
- * <code>optional string jerseyVersion = 5;</code>
- */
- public com.google.protobuf.ByteString
- getJerseyVersionBytes() {
- java.lang.Object ref = jerseyVersion_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- jerseyVersion_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- private void initFields() {
- restVersion_ = "";
- jvmVersion_ = "";
- osVersion_ = "";
- serverVersion_ = "";
- jerseyVersion_ = "";
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized != -1) return isInitialized == 1;
-
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBytes(1, getRestVersionBytes());
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeBytes(2, getJvmVersionBytes());
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- output.writeBytes(3, getOsVersionBytes());
- }
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
- output.writeBytes(4, getServerVersionBytes());
- }
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
- output.writeBytes(5, getJerseyVersionBytes());
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(1, getRestVersionBytes());
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(2, getJvmVersionBytes());
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(3, getOsVersionBytes());
- }
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(4, getServerVersionBytes());
- }
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(5, getJerseyVersionBytes());
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.Version}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.VersionOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.class, org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.Builder.class);
- }
-
- // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- restVersion_ = "";
- bitField0_ = (bitField0_ & ~0x00000001);
- jvmVersion_ = "";
- bitField0_ = (bitField0_ & ~0x00000002);
- osVersion_ = "";
- bitField0_ = (bitField0_ & ~0x00000004);
- serverVersion_ = "";
- bitField0_ = (bitField0_ & ~0x00000008);
- jerseyVersion_ = "";
- bitField0_ = (bitField0_ & ~0x00000010);
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_descriptor;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version getDefaultInstanceForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.getDefaultInstance();
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version build() {
- org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version buildPartial() {
- org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version result = new org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.restVersion_ = restVersion_;
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
- to_bitField0_ |= 0x00000002;
- }
- result.jvmVersion_ = jvmVersion_;
- if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
- to_bitField0_ |= 0x00000004;
- }
- result.osVersion_ = osVersion_;
- if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
- to_bitField0_ |= 0x00000008;
- }
- result.serverVersion_ = serverVersion_;
- if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
- to_bitField0_ |= 0x00000010;
- }
- result.jerseyVersion_ = jerseyVersion_;
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version) {
- return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version other) {
- if (other == org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.getDefaultInstance()) return this;
- if (other.hasRestVersion()) {
- bitField0_ |= 0x00000001;
- restVersion_ = other.restVersion_;
- onChanged();
- }
- if (other.hasJvmVersion()) {
- bitField0_ |= 0x00000002;
- jvmVersion_ = other.jvmVersion_;
- onChanged();
- }
- if (other.hasOsVersion()) {
- bitField0_ |= 0x00000004;
- osVersion_ = other.osVersion_;
- onChanged();
- }
- if (other.hasServerVersion()) {
- bitField0_ |= 0x00000008;
- serverVersion_ = other.serverVersion_;
- onChanged();
- }
- if (other.hasJerseyVersion()) {
- bitField0_ |= 0x00000010;
- jerseyVersion_ = other.jerseyVersion_;
- onChanged();
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- // optional string restVersion = 1;
- private java.lang.Object restVersion_ = "";
- /**
- * <code>optional string restVersion = 1;</code>
- */
- public boolean hasRestVersion() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional string restVersion = 1;</code>
- */
- public java.lang.String getRestVersion() {
- java.lang.Object ref = restVersion_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- restVersion_ = s;
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- * <code>optional string restVersion = 1;</code>
- */
- public com.google.protobuf.ByteString
- getRestVersionBytes() {
- java.lang.Object ref = restVersion_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- restVersion_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- * <code>optional string restVersion = 1;</code>
- */
- public Builder setRestVersion(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- restVersion_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional string restVersion = 1;</code>
- */
- public Builder clearRestVersion() {
- bitField0_ = (bitField0_ & ~0x00000001);
- restVersion_ = getDefaultInstance().getRestVersion();
- onChanged();
- return this;
- }
- /**
- * <code>optional string restVersion = 1;</code>
- */
- public Builder setRestVersionBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- restVersion_ = value;
- onChanged();
- return this;
- }
-
- // optional string jvmVersion = 2;
- private java.lang.Object jvmVersion_ = "";
- /**
- * <code>optional string jvmVersion = 2;</code>
- */
- public boolean hasJvmVersion() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional string jvmVersion = 2;</code>
- */
- public java.lang.String getJvmVersion() {
- java.lang.Object ref = jvmVersion_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- jvmVersion_ = s;
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- * <code>optional string jvmVersion = 2;</code>
- */
- public com.google.protobuf.ByteString
- getJvmVersionBytes() {
- java.lang.Object ref = jvmVersion_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- jvmVersion_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- * <code>optional string jvmVersion = 2;</code>
- */
- public Builder setJvmVersion(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000002;
- jvmVersion_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional string jvmVersion = 2;</code>
- */
- public Builder clearJvmVersion() {
- bitField0_ = (bitField0_ & ~0x00000002);
- jvmVersion_ = getDefaultInstance().getJvmVersion();
- onChanged();
- return this;
- }
- /**
- * <code>optional string jvmVersion = 2;</code>
- */
- public Builder setJvmVersionBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000002;
- jvmVersion_ = value;
- onChanged();
- return this;
- }
-
- // optional string osVersion = 3;
- private java.lang.Object osVersion_ = "";
- /**
- * <code>optional string osVersion = 3;</code>
- */
- public boolean hasOsVersion() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
- }
- /**
- * <code>optional string osVersion = 3;</code>
- */
- public java.lang.String getOsVersion() {
- java.lang.Object ref = osVersion_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- osVersion_ = s;
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- * <code>optional string osVersion = 3;</code>
- */
- public com.google.protobuf.ByteString
- getOsVersionBytes() {
- java.lang.Object ref = osVersion_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- osVersion_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- * <code>optional string osVersion = 3;</code>
- */
- public Builder setOsVersion(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000004;
- osVersion_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional string osVersion = 3;</code>
- */
- public Builder clearOsVersion() {
- bitField0_ = (bitField0_ & ~0x00000004);
- osVersion_ = getDefaultInstance().getOsVersion();
- onChanged();
- return this;
- }
- /**
- * <code>optional string osVersion = 3;</code>
- */
- public Builder setOsVersionBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000004;
- osVersion_ = value;
- onChanged();
- return this;
- }
-
- // optional string serverVersion = 4;
- private java.lang.Object serverVersion_ = "";
- /**
- * <code>optional string serverVersion = 4;</code>
- */
- public boolean hasServerVersion() {
- return ((bitField0_ & 0x00000008) == 0x00000008);
- }
- /**
- * <code>optional string serverVersion = 4;</code>
- */
- public java.lang.String getServerVersion() {
- java.lang.Object ref = serverVersion_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- serverVersion_ = s;
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- * <code>optional string serverVersion = 4;</code>
- */
- public com.google.protobuf.ByteString
- getServerVersionBytes() {
- java.lang.Object ref = serverVersion_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- serverVersion_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- * <code>optional string serverVersion = 4;</code>
- */
- public Builder setServerVersion(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000008;
- serverVersion_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional string serverVersion = 4;</code>
- */
- public Builder clearServerVersion() {
- bitField0_ = (bitField0_ & ~0x00000008);
- serverVersion_ = getDefaultInstance().getServerVersion();
- onChanged();
- return this;
- }
- /**
- * <code>optional string serverVersion = 4;</code>
- */
- public Builder setServerVersionBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000008;
- serverVersion_ = value;
- onChanged();
- return this;
- }
-
- // optional string jerseyVersion = 5;
- private java.lang.Object jerseyVersion_ = "";
- /**
- * <code>optional string jerseyVersion = 5;</code>
- */
- public boolean hasJerseyVersion() {
- return ((bitField0_ & 0x00000010) == 0x00000010);
- }
- /**
- * <code>optional string jerseyVersion = 5;</code>
- */
- public java.lang.String getJerseyVersion() {
- java.lang.Object ref = jerseyVersion_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- jerseyVersion_ = s;
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- * <code>optional string jerseyVersion = 5;</code>
- */
- public com.google.protobuf.ByteString
- getJerseyVersionBytes() {
- java.lang.Object ref = jerseyVersion_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- jerseyVersion_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- * <code>optional string jerseyVersion = 5;</code>
- */
- public Builder setJerseyVersion(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000010;
- jerseyVersion_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional string jerseyVersion = 5;</code>
- */
- public Builder clearJerseyVersion() {
- bitField0_ = (bitField0_ & ~0x00000010);
- jerseyVersion_ = getDefaultInstance().getJerseyVersion();
- onChanged();
- return this;
- }
- /**
- * <code>optional string jerseyVersion = 5;</code>
- */
- public Builder setJerseyVersionBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000010;
- jerseyVersion_ = value;
- onChanged();
- return this;
- }
-
- // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.Version)
- }
-
- static {
- defaultInstance = new Version(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.Version)
- }
-
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_fieldAccessorTable;
-
- public static com.google.protobuf.Descriptors.FileDescriptor
- getDescriptor() {
- return descriptor;
- }
- private static com.google.protobuf.Descriptors.FileDescriptor
- descriptor;
- static {
- java.lang.String[] descriptorData = {
- "\n\024VersionMessage.proto\022/org.apache.hadoo" +
- "p.hbase.rest.protobuf.generated\"s\n\007Versi" +
- "on\022\023\n\013restVersion\030\001 \001(\t\022\022\n\njvmVersion\030\002 " +
- "\001(\t\022\021\n\tosVersion\030\003 \001(\t\022\025\n\rserverVersion\030" +
- "\004 \001(\t\022\025\n\rjerseyVersion\030\005 \001(\t"
- };
- com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
- new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
- public com.google.protobuf.ExtensionRegistry assignDescriptors(
- com.google.protobuf.Descriptors.FileDescriptor root) {
- descriptor = root;
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_descriptor =
- getDescriptor().getMessageTypes().get(0);
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_descriptor,
- new java.lang.String[] { "RestVersion", "JvmVersion", "OsVersion", "ServerVersion", "JerseyVersion", });
- return null;
- }
- };
- com.google.protobuf.Descriptors.FileDescriptor
- .internalBuildGeneratedFileFrom(descriptorData,
- new com.google.protobuf.Descriptors.FileDescriptor[] {
- }, assigner);
- }
-
- // @@protoc_insertion_point(outer_class_scope)
-}
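The Version message removed above follows the same generated-builder pattern;
a short sketch of its build/parse round trip, with invented version strings.

import org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version;

public class VersionMessageSketch {
  public static void main(String[] args) throws Exception {
    // All five fields are optional strings; hasXxx() reports presence.
    Version version = Version.newBuilder()
        .setRestVersion("0.0.2")             // hypothetical values
        .setJvmVersion("1.7.0_65-b17")
        .setOsVersion("Linux 3.13.0 amd64")
        .build();

    Version parsed = Version.parseFrom(version.toByteArray());
    System.out.println(parsed.getRestVersion());    // 0.0.2
    System.out.println(parsed.hasJerseyVersion());  // false: never set
  }
}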
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java
deleted file mode 100644
index 3b8b8ca..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.provider;
-
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.Set;
-
-import javax.ws.rs.ext.ContextResolver;
-import javax.ws.rs.ext.Provider;
-import javax.xml.bind.JAXBContext;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.rest.model.CellModel;
-import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
-import org.apache.hadoop.hbase.rest.model.ScannerModel;
-import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;
-import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel;
-import org.apache.hadoop.hbase.rest.model.TableInfoModel;
-import org.apache.hadoop.hbase.rest.model.TableListModel;
-import org.apache.hadoop.hbase.rest.model.TableModel;
-import org.apache.hadoop.hbase.rest.model.TableRegionModel;
-import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
-import org.apache.hadoop.hbase.rest.model.VersionModel;
-
-import com.sun.jersey.api.json.JSONConfiguration;
-import com.sun.jersey.api.json.JSONJAXBContext;
-
-/**
- * Plumbing for hooking up Jersey's JSON entity body encoding and decoding
- * support to JAXB. Modify how the context is created (by using e.g. a
- * different configuration builder) to control how JSON is processed and
- * created.
- */
-@Provider
-@InterfaceAudience.Private
-public class JAXBContextResolver implements ContextResolver<JAXBContext> {
-
- private final JAXBContext context;
-
- private final Set<Class<?>> types;
-
- private final Class<?>[] cTypes = {
- CellModel.class,
- CellSetModel.class,
- ColumnSchemaModel.class,
- RowModel.class,
- ScannerModel.class,
- StorageClusterStatusModel.class,
- StorageClusterVersionModel.class,
- TableInfoModel.class,
- TableListModel.class,
- TableModel.class,
- TableRegionModel.class,
- TableSchemaModel.class,
- VersionModel.class
- };
-
- @SuppressWarnings("unchecked")
- public JAXBContextResolver() throws Exception {
- this.types = new HashSet(Arrays.asList(cTypes));
- this.context = new JSONJAXBContext(JSONConfiguration.natural().build(),
- cTypes);
- }
-
- @Override
- public JAXBContext getContext(Class<?> objectType) {
- return (types.contains(objectType)) ? context : null;
- }
-}
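To make the resolver's contract concrete: when writing an entity as JSON,
Jersey asks each registered ContextResolver<JAXBContext> for a context for
the entity type and falls back to its defaults on null. A hedged sketch,
assuming the resolver is constructed the way Jersey would construct it.

import javax.ws.rs.ext.ContextResolver;
import javax.xml.bind.JAXBContext;
import org.apache.hadoop.hbase.rest.model.TableListModel;
import org.apache.hadoop.hbase.rest.provider.JAXBContextResolver;

public class ResolverSketch {
  public static void main(String[] args) throws Exception {
    ContextResolver<JAXBContext> resolver = new JAXBContextResolver();

    // Registered model type: the shared natural-JSON context is returned.
    JAXBContext ctx = resolver.getContext(TableListModel.class);
    System.out.println(ctx != null);   // true

    // Unregistered type: null, so Jersey uses its default JAXB handling.
    System.out.println(resolver.getContext(String.class) == null);  // true
  }
}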
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/provider/JacksonProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/provider/JacksonProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/provider/JacksonProvider.java
deleted file mode 100644
index 7791d02..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/provider/JacksonProvider.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.rest.provider;
-
-import org.codehaus.jackson.jaxrs.JacksonJaxbJsonProvider;
-
-import javax.ws.rs.ext.Provider;
-
-//create a class in the defined resource package name
-//so it gets activated
-//Use jackson to take care of json
-//since it has better support for object
-//deserialization and is less clunky to deal with
-@Provider
-public class JacksonProvider extends JacksonJaxbJsonProvider {
-}
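The comment above describes Jersey 1.x package scanning: any @Provider in a
scanned package is activated automatically. A sketch of that mechanism via
PackagesResourceConfig; the scanned package name is an assumption about how
the REST server is configured, not something this diff shows.

import com.sun.jersey.api.core.PackagesResourceConfig;
import com.sun.jersey.api.core.ResourceConfig;

public class ProviderScanSketch {
  public static void main(String[] args) {
    // Scanning the provider package picks up JacksonProvider (and the
    // other @Provider classes) without any explicit registration.
    ResourceConfig config =
        new PackagesResourceConfig("org.apache.hadoop.hbase.rest.provider");
    System.out.println(config.getClasses()
        .contains(org.apache.hadoop.hbase.rest.provider.JacksonProvider.class));  // true
  }
}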
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java
deleted file mode 100644
index ec39db0..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.provider.consumer;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.lang.annotation.Annotation;
-import java.lang.reflect.Type;
-
-import javax.ws.rs.Consumes;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.MultivaluedMap;
-import javax.ws.rs.ext.MessageBodyReader;
-import javax.ws.rs.ext.Provider;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.rest.Constants;
-import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
-
-/**
- * Adapter for hooking up Jersey content processing dispatch to
- * ProtobufMessageHandler interface capable handlers for decoding protobuf input.
- */
-@Provider
-@Consumes({Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF})
-@InterfaceAudience.Private
-public class ProtobufMessageBodyConsumer
- implements MessageBodyReader<ProtobufMessageHandler> {
- private static final Log LOG =
- LogFactory.getLog(ProtobufMessageBodyConsumer.class);
-
- @Override
- public boolean isReadable(Class<?> type, Type genericType,
- Annotation[] annotations, MediaType mediaType) {
- return ProtobufMessageHandler.class.isAssignableFrom(type);
- }
-
- @Override
- public ProtobufMessageHandler readFrom(Class<ProtobufMessageHandler> type, Type genericType,
- Annotation[] annotations, MediaType mediaType,
- MultivaluedMap<String, String> httpHeaders, InputStream inputStream)
- throws IOException, WebApplicationException {
- ProtobufMessageHandler obj = null;
- try {
- obj = type.newInstance();
- ByteArrayOutputStream baos = new ByteArrayOutputStream();
- byte[] buffer = new byte[4096];
- int read;
- do {
- read = inputStream.read(buffer, 0, buffer.length);
- if (read > 0) {
- baos.write(buffer, 0, read);
- }
- } while (read > 0);
- if (LOG.isDebugEnabled()) {
- LOG.debug(getClass() + ": read " + baos.size() + " bytes from " +
- inputStream);
- }
- obj = obj.getObjectFromMessage(baos.toByteArray());
- } catch (InstantiationException e) {
- throw new WebApplicationException(e);
- } catch (IllegalAccessException e) {
- throw new WebApplicationException(e);
- }
- return obj;
- }
-}
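The readFrom() method above drains the request body in 4 KB chunks and hands the
accumulated bytes to the model's getObjectFromMessage(). A minimal round-trip
sketch of that decode step, assuming the hbase REST model classes (for example
ScannerModel, later in this diff) are on the classpath; the class name
ProtobufConsumerSketch is hypothetical:

    import org.apache.hadoop.hbase.rest.model.ScannerModel;

    public class ProtobufConsumerSketch {
      public static void main(String[] args) throws Exception {
        // Serialize a model to its protobuf wire form...
        ScannerModel sent = new ScannerModel();
        sent.setBatch(100);
        byte[] wire = sent.createProtobufOutput();
        // ...then decode it the way readFrom() does once the stream is drained.
        ScannerModel received =
            (ScannerModel) new ScannerModel().getObjectFromMessage(wire);
        System.out.println("batch = " + received.getBatch());  // batch = 100
      }
    }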
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java
deleted file mode 100644
index 523692a..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.provider.producer;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.lang.annotation.Annotation;
-import java.lang.reflect.Type;
-
-import javax.ws.rs.Produces;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.MultivaluedMap;
-import javax.ws.rs.ext.MessageBodyWriter;
-import javax.ws.rs.ext.Provider;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.rest.Constants;
-
-/**
- * An adapter between Jersey and Object.toString(). Hooks up plain text output
- * to the Jersey content handling framework.
- * Jersey will first call getSize() to learn the number of bytes that will be
- * sent, then writeTo to perform the actual I/O.
- */
-@Provider
-@Produces(Constants.MIMETYPE_TEXT)
-@InterfaceAudience.Private
-public class PlainTextMessageBodyProducer
- implements MessageBodyWriter<Object> {
-
- private ThreadLocal<byte[]> buffer = new ThreadLocal<byte[]>();
-
- @Override
- public boolean isWriteable(Class<?> arg0, Type arg1, Annotation[] arg2,
- MediaType arg3) {
- return true;
- }
-
- @Override
- public long getSize(Object object, Class<?> type, Type genericType,
- Annotation[] annotations, MediaType mediaType) {
- byte[] bytes = object.toString().getBytes();
- buffer.set(bytes);
- return bytes.length;
- }
-
- @Override
- public void writeTo(Object object, Class<?> type, Type genericType,
- Annotation[] annotations, MediaType mediaType,
- MultivaluedMap<String, Object> httpHeaders, OutputStream outStream)
- throws IOException, WebApplicationException {
- byte[] bytes = buffer.get();
- outStream.write(bytes);
- buffer.remove();
- }
-}
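getSize() above serializes the entity via Object.toString() (note that
getBytes() uses the platform default charset) and parks the bytes in the
ThreadLocal; writeTo() then writes and clears them. The handoff is safe because
the JAX-RS runtime calls both methods on the same request thread. A minimal
sketch of that contract, assuming the producer above is on the classpath;
PlainTextProducerSketch is a hypothetical name:

    import java.io.ByteArrayOutputStream;
    import java.lang.annotation.Annotation;
    import javax.ws.rs.core.MediaType;
    import org.apache.hadoop.hbase.rest.provider.producer.PlainTextMessageBodyProducer;

    public class PlainTextProducerSketch {
      public static void main(String[] args) throws Exception {
        PlainTextMessageBodyProducer producer = new PlainTextMessageBodyProducer();
        Object entity = Integer.valueOf(42);          // any Object; toString() is used
        long size = producer.getSize(entity, null, null, new Annotation[0],
            MediaType.TEXT_PLAIN_TYPE);               // serializes and caches the bytes
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        producer.writeTo(entity, null, null, new Annotation[0],
            MediaType.TEXT_PLAIN_TYPE, null /* headers unused here */, out);
        System.out.println(size + " bytes: " + out);  // 2 bytes: 42
      }
    }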
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java
deleted file mode 100644
index 6d737b5..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.provider.producer;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.lang.annotation.Annotation;
-import java.lang.reflect.Type;
-
-import javax.ws.rs.Produces;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.MultivaluedMap;
-import javax.ws.rs.ext.MessageBodyWriter;
-import javax.ws.rs.ext.Provider;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.rest.Constants;
-import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
-
-/**
- * An adapter between Jersey and ProtobufMessageHandler implementors. Hooks up
- * protobuf output producing methods to the Jersey content handling framework.
- * Jersey will first call getSize() to learn the number of bytes that will be
- * sent, then writeTo to perform the actual I/O.
- */
-@Provider
-@Produces({Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF})
-@InterfaceAudience.Private
-public class ProtobufMessageBodyProducer
- implements MessageBodyWriter<ProtobufMessageHandler> {
-
- private ThreadLocal<byte[]> buffer = new ThreadLocal<byte[]>();
-
- @Override
- public boolean isWriteable(Class<?> type, Type genericType,
- Annotation[] annotations, MediaType mediaType) {
- return ProtobufMessageHandler.class.isAssignableFrom(type);
- }
-
- @Override
- public long getSize(ProtobufMessageHandler m, Class<?> type, Type genericType,
- Annotation[] annotations, MediaType mediaType) {
- ByteArrayOutputStream baos = new ByteArrayOutputStream();
- try {
- baos.write(m.createProtobufOutput());
- } catch (IOException e) {
- return -1;
- }
- byte[] bytes = baos.toByteArray();
- buffer.set(bytes);
- return bytes.length;
- }
-
- @Override
- public void writeTo(ProtobufMessageHandler m, Class<?> type, Type genericType,
- Annotation[] annotations, MediaType mediaType,
- MultivaluedMap<String, Object> httpHeaders, OutputStream entityStream)
- throws IOException, WebApplicationException {
- byte[] bytes = buffer.get();
- entityStream.write(bytes);
- buffer.remove();
- }
-}
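Same getSize()/writeTo() handoff as the plain text producer above, except the
entity is serialized with createProtobufOutput(), and a serialization failure
reports -1, which in the JAX-RS contract means "length unknown". A sketch under
the same classpath assumption, with a hypothetical class name:

    import java.io.ByteArrayOutputStream;
    import java.lang.annotation.Annotation;
    import javax.ws.rs.core.MediaType;
    import org.apache.hadoop.hbase.rest.Constants;
    import org.apache.hadoop.hbase.rest.model.ScannerModel;
    import org.apache.hadoop.hbase.rest.provider.producer.ProtobufMessageBodyProducer;

    public class ProtobufProducerSketch {
      public static void main(String[] args) throws Exception {
        ProtobufMessageBodyProducer producer = new ProtobufMessageBodyProducer();
        ScannerModel model = new ScannerModel();
        Annotation[] none = new Annotation[0];
        MediaType pb = MediaType.valueOf(Constants.MIMETYPE_PROTOBUF);
        long size = producer.getSize(model, ScannerModel.class, null, none, pb);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        producer.writeTo(model, ScannerModel.class, null, none, pb, null, out);
        System.out.println(size == out.size());       // true
      }
    }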
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/DummyFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/DummyFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/DummyFilter.java
deleted file mode 100644
index ef53f46..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/DummyFilter.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-
-import javax.servlet.Filter;
-import javax.servlet.FilterChain;
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-public class DummyFilter implements Filter {
- private Log LOG = LogFactory.getLog(getClass());
-
- @Override
- public void destroy() {
- }
-
- @Override
- public void doFilter(ServletRequest paramServletRequest, ServletResponse paramServletResponse,
- FilterChain paramFilterChain) throws IOException, ServletException {
- if (paramServletRequest instanceof HttpServletRequest
- && paramServletResponse instanceof HttpServletResponse) {
- HttpServletRequest request = (HttpServletRequest) paramServletRequest;
- HttpServletResponse response = (HttpServletResponse) paramServletResponse;
-
- String path = request.getRequestURI();
- LOG.info(path);
- if (path.indexOf("/status/cluster") >= 0) {
- LOG.info("Blocking cluster status request");
- response.sendError(HttpServletResponse.SC_NOT_FOUND, "Cluster status cannot be requested.");
- } else {
- paramFilterChain.doFilter(request, response);
- }
- }
- }
-
- @Override
- public void init(FilterConfig filterChain) throws ServletException {
- }
-
-}
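DummyFilter only takes effect when it is named under Constants.FILTER_CLASSES,
the configuration key HBaseRESTTestingUtility (next file) reads before mapping
each filter onto "/*". A wiring sketch; FilterWiringSketch is a hypothetical
name:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.rest.Constants;
    import org.apache.hadoop.hbase.rest.DummyFilter;

    public class FilterWiringSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // startServletContainer(conf) would now install DummyFilter on "/*",
        // so GET /status/cluster returns 404 while other paths pass through.
        conf.set(Constants.FILTER_CLASSES, DummyFilter.class.getName());
      }
    }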
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java
deleted file mode 100644
index 8a399e9..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.rest;
-
-import org.apache.commons.lang.ArrayUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.util.HttpServerUtil;
-import org.apache.hadoop.util.StringUtils;
-import org.mortbay.jetty.Server;
-import org.mortbay.jetty.servlet.Context;
-import org.mortbay.jetty.servlet.ServletHolder;
-
-import com.sun.jersey.spi.container.servlet.ServletContainer;
-
-public class HBaseRESTTestingUtility {
-
- static final Log LOG = LogFactory.getLog(HBaseRESTTestingUtility.class);
-
- private int testServletPort;
- private Server server;
-
- public int getServletPort() {
- return testServletPort;
- }
-
- public void startServletContainer(Configuration conf) throws Exception {
- if (server != null) {
- LOG.error("ServletContainer already running");
- return;
- }
-
- // Inject the conf for the test by being the first to create the singleton
- RESTServlet.getInstance(conf, UserProvider.instantiate(conf));
-
- // set up the Jersey servlet container for Jetty
- ServletHolder sh = new ServletHolder(ServletContainer.class);
- sh.setInitParameter(
- "com.sun.jersey.config.property.resourceConfigClass",
- ResourceConfig.class.getCanonicalName());
- sh.setInitParameter("com.sun.jersey.config.property.packages",
- "jetty");
-
- LOG.info("configured " + ServletContainer.class.getName());
-
- // set up Jetty and run the embedded server
- server = new Server(0);
- server.setSendServerVersion(false);
- server.setSendDateHeader(false);
- // set up context
- Context context = new Context(server, "/", Context.SESSIONS);
- context.addServlet(sh, "/*");
- // Load filters specified from configuration.
- String[] filterClasses = conf.getStrings(Constants.FILTER_CLASSES,
- ArrayUtils.EMPTY_STRING_ARRAY);
- for (String filter : filterClasses) {
- filter = filter.trim();
- context.addFilter(Class.forName(filter), "/*", 0);
- }
- HttpServerUtil.constrainHttpMethods(context);
- LOG.info("Loaded filter classes :" + filterClasses);
- // start the server
- server.start();
- // get the port
- testServletPort = server.getConnectors()[0].getLocalPort();
-
- LOG.info("started " + server.getClass().getName() + " on port " +
- testServletPort);
- }
-
- public void shutdownServletContainer() {
- if (server != null) try {
- server.stop();
- server = null;
- RESTServlet.stop();
- } catch (Exception e) {
- LOG.warn(StringUtils.stringifyException(e));
- }
- }
-}
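The typical test-side lifecycle, sketched under the assumption of a mini
cluster started through HBaseTestingUtility; RestTestSkeleton is a hypothetical
name:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.rest.HBaseRESTTestingUtility;

    public class RestTestSkeleton {
      private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
      private static final HBaseRESTTestingUtility REST_TEST_UTIL =
          new HBaseRESTTestingUtility();

      public static void main(String[] args) throws Exception {
        TEST_UTIL.startMiniCluster();
        REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
        int port = REST_TEST_UTIL.getServletPort();  // Jetty picked an ephemeral port
        System.out.println("REST gateway listening on " + port);
        REST_TEST_UTIL.shutdownServletContainer();
        TEST_UTIL.shutdownMiniCluster();
      }
    }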
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
deleted file mode 100644
index 2ffdd4f..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
+++ /dev/null
@@ -1,852 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import java.io.IOException;
-import java.io.Serializable;
-import java.io.StringReader;
-import java.io.StringWriter;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableSet;
-
-import javax.xml.bind.annotation.XmlAttribute;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
-import org.apache.hadoop.hbase.filter.BitComparator;
-import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
-import org.apache.hadoop.hbase.filter.ColumnPaginationFilter;
-import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
-import org.apache.hadoop.hbase.filter.ColumnRangeFilter;
-import org.apache.hadoop.hbase.filter.CompareFilter;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.DependentColumnFilter;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
-import org.apache.hadoop.hbase.filter.InclusiveStopFilter;
-import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
-import org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter;
-import org.apache.hadoop.hbase.filter.NullComparator;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.hbase.filter.PrefixFilter;
-import org.apache.hadoop.hbase.filter.QualifierFilter;
-import org.apache.hadoop.hbase.filter.RandomRowFilter;
-import org.apache.hadoop.hbase.filter.RegexStringComparator;
-import org.apache.hadoop.hbase.filter.RowFilter;
-import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter;
-import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-import org.apache.hadoop.hbase.filter.SkipFilter;
-import org.apache.hadoop.hbase.filter.SubstringComparator;
-import org.apache.hadoop.hbase.filter.TimestampsFilter;
-import org.apache.hadoop.hbase.filter.ValueFilter;
-import org.apache.hadoop.hbase.filter.WhileMatchFilter;
-import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
-import org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner;
-import org.apache.hadoop.hbase.security.visibility.Authorizations;
-import org.apache.hadoop.hbase.util.Base64;
-import org.apache.hadoop.hbase.util.ByteStringer;
-import org.apache.hadoop.hbase.util.Bytes;
-
-import com.google.protobuf.ByteString;
-import com.sun.jersey.api.json.JSONConfiguration;
-import com.sun.jersey.api.json.JSONJAXBContext;
-import com.sun.jersey.api.json.JSONMarshaller;
-import com.sun.jersey.api.json.JSONUnmarshaller;
-
-/**
- * A representation of Scanner parameters.
- *
- * <pre>
- * <complexType name="Scanner">
- * <sequence>
- * <element name="column" type="base64Binary" minOccurs="0" maxOccurs="unbounded"/>
- * <element name="filter" type="string" minOccurs="0" maxOccurs="1"></element>
- * </sequence>
- * <attribute name="startRow" type="base64Binary"></attribute>
- * <attribute name="endRow" type="base64Binary"></attribute>
- * <attribute name="batch" type="int"></attribute>
- * <attribute name="caching" type="int"></attribute>
- * <attribute name="startTime" type="int"></attribute>
- * <attribute name="endTime" type="int"></attribute>
- * <attribute name="maxVersions" type="int"></attribute>
- * </complexType>
- * </pre>
- */
-@XmlRootElement(name="Scanner")
-@InterfaceAudience.Private
-public class ScannerModel implements ProtobufMessageHandler, Serializable {
-
- private static final long serialVersionUID = 1L;
-
- private byte[] startRow = HConstants.EMPTY_START_ROW;
- private byte[] endRow = HConstants.EMPTY_END_ROW;
- private List<byte[]> columns = new ArrayList<byte[]>();
- private int batch = Integer.MAX_VALUE;
- private long startTime = 0;
- private long endTime = Long.MAX_VALUE;
- private String filter = null;
- private int maxVersions = Integer.MAX_VALUE;
- private int caching = -1;
- private List<String> labels = new ArrayList<String>();
- private boolean cacheBlocks = true;
-
- @XmlRootElement
- static class FilterModel {
-
- @XmlRootElement
- static class ByteArrayComparableModel {
- @XmlAttribute public String type;
- @XmlAttribute public String value;
- @XmlAttribute public String op;
-
- static enum ComparatorType {
- BinaryComparator,
- BinaryPrefixComparator,
- BitComparator,
- NullComparator,
- RegexStringComparator,
- SubstringComparator
- }
-
- public ByteArrayComparableModel() { }
-
- public ByteArrayComparableModel(
- ByteArrayComparable comparator) {
- String typeName = comparator.getClass().getSimpleName();
- ComparatorType type = ComparatorType.valueOf(typeName);
- this.type = typeName;
- switch (type) {
- case BinaryComparator:
- case BinaryPrefixComparator:
- this.value = Base64.encodeBytes(comparator.getValue());
- break;
- case BitComparator:
- this.value = Base64.encodeBytes(comparator.getValue());
- this.op = ((BitComparator)comparator).getOperator().toString();
- break;
- case NullComparator:
- break;
- case RegexStringComparator:
- case SubstringComparator:
- this.value = Bytes.toString(comparator.getValue());
- break;
- default:
- throw new RuntimeException("unhandled filter type: " + type);
- }
- }
-
- public ByteArrayComparable build() {
- ByteArrayComparable comparator;
- switch (ComparatorType.valueOf(type)) {
- case BinaryComparator:
- comparator = new BinaryComparator(Base64.decode(value));
- break;
- case BinaryPrefixComparator:
- comparator = new BinaryPrefixComparator(Base64.decode(value));
- break;
- case BitComparator:
- comparator = new BitComparator(Base64.decode(value),
- BitComparator.BitwiseOp.valueOf(op));
- break;
- case NullComparator:
- comparator = new NullComparator();
- break;
- case RegexStringComparator:
- comparator = new RegexStringComparator(value);
- break;
- case SubstringComparator:
- comparator = new SubstringComparator(value);
- break;
- default:
- throw new RuntimeException("unhandled comparator type: " + type);
- }
- return comparator;
- }
-
- }
-
- // A grab bag of fields, would have been a union if this were C.
- // These are null by default and will only be serialized if set (non null).
- @XmlAttribute public String type;
- @XmlAttribute public String op;
- @XmlElement ByteArrayComparableModel comparator;
- @XmlAttribute public String value;
- @XmlElement public List<FilterModel> filters;
- @XmlAttribute public Integer limit;
- @XmlAttribute public Integer offset;
- @XmlAttribute public String family;
- @XmlAttribute public String qualifier;
- @XmlAttribute public Boolean ifMissing;
- @XmlAttribute public Boolean latestVersion;
- @XmlAttribute public String minColumn;
- @XmlAttribute public Boolean minColumnInclusive;
- @XmlAttribute public String maxColumn;
- @XmlAttribute public Boolean maxColumnInclusive;
- @XmlAttribute public Boolean dropDependentColumn;
- @XmlAttribute public Float chance;
- @XmlElement public List<String> prefixes;
- @XmlElement public List<Long> timestamps;
-
- static enum FilterType {
- ColumnCountGetFilter,
- ColumnPaginationFilter,
- ColumnPrefixFilter,
- ColumnRangeFilter,
- DependentColumnFilter,
- FamilyFilter,
- FilterList,
- FirstKeyOnlyFilter,
- InclusiveStopFilter,
- KeyOnlyFilter,
- MultipleColumnPrefixFilter,
- PageFilter,
- PrefixFilter,
- QualifierFilter,
- RandomRowFilter,
- RowFilter,
- SingleColumnValueExcludeFilter,
- SingleColumnValueFilter,
- SkipFilter,
- TimestampsFilter,
- ValueFilter,
- WhileMatchFilter
- }
-
- public FilterModel() { }
-
- public FilterModel(Filter filter) {
- String typeName = filter.getClass().getSimpleName();
- FilterType type = FilterType.valueOf(typeName);
- this.type = typeName;
- switch (type) {
- case ColumnCountGetFilter:
- this.limit = ((ColumnCountGetFilter)filter).getLimit();
- break;
- case ColumnPaginationFilter:
- this.limit = ((ColumnPaginationFilter)filter).getLimit();
- this.offset = ((ColumnPaginationFilter)filter).getOffset();
- break;
- case ColumnPrefixFilter:
- this.value = Base64.encodeBytes(((ColumnPrefixFilter)filter).getPrefix());
- break;
- case ColumnRangeFilter:
- this.minColumn = Base64.encodeBytes(((ColumnRangeFilter)filter).getMinColumn());
- this.minColumnInclusive = ((ColumnRangeFilter)filter).getMinColumnInclusive();
- this.maxColumn = Base64.encodeBytes(((ColumnRangeFilter)filter).getMaxColumn());
- this.maxColumnInclusive = ((ColumnRangeFilter)filter).getMaxColumnInclusive();
- break;
- case DependentColumnFilter: {
- DependentColumnFilter dcf = (DependentColumnFilter)filter;
- this.family = Base64.encodeBytes(dcf.getFamily());
- byte[] qualifier = dcf.getQualifier();
- if (qualifier != null) {
- this.qualifier = Base64.encodeBytes(qualifier);
- }
- this.op = dcf.getOperator().toString();
- this.comparator = new ByteArrayComparableModel(dcf.getComparator());
- this.dropDependentColumn = dcf.dropDependentColumn();
- } break;
- case FilterList:
- this.op = ((FilterList)filter).getOperator().toString();
- this.filters = new ArrayList<FilterModel>();
- for (Filter child: ((FilterList)filter).getFilters()) {
- this.filters.add(new FilterModel(child));
- }
- break;
- case FirstKeyOnlyFilter:
- case KeyOnlyFilter:
- break;
- case InclusiveStopFilter:
- this.value =
- Base64.encodeBytes(((InclusiveStopFilter)filter).getStopRowKey());
- break;
- case MultipleColumnPrefixFilter:
- this.prefixes = new ArrayList<String>();
- for (byte[] prefix: ((MultipleColumnPrefixFilter)filter).getPrefix()) {
- this.prefixes.add(Base64.encodeBytes(prefix));
- }
- break;
- case PageFilter:
- this.value = Long.toString(((PageFilter)filter).getPageSize());
- break;
- case PrefixFilter:
- this.value = Base64.encodeBytes(((PrefixFilter)filter).getPrefix());
- break;
- case FamilyFilter:
- case QualifierFilter:
- case RowFilter:
- case ValueFilter:
- this.op = ((CompareFilter)filter).getOperator().toString();
- this.comparator =
- new ByteArrayComparableModel(
- ((CompareFilter)filter).getComparator());
- break;
- case RandomRowFilter:
- this.chance = ((RandomRowFilter)filter).getChance();
- break;
- case SingleColumnValueExcludeFilter:
- case SingleColumnValueFilter: {
- SingleColumnValueFilter scvf = (SingleColumnValueFilter) filter;
- this.family = Base64.encodeBytes(scvf.getFamily());
- byte[] qualifier = scvf.getQualifier();
- if (qualifier != null) {
- this.qualifier = Base64.encodeBytes(qualifier);
- }
- this.op = scvf.getOperator().toString();
- this.comparator =
- new ByteArrayComparableModel(scvf.getComparator());
- if (scvf.getFilterIfMissing()) {
- this.ifMissing = true;
- }
- if (scvf.getLatestVersionOnly()) {
- this.latestVersion = true;
- }
- } break;
- case SkipFilter:
- this.filters = new ArrayList<FilterModel>();
- this.filters.add(new FilterModel(((SkipFilter)filter).getFilter()));
- break;
- case TimestampsFilter:
- this.timestamps = ((TimestampsFilter)filter).getTimestamps();
- break;
- case WhileMatchFilter:
- this.filters = new ArrayList<FilterModel>();
- this.filters.add(
- new FilterModel(((WhileMatchFilter)filter).getFilter()));
- break;
- default:
- throw new RuntimeException("unhandled filter type " + type);
- }
- }
-
- public Filter build() {
- Filter filter;
- switch (FilterType.valueOf(type)) {
- case ColumnCountGetFilter:
- filter = new ColumnCountGetFilter(limit);
- break;
- case ColumnPaginationFilter:
- filter = new ColumnPaginationFilter(limit, offset);
- break;
- case ColumnPrefixFilter:
- filter = new ColumnPrefixFilter(Base64.decode(value));
- break;
- case ColumnRangeFilter:
- filter = new ColumnRangeFilter(Base64.decode(minColumn),
- minColumnInclusive, Base64.decode(maxColumn),
- maxColumnInclusive);
- break;
- case DependentColumnFilter:
- filter = new DependentColumnFilter(Base64.decode(family),
- qualifier != null ? Base64.decode(qualifier) : null,
- dropDependentColumn, CompareOp.valueOf(op), comparator.build());
- break;
- case FamilyFilter:
- filter = new FamilyFilter(CompareOp.valueOf(op), comparator.build());
- break;
- case FilterList: {
- List<Filter> list = new ArrayList<Filter>();
- for (FilterModel model: filters) {
- list.add(model.build());
- }
- filter = new FilterList(FilterList.Operator.valueOf(op), list);
- } break;
- case FirstKeyOnlyFilter:
- filter = new FirstKeyOnlyFilter();
- break;
- case InclusiveStopFilter:
- filter = new InclusiveStopFilter(Base64.decode(value));
- break;
- case KeyOnlyFilter:
- filter = new KeyOnlyFilter();
- break;
- case MultipleColumnPrefixFilter: {
- byte[][] values = new byte[prefixes.size()][];
- for (int i = 0; i < prefixes.size(); i++) {
- values[i] = Base64.decode(prefixes.get(i));
- }
- filter = new MultipleColumnPrefixFilter(values);
- } break;
- case PageFilter:
- filter = new PageFilter(Long.valueOf(value));
- break;
- case PrefixFilter:
- filter = new PrefixFilter(Base64.decode(value));
- break;
- case QualifierFilter:
- filter = new QualifierFilter(CompareOp.valueOf(op), comparator.build());
- break;
- case RandomRowFilter:
- filter = new RandomRowFilter(chance);
- break;
- case RowFilter:
- filter = new RowFilter(CompareOp.valueOf(op), comparator.build());
- break;
- case SingleColumnValueFilter:
- filter = new SingleColumnValueFilter(Base64.decode(family),
- qualifier != null ? Base64.decode(qualifier) : null,
- CompareOp.valueOf(op), comparator.build());
- if (ifMissing != null) {
- ((SingleColumnValueFilter)filter).setFilterIfMissing(ifMissing);
- }
- if (latestVersion != null) {
- ((SingleColumnValueFilter)filter).setLatestVersionOnly(latestVersion);
- }
- break;
- case SingleColumnValueExcludeFilter:
- filter = new SingleColumnValueExcludeFilter(Base64.decode(family),
- qualifier != null ? Base64.decode(qualifier) : null,
- CompareOp.valueOf(op), comparator.build());
- if (ifMissing != null) {
- ((SingleColumnValueExcludeFilter)filter).setFilterIfMissing(ifMissing);
- }
- if (latestVersion != null) {
- ((SingleColumnValueExcludeFilter)filter).setLatestVersionOnly(latestVersion);
- }
- break;
- case SkipFilter:
- filter = new SkipFilter(filters.get(0).build());
- break;
- case TimestampsFilter:
- filter = new TimestampsFilter(timestamps);
- break;
- case ValueFilter:
- filter = new ValueFilter(CompareOp.valueOf(op), comparator.build());
- break;
- case WhileMatchFilter:
- filter = new WhileMatchFilter(filters.get(0).build());
- break;
- default:
- throw new RuntimeException("unhandled filter type: " + type);
- }
- return filter;
- }
-
- }
-
- /**
- * @param s the JSON representation of the filter
- * @return the filter
- * @throws Exception
- */
- public static Filter buildFilter(String s) throws Exception {
- JSONJAXBContext context =
- new JSONJAXBContext(JSONConfiguration.natural().build(),
- FilterModel.class);
- JSONUnmarshaller unmarshaller = context.createJSONUnmarshaller();
- FilterModel model = unmarshaller.unmarshalFromJSON(new StringReader(s),
- FilterModel.class);
- return model.build();
- }
-
- /**
- * @param filter the filter
- * @return the JSON representation of the filter
- * @throws Exception
- */
- public static String stringifyFilter(final Filter filter) throws Exception {
- JSONJAXBContext context =
- new JSONJAXBContext(JSONConfiguration.natural().build(),
- FilterModel.class);
- JSONMarshaller marshaller = context.createJSONMarshaller();
- StringWriter writer = new StringWriter();
- marshaller.marshallToJSON(new FilterModel(filter), writer);
- return writer.toString();
- }
-
- private static final byte[] COLUMN_DIVIDER = Bytes.toBytes(":");
-
- /**
- * @param scan the scan specification
- * @throws Exception
- */
- public static ScannerModel fromScan(Scan scan) throws Exception {
- ScannerModel model = new ScannerModel();
- model.setStartRow(scan.getStartRow());
- model.setEndRow(scan.getStopRow());
- Map<byte [], NavigableSet<byte []>> families = scan.getFamilyMap();
- if (families != null) {
- for (Map.Entry<byte [], NavigableSet<byte []>> entry : families.entrySet()) {
- if (entry.getValue() != null) {
- for (byte[] qualifier: entry.getValue()) {
- model.addColumn(Bytes.add(entry.getKey(), COLUMN_DIVIDER, qualifier));
- }
- } else {
- model.addColumn(entry.getKey());
- }
- }
- }
- model.setStartTime(scan.getTimeRange().getMin());
- model.setEndTime(scan.getTimeRange().getMax());
- int caching = scan.getCaching();
- if (caching > 0) {
- model.setCaching(caching);
- }
- int batch = scan.getBatch();
- if (batch > 0) {
- model.setBatch(batch);
- }
- int maxVersions = scan.getMaxVersions();
- if (maxVersions > 0) {
- model.setMaxVersions(maxVersions);
- }
- Filter filter = scan.getFilter();
- if (filter != null) {
- model.setFilter(stringifyFilter(filter));
- }
- // Add the visibility labels if found in the attributes
- Authorizations authorizations = scan.getAuthorizations();
- if (authorizations != null) {
- List<String> labels = authorizations.getLabels();
- for (String label : labels) {
- model.addLabel(label);
- }
- }
- return model;
- }
-
- /**
- * Default constructor
- */
- public ScannerModel() {}
-
- /**
- * Constructor
- * @param startRow the start key of the row-range
- * @param endRow the end key of the row-range
- * @param columns the columns to scan
- * @param batch the number of values to return in batch
- * @param caching the number of rows that the scanner will fetch at once
- * @param endTime the upper bound on timestamps of values of interest
- * (values with timestamps later than this are excluded)
- * @param maxVersions the maximum number of versions to return
- * @param filter a filter specification
- */
- public ScannerModel(byte[] startRow, byte[] endRow, List<byte[]> columns,
- int batch, int caching, long endTime, int maxVersions, String filter) {
- super();
- this.startRow = startRow;
- this.endRow = endRow;
- this.columns = columns;
- this.batch = batch;
- this.caching = caching;
- this.endTime = endTime;
- this.maxVersions = maxVersions;
- this.filter = filter;
- }
-
- /**
- * Constructor
- * @param startRow the start key of the row-range
- * @param endRow the end key of the row-range
- * @param columns the columns to scan
- * @param batch the number of values to return in batch
- * @param caching the number of rows that the scanner will fetch at once
- * @param startTime the lower bound on timestamps of values of interest
- * (values with timestamps earlier than this are excluded)
- * @param endTime the upper bound on timestamps of values of interest
- * (values with timestamps later than this are excluded)
- * @param filter a filter specification
- */
- public ScannerModel(byte[] startRow, byte[] endRow, List<byte[]> columns,
- int batch, int caching, long startTime, long endTime, String filter) {
- super();
- this.startRow = startRow;
- this.endRow = endRow;
- this.columns = columns;
- this.batch = batch;
- this.caching = caching;
- this.startTime = startTime;
- this.endTime = endTime;
- this.filter = filter;
- }
-
- /**
- * Add a column to the column set
- * @param column the column name, as <family>(:<qualifier>)?
- */
- public void addColumn(byte[] column) {
- columns.add(column);
- }
-
- /**
- * Add a visibility label to the scan
- * @param label the label to add
- */
- public void addLabel(String label) {
- labels.add(label);
- }
-
- /**
- * @return true if a start row was specified
- */
- public boolean hasStartRow() {
- return !Bytes.equals(startRow, HConstants.EMPTY_START_ROW);
- }
-
- /**
- * @return start row
- */
- @XmlAttribute
- public byte[] getStartRow() {
- return startRow;
- }
-
- /**
- * @return true if an end row was specified
- */
- public boolean hasEndRow() {
- return !Bytes.equals(endRow, HConstants.EMPTY_END_ROW);
- }
-
- /**
- * @return end row
- */
- @XmlAttribute
- public byte[] getEndRow() {
- return endRow;
- }
-
- /**
- * @return list of columns of interest in family:qualifier format, or empty for all
- */
- @XmlElement(name="column")
- public List<byte[]> getColumns() {
- return columns;
- }
-
- @XmlElement(name="labels")
- public List<String> getLabels() {
- return labels;
- }
-
- /**
- * @return the number of cells to return in batch
- */
- @XmlAttribute
- public int getBatch() {
- return batch;
- }
-
- /**
- * @return the number of rows that the scanner will fetch at once
- */
- @XmlAttribute
- public int getCaching() {
- return caching;
- }
-
- /**
- * @return true if HFile blocks should be cached on the servers for this scan, false otherwise
- */
- @XmlAttribute
- public boolean getCacheBlocks() {
- return cacheBlocks;
- }
-
- /**
- * @return the lower bound on timestamps of items of interest
- */
- @XmlAttribute
- public long getStartTime() {
- return startTime;
- }
-
- /**
- * @return the upper bound on timestamps of items of interest
- */
- @XmlAttribute
- public long getEndTime() {
- return endTime;
- }
-
- /**
- * @return maximum number of versions to return
- */
- @XmlAttribute
- public int getMaxVersions() {
- return maxVersions;
- }
-
- /**
- * @return the filter specification
- */
- @XmlElement
- public String getFilter() {
- return filter;
- }
-
- /**
- * @param startRow start row
- */
- public void setStartRow(byte[] startRow) {
- this.startRow = startRow;
- }
-
- /**
- * @param endRow end row
- */
- public void setEndRow(byte[] endRow) {
- this.endRow = endRow;
- }
-
- /**
- * @param columns list of columns of interest in family:qualifier format, or empty for all
- */
- public void setColumns(List<byte[]> columns) {
- this.columns = columns;
- }
-
- /**
- * @param batch the number of cells to return in batch
- */
- public void setBatch(int batch) {
- this.batch = batch;
- }
-
- /**
- * @param caching the number of rows to fetch at once
- */
- public void setCaching(int caching) {
- this.caching = caching;
- }
-
- /**
- * @param value true if HFile blocks should be cached on the servers for this scan, false otherwise
- */
- public void setCacheBlocks(boolean value) {
- this.cacheBlocks = value;
- }
-
- /**
- * @param maxVersions maximum number of versions to return
- */
- public void setMaxVersions(int maxVersions) {
- this.maxVersions = maxVersions;
- }
-
- /**
- * @param startTime the lower bound on timestamps of values of interest
- */
- public void setStartTime(long startTime) {
- this.startTime = startTime;
- }
-
- /**
- * @param endTime the upper bound on timestamps of values of interest
- */
- public void setEndTime(long endTime) {
- this.endTime = endTime;
- }
-
- /**
- * @param filter the filter specification
- */
- public void setFilter(String filter) {
- this.filter = filter;
- }
-
- @Override
- public byte[] createProtobufOutput() {
- Scanner.Builder builder = Scanner.newBuilder();
- if (!Bytes.equals(startRow, HConstants.EMPTY_START_ROW)) {
- builder.setStartRow(ByteStringer.wrap(startRow));
- }
- if (!Bytes.equals(endRow, HConstants.EMPTY_END_ROW)) {
- builder.setEndRow(ByteStringer.wrap(endRow));
- }
- for (byte[] column: columns) {
- builder.addColumns(ByteStringer.wrap(column));
- }
- if (startTime != 0) {
- builder.setStartTime(startTime);
- }
- if (endTime != 0) {
- builder.setEndTime(endTime);
- }
- builder.setBatch(getBatch());
- if (caching > 0) {
- builder.setCaching(caching);
- }
- builder.setMaxVersions(maxVersions);
- if (filter != null) {
- builder.setFilter(filter);
- }
- if (labels != null && labels.size() > 0) {
- for (String label : labels) {
- builder.addLabels(label);
- }
- }
- builder.setCacheBlocks(cacheBlocks);
- return builder.build().toByteArray();
- }
-
- @Override
- public ProtobufMessageHandler getObjectFromMessage(byte[] message)
- throws IOException {
- Scanner.Builder builder = Scanner.newBuilder();
- builder.mergeFrom(message);
- if (builder.hasStartRow()) {
- startRow = builder.getStartRow().toByteArray();
- }
- if (builder.hasEndRow()) {
- endRow = builder.getEndRow().toByteArray();
- }
- for (ByteString column: builder.getColumnsList()) {
- addColumn(column.toByteArray());
- }
- if (builder.hasBatch()) {
- batch = builder.getBatch();
- }
- if (builder.hasCaching()) {
- caching = builder.getCaching();
- }
- if (builder.hasStartTime()) {
- startTime = builder.getStartTime();
- }
- if (builder.hasEndTime()) {
- endTime = builder.getEndTime();
- }
- if (builder.hasMaxVersions()) {
- maxVersions = builder.getMaxVersions();
- }
- if (builder.hasFilter()) {
- filter = builder.getFilter();
- }
- if (builder.getLabelsList() != null) {
- List<String> labels = builder.getLabelsList();
- for(String label : labels) {
- addLabel(label);
- }
- }
- if (builder.hasCacheBlocks()) {
- this.cacheBlocks = builder.getCacheBlocks();
- }
- return this;
- }
-
-}
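Two common uses of the model above, sketched with the hypothetical class name
ScannerModelSketch: round-tripping a Filter through the JSON form handled by
stringifyFilter()/buildFilter(), and deriving a model from a client-side Scan
(the shape POSTed to the REST scanner resource):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.Filter;
    import org.apache.hadoop.hbase.filter.PrefixFilter;
    import org.apache.hadoop.hbase.rest.model.ScannerModel;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScannerModelSketch {
      public static void main(String[] args) throws Exception {
        // Filter -> JSON; prints roughly {"type":"PrefixFilter","value":"cm93LQ=="}
        String json = ScannerModel.stringifyFilter(new PrefixFilter(Bytes.toBytes("row-")));
        System.out.println(json);
        // JSON -> Filter
        Filter rebuilt = ScannerModel.buildFilter(json);
        System.out.println(rebuilt.getClass().getSimpleName());  // PrefixFilter

        // Scan -> ScannerModel
        Scan scan = new Scan(Bytes.toBytes("a"), Bytes.toBytes("z"));
        scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
        ScannerModel model = ScannerModel.fromScan(scan);
        System.out.println(Bytes.toString(model.getStartRow()) + " .. "
            + Bytes.toString(model.getEndRow()));                // a .. z
      }
    }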
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java
deleted file mode 100644
index 3b044e7..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java
+++ /dev/null
@@ -1,790 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
-
-import javax.xml.bind.annotation.XmlAttribute;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlElementWrapper;
-import javax.xml.bind.annotation.XmlRootElement;
-
-import org.apache.hadoop.hbase.util.ByteStringer;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
-import org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus;
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * Representation of the status of a storage cluster:
- * <p>
- * <ul>
- * <li>regions: the total number of regions served by the cluster</li>
- * <li>requests: the total number of requests per second handled by the
- * cluster in the last reporting interval</li>
- * <li>averageLoad: the average load of the region servers in the cluster</li>
- * <li>liveNodes: detailed status of the live region servers</li>
- * <li>deadNodes: the names of region servers declared dead</li>
- * </ul>
- *
- * <pre>
- * <complexType name="StorageClusterStatus">
- * <sequence>
- * <element name="liveNode" type="tns:Node"
- * maxOccurs="unbounded" minOccurs="0">
- * </element>
- * <element name="deadNode" type="string" maxOccurs="unbounded"
- * minOccurs="0">
- * </element>
- * </sequence>
- * <attribute name="regions" type="int"></attribute>
- * <attribute name="requests" type="int"></attribute>
- * <attribute name="averageLoad" type="float"></attribute>
- * </complexType>
- *
- * <complexType name="Node">
- * <sequence>
- * <element name="region" type="tns:Region"
- * maxOccurs="unbounded" minOccurs="0"></element>
- * </sequence>
- * <attribute name="name" type="string"></attribute>
- * <attribute name="startCode" type="int"></attribute>
- * <attribute name="requests" type="int"></attribute>
- * <attribute name="heapSizeMB" type="int"></attribute>
- * <attribute name="maxHeapSizeMB" type="int"></attribute>
- * </complexType>
- *
- * <complexType name="Region">
- * <attribute name="name" type="base64Binary"></attribute>
- * <attribute name="stores" type="int"></attribute>
- * <attribute name="storefiles" type="int"></attribute>
- * <attribute name="storefileSizeMB" type="int"></attribute>
- * <attribute name="memstoreSizeMB" type="int"></attribute>
- * <attribute name="storefileIndexSizeMB" type="int"></attribute>
- * <attribute name="readRequestsCount" type="int"></attribute>
- * <attribute name="writeRequestsCount" type="int"></attribute>
- * <attribute name="rootIndexSizeKB" type="int"></attribute>
- * <attribute name="totalStaticIndexSizeKB" type="int"></attribute>
- * <attribute name="totalStaticBloomSizeKB" type="int"></attribute>
- * <attribute name="totalCompactingKVs" type="int"></attribute>
- * <attribute name="currentCompactedKVs" type="int"></attribute>
- * </complexType>
- * </pre>
- */
-@XmlRootElement(name="ClusterStatus")
-@InterfaceAudience.Private
-public class StorageClusterStatusModel
- implements Serializable, ProtobufMessageHandler {
- private static final long serialVersionUID = 1L;
-
- /**
- * Represents a region server.
- */
- public static class Node implements Serializable {
- private static final long serialVersionUID = 1L;
-
- /**
- * Represents a region hosted on a region server.
- */
- public static class Region {
- private byte[] name;
- private int stores;
- private int storefiles;
- private int storefileSizeMB;
- private int memstoreSizeMB;
- private int storefileIndexSizeMB;
- private long readRequestsCount;
- private long writeRequestsCount;
- private int rootIndexSizeKB;
- private int totalStaticIndexSizeKB;
- private int totalStaticBloomSizeKB;
- private long totalCompactingKVs;
- private long currentCompactedKVs;
-
- /**
- * Default constructor
- */
- public Region() {
- }
-
- /**
- * Constructor
- * @param name the region name
- */
- public Region(byte[] name) {
- this.name = name;
- }
-
- /**
- * Constructor
- * @param name the region name
- * @param stores the number of stores
- * @param storefiles the number of store files
- * @param storefileSizeMB total size of store files, in MB
- * @param memstoreSizeMB total size of memstore, in MB
- * @param storefileIndexSizeMB total size of store file indexes, in MB
- * @param readRequestsCount the current total read requests made to the region
- * @param writeRequestsCount the current total write requests made to the region
- * @param rootIndexSizeKB the current total size of root-level indexes, in KB
- * @param totalStaticIndexSizeKB the total size of all index blocks, in KB
- * @param totalStaticBloomSizeKB the total size of all Bloom filter blocks, in KB
- * @param totalCompactingKVs the total number of key-values in currently running compactions
- * @param currentCompactedKVs the number of key-values already compacted in currently running compactions
- */
- public Region(byte[] name, int stores, int storefiles,
- int storefileSizeMB, int memstoreSizeMB, int storefileIndexSizeMB,
- long readRequestsCount, long writeRequestsCount, int rootIndexSizeKB,
- int totalStaticIndexSizeKB, int totalStaticBloomSizeKB,
- long totalCompactingKVs, long currentCompactedKVs) {
- this.name = name;
- this.stores = stores;
- this.storefiles = storefiles;
- this.storefileSizeMB = storefileSizeMB;
- this.memstoreSizeMB = memstoreSizeMB;
- this.storefileIndexSizeMB = storefileIndexSizeMB;
- this.readRequestsCount = readRequestsCount;
- this.writeRequestsCount = writeRequestsCount;
- this.rootIndexSizeKB = rootIndexSizeKB;
- this.totalStaticIndexSizeKB = totalStaticIndexSizeKB;
- this.totalStaticBloomSizeKB = totalStaticBloomSizeKB;
- this.totalCompactingKVs = totalCompactingKVs;
- this.currentCompactedKVs = currentCompactedKVs;
- }
-
- /**
- * @return the region name
- */
- @XmlAttribute
- public byte[] getName() {
- return name;
- }
-
- /**
- * @return the number of stores
- */
- @XmlAttribute
- public int getStores() {
- return stores;
- }
-
- /**
- * @return the number of store files
- */
- @XmlAttribute
- public int getStorefiles() {
- return storefiles;
- }
-
- /**
- * @return the total size of store files, in MB
- */
- @XmlAttribute
- public int getStorefileSizeMB() {
- return storefileSizeMB;
- }
-
- /**
- * @return memstore size, in MB
- */
- @XmlAttribute
- public int getMemstoreSizeMB() {
- return memstoreSizeMB;
- }
-
- /**
- * @return the total size of store file indexes, in MB
- */
- @XmlAttribute
- public int getStorefileIndexSizeMB() {
- return storefileIndexSizeMB;
- }
-
- /**
- * @return the current total read requests made to region
- */
- @XmlAttribute
- public long getReadRequestsCount() {
- return readRequestsCount;
- }
-
- /**
- * @return the current total write requests made to region
- */
- @XmlAttribute
- public long getWriteRequestsCount() {
- return writeRequestsCount;
- }
-
- /**
- * @return The current total size of root-level indexes for the region, in KB.
- */
- @XmlAttribute
- public int getRootIndexSizeKB() {
- return rootIndexSizeKB;
- }
-
- /**
- * @return The total size of static index, in KB
- */
- @XmlAttribute
- public int getTotalStaticIndexSizeKB() {
- return totalStaticIndexSizeKB;
- }
-
- /**
- * @return The total size of static bloom, in KB
- */
- @XmlAttribute
- public int getTotalStaticBloomSizeKB() {
- return totalStaticBloomSizeKB;
- }
-
- /**
- * @return The total number of compacting key-values
- */
- @XmlAttribute
- public long getTotalCompactingKVs() {
- return totalCompactingKVs;
- }
-
- /**
- * @return The number of current compacted key-values
- */
- @XmlAttribute
- public long getCurrentCompactedKVs() {
- return currentCompactedKVs;
- }
-
- /**
- * @param readRequestsCount The current total read requests made to region
- */
- public void setReadRequestsCount(long readRequestsCount) {
- this.readRequestsCount = readRequestsCount;
- }
-
- /**
- * @param rootIndexSizeKB The current total size of root-level indexes
- * for the region, in KB
- */
- public void setRootIndexSizeKB(int rootIndexSizeKB) {
- this.rootIndexSizeKB = rootIndexSizeKB;
- }
-
- /**
- * @param writeRequestsCount The current total write requests made to region
- */
- public void setWriteRequestsCount(long writeRequestsCount) {
- this.writeRequestsCount = writeRequestsCount;
- }
-
- /**
- * @param currentCompactedKVs The completed count of key values
- * in currently running compaction
- */
- public void setCurrentCompactedKVs(long currentCompactedKVs) {
- this.currentCompactedKVs = currentCompactedKVs;
- }
-
- /**
- * @param totalCompactingKVs The total compacting key values
- * in currently running compaction
- */
- public void setTotalCompactingKVs(long totalCompactingKVs) {
- this.totalCompactingKVs = totalCompactingKVs;
- }
-
- /**
- * @param totalStaticBloomSizeKB The total size of all Bloom filter blocks,
- * not just loaded into the block cache, in KB.
- */
- public void setTotalStaticBloomSizeKB(int totalStaticBloomSizeKB) {
- this.totalStaticBloomSizeKB = totalStaticBloomSizeKB;
- }
-
- /**
- * @param totalStaticIndexSizeKB The total size of all index blocks,
- * not just the root level, in KB.
- */
- public void setTotalStaticIndexSizeKB(int totalStaticIndexSizeKB) {
- this.totalStaticIndexSizeKB = totalStaticIndexSizeKB;
- }
-
- /**
- * @param name the region name
- */
- public void setName(byte[] name) {
- this.name = name;
- }
-
- /**
- * @param stores the number of stores
- */
- public void setStores(int stores) {
- this.stores = stores;
- }
-
- /**
- * @param storefiles the number of store files
- */
- public void setStorefiles(int storefiles) {
- this.storefiles = storefiles;
- }
-
- /**
- * @param storefileSizeMB total size of store files, in MB
- */
- public void setStorefileSizeMB(int storefileSizeMB) {
- this.storefileSizeMB = storefileSizeMB;
- }
-
- /**
- * @param memstoreSizeMB memstore size, in MB
- */
- public void setMemstoreSizeMB(int memstoreSizeMB) {
- this.memstoreSizeMB = memstoreSizeMB;
- }
-
- /**
- * @param storefileIndexSizeMB total size of store file indexes, in MB
- */
- public void setStorefileIndexSizeMB(int storefileIndexSizeMB) {
- this.storefileIndexSizeMB = storefileIndexSizeMB;
- }
- }
-
- private String name;
- private long startCode;
- private int requests;
- private int heapSizeMB;
- private int maxHeapSizeMB;
- private List<Region> regions = new ArrayList<Region>();
-
- /**
- * Add a region to the list; the remaining parameters mirror the Region constructor above.
- * @param name the region name
- */
- public void addRegion(byte[] name, int stores, int storefiles,
- int storefileSizeMB, int memstoreSizeMB, int storefileIndexSizeMB,
- long readRequestsCount, long writeRequestsCount, int rootIndexSizeKB,
- int totalStaticIndexSizeKB, int totalStaticBloomSizeKB,
- long totalCompactingKVs, long currentCompactedKVs) {
- regions.add(new Region(name, stores, storefiles, storefileSizeMB,
- memstoreSizeMB, storefileIndexSizeMB, readRequestsCount,
- writeRequestsCount, rootIndexSizeKB, totalStaticIndexSizeKB,
- totalStaticBloomSizeKB, totalCompactingKVs, currentCompactedKVs));
- }
-
- /**
- * @param index the index
- * @return the region at the given index
- */
- public Region getRegion(int index) {
- return regions.get(index);
- }
-
- /**
- * Default constructor
- */
- public Node() {}
-
- /**
- * Constructor
- * @param name the region server name
- * @param startCode the region server's start code
- */
- public Node(String name, long startCode) {
- this.name = name;
- this.startCode = startCode;
- }
-
- /**
- * @return the region server's name
- */
- @XmlAttribute
- public String getName() {
- return name;
- }
-
- /**
- * @return the region server's start code
- */
- @XmlAttribute
- public long getStartCode() {
- return startCode;
- }
-
- /**
- * @return the current heap size, in MB
- */
- @XmlAttribute
- public int getHeapSizeMB() {
- return heapSizeMB;
- }
-
- /**
- * @return the maximum heap size, in MB
- */
- @XmlAttribute
- public int getMaxHeapSizeMB() {
- return maxHeapSizeMB;
- }
-
- /**
- * @return the list of regions served by the region server
- */
- @XmlElement(name="Region")
- public List<Region> getRegions() {
- return regions;
- }
-
- /**
- * @return the number of requests per second processed by the region server
- */
- @XmlAttribute
- public int getRequests() {
- return requests;
- }
-
- /**
- * @param name the region server's hostname
- */
- public void setName(String name) {
- this.name = name;
- }
-
- /**
- * @param startCode the region server's start code
- */
- public void setStartCode(long startCode) {
- this.startCode = startCode;
- }
-
- /**
- * @param heapSizeMB the current heap size, in MB
- */
- public void setHeapSizeMB(int heapSizeMB) {
- this.heapSizeMB = heapSizeMB;
- }
-
- /**
- * @param maxHeapSizeMB the maximum heap size, in MB
- */
- public void setMaxHeapSizeMB(int maxHeapSizeMB) {
- this.maxHeapSizeMB = maxHeapSizeMB;
- }
-
- /**
- * @param regions a list of regions served by the region server
- */
- public void setRegions(List<Region> regions) {
- this.regions = regions;
- }
-
- /**
- * @param requests the number of requests per second processed by the
- * region server
- */
- public void setRequests(int requests) {
- this.requests = requests;
- }
- }
-
- private List<Node> liveNodes = new ArrayList<Node>();
- private List<String> deadNodes = new ArrayList<String>();
- private int regions;
- private int requests;
- private double averageLoad;
-
- /**
- * Add a live node to the cluster representation.
- * @param name the region server name
- * @param startCode the region server's start code
- * @param heapSizeMB the current heap size, in MB
- * @param maxHeapSizeMB the maximum heap size, in MB
- * @return the newly added node
- */
- public Node addLiveNode(String name, long startCode, int heapSizeMB, int maxHeapSizeMB) {
- Node node = new Node(name, startCode);
- node.setHeapSizeMB(heapSizeMB);
- node.setMaxHeapSizeMB(maxHeapSizeMB);
- liveNodes.add(node);
- return node;
- }
-
- /**
- * @param index the index
- * @return the region server model
- */
- public Node getLiveNode(int index) {
- return liveNodes.get(index);
- }
-
- /**
- * Add a dead node to the cluster representation.
- * @param node the dead region server's name
- */
- public void addDeadNode(String node) {
- deadNodes.add(node);
- }
-
- /**
- * @param index the index
- * @return the dead region server's name
- */
- public String getDeadNode(int index) {
- return deadNodes.get(index);
- }
-
- /**
- * Default constructor
- */
- public StorageClusterStatusModel() {
- }
-
- /**
- * @return the list of live nodes
- */
- @XmlElement(name = "Node")
- @XmlElementWrapper(name = "LiveNodes")
- public List<Node> getLiveNodes() {
- return liveNodes;
- }
-
- /**
- * @return the list of dead nodes
- */
- @XmlElement(name = "Node")
- @XmlElementWrapper(name = "DeadNodes")
- public List<String> getDeadNodes() {
- return deadNodes;
- }
-
- /**
- * @return the total number of regions served by the cluster
- */
- @XmlAttribute
- public int getRegions() {
- return regions;
- }
-
- /**
- * @return the total number of requests per second handled by the cluster in
- * the last reporting interval
- */
- @XmlAttribute
- public int getRequests() {
- return requests;
- }
-
- /**
- * @return the average load of the region servers in the cluster
- */
- @XmlAttribute
- public double getAverageLoad() {
- return averageLoad;
- }
-
- /**
- * @param nodes the list of live node models
- */
- public void setLiveNodes(List<Node> nodes) {
- this.liveNodes = nodes;
- }
-
- /**
- * @param nodes the list of dead node names
- */
- public void setDeadNodes(List<String> nodes) {
- this.deadNodes = nodes;
- }
-
- /**
- * @param regions the total number of regions served by the cluster
- */
- public void setRegions(int regions) {
- this.regions = regions;
- }
-
- /**
- * @param requests the total number of requests per second handled by the
- * cluster
- */
- public void setRequests(int requests) {
- this.requests = requests;
- }
-
- /**
- * @param averageLoad the average load of region servers in the cluster
- */
- public void setAverageLoad(double averageLoad) {
- this.averageLoad = averageLoad;
- }
-
- /*
- * (non-Javadoc)
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
- StringBuilder sb = new StringBuilder();
- sb.append(String.format("%d live servers, %d dead servers, " +
- "%.4f average load%n%n", liveNodes.size(), deadNodes.size(),
- averageLoad));
- if (!liveNodes.isEmpty()) {
- sb.append(liveNodes.size());
- sb.append(" live servers\n");
- for (Node node: liveNodes) {
- sb.append(" ");
- sb.append(node.name);
- sb.append(' ');
- sb.append(node.startCode);
- sb.append("\n requests=");
- sb.append(node.requests);
- sb.append(", regions=");
- sb.append(node.regions.size());
- sb.append("\n heapSizeMB=");
- sb.append(node.heapSizeMB);
- sb.append("\n maxHeapSizeMB=");
- sb.append(node.maxHeapSizeMB);
- sb.append("\n\n");
- for (Node.Region region: node.regions) {
- sb.append(" ");
- sb.append(Bytes.toString(region.name));
- sb.append("\n stores=");
- sb.append(region.stores);
- sb.append("\n storefiless=");
- sb.append(region.storefiles);
- sb.append("\n storefileSizeMB=");
- sb.append(region.storefileSizeMB);
- sb.append("\n memstoreSizeMB=");
- sb.append(region.memstoreSizeMB);
- sb.append("\n storefileIndexSizeMB=");
- sb.append(region.storefileIndexSizeMB);
- sb.append("\n readRequestsCount=");
- sb.append(region.readRequestsCount);
- sb.append("\n writeRequestsCount=");
- sb.append(region.writeRequestsCount);
- sb.append("\n rootIndexSizeKB=");
- sb.append(region.rootIndexSizeKB);
- sb.append("\n totalStaticIndexSizeKB=");
- sb.append(region.totalStaticIndexSizeKB);
- sb.append("\n totalStaticBloomSizeKB=");
- sb.append(region.totalStaticBloomSizeKB);
- sb.append("\n totalCompactingKVs=");
- sb.append(region.totalCompactingKVs);
- sb.append("\n currentCompactedKVs=");
- sb.append(region.currentCompactedKVs);
- sb.append('\n');
- }
- sb.append('\n');
- }
- }
- if (!deadNodes.isEmpty()) {
- sb.append('\n');
- sb.append(deadNodes.size());
- sb.append(" dead servers\n");
- for (String node: deadNodes) {
- sb.append(" ");
- sb.append(node);
- sb.append('\n');
- }
- }
- return sb.toString();
- }
-
- @Override
- public byte[] createProtobufOutput() {
- StorageClusterStatus.Builder builder = StorageClusterStatus.newBuilder();
- builder.setRegions(regions);
- builder.setRequests(requests);
- builder.setAverageLoad(averageLoad);
- for (Node node: liveNodes) {
- StorageClusterStatus.Node.Builder nodeBuilder =
- StorageClusterStatus.Node.newBuilder();
- nodeBuilder.setName(node.name);
- nodeBuilder.setStartCode(node.startCode);
- nodeBuilder.setRequests(node.requests);
- nodeBuilder.setHeapSizeMB(node.heapSizeMB);
- nodeBuilder.setMaxHeapSizeMB(node.maxHeapSizeMB);
- for (Node.Region region: node.regions) {
- StorageClusterStatus.Region.Builder regionBuilder =
- StorageClusterStatus.Region.newBuilder();
- regionBuilder.setName(ByteStringer.wrap(region.name));
- regionBuilder.setStores(region.stores);
- regionBuilder.setStorefiles(region.storefiles);
- regionBuilder.setStorefileSizeMB(region.storefileSizeMB);
- regionBuilder.setMemstoreSizeMB(region.memstoreSizeMB);
- regionBuilder.setStorefileIndexSizeMB(region.storefileIndexSizeMB);
- regionBuilder.setReadRequestsCount(region.readRequestsCount);
- regionBuilder.setWriteRequestsCount(region.writeRequestsCount);
- regionBuilder.setRootIndexSizeKB(region.rootIndexSizeKB);
- regionBuilder.setTotalStaticIndexSizeKB(region.totalStaticIndexSizeKB);
- regionBuilder.setTotalStaticBloomSizeKB(region.totalStaticBloomSizeKB);
- regionBuilder.setTotalCompactingKVs(region.totalCompactingKVs);
- regionBuilder.setCurrentCompactedKVs(region.currentCompactedKVs);
- nodeBuilder.addRegions(regionBuilder);
- }
- builder.addLiveNodes(nodeBuilder);
- }
- for (String node: deadNodes) {
- builder.addDeadNodes(node);
- }
- return builder.build().toByteArray();
- }
-
- @Override
- public ProtobufMessageHandler getObjectFromMessage(byte[] message)
- throws IOException {
- StorageClusterStatus.Builder builder = StorageClusterStatus.newBuilder();
- builder.mergeFrom(message);
- if (builder.hasRegions()) {
- regions = builder.getRegions();
- }
- if (builder.hasRequests()) {
- requests = builder.getRequests();
- }
- if (builder.hasAverageLoad()) {
- averageLoad = builder.getAverageLoad();
- }
- for (StorageClusterStatus.Node node: builder.getLiveNodesList()) {
- long startCode = node.hasStartCode() ? node.getStartCode() : -1;
- StorageClusterStatusModel.Node nodeModel =
- addLiveNode(node.getName(), startCode, node.getHeapSizeMB(),
- node.getMaxHeapSizeMB());
- int requests = node.hasRequests() ? node.getRequests() : 0;
- nodeModel.setRequests(requests);
- for (StorageClusterStatus.Region region: node.getRegionsList()) {
- nodeModel.addRegion(
- region.getName().toByteArray(),
- region.getStores(),
- region.getStorefiles(),
- region.getStorefileSizeMB(),
- region.getMemstoreSizeMB(),
- region.getStorefileIndexSizeMB(),
- region.getReadRequestsCount(),
- region.getWriteRequestsCount(),
- region.getRootIndexSizeKB(),
- region.getTotalStaticIndexSizeKB(),
- region.getTotalStaticBloomSizeKB(),
- region.getTotalCompactingKVs(),
- region.getCurrentCompactedKVs());
- }
- }
- for (String node: builder.getDeadNodesList()) {
- addDeadNode(node);
- }
- return this;
- }
-}
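For reference, a minimal sketch (illustrative only, not part of the patch) of the protobuf round trip this model supports, using only the accessors visible above; the server name, start code, and sizes are made-up values:

import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;

public class StatusModelRoundTrip {
  public static void main(String[] args) throws Exception {
    StorageClusterStatusModel status = new StorageClusterStatusModel();
    status.setRegions(2);
    status.setRequests(0);
    status.setAverageLoad(1.0);
    // addLiveNode returns the new Node so per-node fields can be filled in.
    StorageClusterStatusModel.Node node =
        status.addLiveNode("test1", 1245219839331L, 128, 1024);
    node.setRequests(0);
    status.addDeadNode("test3");
    // Serialize to the StorageClusterStatus protobuf message and parse it back.
    byte[] pb = status.createProtobufOutput();
    StorageClusterStatusModel copy = new StorageClusterStatusModel();
    copy.getObjectFromMessage(pb);  // throws IOException on a malformed message
    System.out.print(copy);         // human-readable summary via toString()
  }
}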
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java
deleted file mode 100644
index 4321a8e..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import org.codehaus.jackson.annotate.JsonValue;
-
-import java.io.Serializable;
-
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.bind.annotation.XmlValue;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * Simple representation of the version of the storage cluster
- *
- * <pre>
- * <complexType name="StorageClusterVersion">
- * <attribute name="version" type="string"></attribute>
- * </complexType>
- * </pre>
- */
-@XmlRootElement(name="ClusterVersion")
-@InterfaceAudience.Private
-public class StorageClusterVersionModel implements Serializable {
- private static final long serialVersionUID = 1L;
-
- private String version;
-
- /**
- * @return the storage cluster version
- */
- @XmlValue
- public String getVersion() {
- return version;
- }
-
- /**
- * @param version the storage cluster version
- */
- public void setVersion(String version) {
- this.version = version;
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#toString()
- */
- @JsonValue
- @Override
- public String toString() {
- return version;
- }
-
- // needed for Jackson deserialization
- private static StorageClusterVersionModel valueOf(String value) {
- StorageClusterVersionModel versionModel
- = new StorageClusterVersionModel();
- versionModel.setVersion(value);
- return versionModel;
- }
-}
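Because getVersion() is annotated @XmlValue, JAXB renders the version as the element's text content rather than as an attribute. A minimal sketch (illustrative, not part of the patch; the version string is made up):

import java.io.StringWriter;

import javax.xml.bind.JAXBContext;

import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel;

public class ClusterVersionXml {
  public static void main(String[] args) throws Exception {
    StorageClusterVersionModel model = new StorageClusterVersionModel();
    model.setVersion("0.98.7");  // illustrative value
    StringWriter out = new StringWriter();
    JAXBContext.newInstance(StorageClusterVersionModel.class)
        .createMarshaller().marshal(model, out);
    System.out.println(out);  // expected shape: <ClusterVersion>0.98.7</ClusterVersion>
  }
}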
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java
deleted file mode 100644
index 700e766..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
-
-import javax.xml.bind.annotation.XmlAttribute;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-import org.apache.hadoop.hbase.util.ByteStringer;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
-import org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo;
-
-/**
- * Representation of a list of table regions.
- *
- * <pre>
- * <complexType name="TableInfo">
- * <sequence>
- * <element name="region" type="tns:TableRegion"
- * maxOccurs="unbounded" minOccurs="1"></element>
- * </sequence>
- * <attribute name="name" type="string"></attribute>
- * </complexType>
- * </pre>
- */
-@XmlRootElement(name="TableInfo")
-@InterfaceAudience.Private
-public class TableInfoModel implements Serializable, ProtobufMessageHandler {
- private static final long serialVersionUID = 1L;
-
- private String name;
- private List<TableRegionModel> regions = new ArrayList<TableRegionModel>();
-
- /**
- * Default constructor
- */
- public TableInfoModel() {}
-
- /**
- * Constructor
- * @param name the table name
- */
- public TableInfoModel(String name) {
- this.name = name;
- }
-
- /**
- * Add a region model to the list
- * @param region the region
- */
- public void add(TableRegionModel region) {
- regions.add(region);
- }
-
- /**
- * @param index the index
- * @return the region model
- */
- public TableRegionModel get(int index) {
- return regions.get(index);
- }
-
- /**
- * @return the table name
- */
- @XmlAttribute
- public String getName() {
- return name;
- }
-
- /**
- * @return the regions
- */
- @XmlElement(name="Region")
- public List<TableRegionModel> getRegions() {
- return regions;
- }
-
- /**
- * @param name the table name
- */
- public void setName(String name) {
- this.name = name;
- }
-
- /**
- * @param regions the regions to set
- */
- public void setRegions(List<TableRegionModel> regions) {
- this.regions = regions;
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
- StringBuilder sb = new StringBuilder();
- for(TableRegionModel aRegion : regions) {
- sb.append(aRegion.toString());
- sb.append('\n');
- }
- return sb.toString();
- }
-
- @Override
- public byte[] createProtobufOutput() {
- TableInfo.Builder builder = TableInfo.newBuilder();
- builder.setName(name);
- for (TableRegionModel aRegion: regions) {
- TableInfo.Region.Builder regionBuilder = TableInfo.Region.newBuilder();
- regionBuilder.setName(aRegion.getName());
- regionBuilder.setId(aRegion.getId());
- regionBuilder.setStartKey(ByteStringer.wrap(aRegion.getStartKey()));
- regionBuilder.setEndKey(ByteStringer.wrap(aRegion.getEndKey()));
- regionBuilder.setLocation(aRegion.getLocation());
- builder.addRegions(regionBuilder);
- }
- return builder.build().toByteArray();
- }
-
- @Override
- public ProtobufMessageHandler getObjectFromMessage(byte[] message)
- throws IOException {
- TableInfo.Builder builder = TableInfo.newBuilder();
- builder.mergeFrom(message);
- setName(builder.getName());
- for (TableInfo.Region region: builder.getRegionsList()) {
- add(new TableRegionModel(builder.getName(), region.getId(),
- region.getStartKey().toByteArray(),
- region.getEndKey().toByteArray(),
- region.getLocation()));
- }
- return this;
- }
-}
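A minimal usage sketch (illustrative, not part of the patch): populate the model, serialize to the TableInfo protobuf, and parse it back. The table name, keys, and location are made-up values; TableRegionModel appears in a later hunk of this same commit.

import org.apache.hadoop.hbase.rest.model.TableInfoModel;
import org.apache.hadoop.hbase.rest.model.TableRegionModel;
import org.apache.hadoop.hbase.util.Bytes;

public class TableInfoSketch {
  public static void main(String[] args) throws Exception {
    TableInfoModel info = new TableInfoModel("example_table");
    info.add(new TableRegionModel("example_table", 1396969573187L,
        Bytes.toBytes("aaa"), Bytes.toBytes("mmm"), "host1:60020"));
    byte[] pb = info.createProtobufOutput();  // TableInfo protobuf bytes
    TableInfoModel copy = new TableInfoModel();
    copy.getObjectFromMessage(pb);            // restores the name and region list
    System.out.print(copy);                   // one region per line via toString()
  }
}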
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java
deleted file mode 100644
index 596adac..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
-
-import javax.xml.bind.annotation.XmlElementRef;
-import javax.xml.bind.annotation.XmlRootElement;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
-import org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList;
-
-/**
- * Simple representation of a list of table names.
- */
-@XmlRootElement(name="TableList")
-@InterfaceAudience.Private
-public class TableListModel implements Serializable, ProtobufMessageHandler {
-
- private static final long serialVersionUID = 1L;
-
- private List<TableModel> tables = new ArrayList<TableModel>();
-
- /**
- * Default constructor
- */
- public TableListModel() {}
-
- /**
- * Add the table name model to the list
- * @param table the table model
- */
- public void add(TableModel table) {
- tables.add(table);
- }
-
- /**
- * @param index the index
- * @return the table model
- */
- public TableModel get(int index) {
- return tables.get(index);
- }
-
- /**
- * @return the tables
- */
- @XmlElementRef(name="table")
- public List<TableModel> getTables() {
- return tables;
- }
-
- /**
- * @param tables the tables to set
- */
- public void setTables(List<TableModel> tables) {
- this.tables = tables;
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
- StringBuilder sb = new StringBuilder();
- for(TableModel aTable : tables) {
- sb.append(aTable.toString());
- sb.append('\n');
- }
- return sb.toString();
- }
-
- @Override
- public byte[] createProtobufOutput() {
- TableList.Builder builder = TableList.newBuilder();
- for (TableModel aTable : tables) {
- builder.addName(aTable.getName());
- }
- return builder.build().toByteArray();
- }
-
- @Override
- public ProtobufMessageHandler getObjectFromMessage(byte[] message)
- throws IOException {
- TableList.Builder builder = TableList.newBuilder();
- builder.mergeFrom(message);
- for (String table: builder.getNameList()) {
- this.add(new TableModel(table));
- }
- return this;
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java
deleted file mode 100644
index 0fb0d6e..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import java.io.Serializable;
-
-import javax.xml.bind.annotation.XmlAttribute;
-import javax.xml.bind.annotation.XmlRootElement;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * Simple representation of a table name.
- *
- * <pre>
- * <complexType name="Table">
- * <sequence>
- * <element name="name" type="string"></element>
- * </sequence>
- * </complexType>
- * </pre>
- */
-@XmlRootElement(name="table")
-@InterfaceAudience.Private
-public class TableModel implements Serializable {
-
- private static final long serialVersionUID = 1L;
-
- private String name;
-
- /**
- * Default constructor
- */
- public TableModel() {}
-
- /**
- * Constructor
- * @param name the table name
- */
- public TableModel(String name) {
- super();
- this.name = name;
- }
-
- /**
- * @return the name
- */
- @XmlAttribute
- public String getName() {
- return name;
- }
-
- /**
- * @param name the name to set
- */
- public void setName(String name) {
- this.name = name;
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
- return this.name;
- }
-}
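The two models above compose: TableListModel wraps a list of TableModel entries and round-trips them through the TableList protobuf, which carries only the names. A minimal sketch (illustrative, not part of the patch; table names are made up):

import org.apache.hadoop.hbase.rest.model.TableListModel;
import org.apache.hadoop.hbase.rest.model.TableModel;

public class TableListSketch {
  public static void main(String[] args) throws Exception {
    TableListModel list = new TableListModel();
    list.add(new TableModel("t1"));
    list.add(new TableModel("t2"));
    byte[] pb = list.createProtobufOutput();
    TableListModel copy = new TableListModel();
    copy.getObjectFromMessage(pb);   // rebuilds TableModel entries from the names
    System.out.print(copy);          // prints "t1" and "t2", one per line
  }
}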
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java
deleted file mode 100644
index d9b2b65..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import java.io.Serializable;
-
-import javax.xml.bind.annotation.XmlAttribute;
-import javax.xml.bind.annotation.XmlRootElement;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * Representation of a region of a table and its current location on the
- * storage cluster.
- *
- * <pre>
- * <complexType name="TableRegion">
- * <attribute name="name" type="string"></attribute>
- * <attribute name="id" type="int"></attribute>
- * <attribute name="startKey" type="base64Binary"></attribute>
- * <attribute name="endKey" type="base64Binary"></attribute>
- * <attribute name="location" type="string"></attribute>
- * </complexType>
- * </pre>
- */
-@XmlRootElement(name="Region")
-@InterfaceAudience.Private
-public class TableRegionModel implements Serializable {
-
- private static final long serialVersionUID = 1L;
-
- private String table;
- private long id;
- private byte[] startKey;
- private byte[] endKey;
- private String location;
-
- /**
- * Constructor
- */
- public TableRegionModel() {}
-
- /**
- * Constructor
- * @param table the table name
- * @param id the encoded id of the region
- * @param startKey the start key of the region
- * @param endKey the end key of the region
- */
- public TableRegionModel(String table, long id, byte[] startKey,
- byte[] endKey) {
- this(table, id, startKey, endKey, null);
- }
-
- /**
- * Constructor
- * @param table the table name
- * @param id the encoded id of the region
- * @param startKey the start key of the region
- * @param endKey the end key of the region
- * @param location the name and port of the region server hosting the region
- */
- public TableRegionModel(String table, long id, byte[] startKey,
- byte[] endKey, String location) {
- this.table = table;
- this.id = id;
- this.startKey = startKey;
- this.endKey = endKey;
- this.location = location;
- }
-
- /**
- * @return the region name
- */
- @XmlAttribute
- public String getName() {
- byte [] tableNameAsBytes = Bytes.toBytes(this.table);
- TableName tableName = TableName.valueOf(tableNameAsBytes);
- byte [] nameAsBytes = HRegionInfo.createRegionName(
- tableName, this.startKey, this.id, !tableName.isSystemTable());
- return Bytes.toString(nameAsBytes);
- }
-
- /**
- * @return the encoded region id
- */
- @XmlAttribute
- public long getId() {
- return id;
- }
-
- /**
- * @return the start key
- */
- @XmlAttribute
- public byte[] getStartKey() {
- return startKey;
- }
-
- /**
- * @return the end key
- */
- @XmlAttribute
- public byte[] getEndKey() {
- return endKey;
- }
-
- /**
- * @return the name and port of the region server hosting the region
- */
- @XmlAttribute
- public String getLocation() {
- return location;
- }
-
- /**
- * @param name region printable name
- */
- public void setName(String name) {
- String split[] = name.split(",");
- this.table = split[0];
- this.startKey = Bytes.toBytes(split[1]);
- String tail = split[2];
- split = tail.split("\\.");
- id = Long.valueOf(split[0]);
- }
-
- /**
- * @param id the region's encoded id
- */
- public void setId(long id) {
- this.id = id;
- }
-
- /**
- * @param startKey the start key
- */
- public void setStartKey(byte[] startKey) {
- this.startKey = startKey;
- }
-
- /**
- * @param endKey the end key
- */
- public void setEndKey(byte[] endKey) {
- this.endKey = endKey;
- }
-
- /**
- * @param location the name and port of the region server hosting the region
- */
- public void setLocation(String location) {
- this.location = location;
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
- StringBuilder sb = new StringBuilder();
- sb.append(getName());
- sb.append(" [\n id=");
- sb.append(id);
- sb.append("\n startKey='");
- sb.append(Bytes.toString(startKey));
- sb.append("'\n endKey='");
- sb.append(Bytes.toString(endKey));
- if (location != null) {
- sb.append("'\n location='");
- sb.append(location);
- }
- sb.append("'\n]\n");
- return sb.toString();
- }
-}
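Note the asymmetry above: getName() reconstructs the full region name through HRegionInfo.createRegionName(), while setName() parses a printable name of the form <table>,<startKey>,<id>.<encodedName>. by splitting on commas and dots. A minimal sketch (illustrative, not part of the patch; all values are made up):

import org.apache.hadoop.hbase.rest.model.TableRegionModel;

public class RegionNameParse {
  public static void main(String[] args) {
    TableRegionModel region = new TableRegionModel();
    region.setName(
        "example_table,aaa,1396969573187.0123456789abcdef0123456789abcdef.");
    System.out.println(region.getId());  // 1396969573187
    // Caveat: the split(",") parse assumes the start key contains no commas,
    // so arbitrary binary keys may not survive this round trip.
  }
}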
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java
deleted file mode 100644
index d843e79..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java
+++ /dev/null
@@ -1,361 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.LinkedHashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-
-import javax.xml.bind.annotation.XmlAnyAttribute;
-import javax.xml.bind.annotation.XmlAttribute;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.namespace.QName;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
-import org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema;
-import org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.codehaus.jackson.annotate.JsonAnyGetter;
-import org.codehaus.jackson.annotate.JsonAnySetter;
-import org.codehaus.jackson.annotate.JsonIgnore;
-
-/**
- * A representation of HBase table descriptors.
- *
- * <pre>
- * <complexType name="TableSchema">
- * <sequence>
- * <element name="column" type="tns:ColumnSchema"
- * maxOccurs="unbounded" minOccurs="1"></element>
- * </sequence>
- * <attribute name="name" type="string"></attribute>
- * <anyAttribute></anyAttribute>
- * </complexType>
- * </pre>
- */
-@XmlRootElement(name="TableSchema")
-@InterfaceAudience.Private
-public class TableSchemaModel implements Serializable, ProtobufMessageHandler {
- private static final long serialVersionUID = 1L;
- private static final QName IS_META = new QName(HTableDescriptor.IS_META);
- private static final QName IS_ROOT = new QName(HTableDescriptor.IS_ROOT);
- private static final QName READONLY = new QName(HTableDescriptor.READONLY);
- private static final QName TTL = new QName(HColumnDescriptor.TTL);
- private static final QName VERSIONS = new QName(HConstants.VERSIONS);
- private static final QName COMPRESSION =
- new QName(HColumnDescriptor.COMPRESSION);
-
- private String name;
- private Map<QName,Object> attrs = new LinkedHashMap<QName,Object>();
- private List<ColumnSchemaModel> columns = new ArrayList<ColumnSchemaModel>();
-
- /**
- * Default constructor.
- */
- public TableSchemaModel() {}
-
- /**
- * Constructor
- * @param htd the table descriptor
- */
- public TableSchemaModel(HTableDescriptor htd) {
- setName(htd.getTableName().getNameAsString());
- for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
- htd.getValues().entrySet()) {
- addAttribute(Bytes.toString(e.getKey().get()),
- Bytes.toString(e.getValue().get()));
- }
- for (HColumnDescriptor hcd: htd.getFamilies()) {
- ColumnSchemaModel columnModel = new ColumnSchemaModel();
- columnModel.setName(hcd.getNameAsString());
- for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
- hcd.getValues().entrySet()) {
- columnModel.addAttribute(Bytes.toString(e.getKey().get()),
- Bytes.toString(e.getValue().get()));
- }
- addColumnFamily(columnModel);
- }
- }
-
- /**
- * Add an attribute to the table descriptor
- * @param name attribute name
- * @param value attribute value
- */
- @JsonAnySetter
- public void addAttribute(String name, Object value) {
- attrs.put(new QName(name), value);
- }
-
- /**
- * Return a table descriptor value as a string. Calls toString() on the
- * object stored in the descriptor value map.
- * @param name the attribute name
- * @return the attribute value
- */
- public String getAttribute(String name) {
- Object o = attrs.get(new QName(name));
- return o != null ? o.toString() : null;
- }
-
- /**
- * Add a column family to the table descriptor
- * @param family the column family model
- */
- public void addColumnFamily(ColumnSchemaModel family) {
- columns.add(family);
- }
-
- /**
- * Retrieve the column family at the given index from the table descriptor
- * @param index the index
- * @return the column family model
- */
- public ColumnSchemaModel getColumnFamily(int index) {
- return columns.get(index);
- }
-
- /**
- * @return the table name
- */
- @XmlAttribute
- public String getName() {
- return name;
- }
-
- /**
- * @return the map for holding unspecified (user) attributes
- */
- @XmlAnyAttribute
- @JsonAnyGetter
- public Map<QName,Object> getAny() {
- return attrs;
- }
-
- /**
- * @return the columns
- */
- @XmlElement(name="ColumnSchema")
- public List<ColumnSchemaModel> getColumns() {
- return columns;
- }
-
- /**
- * @param name the table name
- */
- public void setName(String name) {
- this.name = name;
- }
-
- /**
- * @param columns the columns to set
- */
- public void setColumns(List<ColumnSchemaModel> columns) {
- this.columns = columns;
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
- StringBuilder sb = new StringBuilder();
- sb.append("{ NAME=> '");
- sb.append(name);
- sb.append('\'');
- for (Map.Entry<QName,Object> e: attrs.entrySet()) {
- sb.append(", ");
- sb.append(e.getKey().getLocalPart());
- sb.append(" => '");
- sb.append(e.getValue().toString());
- sb.append('\'');
- }
- sb.append(", COLUMNS => [ ");
- Iterator<ColumnSchemaModel> i = columns.iterator();
- while (i.hasNext()) {
- ColumnSchemaModel family = i.next();
- sb.append(family.toString());
- if (i.hasNext()) {
- sb.append(',');
- }
- sb.append(' ');
- }
- sb.append("] }");
- return sb.toString();
- }
-
- // getters and setters for common schema attributes
-
- // cannot be standard bean type getters and setters, otherwise this would
- // confuse JAXB
-
- /**
- * @return true if the IS_META attribute exists and is true
- */
- public boolean __getIsMeta() {
- Object o = attrs.get(IS_META);
- return o != null ? Boolean.valueOf(o.toString()) : false;
- }
-
- /**
- * @return true if the IS_ROOT attribute exists and is true
- */
- public boolean __getIsRoot() {
- Object o = attrs.get(IS_ROOT);
- return o != null ? Boolean.valueOf(o.toString()) : false;
- }
-
- /**
- * @return true if the READONLY attribute exists and is true
- */
- public boolean __getReadOnly() {
- Object o = attrs.get(READONLY);
- return o != null ?
- Boolean.valueOf(o.toString()) : HTableDescriptor.DEFAULT_READONLY;
- }
-
- /**
- * @param value desired value of IS_META attribute
- */
- public void __setIsMeta(boolean value) {
- attrs.put(IS_META, Boolean.toString(value));
- }
-
- /**
- * @param value desired value of IS_ROOT attribute
- */
- public void __setIsRoot(boolean value) {
- attrs.put(IS_ROOT, Boolean.toString(value));
- }
-
- /**
- * @param value desired value of READONLY attribute
- */
- public void __setReadOnly(boolean value) {
- attrs.put(READONLY, Boolean.toString(value));
- }
-
- @Override
- public byte[] createProtobufOutput() {
- TableSchema.Builder builder = TableSchema.newBuilder();
- builder.setName(name);
- for (Map.Entry<QName, Object> e: attrs.entrySet()) {
- TableSchema.Attribute.Builder attrBuilder =
- TableSchema.Attribute.newBuilder();
- attrBuilder.setName(e.getKey().getLocalPart());
- attrBuilder.setValue(e.getValue().toString());
- builder.addAttrs(attrBuilder);
- }
- for (ColumnSchemaModel family: columns) {
- Map<QName, Object> familyAttrs = family.getAny();
- ColumnSchema.Builder familyBuilder = ColumnSchema.newBuilder();
- familyBuilder.setName(family.getName());
- for (Map.Entry<QName, Object> e: familyAttrs.entrySet()) {
- ColumnSchema.Attribute.Builder attrBuilder =
- ColumnSchema.Attribute.newBuilder();
- attrBuilder.setName(e.getKey().getLocalPart());
- attrBuilder.setValue(e.getValue().toString());
- familyBuilder.addAttrs(attrBuilder);
- }
- if (familyAttrs.containsKey(TTL)) {
- familyBuilder.setTtl(
- Integer.valueOf(familyAttrs.get(TTL).toString()));
- }
- if (familyAttrs.containsKey(VERSIONS)) {
- familyBuilder.setMaxVersions(
- Integer.valueOf(familyAttrs.get(VERSIONS).toString()));
- }
- if (familyAttrs.containsKey(COMPRESSION)) {
- familyBuilder.setCompression(familyAttrs.get(COMPRESSION).toString());
- }
- builder.addColumns(familyBuilder);
- }
- if (attrs.containsKey(READONLY)) {
- builder.setReadOnly(
- Boolean.valueOf(attrs.get(READONLY).toString()));
- }
- return builder.build().toByteArray();
- }
-
- @Override
- public ProtobufMessageHandler getObjectFromMessage(byte[] message)
- throws IOException {
- TableSchema.Builder builder = TableSchema.newBuilder();
- builder.mergeFrom(message);
- this.setName(builder.getName());
- for (TableSchema.Attribute attr: builder.getAttrsList()) {
- this.addAttribute(attr.getName(), attr.getValue());
- }
- if (builder.hasReadOnly()) {
- this.addAttribute(HTableDescriptor.READONLY, builder.getReadOnly());
- }
- for (ColumnSchema family: builder.getColumnsList()) {
- ColumnSchemaModel familyModel = new ColumnSchemaModel();
- familyModel.setName(family.getName());
- for (ColumnSchema.Attribute attr: family.getAttrsList()) {
- familyModel.addAttribute(attr.getName(), attr.getValue());
- }
- if (family.hasTtl()) {
- familyModel.addAttribute(HColumnDescriptor.TTL, family.getTtl());
- }
- if (family.hasMaxVersions()) {
- familyModel.addAttribute(HConstants.VERSIONS,
- family.getMaxVersions());
- }
- if (family.hasCompression()) {
- familyModel.addAttribute(HColumnDescriptor.COMPRESSION,
- family.getCompression());
- }
- this.addColumnFamily(familyModel);
- }
- return this;
- }
-
- /**
- * @return a table descriptor
- */
- @JsonIgnore
- public HTableDescriptor getTableDescriptor() {
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(getName()));
- for (Map.Entry<QName, Object> e: getAny().entrySet()) {
- htd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
- }
- for (ColumnSchemaModel column: getColumns()) {
- HColumnDescriptor hcd = new HColumnDescriptor(column.getName());
- for (Map.Entry<QName, Object> e: column.getAny().entrySet()) {
- hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
- }
- htd.addFamily(hcd);
- }
- return htd;
- }
-
-}
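A minimal sketch of the descriptor round trip (illustrative, not part of the patch; table and family names are made up): the TableSchemaModel(HTableDescriptor) constructor copies table- and family-level attributes into the model, and getTableDescriptor() rebuilds a descriptor from them.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.rest.model.TableSchemaModel;

public class SchemaModelSketch {
  public static void main(String[] args) {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example_table"));
    htd.addFamily(new HColumnDescriptor("cf"));
    TableSchemaModel schema = new TableSchemaModel(htd);
    System.out.println(schema);  // shell-style output: { NAME=> 'example_table', ... }
    HTableDescriptor roundTripped = schema.getTableDescriptor();
    System.out.println(roundTripped.getTableName());
  }
}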
[38/38] git commit: HBASE-12197 Move rest to its own module
Posted by ec...@apache.org.
HBASE-12197 Move rest to its own module
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/876617bd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/876617bd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/876617bd
Branch: refs/heads/0.98
Commit: 876617bd32865885293ea6b28d096a3d911419a3
Parents: 386f36d
Author: Elliott Clark <ec...@apache.org>
Authored: Fri Oct 10 09:42:56 2014 -0700
Committer: Elliott Clark <ec...@apache.org>
Committed: Fri Oct 10 09:51:31 2014 -0700
----------------------------------------------------------------------
hbase-rest/pom.xml | 296 ++
.../org/apache/hadoop/hbase/rest/Constants.java | 76 +
.../hadoop/hbase/rest/ExistsResource.java | 75 +
.../apache/hadoop/hbase/rest/MetricsREST.java | 103 +
.../hadoop/hbase/rest/MultiRowResource.java | 108 +
.../hbase/rest/ProtobufMessageHandler.java | 46 +
.../hbase/rest/ProtobufStreamingUtil.java | 102 +
.../apache/hadoop/hbase/rest/RESTServer.java | 253 ++
.../apache/hadoop/hbase/rest/RESTServlet.java | 132 +
.../hadoop/hbase/rest/RESTServletContainer.java | 76 +
.../hadoop/hbase/rest/RegionsResource.java | 104 +
.../apache/hadoop/hbase/rest/ResourceBase.java | 96 +
.../hadoop/hbase/rest/ResourceConfig.java | 31 +
.../hadoop/hbase/rest/ResultGenerator.java | 50 +
.../apache/hadoop/hbase/rest/RootResource.java | 106 +
.../apache/hadoop/hbase/rest/RowResource.java | 598 +++
.../hadoop/hbase/rest/RowResultGenerator.java | 122 +
.../org/apache/hadoop/hbase/rest/RowSpec.java | 407 ++
.../hbase/rest/ScannerInstanceResource.java | 201 +
.../hadoop/hbase/rest/ScannerResource.java | 164 +
.../hbase/rest/ScannerResultGenerator.java | 191 +
.../hadoop/hbase/rest/SchemaResource.java | 246 ++
.../rest/StorageClusterStatusResource.java | 109 +
.../rest/StorageClusterVersionResource.java | 79 +
.../apache/hadoop/hbase/rest/TableResource.java | 180 +
.../hadoop/hbase/rest/TableScanResource.java | 168 +
.../hadoop/hbase/rest/VersionResource.java | 104 +
.../apache/hadoop/hbase/rest/client/Client.java | 525 +++
.../hadoop/hbase/rest/client/Cluster.java | 103 +
.../hadoop/hbase/rest/client/RemoteAdmin.java | 390 ++
.../hadoop/hbase/rest/client/RemoteHTable.java | 825 ++++
.../hadoop/hbase/rest/client/Response.java | 155 +
.../hadoop/hbase/rest/filter/AuthFilter.java | 82 +
.../hbase/rest/filter/GZIPRequestStream.java | 58 +
.../hbase/rest/filter/GZIPRequestWrapper.java | 52 +
.../hbase/rest/filter/GZIPResponseStream.java | 78 +
.../hbase/rest/filter/GZIPResponseWrapper.java | 147 +
.../hadoop/hbase/rest/filter/GzipFilter.java | 85 +
.../hadoop/hbase/rest/model/CellModel.java | 207 +
.../hadoop/hbase/rest/model/CellSetModel.java | 152 +
.../hbase/rest/model/ColumnSchemaModel.java | 241 ++
.../hadoop/hbase/rest/model/RowModel.java | 151 +
.../hadoop/hbase/rest/model/ScannerModel.java | 852 ++++
.../rest/model/StorageClusterStatusModel.java | 790 ++++
.../rest/model/StorageClusterVersionModel.java | 78 +
.../hadoop/hbase/rest/model/TableInfoModel.java | 159 +
.../hadoop/hbase/rest/model/TableListModel.java | 113 +
.../hadoop/hbase/rest/model/TableModel.java | 84 +
.../hbase/rest/model/TableRegionModel.java | 196 +
.../hbase/rest/model/TableSchemaModel.java | 361 ++
.../hadoop/hbase/rest/model/VersionModel.java | 209 +
.../org/apache/hadoop/hbase/rest/package.html | 1660 ++++++++
.../rest/protobuf/generated/CellMessage.java | 731 ++++
.../rest/protobuf/generated/CellSetMessage.java | 1521 +++++++
.../protobuf/generated/ColumnSchemaMessage.java | 1904 +++++++++
.../rest/protobuf/generated/ScannerMessage.java | 1578 +++++++
.../generated/StorageClusterStatusMessage.java | 3955 ++++++++++++++++++
.../protobuf/generated/TableInfoMessage.java | 1802 ++++++++
.../protobuf/generated/TableListMessage.java | 547 +++
.../protobuf/generated/TableSchemaMessage.java | 2125 ++++++++++
.../rest/protobuf/generated/VersionMessage.java | 1147 +++++
.../rest/provider/JAXBContextResolver.java | 89 +
.../hbase/rest/provider/JacksonProvider.java | 31 +
.../consumer/ProtobufMessageBodyConsumer.java | 88 +
.../producer/PlainTextMessageBodyProducer.java | 74 +
.../producer/ProtobufMessageBodyProducer.java | 81 +
.../resources/hbase-webapps/rest/index.html | 20 +
.../main/resources/hbase-webapps/rest/rest.jsp | 117 +
.../org/apache/hadoop/hbase/rest/XMLSchema.xsd | 181 +
.../hbase/rest/protobuf/CellMessage.proto | 25 +
.../hbase/rest/protobuf/CellSetMessage.proto | 28 +
.../rest/protobuf/ColumnSchemaMessage.proto | 31 +
.../hbase/rest/protobuf/ScannerMessage.proto | 32 +
.../protobuf/StorageClusterStatusMessage.proto | 51 +
.../hbase/rest/protobuf/TableInfoMessage.proto | 30 +
.../hbase/rest/protobuf/TableListMessage.proto | 22 +
.../rest/protobuf/TableSchemaMessage.proto | 33 +
.../hbase/rest/protobuf/VersionMessage.proto | 26 +
.../apache/hadoop/hbase/rest/DummyFilter.java | 64 +
.../hbase/rest/HBaseRESTTestingUtility.java | 98 +
.../hbase/rest/PerformanceEvaluation.java | 1520 +++++++
.../hadoop/hbase/rest/RowResourceBase.java | 481 +++
.../apache/hadoop/hbase/rest/TestDeleteRow.java | 99 +
.../hbase/rest/TestGZIPResponseWrapper.java | 117 +
.../hbase/rest/TestGetAndPutResource.java | 585 +++
.../hadoop/hbase/rest/TestGzipFilter.java | 161 +
.../hadoop/hbase/rest/TestMultiRowResource.java | 181 +
.../hadoop/hbase/rest/TestResourceFilter.java | 61 +
.../hadoop/hbase/rest/TestScannerResource.java | 354 ++
.../hbase/rest/TestScannersWithFilters.java | 999 +++++
.../hbase/rest/TestScannersWithLabels.java | 238 ++
.../hadoop/hbase/rest/TestSchemaResource.java | 191 +
.../hadoop/hbase/rest/TestStatusResource.java | 117 +
.../hadoop/hbase/rest/TestTableResource.java | 262 ++
.../apache/hadoop/hbase/rest/TestTableScan.java | 508 +++
.../hadoop/hbase/rest/TestVersionResource.java | 179 +
.../rest/client/TestRemoteAdminRetries.java | 165 +
.../rest/client/TestRemoteHTableRetries.java | 193 +
.../hbase/rest/client/TestRemoteTable.java | 538 +++
.../hadoop/hbase/rest/model/TestCellModel.java | 84 +
.../hbase/rest/model/TestCellSetModel.java | 146 +
.../hbase/rest/model/TestColumnSchemaModel.java | 86 +
.../hadoop/hbase/rest/model/TestModelBase.java | 134 +
.../hadoop/hbase/rest/model/TestRowModel.java | 79 +
.../hbase/rest/model/TestScannerModel.java | 109 +
.../model/TestStorageClusterStatusModel.java | 145 +
.../model/TestStorageClusterVersionModel.java | 60 +
.../hbase/rest/model/TestTableInfoModel.java | 96 +
.../hbase/rest/model/TestTableListModel.java | 73 +
.../hbase/rest/model/TestTableRegionModel.java | 93 +
.../hbase/rest/model/TestTableSchemaModel.java | 117 +
.../hbase/rest/model/TestVersionModel.java | 80 +
hbase-rest/src/test/resources/hbase-site.xml | 150 +
hbase-rest/src/test/resources/hdfs-site.xml | 32 +
hbase-rest/src/test/resources/log4j.properties | 66 +
hbase-rest/src/test/resources/mapred-queues.xml | 75 +
hbase-rest/src/test/resources/mapred-site.xml | 34 +
hbase-rest/src/test/resources/zoo.cfg | 43 +
hbase-server/pom.xml | 23 -
.../org/apache/hadoop/hbase/rest/Constants.java | 76 -
.../hadoop/hbase/rest/ExistsResource.java | 75 -
.../apache/hadoop/hbase/rest/MetricsREST.java | 103 -
.../hadoop/hbase/rest/MultiRowResource.java | 108 -
.../hbase/rest/ProtobufMessageHandler.java | 46 -
.../hbase/rest/ProtobufStreamingUtil.java | 102 -
.../apache/hadoop/hbase/rest/RESTServer.java | 253 --
.../apache/hadoop/hbase/rest/RESTServlet.java | 132 -
.../hadoop/hbase/rest/RESTServletContainer.java | 76 -
.../hadoop/hbase/rest/RegionsResource.java | 104 -
.../apache/hadoop/hbase/rest/ResourceBase.java | 96 -
.../hadoop/hbase/rest/ResourceConfig.java | 31 -
.../hadoop/hbase/rest/ResultGenerator.java | 50 -
.../apache/hadoop/hbase/rest/RootResource.java | 106 -
.../apache/hadoop/hbase/rest/RowResource.java | 598 ---
.../hadoop/hbase/rest/RowResultGenerator.java | 122 -
.../org/apache/hadoop/hbase/rest/RowSpec.java | 407 --
.../hbase/rest/ScannerInstanceResource.java | 201 -
.../hadoop/hbase/rest/ScannerResource.java | 164 -
.../hbase/rest/ScannerResultGenerator.java | 191 -
.../hadoop/hbase/rest/SchemaResource.java | 246 --
.../rest/StorageClusterStatusResource.java | 109 -
.../rest/StorageClusterVersionResource.java | 79 -
.../apache/hadoop/hbase/rest/TableResource.java | 180 -
.../hadoop/hbase/rest/TableScanResource.java | 168 -
.../hadoop/hbase/rest/VersionResource.java | 104 -
.../apache/hadoop/hbase/rest/client/Client.java | 525 ---
.../hadoop/hbase/rest/client/Cluster.java | 103 -
.../hadoop/hbase/rest/client/RemoteAdmin.java | 390 --
.../hadoop/hbase/rest/client/RemoteHTable.java | 825 ----
.../hadoop/hbase/rest/client/Response.java | 155 -
.../hadoop/hbase/rest/filter/AuthFilter.java | 82 -
.../hbase/rest/filter/GZIPRequestStream.java | 58 -
.../hbase/rest/filter/GZIPRequestWrapper.java | 52 -
.../hbase/rest/filter/GZIPResponseStream.java | 78 -
.../hbase/rest/filter/GZIPResponseWrapper.java | 147 -
.../hadoop/hbase/rest/filter/GzipFilter.java | 85 -
.../hadoop/hbase/rest/model/CellModel.java | 207 -
.../hadoop/hbase/rest/model/CellSetModel.java | 152 -
.../hbase/rest/model/ColumnSchemaModel.java | 241 --
.../hadoop/hbase/rest/model/RowModel.java | 151 -
.../hadoop/hbase/rest/model/ScannerModel.java | 852 ----
.../rest/model/StorageClusterStatusModel.java | 790 ----
.../rest/model/StorageClusterVersionModel.java | 78 -
.../hadoop/hbase/rest/model/TableInfoModel.java | 159 -
.../hadoop/hbase/rest/model/TableListModel.java | 113 -
.../hadoop/hbase/rest/model/TableModel.java | 84 -
.../hbase/rest/model/TableRegionModel.java | 196 -
.../hbase/rest/model/TableSchemaModel.java | 361 --
.../hadoop/hbase/rest/model/VersionModel.java | 209 -
.../org/apache/hadoop/hbase/rest/package.html | 1660 --------
.../rest/protobuf/generated/CellMessage.java | 731 ----
.../rest/protobuf/generated/CellSetMessage.java | 1521 -------
.../protobuf/generated/ColumnSchemaMessage.java | 1904 ---------
.../rest/protobuf/generated/ScannerMessage.java | 1578 -------
.../generated/StorageClusterStatusMessage.java | 3955 ------------------
.../protobuf/generated/TableInfoMessage.java | 1802 --------
.../protobuf/generated/TableListMessage.java | 547 ---
.../protobuf/generated/TableSchemaMessage.java | 2125 ----------
.../rest/protobuf/generated/VersionMessage.java | 1147 -----
.../rest/provider/JAXBContextResolver.java | 89 -
.../hbase/rest/provider/JacksonProvider.java | 31 -
.../consumer/ProtobufMessageBodyConsumer.java | 88 -
.../producer/PlainTextMessageBodyProducer.java | 74 -
.../producer/ProtobufMessageBodyProducer.java | 81 -
.../apache/hadoop/hbase/rest/DummyFilter.java | 64 -
.../hbase/rest/HBaseRESTTestingUtility.java | 98 -
.../hbase/rest/PerformanceEvaluation.java | 1520 -------
.../hadoop/hbase/rest/RowResourceBase.java | 481 ---
.../apache/hadoop/hbase/rest/TestDeleteRow.java | 99 -
.../hbase/rest/TestGZIPResponseWrapper.java | 117 -
.../hbase/rest/TestGetAndPutResource.java | 585 ---
.../hadoop/hbase/rest/TestGzipFilter.java | 161 -
.../hadoop/hbase/rest/TestMultiRowResource.java | 181 -
.../hadoop/hbase/rest/TestResourceFilter.java | 61 -
.../hadoop/hbase/rest/TestScannerResource.java | 354 --
.../hbase/rest/TestScannersWithFilters.java | 999 -----
.../hbase/rest/TestScannersWithLabels.java | 238 --
.../hadoop/hbase/rest/TestSchemaResource.java | 191 -
.../hadoop/hbase/rest/TestStatusResource.java | 117 -
.../hadoop/hbase/rest/TestTableResource.java | 262 --
.../apache/hadoop/hbase/rest/TestTableScan.java | 508 ---
.../hadoop/hbase/rest/TestVersionResource.java | 179 -
.../rest/client/TestRemoteAdminRetries.java | 165 -
.../rest/client/TestRemoteHTableRetries.java | 193 -
.../hbase/rest/client/TestRemoteTable.java | 538 ---
.../hadoop/hbase/rest/model/TestCellModel.java | 84 -
.../hbase/rest/model/TestCellSetModel.java | 146 -
.../hbase/rest/model/TestColumnSchemaModel.java | 86 -
.../hadoop/hbase/rest/model/TestModelBase.java | 134 -
.../hadoop/hbase/rest/model/TestRowModel.java | 79 -
.../hbase/rest/model/TestScannerModel.java | 109 -
.../model/TestStorageClusterStatusModel.java | 145 -
.../model/TestStorageClusterVersionModel.java | 60 -
.../hbase/rest/model/TestTableInfoModel.java | 96 -
.../hbase/rest/model/TestTableListModel.java | 73 -
.../hbase/rest/model/TestTableRegionModel.java | 93 -
.../hbase/rest/model/TestTableSchemaModel.java | 117 -
.../hbase/rest/model/TestVersionModel.java | 80 -
pom.xml | 25 +-
219 files changed, 37077 insertions(+), 35795 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-rest/pom.xml b/hbase-rest/pom.xml
new file mode 100644
index 0000000..583492e
--- /dev/null
+++ b/hbase-rest/pom.xml
@@ -0,0 +1,296 @@
+<?xml version="1.0"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <artifactId>hbase</artifactId>
+ <groupId>org.apache.hbase</groupId>
+ <version>0.98.7</version>
+ <relativePath>..</relativePath>
+ </parent>
+ <artifactId>hbase-rest</artifactId>
+ <name>HBase - Rest</name>
+ <description>HBase Rest Server</description>
+
+ <build>
+ <!-- Makes sure the resources get added before they are processed
+ by placing this first -->
+ <resources>
+ <!-- Add the built webapps to the classpath -->
+ <resource>
+ <directory>${project.build.directory}</directory>
+ <includes>
+ <include>hbase-webapps/**</include>
+ </includes>
+ </resource>
+ </resources>
+ <testResources>
+ <testResource>
+ <directory>src/test/resources</directory>
+ <includes>
+ <include>**/**</include>
+ </includes>
+ </testResource>
+ </testResources>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-site-plugin</artifactId>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+ <plugin>
+ <!--Make it so assembly:single does nothing in here-->
+ <artifactId>maven-assembly-plugin</artifactId>
+ <version>${maven.assembly.version}</version>
+ <configuration>
+ <skipAssembly>true</skipAssembly>
+ </configuration>
+ </plugin>
+ <!-- Make a jar and put the sources in the jar -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-source-plugin</artifactId>
+ </plugin>
+
+ <!-- General ant tasks, bound to different build phases -->
+ <plugin>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <executions>
+ <!-- Generate web app sources -->
+ <execution>
+ <id>generate</id>
+ <phase>generate-sources</phase>
+ <configuration>
+ <target>
+ <property name="build.webapps" location="${project.build.directory}/hbase-webapps"/>
+ <property name="src.webapps"
+ location="${basedir}/src/main/resources/hbase-webapps"/>
+ <property name="generated.sources"
+ location="${project.build.directory}/generated-sources"/>
+ <mkdir dir="${build.webapps}"/>
+ <copy todir="${build.webapps}">
+ <fileset dir="${src.webapps}">
+ <exclude name="**/*.jsp"/>
+ <exclude name="**/.*"/>
+ <exclude name="**/*~"/>
+ </fileset>
+ </copy>
+ <!--The compile.classpath is passed in by maven -->
+ <taskdef classname="org.apache.jasper.JspC" name="jspcompiler"
+ classpathref="maven.compile.classpath"/>
+ <mkdir dir="${build.webapps}/rest/WEB-INF"/>
+ <jspcompiler uriroot="${src.webapps}/rest" outputdir="${generated.sources}/java"
+ package="org.apache.hadoop.hbase.generated.rest"
+ webxml="${build.webapps}/rest/WEB-INF/web.xml"/>
+ </target>
+ </configuration>
+ <goals>
+ <goal>run</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <executions>
+ <!-- Add the generated sources -->
+ <execution>
+ <id>jspcSource-packageInfo-source</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>add-source</goal>
+ </goals>
+ <configuration>
+ <sources>
+ <source>${project.build.directory}/generated-sources/java</source>
+ </sources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <!-- Testing plugins -->
+ <plugin>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <properties>
+ <property>
+ <name>listener</name>
+ <value>org.apache.hadoop.hbase.ServerResourceCheckerJUnitListener</value>
+ </property>
+ </properties>
+ <systemPropertyVariables>
+ <test.build.webapps>target/test-classes/webapps</test.build.webapps>
+ </systemPropertyVariables>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+
+ <dependencies>
+ <!-- Intra-project dependencies -->
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-common</artifactId>
+ <type>jar</type>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-annotations</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-client</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-server</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-hadoop-compat</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>${compat.module}</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-server</artifactId>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-testing-util</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-annotations</artifactId>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.mortbay.jetty</groupId>
+ <artifactId>jsp-2.1</artifactId>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>tomcat</groupId>
+ <artifactId>jasper-compiler</artifactId>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>tomcat</groupId>
+ <artifactId>jasper-runtime</artifactId>
+ </dependency>
+ <!-- REST dependencies -->
+ <dependency>
+ <groupId>com.google.protobuf</groupId>
+ <artifactId>protobuf-java</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.sun.jersey</groupId>
+ <artifactId>jersey-core</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.sun.jersey</groupId>
+ <artifactId>jersey-json</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.sun.jersey</groupId>
+ <artifactId>jersey-server</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>javax.xml.bind</groupId>
+ <artifactId>jaxb-api</artifactId>
+ </dependency>
+ </dependencies>
+ <profiles>
+ <!-- Skip the tests in this module -->
+ <profile>
+ <id>skipRestTests</id>
+ <activation>
+ <property>
+ <name>skipRestTests</name>
+ </property>
+ </activation>
+ <properties>
+ <surefire.skipFirstPart>true</surefire.skipFirstPart>
+ <surefire.skipSecondPart>true</surefire.skipSecondPart>
+ </properties>
+ </profile>
+ <profile>
+ <id>compile-protobuf</id>
+ <activation>
+ <property>
+ <name>compile-protobuf</name>
+ </property>
+ </activation>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-maven-plugins</artifactId>
+ <executions>
+ <execution>
+ <id>compile-protoc</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>protoc</goal>
+ </goals>
+ <configuration>
+ <imports>
+ <param>${basedir}/src/main/resources/org/apache/hadoop/hbase/rest/protobuf
+ </param>
+ </imports>
+ <source>
+ <!-- These should be under src/main/protobuf -->
+ <directory>${basedir}/src/main/resources/org/apache/hadoop/hbase/rest/protobuf
+ </directory>
+ <includes>
+ <include>CellMessage.proto</include>
+ <include>CellSetMessage.proto</include>
+ <include>ColumnSchemaMessage.proto</include>
+ <include>ScannerMessage.proto</include>
+ <include>StorageClusterStatusMessage.proto</include>
+ <include>TableInfoMessage.proto</include>
+ <include>TableListMessage.proto</include>
+ <include>TableSchemaMessage.proto</include>
+ <include>VersionMessage.proto</include>
+ </includes>
+ </source>
+ <output>${basedir}/src/main/java/</output>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
+ </profiles>
+</project>
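Both profiles above are property-activated, so either can be switched on from the Maven
command line. For example (illustrative invocations; any equivalent way of defining the
activation property works):

  mvn test -DskipRestTests          # skip this module's tests
  mvn compile -Dcompile-protobuf    # regenerate protobuf sources into src/main/java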
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java
new file mode 100644
index 0000000..f3dba9a
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * Common constants for org.apache.hadoop.hbase.rest
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public interface Constants {
+ // All constants in a public interface are 'public static final'
+
+ String VERSION_STRING = "0.0.3";
+
+ int DEFAULT_MAX_AGE = 60 * 60 * 4; // 4 hours
+
+ int DEFAULT_LISTEN_PORT = 8080;
+
+ String MIMETYPE_TEXT = "text/plain";
+ String MIMETYPE_HTML = "text/html";
+ String MIMETYPE_XML = "text/xml";
+ String MIMETYPE_BINARY = "application/octet-stream";
+ String MIMETYPE_PROTOBUF = "application/x-protobuf";
+ String MIMETYPE_PROTOBUF_IETF = "application/protobuf";
+ String MIMETYPE_JSON = "application/json";
+
+ String CRLF = "\r\n";
+
+ String REST_KEYTAB_FILE = "hbase.rest.keytab.file";
+ String REST_KERBEROS_PRINCIPAL = "hbase.rest.kerberos.principal";
+ String REST_AUTHENTICATION_TYPE = "hbase.rest.authentication.type";
+ String REST_AUTHENTICATION_PRINCIPAL = "hbase.rest.authentication.kerberos.principal";
+
+ String REST_SSL_ENABLED = "hbase.rest.ssl.enabled";
+ String REST_SSL_KEYSTORE_STORE = "hbase.rest.ssl.keystore.store";
+ String REST_SSL_KEYSTORE_PASSWORD = "hbase.rest.ssl.keystore.password";
+ String REST_SSL_KEYSTORE_KEYPASSWORD = "hbase.rest.ssl.keystore.keypassword";
+
+ String REST_DNS_NAMESERVER = "hbase.rest.dns.nameserver";
+ String REST_DNS_INTERFACE = "hbase.rest.dns.interface";
+
+ String FILTER_CLASSES = "hbase.rest.filter.classes";
+ String SCAN_START_ROW = "startrow";
+ String SCAN_END_ROW = "endrow";
+ String SCAN_COLUMN = "column";
+ String SCAN_START_TIME = "starttime";
+ String SCAN_END_TIME = "endtime";
+ String SCAN_MAX_VERSIONS = "maxversions";
+ String SCAN_BATCH_SIZE = "batchsize";
+ String SCAN_LIMIT = "limit";
+ String SCAN_FETCH_SIZE = "hbase.rest.scan.fetchsize";
+
+ String ROW_KEYS_PARAM_NAME = "row";
+ /** If this query parameter is present when processing row or scanner resources,
+ it disables server side block caching */
+ String NOCACHE_PARAM_NAME = "nocache";
+}
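Taken together, the scan constants define the query-string vocabulary of the scanner
resources. A hypothetical request using them (table name and row keys are placeholders)
might look like:

  GET /mytable/*?startrow=row1&endrow=row9&column=cf:qual&maxversions=2&limit=100&nocache

where nocache (NOCACHE_PARAM_NAME) disables server-side block caching as documented
above, while hbase.rest.scan.fetchsize (SCAN_FETCH_SIZE) is a server-side configuration
key rather than a query parameter.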
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java
new file mode 100644
index 0000000..90b3302
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java
@@ -0,0 +1,75 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+
+import javax.ws.rs.GET;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import javax.ws.rs.core.Response.ResponseBuilder;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class ExistsResource extends ResourceBase {
+
+ static CacheControl cacheControl;
+ static {
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ TableResource tableResource;
+
+ /**
+ * Constructor
+ * @param tableResource the enclosing table resource
+ * @throws IOException if resource initialization fails
+ */
+ public ExistsResource(TableResource tableResource) throws IOException {
+ super();
+ this.tableResource = tableResource;
+ }
+
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF, MIMETYPE_BINARY})
+ public Response get(final @Context UriInfo uriInfo) {
+ try {
+ if (!tableResource.exists()) {
+ return Response.status(Response.Status.NOT_FOUND)
+ .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
+ .build();
+ }
+ } catch (IOException e) {
+ return Response.status(Response.Status.SERVICE_UNAVAILABLE)
+ .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
+ .build();
+ }
+ ResponseBuilder response = Response.ok();
+ response.cacheControl(cacheControl);
+ return response.build();
+ }
+}
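ExistsResource is a lightweight existence probe: GET returns an empty 200 (with
no-cache headers) when the enclosing table exists, 404 Not Found when it does not, and
503 Service Unavailable when the check itself fails. Assuming the conventional mount
point under the table resource (that wiring lives in TableResource, not in this hunk),
a probe might look like:

  GET /mytable/exists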
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java
new file mode 100644
index 0000000..e31037a
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java
@@ -0,0 +1,103 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+
+import org.apache.hadoop.hbase.rest.MetricsRESTSource;
+
+@InterfaceAudience.Private
+public class MetricsREST {
+
+ public MetricsRESTSource getSource() {
+ return source;
+ }
+
+ private MetricsRESTSource source;
+
+ public MetricsREST() {
+ source = CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class);
+ }
+
+ /**
+ * @param inc How much to add to requests.
+ */
+ public void incrementRequests(final int inc) {
+ source.incrementRequests(inc);
+ }
+
+ /**
+ * @param inc How much to add to sucessfulGetCount.
+ */
+ public void incrementSucessfulGetRequests(final int inc) {
+ source.incrementSucessfulGetRequests(inc);
+ }
+
+ /**
+ * @param inc How much to add to sucessfulPutCount.
+ */
+ public void incrementSucessfulPutRequests(final int inc) {
+ source.incrementSucessfulPutRequests(inc);
+ }
+
+ /**
+ * @param inc How much to add to failedPutCount.
+ */
+ public void incrementFailedPutRequests(final int inc) {
+ source.incrementFailedPutRequests(inc);
+ }
+
+ /**
+ * @param inc How much to add to failedGetCount.
+ */
+ public void incrementFailedGetRequests(final int inc) {
+ source.incrementFailedGetRequests(inc);
+ }
+
+ /**
+ * @param inc How much to add to sucessfulDeleteCount.
+ */
+ public void incrementSucessfulDeleteRequests(final int inc) {
+ source.incrementSucessfulDeleteRequests(inc);
+ }
+
+ /**
+ * @param inc How much to add to failedDeleteCount.
+ */
+ public void incrementFailedDeleteRequests(final int inc) {
+ source.incrementFailedDeleteRequests(inc);
+ }
+
+ /**
+ * @param inc How much to add to sucessfulScanCount.
+ */
+ public synchronized void incrementSucessfulScanRequests(final int inc) {
+ source.incrementSucessfulScanRequests(inc);
+ }
+
+ /**
+ * @param inc How much to add to failedScanCount.
+ */
+ public void incrementFailedScanRequests(final int inc) {
+ source.incrementFailedScanRequests(inc);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
new file mode 100644
index 0000000..c88ac91
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
@@ -0,0 +1,108 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+
+import javax.ws.rs.GET;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.rest.model.CellModel;
+import org.apache.hadoop.hbase.rest.model.CellSetModel;
+import org.apache.hadoop.hbase.rest.model.RowModel;
+
+@InterfaceAudience.Private
+public class MultiRowResource extends ResourceBase implements Constants {
+ private static final Log LOG = LogFactory.getLog(MultiRowResource.class);
+
+ TableResource tableResource;
+ Integer versions = null;
+
+ /**
+ * Constructor
+ *
+ * @param tableResource the enclosing table resource
+ * @param versions the maximum number of versions to return per row, or null for the default
+ * @throws java.io.IOException if resource initialization fails
+ */
+ public MultiRowResource(TableResource tableResource, String versions) throws IOException {
+ super();
+ this.tableResource = tableResource;
+
+ if (versions != null) {
+ this.versions = Integer.valueOf(versions);
+
+ }
+ }
+
+ @GET
+ @Produces({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF })
+ public Response get(final @Context UriInfo uriInfo) {
+ MultivaluedMap<String, String> params = uriInfo.getQueryParameters();
+
+ servlet.getMetrics().incrementRequests(1);
+ try {
+ CellSetModel model = new CellSetModel();
+ for (String rk : params.get(ROW_KEYS_PARAM_NAME)) {
+ RowSpec rowSpec = new RowSpec(rk);
+
+ if (this.versions != null) {
+ rowSpec.setMaxVersions(this.versions);
+ }
+ ResultGenerator generator =
+ ResultGenerator.fromRowSpec(this.tableResource.getName(), rowSpec, null,
+ !params.containsKey(NOCACHE_PARAM_NAME));
+ Cell value = null;
+ RowModel rowModel = new RowModel(rk);
+ if (generator.hasNext()) {
+ while ((value = generator.next()) != null) {
+ rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil
+ .cloneQualifier(value), value.getTimestamp(), CellUtil.cloneValue(value)));
+ }
+ model.addRow(rowModel);
+ } else {
+ LOG.trace("The row : " + rk + " not found in the table.");
+ }
+ }
+
+ if (model.getRows().size() == 0) {
+ //If no rows found.
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return Response.status(Response.Status.NOT_FOUND)
+ .type(MIMETYPE_TEXT).entity("No rows found." + CRLF)
+ .build();
+ } else {
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return Response.ok(model).build();
+ }
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return processException(e);
+ }
+ }
+}
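Row keys arrive as repeated values of the row query parameter (ROW_KEYS_PARAM_NAME), so
a hypothetical multi-get for two rows might look like:

  GET /mytable/multiget?row=row1&row=row2

The optional maximum-versions argument is whatever string the enclosing TableResource
passes into this constructor; that wiring is not part of this hunk.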
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java
new file mode 100644
index 0000000..bbaf1f7
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java
@@ -0,0 +1,46 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * Common interface for models capable of supporting protobuf marshalling
+ * and unmarshalling. Hooks up to the ProtobufMessageBodyConsumer and
+ * ProtobufMessageBodyProducer adapters.
+ */
+@InterfaceAudience.Private
+public interface ProtobufMessageHandler {
+ /**
+ * @return the protobuf representation of the model
+ */
+ byte[] createProtobufOutput();
+
+ /**
+ * Initialize the model from a protobuf representation.
+ * @param message the raw bytes of the protobuf message
+ * @return reference to self for convenience
+ * @throws IOException if the message cannot be deserialized
+ */
+ ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException;
+}
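A minimal sketch of a conforming model, with a hypothetical byte-array payload standing
in for the generated protobuf classes that the real models delegate to:

  import java.io.IOException;

  public class EchoModel implements ProtobufMessageHandler {
    private byte[] payload = new byte[0];

    @Override
    public byte[] createProtobufOutput() {
      // A real model would serialize its generated protobuf builder here.
      return payload.clone();
    }

    @Override
    public ProtobufMessageHandler getObjectFromMessage(byte[] message)
        throws IOException {
      // A real model would parse the generated message type from the raw bytes.
      this.payload = message.clone();
      return this; // reference to self, per the interface contract
    }
  }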
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingUtil.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingUtil.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingUtil.java
new file mode 100644
index 0000000..93bb940
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingUtil.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.List;
+
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.StreamingOutput;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.rest.model.CellModel;
+import org.apache.hadoop.hbase.rest.model.CellSetModel;
+import org.apache.hadoop.hbase.rest.model.RowModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+
+public class ProtobufStreamingUtil implements StreamingOutput {
+
+ private static final Log LOG = LogFactory.getLog(ProtobufStreamingUtil.class);
+ private String contentType;
+ private ResultScanner resultScanner;
+ private int limit;
+ private int fetchSize;
+
+ protected ProtobufStreamingUtil(ResultScanner scanner, String type, int limit, int fetchSize) {
+ this.resultScanner = scanner;
+ this.contentType = type;
+ this.limit = limit;
+ this.fetchSize = fetchSize;
+ LOG.debug("Created ScanStreamingUtil with content type = " + this.contentType + " user limit : "
+ + this.limit + " scan fetch size : " + this.fetchSize);
+ }
+
+ @Override
+ public void write(OutputStream outStream) throws IOException, WebApplicationException {
+ Result[] rowsToSend;
+ if(limit < fetchSize){
+ rowsToSend = this.resultScanner.next(limit);
+ writeToStream(createModelFromResults(rowsToSend), this.contentType, outStream);
+ } else {
+ int count = limit;
+ while (count > 0) {
+ if (count < fetchSize) {
+ rowsToSend = this.resultScanner.next(count);
+ } else {
+ rowsToSend = this.resultScanner.next(this.fetchSize);
+ }
+ if(rowsToSend.length == 0){
+ break;
+ }
+ count = count - rowsToSend.length;
+ writeToStream(createModelFromResults(rowsToSend), this.contentType, outStream);
+ }
+ }
+ }
+
+ private void writeToStream(CellSetModel model, String contentType, OutputStream outStream)
+ throws IOException {
+ byte[] objectBytes = model.createProtobufOutput();
+ outStream.write(Bytes.toBytes((short)objectBytes.length));
+ outStream.write(objectBytes);
+ outStream.flush();
+ LOG.trace("Wrote " + model.getRows().size() + " rows to stream successfully.");
+ }
+
+ private CellSetModel createModelFromResults(Result[] results) {
+ CellSetModel cellSetModel = new CellSetModel();
+ for (Result rs : results) {
+ byte[] rowKey = rs.getRow();
+ RowModel rModel = new RowModel(rowKey);
+ List<Cell> kvs = rs.listCells();
+ for (Cell kv : kvs) {
+ rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), kv
+ .getTimestamp(), CellUtil.cloneValue(kv)));
+ }
+ cellSetModel.addRow(rModel);
+ }
+ return cellSetModel;
+ }
+}
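Each flushed batch is framed as a two-byte big-endian length (Bytes.toBytes on a short)
followed by the serialized CellSet, which caps a single batch at Short.MAX_VALUE bytes.
A hedged sketch of a client-side reader for this framing (the protobuf parse step is
left as a comment):

  import java.io.DataInputStream;
  import java.io.EOFException;
  import java.io.IOException;
  import java.io.InputStream;

  public final class CellSetStreamReader {
    public static void readAll(InputStream in) throws IOException {
      DataInputStream dis = new DataInputStream(in);
      while (true) {
        short length;
        try {
          length = dis.readShort(); // matches the 2-byte length prefix written above
        } catch (EOFException e) {
          break; // clean end of stream
        }
        byte[] chunk = new byte[length];
        dis.readFully(chunk);
        // e.g. CellSetMessage.CellSet.parseFrom(chunk) to materialize the batch
      }
    }
  }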
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
new file mode 100644
index 0000000..878b30a
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
@@ -0,0 +1,253 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.commons.lang.ArrayUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.rest.filter.AuthFilter;
+import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.util.HttpServerUtil;
+import org.apache.hadoop.hbase.util.InfoServer;
+import org.apache.hadoop.hbase.util.Strings;
+import org.apache.hadoop.hbase.util.VersionInfo;
+import org.apache.hadoop.net.DNS;
+import org.mortbay.jetty.Connector;
+import org.mortbay.jetty.Server;
+import org.mortbay.jetty.nio.SelectChannelConnector;
+import org.mortbay.jetty.security.SslSelectChannelConnector;
+import org.mortbay.jetty.servlet.Context;
+import org.mortbay.jetty.servlet.FilterHolder;
+import org.mortbay.jetty.servlet.ServletHolder;
+import org.mortbay.thread.QueuedThreadPool;
+
+import com.google.common.base.Preconditions;
+import com.sun.jersey.api.json.JSONConfiguration;
+import com.sun.jersey.spi.container.servlet.ServletContainer;
+
+/**
+ * Main class for launching REST gateway as a servlet hosted by Jetty.
+ * <p>
+ * The following options are supported:
+ * <ul>
+ * <li>-p --port : service port</li>
+ * <li>-ro --readonly : server mode</li>
+ * </ul>
+ */
+@InterfaceAudience.Private
+public class RESTServer implements Constants {
+
+ private static void printUsageAndExit(Options options, int exitCode) {
+ HelpFormatter formatter = new HelpFormatter();
+ formatter.printHelp("bin/hbase rest start", "", options,
+ "\nTo run the REST server as a daemon, execute " +
+ "bin/hbase-daemon.sh start|stop rest [--infoport <port>] [-p <port>] [-ro]\n", true);
+ System.exit(exitCode);
+ }
+
+ /**
+ * The main method for the HBase rest server.
+ * @param args command-line arguments
+ * @throws Exception exception
+ */
+ public static void main(String[] args) throws Exception {
+ Log LOG = LogFactory.getLog("RESTServer");
+
+ VersionInfo.logVersion();
+ FilterHolder authFilter = null;
+ Configuration conf = HBaseConfiguration.create();
+ Class<? extends ServletContainer> containerClass = ServletContainer.class;
+ UserProvider userProvider = UserProvider.instantiate(conf);
+ // login the server principal (if using secure Hadoop)
+ if (userProvider.isHadoopSecurityEnabled() && userProvider.isHBaseSecurityEnabled()) {
+ String machineName = Strings.domainNamePointerToHostName(
+ DNS.getDefaultHost(conf.get(REST_DNS_INTERFACE, "default"),
+ conf.get(REST_DNS_NAMESERVER, "default")));
+ String keytabFilename = conf.get(REST_KEYTAB_FILE);
+ Preconditions.checkArgument(keytabFilename != null && !keytabFilename.isEmpty(),
+ REST_KEYTAB_FILE + " should be set if security is enabled");
+ String principalConfig = conf.get(REST_KERBEROS_PRINCIPAL);
+ Preconditions.checkArgument(principalConfig != null && !principalConfig.isEmpty(),
+ REST_KERBEROS_PRINCIPAL + " should be set if security is enabled");
+ userProvider.login(REST_KEYTAB_FILE, REST_KERBEROS_PRINCIPAL, machineName);
+ if (conf.get(REST_AUTHENTICATION_TYPE) != null) {
+ containerClass = RESTServletContainer.class;
+ authFilter = new FilterHolder();
+ authFilter.setClassName(AuthFilter.class.getName());
+ authFilter.setName("AuthenticationFilter");
+ }
+ }
+
+ RESTServlet servlet = RESTServlet.getInstance(conf, userProvider);
+
+ Options options = new Options();
+ options.addOption("p", "port", true, "Port to bind to [default: 8080]");
+ options.addOption("ro", "readonly", false, "Respond only to GET HTTP " +
+ "method requests [default: false]");
+ options.addOption(null, "infoport", true, "Port for web UI");
+
+ CommandLine commandLine = null;
+ try {
+ commandLine = new PosixParser().parse(options, args);
+ } catch (ParseException e) {
+ LOG.error("Could not parse: ", e);
+ printUsageAndExit(options, -1);
+ }
+
+ // check for user-defined port setting, if so override the conf
+ if (commandLine != null && commandLine.hasOption("port")) {
+ String val = commandLine.getOptionValue("port");
+ servlet.getConfiguration()
+ .setInt("hbase.rest.port", Integer.valueOf(val));
+ LOG.debug("port set to " + val);
+ }
+
+ // check if server should only process GET requests, if so override the conf
+ if (commandLine != null && commandLine.hasOption("readonly")) {
+ servlet.getConfiguration().setBoolean("hbase.rest.readonly", true);
+ LOG.debug("readonly set to true");
+ }
+
+ // check for user-defined info server port setting, if so override the conf
+ if (commandLine != null && commandLine.hasOption("infoport")) {
+ String val = commandLine.getOptionValue("infoport");
+ servlet.getConfiguration()
+ .setInt("hbase.rest.info.port", Integer.valueOf(val));
+ LOG.debug("Web UI port set to " + val);
+ }
+
+ @SuppressWarnings("unchecked")
+ List<String> remainingArgs = commandLine != null ?
+ commandLine.getArgList() : new ArrayList<String>();
+ if (remainingArgs.size() != 1) {
+ printUsageAndExit(options, 1);
+ }
+
+ String command = remainingArgs.get(0);
+ if ("start".equals(command)) {
+ // continue and start container
+ } else if ("stop".equals(command)) {
+ System.exit(1);
+ } else {
+ printUsageAndExit(options, 1);
+ }
+
+ // set up the Jersey servlet container for Jetty
+ ServletHolder sh = new ServletHolder(containerClass);
+ sh.setInitParameter(
+ "com.sun.jersey.config.property.resourceConfigClass",
+ ResourceConfig.class.getCanonicalName());
+ sh.setInitParameter("com.sun.jersey.config.property.packages",
+ "jetty");
+ // The servlet holder below is instantiated to only handle the case
+ // of the /status/cluster returning arrays of nodes (live/dead). Without
+ // this servlet holder, the problem is that the node arrays in the response
+ // are collapsed to single nodes. We want to be able to treat the
+ // node lists as POJO in the response to /status/cluster servlet call,
+ // but not change the behavior for any of the other servlets
+ // Hence we don't use the servlet holder for all servlets / paths
+ ServletHolder shPojoMap = new ServletHolder(containerClass);
+ @SuppressWarnings("unchecked")
+ Map<String, String> shInitMap = sh.getInitParameters();
+ for (Entry<String, String> e : shInitMap.entrySet()) {
+ shPojoMap.setInitParameter(e.getKey(), e.getValue());
+ }
+ shPojoMap.setInitParameter(JSONConfiguration.FEATURE_POJO_MAPPING, "true");
+
+ // set up Jetty and run the embedded server
+
+ Server server = new Server();
+
+ Connector connector = new SelectChannelConnector();
+ if(conf.getBoolean(REST_SSL_ENABLED, false)) {
+ SslSelectChannelConnector sslConnector = new SslSelectChannelConnector();
+ String keystore = conf.get(REST_SSL_KEYSTORE_STORE);
+ String password = HBaseConfiguration.getPassword(conf,
+ REST_SSL_KEYSTORE_PASSWORD, null);
+ String keyPassword = HBaseConfiguration.getPassword(conf,
+ REST_SSL_KEYSTORE_KEYPASSWORD, password);
+ sslConnector.setKeystore(keystore);
+ sslConnector.setPassword(password);
+ sslConnector.setKeyPassword(keyPassword);
+ connector = sslConnector;
+ }
+ connector.setPort(servlet.getConfiguration().getInt("hbase.rest.port", 8080));
+ connector.setHost(servlet.getConfiguration().get("hbase.rest.host", "0.0.0.0"));
+
+ server.addConnector(connector);
+
+ // Set the default max thread number to 100 to limit
+ // the number of concurrent requests so that REST server doesn't OOM easily.
+ // Jetty set the default max thread number to 250, if we don't set it.
+ //
+ // Our default min thread number 2 is the same as that used by Jetty.
+ int maxThreads = servlet.getConfiguration().getInt("hbase.rest.threads.max", 100);
+ int minThreads = servlet.getConfiguration().getInt("hbase.rest.threads.min", 2);
+ QueuedThreadPool threadPool = new QueuedThreadPool(maxThreads);
+ threadPool.setMinThreads(minThreads);
+ server.setThreadPool(threadPool);
+
+ server.setSendServerVersion(false);
+ server.setSendDateHeader(false);
+ server.setStopAtShutdown(true);
+ // set up context
+ Context context = new Context(server, "/", Context.SESSIONS);
+ context.addServlet(shPojoMap, "/status/cluster");
+ context.addServlet(sh, "/*");
+ if (authFilter != null) {
+ context.addFilter(authFilter, "/*", 1);
+ }
+
+ // Load filters from configuration.
+ String[] filterClasses = servlet.getConfiguration().getStrings(FILTER_CLASSES,
+ ArrayUtils.EMPTY_STRING_ARRAY);
+ for (String filter : filterClasses) {
+ filter = filter.trim();
+ context.addFilter(Class.forName(filter), "/*", 0);
+ }
+ HttpServerUtil.constrainHttpMethods(context);
+
+ // Put up info server.
+ int port = conf.getInt("hbase.rest.info.port", 8085);
+ if (port >= 0) {
+ conf.setLong("startcode", System.currentTimeMillis());
+ String a = conf.get("hbase.rest.info.bindAddress", "0.0.0.0");
+ InfoServer infoServer = new InfoServer("rest", a, port, false, conf);
+ infoServer.setAttribute("hbase.conf", conf);
+ infoServer.start();
+ }
+
+ // start server
+ server.start();
+ server.join();
+ }
+}
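Concretely, the supported invocations (mirroring the usage text above) look like:

  bin/hbase rest start -p 8080 --infoport 8085
  bin/hbase rest start -ro
  bin/hbase-daemon.sh start|stop rest [--infoport <port>] [-p <port>] [-ro]

Note that the stop command in main() merely exits with status 1; shutting down a
daemonized server is handled by hbase-daemon.sh.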
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java
new file mode 100644
index 0000000..a98663e
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java
@@ -0,0 +1,132 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.util.ConnectionCache;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+
+/**
+ * Singleton class encapsulating global REST servlet state and functions.
+ */
+@InterfaceAudience.Private
+public class RESTServlet implements Constants {
+ private static RESTServlet INSTANCE;
+ private final Configuration conf;
+ private final MetricsREST metrics = new MetricsREST();
+ private final ConnectionCache connectionCache;
+ private final UserGroupInformation realUser;
+
+ static final String CLEANUP_INTERVAL = "hbase.rest.connection.cleanup-interval";
+ static final String MAX_IDLETIME = "hbase.rest.connection.max-idletime";
+ static final String HBASE_REST_SUPPORT_PROXYUSER = "hbase.rest.support.proxyuser";
+
+ UserGroupInformation getRealUser() {
+ return realUser;
+ }
+
+ /**
+ * @return the RESTServlet singleton instance
+ */
+ public synchronized static RESTServlet getInstance() {
+ assert(INSTANCE != null);
+ return INSTANCE;
+ }
+
+ /**
+ * @param conf Existing configuration to use in rest servlet
+ * @param userProvider the login user provider
+ * @return the RESTServlet singleton instance
+ * @throws IOException
+ */
+ public synchronized static RESTServlet getInstance(Configuration conf,
+ UserProvider userProvider) throws IOException {
+ if (INSTANCE == null) {
+ INSTANCE = new RESTServlet(conf, userProvider);
+ }
+ return INSTANCE;
+ }
+
+ public synchronized static void stop() {
+ if (INSTANCE != null) INSTANCE = null;
+ }
+
+ /**
+ * Constructor with existing configuration
+ * @param conf existing configuration
+ * @param userProvider the login user provider
+ * @throws IOException
+ */
+ RESTServlet(final Configuration conf,
+ final UserProvider userProvider) throws IOException {
+ this.realUser = userProvider.getCurrent().getUGI();
+ this.conf = conf;
+
+ int cleanInterval = conf.getInt(CLEANUP_INTERVAL, 10 * 1000);
+ int maxIdleTime = conf.getInt(MAX_IDLETIME, 10 * 60 * 1000);
+ connectionCache = new ConnectionCache(
+ conf, userProvider, cleanInterval, maxIdleTime);
+ if (supportsProxyuser()) {
+ ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+ }
+ }
+
+ HBaseAdmin getAdmin() throws IOException {
+ return connectionCache.getAdmin();
+ }
+
+ /**
+ * Caller closes the table afterwards.
+ */
+ HTableInterface getTable(String tableName) throws IOException {
+ return connectionCache.getTable(tableName);
+ }
+
+ Configuration getConfiguration() {
+ return conf;
+ }
+
+ MetricsREST getMetrics() {
+ return metrics;
+ }
+
+ /**
+ * Helper method to determine if server should
+ * only respond to GET HTTP method requests.
+ * @return boolean for server read-only state
+ */
+ boolean isReadOnly() {
+ return getConfiguration().getBoolean("hbase.rest.readonly", false);
+ }
+
+ void setEffectiveUser(String effectiveUser) {
+ connectionCache.setEffectiveUser(effectiveUser);
+ }
+
+ boolean supportsProxyuser() {
+ return conf.getBoolean(HBASE_REST_SUPPORT_PROXYUSER, false);
+ }
+}
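The two getInstance overloads imply a strict bootstrap order, which RESTServer.main
above follows; a condensed sketch:

  Configuration conf = HBaseConfiguration.create();
  UserProvider userProvider = UserProvider.instantiate(conf);
  RESTServlet servlet = RESTServlet.getInstance(conf, userProvider); // creates the singleton
  // later, resource constructors call RESTServlet.getInstance(),
  // which asserts the singleton already exists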
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java
new file mode 100644
index 0000000..2ce8ede
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+import com.sun.jersey.spi.container.servlet.ServletContainer;
+
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * REST servlet container. It is used to get the remote request user
+ * without going through @HttpContext, so that we can minimize code changes.
+ */
+@InterfaceAudience.Private
+public class RESTServletContainer extends ServletContainer {
+ private static final long serialVersionUID = -2474255003443394314L;
+
+ /**
+ * This container is used only if authentication and
+ * impersonation is enabled. The remote request user is used
+ * as a proxy user for impersonation in invoking any REST service.
+ */
+ @Override
+ public void service(final HttpServletRequest request,
+ final HttpServletResponse response) throws ServletException, IOException {
+ final String doAsUserFromQuery = request.getParameter("doAs");
+ RESTServlet servlet = RESTServlet.getInstance();
+ if (doAsUserFromQuery != null) {
+ Configuration conf = servlet.getConfiguration();
+ if (!servlet.supportsProxyuser()) {
+ throw new ServletException("Support for proxyuser is not configured");
+ }
+ UserGroupInformation ugi = servlet.getRealUser();
+ // create and attempt to authorize a proxy user (the client is attempting
+ // to do proxy user)
+ ugi = UserGroupInformation.createProxyUser(doAsUserFromQuery, ugi);
+ // validate the proxy user authorization
+ try {
+ ProxyUsers.authorize(ugi, request.getRemoteAddr(), conf);
+ } catch(AuthorizationException e) {
+ throw new ServletException(e.getMessage());
+ }
+ servlet.setEffectiveUser(doAsUserFromQuery);
+ } else {
+ String effectiveUser = request.getRemoteUser();
+ servlet.setEffectiveUser(effectiveUser);
+ }
+ super.service(request, response);
+ }
+}
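In practice this means an already-authenticated client can impersonate another user by
appending a doAs query parameter, e.g. a hypothetical GET /version?doAs=bob, provided
hbase.rest.support.proxyuser is enabled and the proxy-user rules authorize the real
(authenticated) user for that impersonation; otherwise the request fails with a
ServletException.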
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
new file mode 100644
index 0000000..ddc2f56
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
@@ -0,0 +1,104 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.util.Map;
+
+import javax.ws.rs.GET;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import javax.ws.rs.core.Response.ResponseBuilder;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.MetaScanner;
+import org.apache.hadoop.hbase.rest.model.TableInfoModel;
+import org.apache.hadoop.hbase.rest.model.TableRegionModel;
+
+@InterfaceAudience.Private
+public class RegionsResource extends ResourceBase {
+ private static final Log LOG = LogFactory.getLog(RegionsResource.class);
+
+ static CacheControl cacheControl;
+ static {
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ TableResource tableResource;
+
+ /**
+ * Constructor
+ * @param tableResource the enclosing table resource
+ * @throws IOException if resource initialization fails
+ */
+ public RegionsResource(TableResource tableResource) throws IOException {
+ super();
+ this.tableResource = tableResource;
+ }
+
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response get(final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ try {
+ TableName tableName = TableName.valueOf(tableResource.getName());
+ TableInfoModel model = new TableInfoModel(tableName.getNameAsString());
+ Map<HRegionInfo,ServerName> regions = MetaScanner.allTableRegions(
+ servlet.getConfiguration(), null, tableName, false);
+ for (Map.Entry<HRegionInfo,ServerName> e: regions.entrySet()) {
+ HRegionInfo hri = e.getKey();
+ ServerName addr = e.getValue();
+ model.add(
+ new TableRegionModel(tableName.getNameAsString(), hri.getRegionId(),
+ hri.getStartKey(), hri.getEndKey(), addr.getHostAndPort()));
+ }
+ ResponseBuilder response = Response.ok(model);
+ response.cacheControl(cacheControl);
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return response.build();
+ } catch (TableNotFoundException e) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return Response.status(Response.Status.NOT_FOUND)
+ .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
+ .build();
+ } catch (IOException e) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return Response.status(Response.Status.SERVICE_UNAVAILABLE)
+ .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
+ .build();
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java
new file mode 100644
index 0000000..f71d848
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java
@@ -0,0 +1,96 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.Response;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
+import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
+import org.apache.hadoop.util.StringUtils;
+
+@InterfaceAudience.Private
+public class ResourceBase implements Constants {
+
+ RESTServlet servlet;
+ Class<?> accessDeniedClazz;
+
+ public ResourceBase() throws IOException {
+ servlet = RESTServlet.getInstance();
+ try {
+ accessDeniedClazz = Class.forName("org.apache.hadoop.hbase.security.AccessDeniedException");
+ } catch (ClassNotFoundException e) {
+ }
+ }
+
+ protected Response processException(Throwable exp) {
+ Throwable curr = exp;
+ if(accessDeniedClazz != null) {
+ //some access denied exceptions are buried
+ while (curr != null) {
+ if(accessDeniedClazz.isAssignableFrom(curr.getClass())) {
+ throw new WebApplicationException(
+ Response.status(Response.Status.FORBIDDEN)
+ .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF +
+ StringUtils.stringifyException(exp) + CRLF)
+ .build());
+ }
+ curr = curr.getCause();
+ }
+ }
+ //TableNotFound may also be buried one level deep
+ if (exp instanceof TableNotFoundException ||
+ exp.getCause() instanceof TableNotFoundException) {
+ throw new WebApplicationException(
+ Response.status(Response.Status.NOT_FOUND)
+ .type(MIMETYPE_TEXT).entity("Not found" + CRLF +
+ StringUtils.stringifyException(exp) + CRLF)
+ .build());
+ }
+ if (exp instanceof NoSuchColumnFamilyException){
+ throw new WebApplicationException(
+ Response.status(Response.Status.NOT_FOUND)
+ .type(MIMETYPE_TEXT).entity("Not found" + CRLF +
+ StringUtils.stringifyException(exp) + CRLF)
+ .build());
+ }
+ if (exp instanceof RuntimeException) {
+ throw new WebApplicationException(
+ Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request" + CRLF +
+ StringUtils.stringifyException(exp) + CRLF)
+ .build());
+ }
+ if (exp instanceof RetriesExhaustedWithDetailsException) {
+ RetriesExhaustedWithDetailsException retryException =
+ (RetriesExhaustedWithDetailsException) exp;
+ processException(retryException.getCause(0));
+ }
+ throw new WebApplicationException(
+ Response.status(Response.Status.SERVICE_UNAVAILABLE)
+ .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF +
+ StringUtils.stringifyException(exp) + CRLF)
+ .build());
+ }
+}
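The net status mapping implemented here: AccessDeniedException anywhere in the cause
chain -> 403 Forbidden; TableNotFoundException (directly or one cause deep) and
NoSuchColumnFamilyException -> 404 Not Found; other RuntimeExceptions -> 400 Bad
Request; RetriesExhaustedWithDetailsException is unwrapped and its first cause
re-mapped recursively; everything else -> 503 Service Unavailable.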
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceConfig.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceConfig.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceConfig.java
new file mode 100644
index 0000000..d397399
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceConfig.java
@@ -0,0 +1,31 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+import com.sun.jersey.api.core.PackagesResourceConfig;
+
+@InterfaceAudience.Private
+public class ResourceConfig extends PackagesResourceConfig {
+ public ResourceConfig() {
+ super("org.apache.hadoop.hbase.rest");
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java
new file mode 100644
index 0000000..989c59e
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java
@@ -0,0 +1,50 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.rest.model.ScannerModel;
+
+@InterfaceAudience.Private
+public abstract class ResultGenerator implements Iterator<Cell> {
+
+ public static ResultGenerator fromRowSpec(final String table,
+ final RowSpec rowspec, final Filter filter, final boolean cacheBlocks)
+ throws IOException {
+ if (rowspec.isSingleRow()) {
+ return new RowResultGenerator(table, rowspec, filter, cacheBlocks);
+ } else {
+ return new ScannerResultGenerator(table, rowspec, filter, cacheBlocks);
+ }
+ }
+
+ public static Filter buildFilter(final String filter) throws Exception {
+ return ScannerModel.buildFilter(filter);
+ }
+
+ public abstract void putBack(Cell kv);
+
+ public abstract void close();
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java
new file mode 100644
index 0000000..c425e84
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java
@@ -0,0 +1,106 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import javax.ws.rs.core.Response.ResponseBuilder;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.rest.model.TableListModel;
+import org.apache.hadoop.hbase.rest.model.TableModel;
+
+@Path("/")
+@InterfaceAudience.Private
+public class RootResource extends ResourceBase {
+ private static final Log LOG = LogFactory.getLog(RootResource.class);
+
+ static CacheControl cacheControl;
+ static {
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ /**
+ * Constructor
+ * @throws IOException
+ */
+ public RootResource() throws IOException {
+ super();
+ }
+
+ private final TableListModel getTableList() throws IOException {
+ TableListModel tableList = new TableListModel();
+ TableName[] tableNames = servlet.getAdmin().listTableNames();
+ for (TableName name: tableNames) {
+ tableList.add(new TableModel(name.getNameAsString()));
+ }
+ return tableList;
+ }
+
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response get(final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ try {
+ ResponseBuilder response = Response.ok(getTableList());
+ response.cacheControl(cacheControl);
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return response.build();
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return processException(e);
+ }
+ }
+
+ @Path("status/cluster")
+ public StorageClusterStatusResource getClusterStatusResource()
+ throws IOException {
+ return new StorageClusterStatusResource();
+ }
+
+ @Path("version")
+ public VersionResource getVersionResource() throws IOException {
+ return new VersionResource();
+ }
+
+ @Path("{table}")
+ public TableResource getTableResource(
+ final @PathParam("table") String table) throws IOException {
+ return new TableResource(table);
+ }
+}
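The top-level routing established by this resource is therefore:

  GET /               -> list of tables
  /version            -> VersionResource
  /status/cluster     -> StorageClusterStatusResource
  /<table>            -> TableResource for the named table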
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellSetMessage.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellSetMessage.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellSetMessage.java
new file mode 100644
index 0000000..f35a25f
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellSetMessage.java
@@ -0,0 +1,1521 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: CellSetMessage.proto
+
+package org.apache.hadoop.hbase.rest.protobuf.generated;
+
+public final class CellSetMessage {
+ private CellSetMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface CellSetOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row>
+ getRowsList();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row getRows(int index);
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ int getRowsCount();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.RowOrBuilder>
+ getRowsOrBuilderList();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.RowOrBuilder getRowsOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.CellSet}
+ */
+ public static final class CellSet extends
+ com.google.protobuf.GeneratedMessage
+ implements CellSetOrBuilder {
+ // Use CellSet.newBuilder() to construct.
+ private CellSet(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private CellSet(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final CellSet defaultInstance;
+ public static CellSet getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public CellSet getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private CellSet(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+ rows_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row>();
+ mutable_bitField0_ |= 0x00000001;
+ }
+ rows_.add(input.readMessage(org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+ rows_ = java.util.Collections.unmodifiableList(rows_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.class, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<CellSet> PARSER =
+ new com.google.protobuf.AbstractParser<CellSet>() {
+ public CellSet parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new CellSet(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<CellSet> getParserForType() {
+ return PARSER;
+ }
+
+ public interface RowOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required bytes key = 1;
+ /**
+ * <code>required bytes key = 1;</code>
+ */
+ boolean hasKey();
+ /**
+ * <code>required bytes key = 1;</code>
+ */
+ com.google.protobuf.ByteString getKey();
+
+ // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell>
+ getValuesList();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell getValues(int index);
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ int getValuesCount();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.CellOrBuilder>
+ getValuesOrBuilderList();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.CellOrBuilder getValuesOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row}
+ */
+ public static final class Row extends
+ com.google.protobuf.GeneratedMessage
+ implements RowOrBuilder {
+ // Use Row.newBuilder() to construct.
+ private Row(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Row(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Row defaultInstance;
+ public static Row getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Row getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Row(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ key_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ values_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell>();
+ mutable_bitField0_ |= 0x00000002;
+ }
+ values_.add(input.readMessage(org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ values_ = java.util.Collections.unmodifiableList(values_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_Row_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_Row_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.class, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Row> PARSER =
+ new com.google.protobuf.AbstractParser<Row>() {
+ public Row parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Row(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Row> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required bytes key = 1;
+ public static final int KEY_FIELD_NUMBER = 1;
+ private com.google.protobuf.ByteString key_;
+ /**
+ * <code>required bytes key = 1;</code>
+ */
+ public boolean hasKey() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required bytes key = 1;</code>
+ */
+ public com.google.protobuf.ByteString getKey() {
+ return key_;
+ }
+
+ // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
+ public static final int VALUES_FIELD_NUMBER = 2;
+ private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell> values_;
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell> getValuesList() {
+ return values_;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.CellOrBuilder>
+ getValuesOrBuilderList() {
+ return values_;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ public int getValuesCount() {
+ return values_.size();
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell getValues(int index) {
+ return values_.get(index);
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.CellOrBuilder getValuesOrBuilder(
+ int index) {
+ return values_.get(index);
+ }
+
+ private void initFields() {
+ key_ = com.google.protobuf.ByteString.EMPTY;
+ values_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasKey()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, key_);
+ }
+ for (int i = 0; i < values_.size(); i++) {
+ output.writeMessage(2, values_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, key_);
+ }
+ for (int i = 0; i < values_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, values_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.RowOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_Row_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_Row_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.class, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getValuesFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ key_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (valuesBuilder_ == null) {
+ values_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ valuesBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_Row_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row build() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row buildPartial() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row result = new org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.key_ = key_;
+ if (valuesBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ values_ = java.util.Collections.unmodifiableList(values_);
+ bitField0_ = (bitField0_ & ~0x00000002);
+ }
+ result.values_ = values_;
+ } else {
+ result.values_ = valuesBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row) {
+ return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row other) {
+ if (other == org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.getDefaultInstance()) return this;
+ if (other.hasKey()) {
+ setKey(other.getKey());
+ }
+ if (valuesBuilder_ == null) {
+ if (!other.values_.isEmpty()) {
+ if (values_.isEmpty()) {
+ values_ = other.values_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ ensureValuesIsMutable();
+ values_.addAll(other.values_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.values_.isEmpty()) {
+ if (valuesBuilder_.isEmpty()) {
+ valuesBuilder_.dispose();
+ valuesBuilder_ = null;
+ values_ = other.values_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ valuesBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getValuesFieldBuilder() : null;
+ } else {
+ valuesBuilder_.addAllMessages(other.values_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasKey()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required bytes key = 1;
+ private com.google.protobuf.ByteString key_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * <code>required bytes key = 1;</code>
+ */
+ public boolean hasKey() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required bytes key = 1;</code>
+ */
+ public com.google.protobuf.ByteString getKey() {
+ return key_;
+ }
+ /**
+ * <code>required bytes key = 1;</code>
+ */
+ public Builder setKey(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ key_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required bytes key = 1;</code>
+ */
+ public Builder clearKey() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ key_ = getDefaultInstance().getKey();
+ onChanged();
+ return this;
+ }
+
+ // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
+ private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell> values_ =
+ java.util.Collections.emptyList();
+ private void ensureValuesIsMutable() {
+ if (!((bitField0_ & 0x00000002) == 0x00000002)) {
+ values_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell>(values_);
+ bitField0_ |= 0x00000002;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.CellOrBuilder> valuesBuilder_;
+
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell> getValuesList() {
+ if (valuesBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(values_);
+ } else {
+ return valuesBuilder_.getMessageList();
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ public int getValuesCount() {
+ if (valuesBuilder_ == null) {
+ return values_.size();
+ } else {
+ return valuesBuilder_.getCount();
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell getValues(int index) {
+ if (valuesBuilder_ == null) {
+ return values_.get(index);
+ } else {
+ return valuesBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ public Builder setValues(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell value) {
+ if (valuesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureValuesIsMutable();
+ values_.set(index, value);
+ onChanged();
+ } else {
+ valuesBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ public Builder setValues(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder builderForValue) {
+ if (valuesBuilder_ == null) {
+ ensureValuesIsMutable();
+ values_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ valuesBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ public Builder addValues(org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell value) {
+ if (valuesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureValuesIsMutable();
+ values_.add(value);
+ onChanged();
+ } else {
+ valuesBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ public Builder addValues(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell value) {
+ if (valuesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureValuesIsMutable();
+ values_.add(index, value);
+ onChanged();
+ } else {
+ valuesBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ public Builder addValues(
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder builderForValue) {
+ if (valuesBuilder_ == null) {
+ ensureValuesIsMutable();
+ values_.add(builderForValue.build());
+ onChanged();
+ } else {
+ valuesBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ public Builder addValues(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder builderForValue) {
+ if (valuesBuilder_ == null) {
+ ensureValuesIsMutable();
+ values_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ valuesBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ public Builder addAllValues(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell> values) {
+ if (valuesBuilder_ == null) {
+ ensureValuesIsMutable();
+ super.addAll(values, values_);
+ onChanged();
+ } else {
+ valuesBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ public Builder clearValues() {
+ if (valuesBuilder_ == null) {
+ values_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ onChanged();
+ } else {
+ valuesBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ public Builder removeValues(int index) {
+ if (valuesBuilder_ == null) {
+ ensureValuesIsMutable();
+ values_.remove(index);
+ onChanged();
+ } else {
+ valuesBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder getValuesBuilder(
+ int index) {
+ return getValuesFieldBuilder().getBuilder(index);
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.CellOrBuilder getValuesOrBuilder(
+ int index) {
+ if (valuesBuilder_ == null) {
+ return values_.get(index); } else {
+ return valuesBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.CellOrBuilder>
+ getValuesOrBuilderList() {
+ if (valuesBuilder_ != null) {
+ return valuesBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(values_);
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder addValuesBuilder() {
+ return getValuesFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder addValuesBuilder(
+ int index) {
+ return getValuesFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder>
+ getValuesBuilderList() {
+ return getValuesFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.CellOrBuilder>
+ getValuesFieldBuilder() {
+ if (valuesBuilder_ == null) {
+ valuesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.CellOrBuilder>(
+ values_,
+ ((bitField0_ & 0x00000002) == 0x00000002),
+ getParentForChildren(),
+ isClean());
+ values_ = null;
+ }
+ return valuesBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row)
+ }
+
+ static {
+ defaultInstance = new Row(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row)
+ }
+
+ // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
+ public static final int ROWS_FIELD_NUMBER = 1;
+ private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row> rows_;
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row> getRowsList() {
+ return rows_;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.RowOrBuilder>
+ getRowsOrBuilderList() {
+ return rows_;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ public int getRowsCount() {
+ return rows_.size();
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row getRows(int index) {
+ return rows_.get(index);
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.RowOrBuilder getRowsOrBuilder(
+ int index) {
+ return rows_.get(index);
+ }
+
+ private void initFields() {
+ rows_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ for (int i = 0; i < getRowsCount(); i++) {
+ if (!getRows(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ for (int i = 0; i < rows_.size(); i++) {
+ output.writeMessage(1, rows_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ for (int i = 0; i < rows_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, rows_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.CellSet}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSetOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.class, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getRowsFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (rowsBuilder_ == null) {
+ rows_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ rowsBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet build() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet buildPartial() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet result = new org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet(this);
+ int from_bitField0_ = bitField0_;
+ if (rowsBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ rows_ = java.util.Collections.unmodifiableList(rows_);
+ bitField0_ = (bitField0_ & ~0x00000001);
+ }
+ result.rows_ = rows_;
+ } else {
+ result.rows_ = rowsBuilder_.build();
+ }
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet) {
+ return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet other) {
+ if (other == org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.getDefaultInstance()) return this;
+ if (rowsBuilder_ == null) {
+ if (!other.rows_.isEmpty()) {
+ if (rows_.isEmpty()) {
+ rows_ = other.rows_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ ensureRowsIsMutable();
+ rows_.addAll(other.rows_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.rows_.isEmpty()) {
+ if (rowsBuilder_.isEmpty()) {
+ rowsBuilder_.dispose();
+ rowsBuilder_ = null;
+ rows_ = other.rows_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ rowsBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getRowsFieldBuilder() : null;
+ } else {
+ rowsBuilder_.addAllMessages(other.rows_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ for (int i = 0; i < getRowsCount(); i++) {
+ if (!getRows(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
+ private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row> rows_ =
+ java.util.Collections.emptyList();
+ private void ensureRowsIsMutable() {
+ if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+ rows_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row>(rows_);
+ bitField0_ |= 0x00000001;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.RowOrBuilder> rowsBuilder_;
+
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row> getRowsList() {
+ if (rowsBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(rows_);
+ } else {
+ return rowsBuilder_.getMessageList();
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ public int getRowsCount() {
+ if (rowsBuilder_ == null) {
+ return rows_.size();
+ } else {
+ return rowsBuilder_.getCount();
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row getRows(int index) {
+ if (rowsBuilder_ == null) {
+ return rows_.get(index);
+ } else {
+ return rowsBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ public Builder setRows(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row value) {
+ if (rowsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRowsIsMutable();
+ rows_.set(index, value);
+ onChanged();
+ } else {
+ rowsBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ public Builder setRows(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder builderForValue) {
+ if (rowsBuilder_ == null) {
+ ensureRowsIsMutable();
+ rows_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ rowsBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ public Builder addRows(org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row value) {
+ if (rowsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRowsIsMutable();
+ rows_.add(value);
+ onChanged();
+ } else {
+ rowsBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ public Builder addRows(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row value) {
+ if (rowsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRowsIsMutable();
+ rows_.add(index, value);
+ onChanged();
+ } else {
+ rowsBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ public Builder addRows(
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder builderForValue) {
+ if (rowsBuilder_ == null) {
+ ensureRowsIsMutable();
+ rows_.add(builderForValue.build());
+ onChanged();
+ } else {
+ rowsBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ public Builder addRows(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder builderForValue) {
+ if (rowsBuilder_ == null) {
+ ensureRowsIsMutable();
+ rows_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ rowsBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ public Builder addAllRows(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row> values) {
+ if (rowsBuilder_ == null) {
+ ensureRowsIsMutable();
+ super.addAll(values, rows_);
+ onChanged();
+ } else {
+ rowsBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ public Builder clearRows() {
+ if (rowsBuilder_ == null) {
+ rows_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ onChanged();
+ } else {
+ rowsBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ public Builder removeRows(int index) {
+ if (rowsBuilder_ == null) {
+ ensureRowsIsMutable();
+ rows_.remove(index);
+ onChanged();
+ } else {
+ rowsBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder getRowsBuilder(
+ int index) {
+ return getRowsFieldBuilder().getBuilder(index);
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.RowOrBuilder getRowsOrBuilder(
+ int index) {
+ if (rowsBuilder_ == null) {
+ return rows_.get(index); } else {
+ return rowsBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.RowOrBuilder>
+ getRowsOrBuilderList() {
+ if (rowsBuilder_ != null) {
+ return rowsBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(rows_);
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder addRowsBuilder() {
+ return getRowsFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder addRowsBuilder(
+ int index) {
+ return getRowsFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder>
+ getRowsBuilderList() {
+ return getRowsFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.RowOrBuilder>
+ getRowsFieldBuilder() {
+ if (rowsBuilder_ == null) {
+ rowsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.RowOrBuilder>(
+ rows_,
+ ((bitField0_ & 0x00000001) == 0x00000001),
+ getParentForChildren(),
+ isClean());
+ rows_ = null;
+ }
+ return rowsBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.CellSet)
+ }
+
+ static {
+ defaultInstance = new CellSet(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.CellSet)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_Row_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_Row_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\024CellSetMessage.proto\022/org.apache.hadoo" +
+ "p.hbase.rest.protobuf.generated\032\021CellMes" +
+ "sage.proto\"\260\001\n\007CellSet\022J\n\004rows\030\001 \003(\0132<.o" +
+ "rg.apache.hadoop.hbase.rest.protobuf.gen" +
+ "erated.CellSet.Row\032Y\n\003Row\022\013\n\003key\030\001 \002(\014\022E" +
+ "\n\006values\030\002 \003(\01325.org.apache.hadoop.hbase" +
+ ".rest.protobuf.generated.Cell"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_descriptor,
+ new java.lang.String[] { "Rows", });
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_Row_descriptor =
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_descriptor.getNestedTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_Row_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_Row_descriptor,
+ new java.lang.String[] { "Key", "Values", });
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.getDescriptor(),
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
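
Decoding the descriptor string embedded above recovers the schema this file was
generated from. The sketch below shows the reconstructed message layout and a
round trip through the generated builders; the Cell field names (column,
timestamp, data) are assumed from the sibling CellMessage.proto, and the cell
contents are illustrative only.

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell;
    import org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet;

    public class CellSetRoundTrip {
      public static void main(String[] args) throws Exception {
        // Schema reconstructed from the descriptor data above:
        //   message CellSet {
        //     message Row {
        //       required bytes key = 1;
        //       repeated Cell values = 2;
        //     }
        //     repeated Row rows = 1;
        //   }
        CellSet cellSet = CellSet.newBuilder()
            .addRows(CellSet.Row.newBuilder()
                .setKey(ByteString.copyFromUtf8("row1"))        // required field
                .addValues(Cell.newBuilder()
                    .setColumn(ByteString.copyFromUtf8("cf:a")) // assumed Cell fields
                    .setTimestamp(1L)
                    .setData(ByteString.copyFromUtf8("value1"))))
            .build();
        byte[] wire = cellSet.toByteArray();       // serialize to the wire format
        CellSet parsed = CellSet.parseFrom(wire);  // parse via the generated PARSER
        System.out.println(parsed.getRows(0).getValuesCount()); // prints 1
      }
    }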
[37/38] HBASE-12197 Move rest to it's on module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
new file mode 100644
index 0000000..7db5328
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
@@ -0,0 +1,598 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.rest.model.CellModel;
+import org.apache.hadoop.hbase.rest.model.CellSetModel;
+import org.apache.hadoop.hbase.rest.model.RowModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+@InterfaceAudience.Private
+public class RowResource extends ResourceBase {
+ private static final Log LOG = LogFactory.getLog(RowResource.class);
+
+ static final String CHECK_PUT = "put";
+ static final String CHECK_DELETE = "delete";
+
+ TableResource tableResource;
+ RowSpec rowspec;
+ private String check = null;
+
+ /**
+ * Constructor.
+ * @param tableResource the table resource this row belongs to
+ * @param rowspec string specification of the row (and optionally the
+ *   columns and timestamps) to address
+ * @param versions maximum number of cell versions to return, or null for
+ *   the default
+ * @param check check mode for atomic operations ("put" or "delete"), or
+ *   null for none
+ * @throws IOException if the resource cannot be initialized
+ */
+ public RowResource(TableResource tableResource, String rowspec,
+ String versions, String check) throws IOException {
+ super();
+ this.tableResource = tableResource;
+ this.rowspec = new RowSpec(rowspec);
+ if (versions != null) {
+ this.rowspec.setMaxVersions(Integer.valueOf(versions));
+ }
+ this.check = check;
+ }
+
+ @GET
+ @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response get(final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ MultivaluedMap<String, String> params = uriInfo.getQueryParameters();
+ try {
+ ResultGenerator generator =
+ ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, null,
+ !params.containsKey(NOCACHE_PARAM_NAME));
+ if (!generator.hasNext()) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return Response.status(Response.Status.NOT_FOUND)
+ .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
+ .build();
+ }
+ int count = 0;
+ CellSetModel model = new CellSetModel();
+ Cell value = generator.next();
+ byte[] rowKey = CellUtil.cloneRow(value);
+ RowModel rowModel = new RowModel(rowKey);
+ do {
+ if (!Bytes.equals(CellUtil.cloneRow(value), rowKey)) {
+ model.addRow(rowModel);
+ rowKey = CellUtil.cloneRow(value);
+ rowModel = new RowModel(rowKey);
+ }
+ rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value),
+ value.getTimestamp(), CellUtil.cloneValue(value)));
+ if (++count > rowspec.getMaxValues()) {
+ break;
+ }
+ value = generator.next();
+ } while (value != null);
+ model.addRow(rowModel);
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return Response.ok(model).build();
+ } catch (Exception e) {
+      servlet.getMetrics().incrementFailedGetRequests(1);
+ return processException(e);
+ }
+ }
+
+ @GET
+ @Produces(MIMETYPE_BINARY)
+ public Response getBinary(final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY);
+ }
+ servlet.getMetrics().incrementRequests(1);
+    // It doesn't make sense to use a non-specific coordinate here, as this
+    // endpoint can only return a single cell.
+    if (!rowspec.hasColumns() || rowspec.getColumns().length > 1) {
+      servlet.getMetrics().incrementFailedGetRequests(1);
+      return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
+        .entity("Bad request: Either 0 or more than 1 column specified." + CRLF).build();
+ }
+ MultivaluedMap<String, String> params = uriInfo.getQueryParameters();
+ try {
+ ResultGenerator generator =
+ ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, null,
+ !params.containsKey(NOCACHE_PARAM_NAME));
+ if (!generator.hasNext()) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return Response.status(Response.Status.NOT_FOUND)
+ .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
+ .build();
+ }
+ Cell value = generator.next();
+ ResponseBuilder response = Response.ok(CellUtil.cloneValue(value));
+ response.header("X-Timestamp", value.getTimestamp());
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return response.build();
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return processException(e);
+ }
+ }
+
+ Response update(final CellSetModel model, final boolean replace) {
+ servlet.getMetrics().incrementRequests(1);
+ if (servlet.isReadOnly()) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response.status(Response.Status.FORBIDDEN)
+ .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
+ .build();
+ }
+
+ if (CHECK_PUT.equalsIgnoreCase(check)) {
+ return checkAndPut(model);
+ } else if (CHECK_DELETE.equalsIgnoreCase(check)) {
+ return checkAndDelete(model);
+ } else if (check != null && check.length() > 0) {
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Invalid check value '" + check + "'" + CRLF)
+ .build();
+ }
+
+ HTableInterface table = null;
+ try {
+ List<RowModel> rows = model.getRows();
+ List<Put> puts = new ArrayList<Put>();
+ for (RowModel row: rows) {
+ byte[] key = row.getKey();
+ if (key == null) {
+ key = rowspec.getRow();
+ }
+ if (key == null) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Row key not specified." + CRLF)
+ .build();
+ }
+ Put put = new Put(key);
+ int i = 0;
+ for (CellModel cell: row.getCells()) {
+ byte[] col = cell.getColumn();
+        if (col == null) {
+          try {
+            col = rowspec.getColumns()[i++];
+          } catch (ArrayIndexOutOfBoundsException e) {
+            col = null;
+          }
+        }
+ if (col == null) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF)
+ .build();
+ }
+ byte [][] parts = KeyValue.parseColumn(col);
+ if (parts.length != 2) {
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+ .build();
+ }
+ put.addImmutable(parts[0], parts[1], cell.getTimestamp(), cell.getValue());
+ }
+ puts.add(put);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("PUT " + put.toString());
+ }
+ }
+ table = servlet.getTable(tableResource.getName());
+ table.put(puts);
+ table.flushCommits();
+ ResponseBuilder response = Response.ok();
+ servlet.getMetrics().incrementSucessfulPutRequests(1);
+ return response.build();
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return processException(e);
+ } finally {
+ if (table != null) try {
+ table.close();
+ } catch (IOException ioe) {
+ LOG.debug("Exception received while closing the table", ioe);
+ }
+ }
+ }
+
+ // This currently supports only update of one row at a time.
+ Response updateBinary(final byte[] message, final HttpHeaders headers,
+ final boolean replace) {
+ servlet.getMetrics().incrementRequests(1);
+ if (servlet.isReadOnly()) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response.status(Response.Status.FORBIDDEN)
+ .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
+ .build();
+ }
+ HTableInterface table = null;
+ try {
+ byte[] row = rowspec.getRow();
+ byte[][] columns = rowspec.getColumns();
+ byte[] column = null;
+ if (columns != null) {
+ column = columns[0];
+ }
+ long timestamp = HConstants.LATEST_TIMESTAMP;
+ List<String> vals = headers.getRequestHeader("X-Row");
+ if (vals != null && !vals.isEmpty()) {
+ row = Bytes.toBytes(vals.get(0));
+ }
+ vals = headers.getRequestHeader("X-Column");
+ if (vals != null && !vals.isEmpty()) {
+ column = Bytes.toBytes(vals.get(0));
+ }
+ vals = headers.getRequestHeader("X-Timestamp");
+ if (vals != null && !vals.isEmpty()) {
+ timestamp = Long.valueOf(vals.get(0));
+ }
+ if (column == null) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF)
+ .build();
+ }
+ Put put = new Put(row);
+      byte[][] parts = KeyValue.parseColumn(column);
+ if (parts.length != 2) {
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+ .build();
+ }
+ put.addImmutable(parts[0], parts[1], timestamp, message);
+ table = servlet.getTable(tableResource.getName());
+ table.put(put);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("PUT " + put.toString());
+ }
+ servlet.getMetrics().incrementSucessfulPutRequests(1);
+ return Response.ok().build();
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return processException(e);
+ } finally {
+ if (table != null) try {
+ table.close();
+ } catch (IOException ioe) {
+        LOG.debug("Exception received while closing the table", ioe);
+ }
+ }
+ }
+
+ @PUT
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response put(final CellSetModel model,
+ final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("PUT " + uriInfo.getAbsolutePath()
+ + " " + uriInfo.getQueryParameters());
+ }
+ return update(model, true);
+ }
+
+ @PUT
+ @Consumes(MIMETYPE_BINARY)
+ public Response putBinary(final byte[] message,
+ final @Context UriInfo uriInfo, final @Context HttpHeaders headers) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("PUT " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY);
+ }
+ return updateBinary(message, headers, true);
+ }
+
+ @POST
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response post(final CellSetModel model,
+ final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("POST " + uriInfo.getAbsolutePath()
+ + " " + uriInfo.getQueryParameters());
+ }
+ return update(model, false);
+ }
+
+ @POST
+ @Consumes(MIMETYPE_BINARY)
+ public Response postBinary(final byte[] message,
+ final @Context UriInfo uriInfo, final @Context HttpHeaders headers) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("POST " + uriInfo.getAbsolutePath() + " as "+MIMETYPE_BINARY);
+ }
+ return updateBinary(message, headers, false);
+ }
+
+ @DELETE
+ public Response delete(final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("DELETE " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ if (servlet.isReadOnly()) {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ return Response.status(Response.Status.FORBIDDEN)
+ .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
+ .build();
+ }
+    Delete delete = null;
+    if (rowspec.hasTimestamp()) {
+      delete = new Delete(rowspec.getRow(), rowspec.getTimestamp());
+    } else {
+      delete = new Delete(rowspec.getRow());
+    }
+
+ for (byte[] column: rowspec.getColumns()) {
+ byte[][] split = KeyValue.parseColumn(column);
+ if (rowspec.hasTimestamp()) {
+ if (split.length == 1) {
+ delete.deleteFamily(split[0], rowspec.getTimestamp());
+ } else if (split.length == 2) {
+ delete.deleteColumns(split[0], split[1], rowspec.getTimestamp());
+ } else {
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+ .build();
+ }
+ } else {
+ if (split.length == 1) {
+ delete.deleteFamily(split[0]);
+ } else if (split.length == 2) {
+ delete.deleteColumns(split[0], split[1]);
+ } else {
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+ .build();
+ }
+ }
+ }
+ HTableInterface table = null;
+ try {
+ table = servlet.getTable(tableResource.getName());
+ table.delete(delete);
+ servlet.getMetrics().incrementSucessfulDeleteRequests(1);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("DELETE " + delete.toString());
+ }
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ return processException(e);
+ } finally {
+ if (table != null) try {
+ table.close();
+ } catch (IOException ioe) {
+        LOG.debug("Exception received while closing the table", ioe);
+ }
+ }
+ return Response.ok().build();
+ }
+
+ /**
+ * Validates the input request parameters, parses columns from CellSetModel,
+ * and invokes checkAndPut on HTable.
+ *
+ * @param model instance of CellSetModel
+ * @return Response 200 OK, 304 Not modified, 400 Bad request
+ */
+ Response checkAndPut(final CellSetModel model) {
+ HTableInterface table = null;
+ try {
+ table = servlet.getTable(tableResource.getName());
+ if (model.getRows().size() != 1) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
+ .entity("Bad request: Number of rows specified is not 1." + CRLF).build();
+ }
+
+ RowModel rowModel = model.getRows().get(0);
+ byte[] key = rowModel.getKey();
+ if (key == null) {
+ key = rowspec.getRow();
+ }
+
+ List<CellModel> cellModels = rowModel.getCells();
+ int cellModelCount = cellModels.size();
+ if (key == null || cellModelCount <= 1) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response
+ .status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT)
+ .entity(
+ "Bad request: Either row key is null or no data found for columns specified." + CRLF)
+ .build();
+ }
+
+ Put put = new Put(key);
+ boolean retValue;
+ CellModel valueToCheckCell = cellModels.get(cellModelCount - 1);
+ byte[] valueToCheckColumn = valueToCheckCell.getColumn();
+ byte[][] valueToPutParts = KeyValue.parseColumn(valueToCheckColumn);
+ if (valueToPutParts.length == 2 && valueToPutParts[1].length > 0) {
+ CellModel valueToPutCell = null;
+        for (int i = 0, n = cellModelCount - 1; i < n; i++) {
+          if (Bytes.equals(cellModels.get(i).getColumn(),
+ valueToCheckCell.getColumn())) {
+ valueToPutCell = cellModels.get(i);
+ break;
+ }
+ }
+ if (valueToPutCell == null) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
+ .entity("Bad request: The column to put and check do not match." + CRLF).build();
+ } else {
+ put.addImmutable(valueToPutParts[0], valueToPutParts[1], valueToPutCell.getTimestamp(),
+ valueToPutCell.getValue());
+ retValue = table.checkAndPut(key, valueToPutParts[0], valueToPutParts[1],
+ valueToCheckCell.getValue(), put);
+ }
+ } else {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF)
+ .build();
+ }
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("CHECK-AND-PUT " + put.toString() + ", returns " + retValue);
+ }
+ if (!retValue) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response.status(Response.Status.NOT_MODIFIED)
+ .type(MIMETYPE_TEXT).entity("Value not Modified" + CRLF)
+ .build();
+ }
+ table.flushCommits();
+ ResponseBuilder response = Response.ok();
+ servlet.getMetrics().incrementSucessfulPutRequests(1);
+ return response.build();
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return processException(e);
+ } finally {
+ if (table != null) try {
+ table.close();
+ } catch (IOException ioe) {
+ LOG.debug("Exception received while closing the table", ioe);
+ }
+ }
+ }
+
+ /**
+ * Validates the input request parameters, parses columns from CellSetModel,
+ * and invokes checkAndDelete on HTable.
+ *
+ * @param model instance of CellSetModel
+ * @return Response 200 OK, 304 Not modified, 400 Bad request
+ */
+ Response checkAndDelete(final CellSetModel model) {
+ HTableInterface table = null;
+ Delete delete = null;
+ try {
+ table = servlet.getTable(tableResource.getName());
+ if (model.getRows().size() != 1) {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+ .build();
+ }
+ RowModel rowModel = model.getRows().get(0);
+ byte[] key = rowModel.getKey();
+ if (key == null) {
+ key = rowspec.getRow();
+ }
+ if (key == null) {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." + CRLF)
+ .build();
+ }
+
+ delete = new Delete(key);
+ boolean retValue;
+ CellModel valueToDeleteCell = rowModel.getCells().get(0);
+ byte[] valueToDeleteColumn = valueToDeleteCell.getColumn();
+ if (valueToDeleteColumn == null) {
+ try {
+ valueToDeleteColumn = rowspec.getColumns()[0];
+ } catch (final ArrayIndexOutOfBoundsException e) {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Column not specified for check." + CRLF)
+ .build();
+ }
+ }
+ byte[][] parts = KeyValue.parseColumn(valueToDeleteColumn);
+ if (parts.length == 2) {
+ if (parts[1].length != 0) {
+ delete.deleteColumns(parts[0], parts[1]);
+ retValue = table.checkAndDelete(key, parts[0], parts[1],
+ valueToDeleteCell.getValue(), delete);
+ } else {
+        // The case of an empty qualifier.
+ delete.deleteColumns(parts[0], Bytes.toBytes(StringUtils.EMPTY));
+ retValue = table.checkAndDelete(key, parts[0], Bytes.toBytes(StringUtils.EMPTY),
+ valueToDeleteCell.getValue(), delete);
+ }
+ } else {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF)
+ .build();
+ }
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("CHECK-AND-DELETE " + delete.toString() + ", returns "
+ + retValue);
+ }
+
+ if (!retValue) {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ return Response.status(Response.Status.NOT_MODIFIED)
+ .type(MIMETYPE_TEXT).entity(" Delete check failed." + CRLF)
+ .build();
+ }
+ table.flushCommits();
+ ResponseBuilder response = Response.ok();
+ servlet.getMetrics().incrementSucessfulDeleteRequests(1);
+ return response.build();
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ return processException(e);
+ } finally {
+ if (table != null) try {
+ table.close();
+ } catch (IOException ioe) {
+ LOG.debug("Exception received while closing the table", ioe);
+ }
+ }
+ }
+}
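For context, a minimal client sketch of how the two binary endpoints above are typically exercised. This is not part of the patch: the server address, table "mytable", row "row1", and column "cf:a" are hypothetical placeholders, and only plain JDK APIs are used.

import java.io.IOException;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;

// Sketch: PUT a single cell through RowResource's binary endpoint, then GET
// it back. All names are illustrative placeholders.
public class RowResourceClientSketch {
  public static void main(String[] args) throws IOException {
    URL cell = new URL("http://localhost:8080/mytable/row1/cf:a");

    // PUT: the request body is the raw cell value (MIMETYPE_BINARY).
    HttpURLConnection put = (HttpURLConnection) cell.openConnection();
    put.setRequestMethod("PUT");
    put.setDoOutput(true);
    put.setRequestProperty("Content-Type", "application/octet-stream");
    try (OutputStream out = put.getOutputStream()) {
      out.write("value1".getBytes("UTF-8"));
    }
    System.out.println("PUT -> " + put.getResponseCode()); // expect 200

    // GET: a binary read returns the value alone; the cell timestamp comes
    // back in the X-Timestamp response header, as set in getBinary() above.
    HttpURLConnection get = (HttpURLConnection) cell.openConnection();
    get.setRequestProperty("Accept", "application/octet-stream");
    System.out.println("GET -> " + get.getResponseCode());
    System.out.println("X-Timestamp: " + get.getHeaderField("X-Timestamp"));
  }
}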
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java
new file mode 100644
index 0000000..b9492dd
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java
@@ -0,0 +1,122 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.util.StringUtils;
+
+@InterfaceAudience.Private
+public class RowResultGenerator extends ResultGenerator {
+ private static final Log LOG = LogFactory.getLog(RowResultGenerator.class);
+
+ private Iterator<Cell> valuesI;
+ private Cell cache;
+
+ public RowResultGenerator(final String tableName, final RowSpec rowspec,
+ final Filter filter, final boolean cacheBlocks)
+ throws IllegalArgumentException, IOException {
+ HTableInterface table = RESTServlet.getInstance().getTable(tableName);
+ try {
+ Get get = new Get(rowspec.getRow());
+ if (rowspec.hasColumns()) {
+ for (byte[] col: rowspec.getColumns()) {
+ byte[][] split = KeyValue.parseColumn(col);
+ if (split.length == 1) {
+ get.addFamily(split[0]);
+ } else if (split.length == 2) {
+ get.addColumn(split[0], split[1]);
+ } else {
+ throw new IllegalArgumentException("Invalid column specifier.");
+ }
+ }
+ }
+ get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
+ get.setMaxVersions(rowspec.getMaxVersions());
+ if (filter != null) {
+ get.setFilter(filter);
+ }
+ get.setCacheBlocks(cacheBlocks);
+ Result result = table.get(get);
+ if (result != null && !result.isEmpty()) {
+ valuesI = result.listCells().iterator();
+ }
+ } catch (DoNotRetryIOException e) {
+      // Warn here because Stargate will return 404 in the case where multiple
+      // column families were specified but one did not exist -- currently
+      // HBase will fail the whole Get.
+      // Specifying multiple columns in a URI should be uncommon usage, but
+      // this helps avoid confusion by leaving a record in the log of what
+      // happened here.
+ LOG.warn(StringUtils.stringifyException(e));
+ } finally {
+ table.close();
+ }
+ }
+
+ public void close() {
+ }
+
+ public boolean hasNext() {
+ if (cache != null) {
+ return true;
+ }
+ if (valuesI == null) {
+ return false;
+ }
+ return valuesI.hasNext();
+ }
+
+ public Cell next() {
+ if (cache != null) {
+ Cell kv = cache;
+ cache = null;
+ return kv;
+ }
+ if (valuesI == null) {
+ return null;
+ }
+ try {
+ return valuesI.next();
+ } catch (NoSuchElementException e) {
+ return null;
+ }
+ }
+
+ public void putBack(Cell kv) {
+ this.cache = kv;
+ }
+
+ public void remove() {
+ throw new UnsupportedOperationException("remove not supported");
+ }
+}
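The hasNext()/next()/putBack() trio above implements a one-element pushback iterator. A self-contained sketch of the same pattern, with plain Strings standing in for Cells so it runs without an HBase cluster (the class and sample values are illustrative, not part of the patch):

import java.util.Arrays;
import java.util.Iterator;

// Sketch of the single-slot "putBack" pattern RowResultGenerator implements.
public class PushbackIteratorSketch implements Iterator<String> {
  private final Iterator<String> wrapped;
  private String cache; // the one put-back slot

  PushbackIteratorSketch(Iterator<String> wrapped) { this.wrapped = wrapped; }

  @Override
  public boolean hasNext() {
    return cache != null || wrapped.hasNext();
  }

  @Override
  public String next() {
    if (cache != null) {
      String v = cache;
      cache = null;
      return v;
    }
    return wrapped.hasNext() ? wrapped.next() : null;
  }

  // A consumer that reads one element too many can push it back; it will be
  // returned again by the next call to next().
  public void putBack(String v) {
    this.cache = v;
  }

  @Override
  public void remove() {
    throw new UnsupportedOperationException("remove not supported");
  }

  public static void main(String[] args) {
    PushbackIteratorSketch it =
      new PushbackIteratorSketch(Arrays.asList("a", "b").iterator());
    String first = it.next(); // "a"
    it.putBack(first);
    System.out.println(it.next()); // "a" again
    System.out.println(it.next()); // "b"
  }
}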
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java
new file mode 100644
index 0000000..b6c1ca8
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java
@@ -0,0 +1,407 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.UnsupportedEncodingException;
+import java.net.URLDecoder;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.TreeSet;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Parses a path-based row/column/timestamp specification into its component
+ * elements.
+ */
+@InterfaceAudience.Private
+public class RowSpec {
+ public static final long DEFAULT_START_TIMESTAMP = 0;
+ public static final long DEFAULT_END_TIMESTAMP = Long.MAX_VALUE;
+
+ private byte[] row = HConstants.EMPTY_START_ROW;
+ private byte[] endRow = null;
+ private TreeSet<byte[]> columns =
+ new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
+ private List<String> labels = new ArrayList<String>();
+ private long startTime = DEFAULT_START_TIMESTAMP;
+ private long endTime = DEFAULT_END_TIMESTAMP;
+ private int maxVersions = 1;
+ private int maxValues = Integer.MAX_VALUE;
+
+ public RowSpec(String path) throws IllegalArgumentException {
+ int i = 0;
+ while (path.charAt(i) == '/') {
+ i++;
+ }
+ i = parseRowKeys(path, i);
+ i = parseColumns(path, i);
+ i = parseTimestamp(path, i);
+ i = parseQueryParams(path, i);
+ }
+
+ private int parseRowKeys(final String path, int i)
+ throws IllegalArgumentException {
+ String startRow = null, endRow = null;
+ try {
+ StringBuilder sb = new StringBuilder();
+ char c;
+ while (i < path.length() && (c = path.charAt(i)) != '/') {
+ sb.append(c);
+ i++;
+ }
+ i++;
+ String row = startRow = sb.toString();
+ int idx = startRow.indexOf(',');
+ if (idx != -1) {
+ startRow = URLDecoder.decode(row.substring(0, idx),
+ HConstants.UTF8_ENCODING);
+ endRow = URLDecoder.decode(row.substring(idx + 1),
+ HConstants.UTF8_ENCODING);
+ } else {
+ startRow = URLDecoder.decode(row, HConstants.UTF8_ENCODING);
+ }
+ } catch (IndexOutOfBoundsException e) {
+ throw new IllegalArgumentException(e);
+ } catch (UnsupportedEncodingException e) {
+ throw new RuntimeException(e);
+ }
+ // HBase does not support wildcards on row keys so we will emulate a
+ // suffix glob by synthesizing appropriate start and end row keys for
+ // table scanning
+    if (startRow.charAt(startRow.length() - 1) == '*') {
+      if (endRow != null) {
+        throw new IllegalArgumentException("invalid path: an end row may " +
+          "not be specified together with a start row wildcard");
+      }
+      this.row = Bytes.toBytes(startRow.substring(0,
+        startRow.lastIndexOf("*")));
+      this.endRow = new byte[this.row.length + 1];
+      System.arraycopy(this.row, 0, this.endRow, 0, this.row.length);
+      this.endRow[this.row.length] = (byte)255;
+    } else {
+      this.row = Bytes.toBytes(startRow);
+      if (endRow != null) {
+        this.endRow = Bytes.toBytes(endRow);
+      }
+    }
+ return i;
+ }
+
+ private int parseColumns(final String path, int i) throws IllegalArgumentException {
+ if (i >= path.length()) {
+ return i;
+ }
+ try {
+ char c;
+ StringBuilder column = new StringBuilder();
+ while (i < path.length() && (c = path.charAt(i)) != '/') {
+ if (c == ',') {
+ if (column.length() < 1) {
+ throw new IllegalArgumentException("invalid path");
+ }
+ String s = URLDecoder.decode(column.toString(), HConstants.UTF8_ENCODING);
+ this.columns.add(Bytes.toBytes(s));
+ column.setLength(0);
+ i++;
+ continue;
+ }
+ column.append(c);
+ i++;
+ }
+ i++;
+ // trailing list entry
+ if (column.length() > 0) {
+ String s = URLDecoder.decode(column.toString(), HConstants.UTF8_ENCODING);
+ this.columns.add(Bytes.toBytes(s));
+ }
+ } catch (IndexOutOfBoundsException e) {
+ throw new IllegalArgumentException(e);
+ } catch (UnsupportedEncodingException e) {
+ // shouldn't happen
+ throw new RuntimeException(e);
+ }
+ return i;
+ }
+
+ private int parseTimestamp(final String path, int i)
+ throws IllegalArgumentException {
+ if (i >= path.length()) {
+ return i;
+ }
+ long time0 = 0, time1 = 0;
+ try {
+ char c = 0;
+ StringBuilder stamp = new StringBuilder();
+ while (i < path.length()) {
+ c = path.charAt(i);
+ if (c == '/' || c == ',') {
+ break;
+ }
+ stamp.append(c);
+ i++;
+ }
+ try {
+ time0 = Long.valueOf(URLDecoder.decode(stamp.toString(),
+ HConstants.UTF8_ENCODING));
+ } catch (NumberFormatException e) {
+ throw new IllegalArgumentException(e);
+ }
+ if (c == ',') {
+ stamp = new StringBuilder();
+ i++;
+ while (i < path.length() && ((c = path.charAt(i)) != '/')) {
+ stamp.append(c);
+ i++;
+ }
+ try {
+ time1 = Long.valueOf(URLDecoder.decode(stamp.toString(),
+ HConstants.UTF8_ENCODING));
+ } catch (NumberFormatException e) {
+ throw new IllegalArgumentException(e);
+ }
+ }
+ if (c == '/') {
+ i++;
+ }
+ } catch (IndexOutOfBoundsException e) {
+ throw new IllegalArgumentException(e);
+ } catch (UnsupportedEncodingException e) {
+ // shouldn't happen
+ throw new RuntimeException(e);
+ }
+ if (time1 != 0) {
+ startTime = time0;
+ endTime = time1;
+ } else {
+ endTime = time0;
+ }
+ return i;
+ }
+
+ private int parseQueryParams(final String path, int i) {
+ if (i >= path.length()) {
+ return i;
+ }
+ StringBuilder query = new StringBuilder();
+ try {
+ query.append(URLDecoder.decode(path.substring(i),
+ HConstants.UTF8_ENCODING));
+ } catch (UnsupportedEncodingException e) {
+ // should not happen
+ throw new RuntimeException(e);
+ }
+ i += query.length();
+ int j = 0;
+ while (j < query.length()) {
+ char c = query.charAt(j);
+ if (c != '?' && c != '&') {
+ break;
+ }
+      if (++j >= query.length()) {
+        throw new IllegalArgumentException("malformed query parameter");
+      }
+      char what = query.charAt(j);
+      if (++j >= query.length()) {
+        break;
+      }
+      c = query.charAt(j);
+      if (c != '=') {
+        throw new IllegalArgumentException("malformed query parameter");
+      }
+      if (++j >= query.length()) {
+        break;
+      }
+      switch (what) {
+      case 'm': {
+        StringBuilder sb = new StringBuilder();
+        while (j < query.length()) {
+          c = query.charAt(j);
+          if (c < '0' || c > '9') {
+            break;
+          }
+          sb.append(c);
+          j++;
+        }
+        maxVersions = Integer.valueOf(sb.toString());
+      } break;
+      case 'n': {
+        StringBuilder sb = new StringBuilder();
+        while (j < query.length()) {
+          c = query.charAt(j);
+          if (c < '0' || c > '9') {
+            break;
+          }
+          sb.append(c);
+          j++;
+        }
+        maxValues = Integer.valueOf(sb.toString());
+      } break;
+      default:
+        throw new IllegalArgumentException("unknown parameter '" + what + "'");
+      }
+ }
+ return i;
+ }
+
+ public RowSpec(byte[] startRow, byte[] endRow, byte[][] columns,
+ long startTime, long endTime, int maxVersions) {
+ this.row = startRow;
+ this.endRow = endRow;
+ if (columns != null) {
+ Collections.addAll(this.columns, columns);
+ }
+ this.startTime = startTime;
+ this.endTime = endTime;
+ this.maxVersions = maxVersions;
+ }
+
+ public RowSpec(byte[] startRow, byte[] endRow, Collection<byte[]> columns,
+ long startTime, long endTime, int maxVersions, Collection<String> labels) {
+ this(startRow, endRow, columns, startTime, endTime, maxVersions);
+    if (labels != null) {
+      this.labels.addAll(labels);
+    }
+  }
+
+  public RowSpec(byte[] startRow, byte[] endRow, Collection<byte[]> columns,
+ long startTime, long endTime, int maxVersions) {
+ this.row = startRow;
+ this.endRow = endRow;
+ if (columns != null) {
+ this.columns.addAll(columns);
+ }
+ this.startTime = startTime;
+ this.endTime = endTime;
+ this.maxVersions = maxVersions;
+ }
+
+ public boolean isSingleRow() {
+ return endRow == null;
+ }
+
+ public int getMaxVersions() {
+ return maxVersions;
+ }
+
+ public void setMaxVersions(final int maxVersions) {
+ this.maxVersions = maxVersions;
+ }
+
+ public int getMaxValues() {
+ return maxValues;
+ }
+
+ public void setMaxValues(final int maxValues) {
+ this.maxValues = maxValues;
+ }
+
+ public boolean hasColumns() {
+ return !columns.isEmpty();
+ }
+
+ public boolean hasLabels() {
+ return !labels.isEmpty();
+ }
+
+ public byte[] getRow() {
+ return row;
+ }
+
+ public byte[] getStartRow() {
+ return row;
+ }
+
+ public boolean hasEndRow() {
+ return endRow != null;
+ }
+
+ public byte[] getEndRow() {
+ return endRow;
+ }
+
+ public void addColumn(final byte[] column) {
+ columns.add(column);
+ }
+
+ public byte[][] getColumns() {
+ return columns.toArray(new byte[columns.size()][]);
+ }
+
+ public List<String> getLabels() {
+ return labels;
+ }
+
+  public boolean hasTimestamp() {
+    return (startTime == DEFAULT_START_TIMESTAMP) &&
+        (endTime != DEFAULT_END_TIMESTAMP);
+  }
+
+ public long getTimestamp() {
+ return endTime;
+ }
+
+ public long getStartTime() {
+ return startTime;
+ }
+
+ public void setStartTime(final long startTime) {
+ this.startTime = startTime;
+ }
+
+ public long getEndTime() {
+ return endTime;
+ }
+
+ public void setEndTime(long endTime) {
+ this.endTime = endTime;
+ }
+
+  @Override
+  public String toString() {
+ StringBuilder result = new StringBuilder();
+ result.append("{startRow => '");
+ if (row != null) {
+ result.append(Bytes.toString(row));
+ }
+ result.append("', endRow => '");
+ if (endRow != null) {
+ result.append(Bytes.toString(endRow));
+ }
+ result.append("', columns => [");
+ for (byte[] col: columns) {
+ result.append(" '");
+ result.append(Bytes.toString(col));
+ result.append("'");
+ }
+ result.append(" ], startTime => ");
+ result.append(Long.toString(startTime));
+ result.append(", endTime => ");
+ result.append(Long.toString(endTime));
+ result.append(", maxVersions => ");
+ result.append(Integer.toString(maxVersions));
+ result.append(", maxValues => ");
+ result.append(Integer.toString(maxValues));
+ result.append("}");
+ return result.toString();
+ }
+}
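A small sketch of the kinds of path specifications the parser above accepts. The row and column names are made up, and the helper class simply assumes RowSpec is on the classpath; it is not part of the patch:

import org.apache.hadoop.hbase.rest.RowSpec;

// Sketch: sample path specs and what RowSpec extracts from them.
public class RowSpecExamples {
  public static void main(String[] args) {
    // A single row with one column; timestamps and versions use defaults.
    RowSpec single = new RowSpec("/row1/cf:a");
    System.out.println(single);

    // A suffix glob: "row1*" yields a start row of "row1" plus a
    // synthesized end row, so the spec covers a range, not a single row.
    // The trailing segment is a start,end timestamp pair.
    RowSpec glob = new RowSpec("/row1*/cf:a,cf:b/0,1400000000000");
    System.out.println(glob.isSingleRow()); // false
  }
}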
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java
new file mode 100644
index 0000000..ffb2fae
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java
@@ -0,0 +1,201 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.rest.model.CellModel;
+import org.apache.hadoop.hbase.rest.model.CellSetModel;
+import org.apache.hadoop.hbase.rest.model.RowModel;
+import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.Bytes;
+
+@InterfaceAudience.Private
+public class ScannerInstanceResource extends ResourceBase {
+ private static final Log LOG =
+ LogFactory.getLog(ScannerInstanceResource.class);
+
+ static CacheControl cacheControl;
+ static {
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ ResultGenerator generator = null;
+ String id = null;
+ int batch = 1;
+
+ public ScannerInstanceResource() throws IOException { }
+
+ public ScannerInstanceResource(String table, String id,
+ ResultGenerator generator, int batch) throws IOException {
+ this.id = id;
+ this.generator = generator;
+ this.batch = batch;
+ }
+
+ @GET
+ @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response get(final @Context UriInfo uriInfo,
+ @QueryParam("n") int maxRows, final @QueryParam("c") int maxValues) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ if (generator == null) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return Response.status(Response.Status.NOT_FOUND)
+ .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
+ .build();
+ }
+ CellSetModel model = new CellSetModel();
+ RowModel rowModel = null;
+ byte[] rowKey = null;
+ int limit = batch;
+ if (maxValues > 0) {
+ limit = maxValues;
+ }
+ int count = limit;
+ do {
+ Cell value = null;
+ try {
+ value = generator.next();
+ } catch (IllegalStateException e) {
+ if (ScannerResource.delete(id)) {
+ servlet.getMetrics().incrementSucessfulDeleteRequests(1);
+ } else {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ }
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return Response.status(Response.Status.GONE)
+ .type(MIMETYPE_TEXT).entity("Gone" + CRLF)
+ .build();
+ }
+ if (value == null) {
+ LOG.info("generator exhausted");
+ // respond with 204 (No Content) if an empty cell set would be
+ // returned
+ if (count == limit) {
+ return Response.noContent().build();
+ }
+ break;
+ }
+ if (rowKey == null) {
+ rowKey = CellUtil.cloneRow(value);
+ rowModel = new RowModel(rowKey);
+ }
+ if (!Bytes.equals(CellUtil.cloneRow(value), rowKey)) {
+ // if maxRows was given as a query param, stop if we would exceed the
+ // specified number of rows
+ if (maxRows > 0) {
+ if (--maxRows == 0) {
+ generator.putBack(value);
+ break;
+ }
+ }
+ model.addRow(rowModel);
+ rowKey = CellUtil.cloneRow(value);
+ rowModel = new RowModel(rowKey);
+ }
+ rowModel.addCell(
+ new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value),
+ value.getTimestamp(), CellUtil.cloneValue(value)));
+ } while (--count > 0);
+ model.addRow(rowModel);
+ ResponseBuilder response = Response.ok(model);
+ response.cacheControl(cacheControl);
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return response.build();
+ }
+
+ @GET
+ @Produces(MIMETYPE_BINARY)
+ public Response getBinary(final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath() + " as " +
+ MIMETYPE_BINARY);
+ }
+ servlet.getMetrics().incrementRequests(1);
+ try {
+ Cell value = generator.next();
+ if (value == null) {
+ LOG.info("generator exhausted");
+ return Response.noContent().build();
+ }
+ ResponseBuilder response = Response.ok(CellUtil.cloneValue(value));
+ response.cacheControl(cacheControl);
+ response.header("X-Row", Base64.encodeBytes(CellUtil.cloneRow(value)));
+ response.header("X-Column",
+ Base64.encodeBytes(
+ KeyValue.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value))));
+ response.header("X-Timestamp", value.getTimestamp());
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return response.build();
+ } catch (IllegalStateException e) {
+ if (ScannerResource.delete(id)) {
+ servlet.getMetrics().incrementSucessfulDeleteRequests(1);
+ } else {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ }
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return Response.status(Response.Status.GONE)
+ .type(MIMETYPE_TEXT).entity("Gone" + CRLF)
+ .build();
+ }
+ }
+
+ @DELETE
+ public Response delete(final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("DELETE " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ if (servlet.isReadOnly()) {
+ return Response.status(Response.Status.FORBIDDEN)
+ .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
+ .build();
+ }
+ if (ScannerResource.delete(id)) {
+ servlet.getMetrics().incrementSucessfulDeleteRequests(1);
+ } else {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ }
+ return Response.ok().build();
+ }
+}
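A sketch of draining a scanner through the resource above. The scanner URI is the hypothetical Location returned when the scanner was created (see the ScannerResource diff that follows); each GET returns the next batch until the generator is exhausted and the resource answers 204 No Content. Host and table are placeholders:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

// Sketch: read batches from an existing scanner instance until exhausted.
public class ScannerReadSketch {
  public static void main(String[] args) throws Exception {
    URL scanner = new URL("http://localhost:8080/mytable/scanner/140123abc");
    while (true) {
      HttpURLConnection c = (HttpURLConnection) scanner.openConnection();
      c.setRequestProperty("Accept", "text/xml");
      int status = c.getResponseCode();
      if (status == 204) {
        break; // scanner exhausted
      }
      try (BufferedReader in = new BufferedReader(
          new InputStreamReader(c.getInputStream(), "UTF-8"))) {
        String line;
        while ((line = in.readLine()) != null) {
          System.out.println(line); // a CellSet document
        }
      }
    }
  }
}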
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
new file mode 100644
index 0000000..6c424ce
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
@@ -0,0 +1,164 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriBuilder;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.rest.model.ScannerModel;
+
+@InterfaceAudience.Private
+public class ScannerResource extends ResourceBase {
+
+ private static final Log LOG = LogFactory.getLog(ScannerResource.class);
+
+ static final Map<String,ScannerInstanceResource> scanners =
+ Collections.synchronizedMap(new HashMap<String,ScannerInstanceResource>());
+
+ TableResource tableResource;
+
+  /**
+   * Constructor
+   * @param tableResource the parent table resource
+   * @throws IOException
+   */
+  public ScannerResource(TableResource tableResource) throws IOException {
+ super();
+ this.tableResource = tableResource;
+ }
+
+ static boolean delete(final String id) {
+ ScannerInstanceResource instance = scanners.remove(id);
+ if (instance != null) {
+ instance.generator.close();
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ Response update(final ScannerModel model, final boolean replace,
+ final UriInfo uriInfo) {
+ servlet.getMetrics().incrementRequests(1);
+ if (servlet.isReadOnly()) {
+ return Response.status(Response.Status.FORBIDDEN)
+ .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
+ .build();
+ }
+ byte[] endRow = model.hasEndRow() ? model.getEndRow() : null;
+ RowSpec spec = null;
+ if (model.getLabels() != null) {
+ spec = new RowSpec(model.getStartRow(), endRow, model.getColumns(), model.getStartTime(),
+ model.getEndTime(), model.getMaxVersions(), model.getLabels());
+ } else {
+ spec = new RowSpec(model.getStartRow(), endRow, model.getColumns(), model.getStartTime(),
+ model.getEndTime(), model.getMaxVersions());
+ }
+ MultivaluedMap<String, String> params = uriInfo.getQueryParameters();
+
+ try {
+ Filter filter = ScannerResultGenerator.buildFilterFromModel(model);
+ String tableName = tableResource.getName();
+ ScannerResultGenerator gen =
+ new ScannerResultGenerator(tableName, spec, filter, model.getCaching(),
+ model.getCacheBlocks());
+ String id = gen.getID();
+ ScannerInstanceResource instance =
+ new ScannerInstanceResource(tableName, id, gen, model.getBatch());
+ scanners.put(id, instance);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("new scanner: " + id);
+ }
+ UriBuilder builder = uriInfo.getAbsolutePathBuilder();
+ URI uri = builder.path(id).build();
+ servlet.getMetrics().incrementSucessfulPutRequests(1);
+ return Response.created(uri).build();
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ if (e instanceof TableNotFoundException) {
+ return Response.status(Response.Status.NOT_FOUND)
+ .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
+ .build();
+ } else if (e instanceof RuntimeException) {
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+ .build();
+ }
+ return Response.status(Response.Status.SERVICE_UNAVAILABLE)
+ .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
+ .build();
+ }
+ }
+
+ @PUT
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response put(final ScannerModel model,
+ final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("PUT " + uriInfo.getAbsolutePath());
+ }
+ return update(model, true, uriInfo);
+ }
+
+ @POST
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response post(final ScannerModel model,
+ final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("POST " + uriInfo.getAbsolutePath());
+ }
+ return update(model, false, uriInfo);
+ }
+
+ @Path("{scanner: .+}")
+ public ScannerInstanceResource getScannerInstanceResource(
+ final @PathParam("scanner") String id) throws IOException {
+ ScannerInstanceResource instance = scanners.get(id);
+ if (instance == null) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return new ScannerInstanceResource();
+ } else {
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ }
+ return instance;
+ }
+}
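For illustration, a client sketch of creating a scanner through this resource: a ScannerModel document is POSTed to /&lt;table&gt;/scanner, and on success update() responds 201 Created with the new scanner instance URI in the Location header. Host, table, and column are hypothetical (the column element carries base64, here base64("cf:a")):

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;

// Sketch: create a scanner and read back its instance URI.
public class ScannerCreateSketch {
  public static void main(String[] args) throws Exception {
    URL u = new URL("http://localhost:8080/mytable/scanner");
    HttpURLConnection c = (HttpURLConnection) u.openConnection();
    c.setRequestMethod("POST");
    c.setDoOutput(true);
    c.setRequestProperty("Content-Type", "text/xml");
    String model = "<Scanner batch=\"10\">"
        + "<column>Y2Y6YQ==</column>" // base64("cf:a")
        + "</Scanner>";
    try (OutputStream out = c.getOutputStream()) {
      out.write(model.getBytes("UTF-8"));
    }
    System.out.println(c.getResponseCode());          // expect 201
    System.out.println(c.getHeaderField("Location")); // scanner instance URI
  }
}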
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java
new file mode 100644
index 0000000..055c971
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java
@@ -0,0 +1,191 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.UnknownScannerException;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.rest.model.ScannerModel;
+import org.apache.hadoop.hbase.security.visibility.Authorizations;
+import org.apache.hadoop.util.StringUtils;
+
+@InterfaceAudience.Private
+public class ScannerResultGenerator extends ResultGenerator {
+
+ private static final Log LOG =
+ LogFactory.getLog(ScannerResultGenerator.class);
+
+ public static Filter buildFilterFromModel(final ScannerModel model)
+ throws Exception {
+ String filter = model.getFilter();
+ if (filter == null || filter.length() == 0) {
+ return null;
+ }
+ return buildFilter(filter);
+ }
+
+ private String id;
+ private Iterator<Cell> rowI;
+ private Cell cache;
+ private ResultScanner scanner;
+ private Result cached;
+
+ public ScannerResultGenerator(final String tableName, final RowSpec rowspec,
+ final Filter filter, final boolean cacheBlocks)
+ throws IllegalArgumentException, IOException {
+ this(tableName, rowspec, filter, -1, cacheBlocks);
+ }
+
+ public ScannerResultGenerator(final String tableName, final RowSpec rowspec,
+ final Filter filter, final int caching, final boolean cacheBlocks)
+ throws IllegalArgumentException, IOException {
+ HTableInterface table = RESTServlet.getInstance().getTable(tableName);
+ try {
+ Scan scan;
+ if (rowspec.hasEndRow()) {
+ scan = new Scan(rowspec.getStartRow(), rowspec.getEndRow());
+ } else {
+ scan = new Scan(rowspec.getStartRow());
+ }
+ if (rowspec.hasColumns()) {
+ byte[][] columns = rowspec.getColumns();
+ for (byte[] column: columns) {
+ byte[][] split = KeyValue.parseColumn(column);
+ if (split.length == 1) {
+ scan.addFamily(split[0]);
+ } else if (split.length == 2) {
+ scan.addColumn(split[0], split[1]);
+ } else {
+ throw new IllegalArgumentException("Invalid familyAndQualifier provided.");
+ }
+ }
+ }
+ scan.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
+ scan.setMaxVersions(rowspec.getMaxVersions());
+ if (filter != null) {
+ scan.setFilter(filter);
+ }
+      if (caching > 0) {
+ scan.setCaching(caching);
+ }
+ scan.setCacheBlocks(cacheBlocks);
+ if (rowspec.hasLabels()) {
+ scan.setAuthorizations(new Authorizations(rowspec.getLabels()));
+ }
+ scanner = table.getScanner(scan);
+ cached = null;
+ id = Long.toString(System.currentTimeMillis()) +
+ Integer.toHexString(scanner.hashCode());
+ } finally {
+ table.close();
+ }
+ }
+
+ public String getID() {
+ return id;
+ }
+
+ public void close() {
+ if (scanner != null) {
+ scanner.close();
+ scanner = null;
+ }
+ }
+
+ public boolean hasNext() {
+ if (cache != null) {
+ return true;
+ }
+ if (rowI != null && rowI.hasNext()) {
+ return true;
+ }
+ if (cached != null) {
+ return true;
+ }
+ try {
+ Result result = scanner.next();
+ if (result != null && !result.isEmpty()) {
+ cached = result;
+ }
+ } catch (UnknownScannerException e) {
+ throw new IllegalArgumentException(e);
+ } catch (IOException e) {
+ LOG.error(StringUtils.stringifyException(e));
+ }
+ return cached != null;
+ }
+
+ public Cell next() {
+ if (cache != null) {
+ Cell kv = cache;
+ cache = null;
+ return kv;
+ }
+ boolean loop;
+ do {
+ loop = false;
+ if (rowI != null) {
+ if (rowI.hasNext()) {
+ return rowI.next();
+ } else {
+ rowI = null;
+ }
+ }
+ if (cached != null) {
+ rowI = cached.listCells().iterator();
+ loop = true;
+ cached = null;
+ } else {
+ Result result = null;
+ try {
+ result = scanner.next();
+ } catch (UnknownScannerException e) {
+ throw new IllegalArgumentException(e);
+ } catch (IOException e) {
+ LOG.error(StringUtils.stringifyException(e));
+ }
+ if (result != null && !result.isEmpty()) {
+ rowI = result.listCells().iterator();
+ loop = true;
+ }
+ }
+ } while (loop);
+ return null;
+ }
+
+ public void putBack(Cell kv) {
+ this.cache = kv;
+ }
+
+ public void remove() {
+ throw new UnsupportedOperationException("remove not supported");
+ }
+}
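For comparison, a sketch of the kind of Scan the constructor above assembles from a RowSpec, written directly against the client API. The row range, column, and settings are illustrative values only, not part of the patch:

import java.io.IOException;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: a Scan equivalent to what ScannerResultGenerator builds.
public class ScanAssemblySketch {
  public static Scan buildExample() throws IOException {
    Scan scan = new Scan(Bytes.toBytes("row1"), Bytes.toBytes("row2"));
    scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("a")); // family + qualifier
    scan.setTimeRange(0L, Long.MAX_VALUE); // RowSpec's default time range
    scan.setMaxVersions(1);                // RowSpec's default
    scan.setCacheBlocks(false);            // as requested by the REST layer
    return scan;
  }
}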
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
new file mode 100644
index 0000000..5de6b38
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
@@ -0,0 +1,246 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.util.Map;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+import javax.xml.namespace.QName;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotEnabledException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel;
+import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+@InterfaceAudience.Private
+public class SchemaResource extends ResourceBase {
+ private static final Log LOG = LogFactory.getLog(SchemaResource.class);
+
+ static CacheControl cacheControl;
+ static {
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ TableResource tableResource;
+
+  /**
+   * Constructor
+   * @param tableResource the parent table resource
+   * @throws IOException
+   */
+ public SchemaResource(TableResource tableResource) throws IOException {
+ super();
+ this.tableResource = tableResource;
+ }
+
+ private HTableDescriptor getTableSchema() throws IOException,
+ TableNotFoundException {
+ HTableInterface table = servlet.getTable(tableResource.getName());
+ try {
+ return table.getTableDescriptor();
+ } finally {
+ table.close();
+ }
+ }
+
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response get(final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ try {
+ ResponseBuilder response =
+ Response.ok(new TableSchemaModel(getTableSchema()));
+ response.cacheControl(cacheControl);
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return response.build();
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return processException(e);
+ }
+ }
+
+ private Response replace(final byte[] name, final TableSchemaModel model,
+ final UriInfo uriInfo, final HBaseAdmin admin) {
+ if (servlet.isReadOnly()) {
+ return Response.status(Response.Status.FORBIDDEN)
+ .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
+ .build();
+ }
+ try {
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
+ for (Map.Entry<QName,Object> e: model.getAny().entrySet()) {
+ htd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
+ }
+ for (ColumnSchemaModel family: model.getColumns()) {
+ HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
+ for (Map.Entry<QName,Object> e: family.getAny().entrySet()) {
+ hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
+ }
+ htd.addFamily(hcd);
+ }
+ if (admin.tableExists(name)) {
+ admin.disableTable(name);
+ admin.modifyTable(name, htd);
+ admin.enableTable(name);
+ servlet.getMetrics().incrementSucessfulPutRequests(1);
+ } else try {
+ admin.createTable(htd);
+ servlet.getMetrics().incrementSucessfulPutRequests(1);
+ } catch (TableExistsException e) {
+ // race, someone else created a table with the same name
+ return Response.status(Response.Status.NOT_MODIFIED)
+ .type(MIMETYPE_TEXT).entity("Not modified" + CRLF)
+ .build();
+ }
+ return Response.created(uriInfo.getAbsolutePath()).build();
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return processException(e);
+ }
+ }
+
+ private Response update(final byte[] name, final TableSchemaModel model,
+ final UriInfo uriInfo, final HBaseAdmin admin) {
+ if (servlet.isReadOnly()) {
+ return Response.status(Response.Status.FORBIDDEN)
+ .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
+ .build();
+ }
+ try {
+ HTableDescriptor htd = admin.getTableDescriptor(name);
+ admin.disableTable(name);
+ try {
+ for (ColumnSchemaModel family: model.getColumns()) {
+ HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
+ for (Map.Entry<QName,Object> e: family.getAny().entrySet()) {
+ hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
+ }
+ if (htd.hasFamily(hcd.getName())) {
+ admin.modifyColumn(name, hcd);
+ } else {
+ admin.addColumn(name, hcd);
+ }
+ }
+ } catch (IOException e) {
+ return Response.status(Response.Status.SERVICE_UNAVAILABLE)
+ .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
+ .build();
+ } finally {
+ admin.enableTable(tableResource.getName());
+ }
+ servlet.getMetrics().incrementSucessfulPutRequests(1);
+ return Response.ok().build();
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return processException(e);
+ }
+ }
+
+ private Response update(final TableSchemaModel model, final boolean replace,
+ final UriInfo uriInfo) {
+ try {
+ byte[] name = Bytes.toBytes(tableResource.getName());
+ HBaseAdmin admin = servlet.getAdmin();
+ if (replace || !admin.tableExists(name)) {
+ return replace(name, model, uriInfo, admin);
+ } else {
+ return update(name, model, uriInfo, admin);
+ }
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return processException(e);
+ }
+ }
+
+ @PUT
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response put(final TableSchemaModel model,
+ final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("PUT " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ return update(model, true, uriInfo);
+ }
+
+ @POST
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response post(final TableSchemaModel model,
+ final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("PUT " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ return update(model, false, uriInfo);
+ }
+
+ @DELETE
+ public Response delete(final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("DELETE " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ if (servlet.isReadOnly()) {
+ return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT)
+ .entity("Forbidden" + CRLF).build();
+ }
+ try {
+ HBaseAdmin admin = servlet.getAdmin();
+ try {
+ admin.disableTable(tableResource.getName());
+ } catch (TableNotEnabledException e) { /* this is what we want anyway */ }
+ admin.deleteTable(tableResource.getName());
+ servlet.getMetrics().incrementSucessfulDeleteRequests(1);
+ return Response.ok().build();
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ return processException(e);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
new file mode 100644
index 0000000..a7e52bd
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
@@ -0,0 +1,109 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+
+import javax.ws.rs.GET;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.RegionLoad;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;
+
+@InterfaceAudience.Private
+public class StorageClusterStatusResource extends ResourceBase {
+ private static final Log LOG =
+ LogFactory.getLog(StorageClusterStatusResource.class);
+
+ static CacheControl cacheControl;
+ static {
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ /**
+ * Constructor
+ * @throws IOException
+ */
+ public StorageClusterStatusResource() throws IOException {
+ super();
+ }
+
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response get(final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ try {
+ ClusterStatus status = servlet.getAdmin().getClusterStatus();
+ StorageClusterStatusModel model = new StorageClusterStatusModel();
+ model.setRegions(status.getRegionsCount());
+ model.setRequests(status.getRequestsCount());
+ model.setAverageLoad(status.getAverageLoad());
+ for (ServerName info: status.getServers()) {
+ ServerLoad load = status.getLoad(info);
+ StorageClusterStatusModel.Node node =
+ model.addLiveNode(
+ info.getHostname() + ":" +
+ Integer.toString(info.getPort()),
+ info.getStartcode(), load.getUsedHeapMB(),
+ load.getMaxHeapMB());
+ node.setRequests(load.getNumberOfRequests());
+ for (RegionLoad region: load.getRegionsLoad().values()) {
+ node.addRegion(region.getName(), region.getStores(),
+ region.getStorefiles(), region.getStorefileSizeMB(),
+ region.getMemStoreSizeMB(), region.getStorefileIndexSizeMB(),
+ region.getReadRequestsCount(), region.getWriteRequestsCount(),
+ region.getRootIndexSizeKB(), region.getTotalStaticIndexSizeKB(),
+ region.getTotalStaticBloomSizeKB(), region.getTotalCompactingKVs(),
+ region.getCurrentCompactedKVs());
+ }
+ }
+ for (ServerName name: status.getDeadServerNames()) {
+ model.addDeadNode(name.toString());
+ }
+ ResponseBuilder response = Response.ok(model);
+ response.cacheControl(cacheControl);
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return response.build();
+ } catch (IOException e) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return Response.status(Response.Status.SERVICE_UNAVAILABLE)
+ .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
+ .build();
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
new file mode 100644
index 0000000..85e81f8
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
@@ -0,0 +1,79 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+
+import javax.ws.rs.GET;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import javax.ws.rs.core.Response.ResponseBuilder;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel;
+
+@InterfaceAudience.Private
+public class StorageClusterVersionResource extends ResourceBase {
+ private static final Log LOG =
+ LogFactory.getLog(StorageClusterVersionResource.class);
+
+ static CacheControl cacheControl;
+ static {
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ /**
+ * Constructor
+ * @throws IOException
+ */
+ public StorageClusterVersionResource() throws IOException {
+ super();
+ }
+
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON})
+ public Response get(final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ try {
+ StorageClusterVersionModel model = new StorageClusterVersionModel();
+ model.setVersion(servlet.getAdmin().getClusterStatus().getHBaseVersion());
+ ResponseBuilder response = Response.ok(model);
+ response.cacheControl(cacheControl);
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return response.build();
+ } catch (IOException e) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return Response.status(Response.Status.SERVICE_UNAVAILABLE)
+ .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
+ .build();
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
new file mode 100644
index 0000000..c458cfa
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
@@ -0,0 +1,180 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.util.List;
+
+import javax.ws.rs.DefaultValue;
+import javax.ws.rs.Encoded;
+import javax.ws.rs.HeaderParam;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.PrefixFilter;
+import org.apache.hadoop.hbase.util.Bytes;
+
+@InterfaceAudience.Private
+public class TableResource extends ResourceBase {
+
+ String table;
+ private static final Log LOG = LogFactory.getLog(TableResource.class);
+
+ /**
+ * Constructor
+ * @param table the name of the table this resource represents
+ * @throws IOException if obtaining the servlet instance fails
+ */
+ public TableResource(String table) throws IOException {
+ super();
+ this.table = table;
+ }
+
+ /** @return the table name */
+ String getName() {
+ return table;
+ }
+
+ /**
+ * @return true if the table exists
+ * @throws IOException
+ */
+ boolean exists() throws IOException {
+ return servlet.getAdmin().tableExists(table);
+ }
+
+ @Path("exists")
+ public ExistsResource getExistsResource() throws IOException {
+ return new ExistsResource(this);
+ }
+
+ @Path("regions")
+ public RegionsResource getRegionsResource() throws IOException {
+ return new RegionsResource(this);
+ }
+
+ @Path("scanner")
+ public ScannerResource getScannerResource() throws IOException {
+ return new ScannerResource(this);
+ }
+
+ @Path("schema")
+ public SchemaResource getSchemaResource() throws IOException {
+ return new SchemaResource(this);
+ }
+
+ @Path("multiget")
+ public MultiRowResource getMultipleRowResource(
+ final @QueryParam("v") String versions) throws IOException {
+ return new MultiRowResource(this, versions);
+ }
+
+ @Path("{rowspec: [^*]+}")
+ public RowResource getRowResource(
+ // We need the @Encoded decorator so Jersey won't urldecode before
+ // the RowSpec constructor has a chance to parse
+ final @PathParam("rowspec") @Encoded String rowspec,
+ final @QueryParam("v") String versions,
+ final @QueryParam("check") String check) throws IOException {
+ return new RowResource(this, rowspec, versions, check);
+ }
+
+ @Path("{suffixglobbingspec: .*\\*/.+}")
+ public RowResource getRowResourceWithSuffixGlobbing(
+ // We need the @Encoded decorator so Jersey won't urldecode before
+ // the RowSpec constructor has a chance to parse
+ final @PathParam("suffixglobbingspec") @Encoded String suffixglobbingspec,
+ final @QueryParam("v") String versions,
+ final @QueryParam("check") String check) throws IOException {
+ return new RowResource(this, suffixglobbingspec, versions, check);
+ }
+
+ @Path("{scanspec: .*[*]$}")
+ public TableScanResource getScanResource(
+ final @Context UriInfo uriInfo,
+ final @PathParam("scanspec") String scanSpec,
+ final @HeaderParam("Accept") String contentType,
+ @DefaultValue(Integer.MAX_VALUE + "")
+ @QueryParam(Constants.SCAN_LIMIT) int userRequestedLimit,
+ @DefaultValue("") @QueryParam(Constants.SCAN_START_ROW) String startRow,
+ @DefaultValue("") @QueryParam(Constants.SCAN_END_ROW) String endRow,
+ @DefaultValue("") @QueryParam(Constants.SCAN_COLUMN) List<String> column,
+ @DefaultValue("1") @QueryParam(Constants.SCAN_MAX_VERSIONS) int maxVersions,
+ @DefaultValue("-1") @QueryParam(Constants.SCAN_BATCH_SIZE) int batchSize,
+ @DefaultValue("0") @QueryParam(Constants.SCAN_START_TIME) long startTime,
+ @DefaultValue(Long.MAX_VALUE + "") @QueryParam(Constants.SCAN_END_TIME) long endTime,
+ @DefaultValue("true") @QueryParam(Constants.SCAN_BATCH_SIZE) boolean cacheBlocks) {
+ try {
+ Filter filter = null;
+ if (scanSpec.indexOf('*') > 0) {
+ String prefix = scanSpec.substring(0, scanSpec.indexOf('*'));
+ filter = new PrefixFilter(Bytes.toBytes(prefix));
+ }
+ LOG.debug("Query parameters : Table Name = > " + this.table + " Start Row => " + startRow
+ + " End Row => " + endRow + " Columns => " + column + " Start Time => " + startTime
+ + " End Time => " + endTime + " Cache Blocks => " + cacheBlocks + " Max Versions => "
+ + maxVersions + " Batch Size => " + batchSize);
+ HTableInterface hTable = RESTServlet.getInstance().getTable(this.table);
+ Scan tableScan = new Scan();
+ tableScan.setBatch(batchSize);
+ tableScan.setMaxVersions(maxVersions);
+ tableScan.setTimeRange(startTime, endTime);
+ tableScan.setStartRow(Bytes.toBytes(startRow));
+ tableScan.setStopRow(Bytes.toBytes(endRow));
+ for (String csplit : column) {
+ String[] familysplit = csplit.trim().split(":");
+ if (familysplit.length == 2) {
+ if (familysplit[1].length() > 0) {
+ LOG.debug("Scan family and column : " + familysplit[0] + " " + familysplit[1]);
+ tableScan.addColumn(Bytes.toBytes(familysplit[0]), Bytes.toBytes(familysplit[1]));
+ } else {
+ tableScan.addFamily(Bytes.toBytes(familysplit[0]));
+ LOG.debug("Scan family : " + familysplit[0] + " and empty qualifier.");
+ tableScan.addColumn(Bytes.toBytes(familysplit[0]), null);
+ }
+ } else if (StringUtils.isNotEmpty(familysplit[0])){
+ LOG.debug("Scan family : " + familysplit[0]);
+ tableScan.addFamily(Bytes.toBytes(familysplit[0]));
+ }
+ }
+ if (filter != null) {
+ tableScan.setFilter(filter);
+ }
+ int fetchSize = this.servlet.getConfiguration().getInt(Constants.SCAN_FETCH_SIZE, 10);
+ tableScan.setCaching(fetchSize);
+ return new TableScanResource(hTable.getScanner(tableScan), userRequestedLimit);
+ } catch (Exception exp) {
+ servlet.getMetrics().incrementFailedScanRequests(1);
+ LOG.warn(exp);
+ // translate the exception into a REST error response
+ return processException(exp);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/DummyFilter.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/DummyFilter.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/DummyFilter.java
new file mode 100644
index 0000000..ef53f46
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/DummyFilter.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+public class DummyFilter implements Filter {
+ private Log LOG = LogFactory.getLog(getClass());
+
+ @Override
+ public void destroy() {
+ }
+
+ @Override
+ public void doFilter(ServletRequest paramServletRequest, ServletResponse paramServletResponse,
+ FilterChain paramFilterChain) throws IOException, ServletException {
+ if (paramServletRequest instanceof HttpServletRequest
+ && paramServletResponse instanceof HttpServletResponse) {
+ HttpServletRequest request = (HttpServletRequest) paramServletRequest;
+ HttpServletResponse response = (HttpServletResponse) paramServletResponse;
+
+ String path = request.getRequestURI();
+ LOG.info(path);
+ if (path.indexOf("/status/cluster") >= 0) {
+ LOG.info("Blocking cluster status request");
+ response.sendError(HttpServletResponse.SC_NOT_FOUND, "Cluster status cannot be requested.");
+ } else {
+ paramFilterChain.doFilter(request, response);
+ }
+ }
+ }
+
+ @Override
+ public void init(FilterConfig filterConfig) throws ServletException {
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java
new file mode 100644
index 0000000..8a399e9
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java
@@ -0,0 +1,98 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest;
+
+import org.apache.commons.lang.ArrayUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.util.HttpServerUtil;
+import org.apache.hadoop.util.StringUtils;
+import org.mortbay.jetty.Server;
+import org.mortbay.jetty.servlet.Context;
+import org.mortbay.jetty.servlet.ServletHolder;
+
+import com.sun.jersey.spi.container.servlet.ServletContainer;
+
+public class HBaseRESTTestingUtility {
+
+ static final Log LOG = LogFactory.getLog(HBaseRESTTestingUtility.class);
+
+ private int testServletPort;
+ private Server server;
+
+ public int getServletPort() {
+ return testServletPort;
+ }
+
+ public void startServletContainer(Configuration conf) throws Exception {
+ if (server != null) {
+ LOG.error("ServletContainer already running");
+ return;
+ }
+
+ // Inject the conf for the test by being first to make singleton
+ RESTServlet.getInstance(conf, UserProvider.instantiate(conf));
+
+ // set up the Jersey servlet container for Jetty
+ ServletHolder sh = new ServletHolder(ServletContainer.class);
+ sh.setInitParameter(
+ "com.sun.jersey.config.property.resourceConfigClass",
+ ResourceConfig.class.getCanonicalName());
+ sh.setInitParameter("com.sun.jersey.config.property.packages",
+ "jetty");
+
+ LOG.info("configured " + ServletContainer.class.getName());
+
+ // set up Jetty and run the embedded server
+ server = new Server(0);
+ server.setSendServerVersion(false);
+ server.setSendDateHeader(false);
+ // set up context
+ Context context = new Context(server, "/", Context.SESSIONS);
+ context.addServlet(sh, "/*");
+ // Load filters specified from configuration.
+ String[] filterClasses = conf.getStrings(Constants.FILTER_CLASSES,
+ ArrayUtils.EMPTY_STRING_ARRAY);
+ for (String filter : filterClasses) {
+ filter = filter.trim();
+ context.addFilter(Class.forName(filter), "/*", 0);
+ }
+ HttpServerUtil.constrainHttpMethods(context);
+ LOG.info("Loaded filter classes :" + filterClasses);
+ // start the server
+ server.start();
+ // get the port
+ testServletPort = server.getConnectors()[0].getLocalPort();
+
+ LOG.info("started " + server.getClass().getName() + " on port " +
+ testServletPort);
+ }
+
+ public void shutdownServletContainer() {
+ if (server != null) try {
+ server.stop();
+ server = null;
+ RESTServlet.stop();
+ } catch (Exception e) {
+ LOG.warn(StringUtils.stringifyException(e));
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
new file mode 100644
index 0000000..bd65bc4
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
@@ -0,0 +1,1520 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.lang.reflect.Constructor;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Date;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.TreeMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HConnectionManager;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
+import org.apache.hadoop.hbase.filter.WhileMatchFilter;
+import org.apache.hadoop.hbase.io.compress.Compression;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
+import org.apache.hadoop.hbase.rest.client.Client;
+import org.apache.hadoop.hbase.rest.client.Cluster;
+import org.apache.hadoop.hbase.rest.client.RemoteAdmin;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Hash;
+import org.apache.hadoop.hbase.util.MurmurHash;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
+import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
+import org.apache.hadoop.util.LineReader;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+/**
+ * Script used to evaluate Stargate performance and scalability. Runs a SG
+ * client that steps through one of a set of hardcoded tests or 'experiments'
+ * (e.g. a random reads test, a random writes test, etc.). Pass on the
+ * command-line which test to run and how many clients are participating in
+ * this experiment. Run <code>java PerformanceEvaluation --help</code> to
+ * obtain usage.
+ *
+ * <p>This class sets up and runs the evaluation programs described in
+ * Section 7, <i>Performance Evaluation</i>, of the <a
+ * href="http://labs.google.com/papers/bigtable.html">Bigtable</a>
+ * paper, pages 8-10.
+ *
+ * <p>If the number of clients is > 1, we start up a MapReduce job. Each map task
+ * runs an individual client. Each client does about 1GB of data.
+ */
+public class PerformanceEvaluation extends Configured implements Tool {
+ protected static final Log LOG = LogFactory.getLog(PerformanceEvaluation.class.getName());
+
+ private static final int DEFAULT_ROW_PREFIX_LENGTH = 16;
+ private static final int ROW_LENGTH = 1000;
+ private static final int TAG_LENGTH = 256;
+ private static final int ONE_GB = 1024 * 1024 * 1000;
+ private static final int ROWS_PER_GB = ONE_GB / ROW_LENGTH;
+
+ public static final TableName TABLE_NAME = TableName.valueOf("TestTable");
+ public static final byte [] FAMILY_NAME = Bytes.toBytes("info");
+ public static final byte [] QUALIFIER_NAME = Bytes.toBytes("data");
+ private TableName tableName = TABLE_NAME;
+
+ protected HTableDescriptor TABLE_DESCRIPTOR;
+ protected Map<String, CmdDescriptor> commands = new TreeMap<String, CmdDescriptor>();
+ protected static Cluster cluster = new Cluster();
+
+ volatile Configuration conf;
+ private boolean nomapred = false;
+ private int N = 1;
+ private int R = ROWS_PER_GB;
+ private Compression.Algorithm compression = Compression.Algorithm.NONE;
+ private DataBlockEncoding blockEncoding = DataBlockEncoding.NONE;
+ private boolean flushCommits = true;
+ private boolean writeToWAL = true;
+ private boolean inMemoryCF = false;
+ private int presplitRegions = 0;
+ private boolean useTags = false;
+ private int noOfTags = 1;
+ private HConnection connection;
+
+ private static final Path PERF_EVAL_DIR = new Path("performance_evaluation");
+ /**
+ * Regex to parse lines in input file passed to mapreduce task.
+ */
+ public static final Pattern LINE_PATTERN =
+ Pattern.compile("tableName=(\\w+),\\s+" +
+ "startRow=(\\d+),\\s+" +
+ "perClientRunRows=(\\d+),\\s+" +
+ "totalRows=(\\d+),\\s+" +
+ "clients=(\\d+),\\s+" +
+ "flushCommits=(\\w+),\\s+" +
+ "writeToWAL=(\\w+),\\s+" +
+ "useTags=(\\w+),\\s+" +
+ "noOfTags=(\\d+)");
+
+ /**
+ * Enum for map metrics. Keep it out here rather than inside in the Map
+ * inner-class so we can find associated properties.
+ */
+ protected static enum Counter {
+ /** elapsed time */
+ ELAPSED_TIME,
+ /** number of rows */
+ ROWS}
+
+ /**
+ * Constructor
+ * @param c Configuration object
+ */
+ public PerformanceEvaluation(final Configuration c) {
+ this.conf = c;
+
+ addCommandDescriptor(RandomReadTest.class, "randomRead",
+ "Run random read test");
+ addCommandDescriptor(RandomSeekScanTest.class, "randomSeekScan",
+ "Run random seek and scan 100 test");
+ addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
+ "Run random seek scan with both start and stop row (max 10 rows)");
+ addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
+ "Run random seek scan with both start and stop row (max 100 rows)");
+ addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
+ "Run random seek scan with both start and stop row (max 1000 rows)");
+ addCommandDescriptor(RandomScanWithRange10000Test.class, "scanRange10000",
+ "Run random seek scan with both start and stop row (max 10000 rows)");
+ addCommandDescriptor(RandomWriteTest.class, "randomWrite",
+ "Run random write test");
+ addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
+ "Run sequential read test");
+ addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
+ "Run sequential write test");
+ addCommandDescriptor(ScanTest.class, "scan",
+ "Run scan test (read every row)");
+ addCommandDescriptor(FilteredScanTest.class, "filterScan",
+ "Run scan test using a filter to find a specific row based on it's value (make sure to use --rows=20)");
+ }
+
+ protected void addCommandDescriptor(Class<? extends Test> cmdClass,
+ String name, String description) {
+ CmdDescriptor cmdDescriptor =
+ new CmdDescriptor(cmdClass, name, description);
+ commands.put(name, cmdDescriptor);
+ }
+
+ /**
+ * Implementations can have their status set.
+ */
+ interface Status {
+ /**
+ * Sets status
+ * @param msg status message
+ * @throws IOException
+ */
+ void setStatus(final String msg) throws IOException;
+ }
+
+ /**
+ * This class works as the InputSplit of Performance Evaluation
+ * MapReduce InputFormat, and the Record Value of RecordReader.
+ * Each map task will only read one record from a PeInputSplit;
+ * the record value is the PeInputSplit itself.
+ */
+ public static class PeInputSplit extends InputSplit implements Writable {
+ private TableName tableName = TABLE_NAME;
+ private int startRow = 0;
+ private int rows = 0;
+ private int totalRows = 0;
+ private int clients = 0;
+ private boolean flushCommits = false;
+ private boolean writeToWAL = true;
+ private boolean useTags = false;
+ private int noOfTags = 0;
+
+ public PeInputSplit() {
+ }
+
+ public PeInputSplit(TableName tableName, int startRow, int rows, int totalRows, int clients,
+ boolean flushCommits, boolean writeToWAL, boolean useTags, int noOfTags) {
+ this.tableName = tableName;
+ this.startRow = startRow;
+ this.rows = rows;
+ this.totalRows = totalRows;
+ this.clients = clients;
+ this.flushCommits = flushCommits;
+ this.writeToWAL = writeToWAL;
+ this.useTags = useTags;
+ this.noOfTags = noOfTags;
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ int tableNameLen = in.readInt();
+ byte[] name = new byte[tableNameLen];
+ in.readFully(name);
+ this.tableName = TableName.valueOf(name);
+ this.startRow = in.readInt();
+ this.rows = in.readInt();
+ this.totalRows = in.readInt();
+ this.clients = in.readInt();
+ this.flushCommits = in.readBoolean();
+ this.writeToWAL = in.readBoolean();
+ this.useTags = in.readBoolean();
+ this.noOfTags = in.readInt();
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ byte[] name = this.tableName.toBytes();
+ out.writeInt(name.length);
+ out.write(name);
+ out.writeInt(startRow);
+ out.writeInt(rows);
+ out.writeInt(totalRows);
+ out.writeInt(clients);
+ out.writeBoolean(flushCommits);
+ out.writeBoolean(writeToWAL);
+ out.writeBoolean(useTags);
+ out.writeInt(noOfTags);
+ }
+
+ @Override
+ public long getLength() throws IOException, InterruptedException {
+ return 0;
+ }
+
+ @Override
+ public String[] getLocations() throws IOException, InterruptedException {
+ return new String[0];
+ }
+
+ public int getStartRow() {
+ return startRow;
+ }
+
+ public TableName getTableName() {
+ return tableName;
+ }
+
+ public int getRows() {
+ return rows;
+ }
+
+ public int getTotalRows() {
+ return totalRows;
+ }
+
+ public int getClients() {
+ return clients;
+ }
+
+ public boolean isFlushCommits() {
+ return flushCommits;
+ }
+
+ public boolean isWriteToWAL() {
+ return writeToWAL;
+ }
+
+ public boolean isUseTags() {
+ return useTags;
+ }
+
+ public int getNoOfTags() {
+ return noOfTags;
+ }
+ }
+
+ /**
+ * InputFormat of Performance Evaluation MapReduce job.
+ * It extends FileInputFormat so it can reuse methods such as setInputPaths().
+ */
+ public static class PeInputFormat extends FileInputFormat<NullWritable, PeInputSplit> {
+
+ @Override
+ public List<InputSplit> getSplits(JobContext job) throws IOException {
+ // generate splits
+ List<InputSplit> splitList = new ArrayList<InputSplit>();
+
+ for (FileStatus file: listStatus(job)) {
+ if (file.isDir()) {
+ continue;
+ }
+ Path path = file.getPath();
+ FileSystem fs = path.getFileSystem(job.getConfiguration());
+ FSDataInputStream fileIn = fs.open(path);
+ LineReader in = new LineReader(fileIn, job.getConfiguration());
+ int lineLen = 0;
+ while(true) {
+ Text lineText = new Text();
+ lineLen = in.readLine(lineText);
+ if(lineLen <= 0) {
+ break;
+ }
+ Matcher m = LINE_PATTERN.matcher(lineText.toString());
+ if((m != null) && m.matches()) {
+ TableName tableName = TableName.valueOf(m.group(1));
+ int startRow = Integer.parseInt(m.group(2));
+ int rows = Integer.parseInt(m.group(3));
+ int totalRows = Integer.parseInt(m.group(4));
+ int clients = Integer.parseInt(m.group(5));
+ boolean flushCommits = Boolean.parseBoolean(m.group(6));
+ boolean writeToWAL = Boolean.parseBoolean(m.group(7));
+ boolean useTags = Boolean.parseBoolean(m.group(8));
+ int noOfTags = Integer.parseInt(m.group(9));
+
+ LOG.debug("tableName=" + tableName +
+ " split["+ splitList.size() + "] " +
+ " startRow=" + startRow +
+ " rows=" + rows +
+ " totalRows=" + totalRows +
+ " clients=" + clients +
+ " flushCommits=" + flushCommits +
+ " writeToWAL=" + writeToWAL +
+ " useTags=" + useTags +
+ " noOfTags=" + noOfTags);
+
+ PeInputSplit newSplit =
+ new PeInputSplit(tableName, startRow, rows, totalRows, clients,
+ flushCommits, writeToWAL, useTags, noOfTags);
+ splitList.add(newSplit);
+ }
+ }
+ in.close();
+ }
+
+ LOG.info("Total # of splits: " + splitList.size());
+ return splitList;
+ }
+
+ @Override
+ public RecordReader<NullWritable, PeInputSplit> createRecordReader(InputSplit split,
+ TaskAttemptContext context) {
+ return new PeRecordReader();
+ }
+
+ public static class PeRecordReader extends RecordReader<NullWritable, PeInputSplit> {
+ private boolean readOver = false;
+ private PeInputSplit split = null;
+ private NullWritable key = null;
+ private PeInputSplit value = null;
+
+ @Override
+ public void initialize(InputSplit split, TaskAttemptContext context)
+ throws IOException, InterruptedException {
+ this.readOver = false;
+ this.split = (PeInputSplit)split;
+ }
+
+ @Override
+ public boolean nextKeyValue() throws IOException, InterruptedException {
+ if(readOver) {
+ return false;
+ }
+
+ key = NullWritable.get();
+ value = (PeInputSplit)split;
+
+ readOver = true;
+ return true;
+ }
+
+ @Override
+ public NullWritable getCurrentKey() throws IOException, InterruptedException {
+ return key;
+ }
+
+ @Override
+ public PeInputSplit getCurrentValue() throws IOException, InterruptedException {
+ return value;
+ }
+
+ @Override
+ public float getProgress() throws IOException, InterruptedException {
+ if(readOver) {
+ return 1.0f;
+ } else {
+ return 0.0f;
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ // do nothing
+ }
+ }
+ }
+
+ /**
+ * MapReduce job that runs a performance evaluation client in each map task.
+ */
+ public static class EvaluationMapTask
+ extends Mapper<NullWritable, PeInputSplit, LongWritable, LongWritable> {
+
+ /** configuration parameter name that contains the command */
+ public final static String CMD_KEY = "EvaluationMapTask.command";
+ /** configuration parameter name that contains the PE impl */
+ public static final String PE_KEY = "EvaluationMapTask.performanceEvalImpl";
+
+ private Class<? extends Test> cmd;
+ private PerformanceEvaluation pe;
+
+ @Override
+ protected void setup(Context context) throws IOException, InterruptedException {
+ this.cmd = forName(context.getConfiguration().get(CMD_KEY), Test.class);
+
+ // this is required so that extensions of PE are instantiated within the
+ // map reduce task...
+ Class<? extends PerformanceEvaluation> peClass =
+ forName(context.getConfiguration().get(PE_KEY), PerformanceEvaluation.class);
+ try {
+ this.pe = peClass.getConstructor(Configuration.class)
+ .newInstance(context.getConfiguration());
+ } catch (Exception e) {
+ throw new IllegalStateException("Could not instantiate PE instance", e);
+ }
+ }
+
+ private <Type> Class<? extends Type> forName(String className, Class<Type> type) {
+ Class<? extends Type> clazz = null;
+ try {
+ clazz = Class.forName(className).asSubclass(type);
+ } catch (ClassNotFoundException e) {
+ throw new IllegalStateException("Could not find class for name: " + className, e);
+ }
+ return clazz;
+ }
+
+ protected void map(NullWritable key, PeInputSplit value, final Context context)
+ throws IOException, InterruptedException {
+
+ Status status = new Status() {
+ public void setStatus(String msg) {
+ context.setStatus(msg);
+ }
+ };
+
+ // Evaluation task
+ pe.tableName = value.getTableName();
+ long elapsedTime = this.pe.runOneClient(this.cmd, value.getStartRow(),
+ value.getRows(), value.getTotalRows(),
+ value.isFlushCommits(), value.isWriteToWAL(),
+ value.isUseTags(), value.getNoOfTags(),
+ HConnectionManager.createConnection(context.getConfiguration()), status);
+ // Collect how much time the thing took. Report as map output and
+ // to the ELAPSED_TIME counter.
+ context.getCounter(Counter.ELAPSED_TIME).increment(elapsedTime);
+ context.getCounter(Counter.ROWS).increment(value.rows);
+ context.write(new LongWritable(value.startRow), new LongWritable(elapsedTime));
+ context.progress();
+ }
+ }
+
+ /*
+ * If the table does not already exist, create it.
+ * @param admin Remote admin to use for the check.
+ * @return True if the table is available once we are done.
+ * @throws IOException
+ */
+ private boolean checkTable(RemoteAdmin admin) throws IOException {
+ HTableDescriptor tableDescriptor = getTableDescriptor();
+ if (this.presplitRegions > 0) {
+ // presplit requested
+ if (admin.isTableAvailable(tableDescriptor.getTableName().getName())) {
+ admin.deleteTable(tableDescriptor.getTableName().getName());
+ }
+
+ byte[][] splits = getSplits();
+ for (int i=0; i < splits.length; i++) {
+ LOG.debug(" split " + i + ": " + Bytes.toStringBinary(splits[i]));
+ }
+ admin.createTable(tableDescriptor);
+ LOG.info ("Table created with " + this.presplitRegions + " splits");
+ } else {
+ boolean tableExists = admin.isTableAvailable(tableDescriptor.getTableName().getName());
+ if (!tableExists) {
+ admin.createTable(tableDescriptor);
+ LOG.info("Table " + tableDescriptor + " created");
+ }
+ }
+ boolean tableExists = admin.isTableAvailable(tableDescriptor.getTableName().getName());
+ return tableExists;
+ }
+
+ protected HTableDescriptor getTableDescriptor() {
+ if (TABLE_DESCRIPTOR == null) {
+ TABLE_DESCRIPTOR = new HTableDescriptor(tableName);
+ HColumnDescriptor family = new HColumnDescriptor(FAMILY_NAME);
+ family.setDataBlockEncoding(blockEncoding);
+ family.setCompressionType(compression);
+ if (inMemoryCF) {
+ family.setInMemory(true);
+ }
+ TABLE_DESCRIPTOR.addFamily(family);
+ }
+ return TABLE_DESCRIPTOR;
+ }
+
+ /**
+ * Generates split points based on the total number of rows and the
+ * requested number of presplit regions.
+ *
+ * @return array of split-point row keys
+ */
+ protected byte[][] getSplits() {
+ if (this.presplitRegions == 0)
+ return new byte [0][];
+
+ int numSplitPoints = presplitRegions - 1;
+ byte[][] splits = new byte[numSplitPoints][];
+ int jump = this.R / this.presplitRegions;
+ for (int i=0; i < numSplitPoints; i++) {
+ int rowkey = jump * (1 + i);
+ splits[i] = format(rowkey);
+ }
+ return splits;
+ }
+
+ /*
+ * We're to run multiple clients concurrently. Set up a mapreduce job. Run
+ * one map per client. Then run a single reduce to sum the elapsed times.
+ * @param cmd Command to run.
+ * @throws IOException
+ */
+ private void runNIsMoreThanOne(final Class<? extends Test> cmd)
+ throws IOException, InterruptedException, ClassNotFoundException {
+ RemoteAdmin remoteAdmin = new RemoteAdmin(new Client(cluster), getConf());
+ checkTable(remoteAdmin);
+ if (nomapred) {
+ doMultipleClients(cmd);
+ } else {
+ doMapReduce(cmd);
+ }
+ }
+
+ /*
+ * Run all clients in this VM, each in its own thread.
+ * @param cmd Command to run.
+ * @throws IOException
+ */
+ private void doMultipleClients(final Class<? extends Test> cmd) throws IOException {
+ final List<Thread> threads = new ArrayList<Thread>(this.N);
+ final long[] timings = new long[this.N];
+ final int perClientRows = R/N;
+ final TableName tableName = this.tableName;
+ final DataBlockEncoding encoding = this.blockEncoding;
+ final boolean flushCommits = this.flushCommits;
+ final Compression.Algorithm compression = this.compression;
+ final boolean writeToWal = this.writeToWAL;
+ final int preSplitRegions = this.presplitRegions;
+ final boolean useTags = this.useTags;
+ final int numTags = this.noOfTags;
+ final HConnection connection = HConnectionManager.createConnection(getConf());
+ for (int i = 0; i < this.N; i++) {
+ final int index = i;
+ Thread t = new Thread ("TestClient-" + i) {
+ @Override
+ public void run() {
+ super.run();
+ PerformanceEvaluation pe = new PerformanceEvaluation(getConf());
+ pe.tableName = tableName;
+ pe.blockEncoding = encoding;
+ pe.flushCommits = flushCommits;
+ pe.compression = compression;
+ pe.writeToWAL = writeToWal;
+ pe.presplitRegions = preSplitRegions;
+ pe.N = N;
+ pe.connection = connection;
+ pe.useTags = useTags;
+ pe.noOfTags = numTags;
+ try {
+ long elapsedTime = pe.runOneClient(cmd, index * perClientRows,
+ perClientRows, R,
+ flushCommits, writeToWAL, useTags, noOfTags, connection, new Status() {
+ public void setStatus(final String msg) throws IOException {
+ LOG.info("client-" + getName() + " " + msg);
+ }
+ });
+ timings[index] = elapsedTime;
+ LOG.info("Finished " + getName() + " in " + elapsedTime +
+ "ms writing " + perClientRows + " rows");
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ };
+ threads.add(t);
+ }
+ for (Thread t: threads) {
+ t.start();
+ }
+ for (Thread t: threads) {
+ while(t.isAlive()) {
+ try {
+ t.join();
+ } catch (InterruptedException e) {
+ LOG.debug("Interrupted, continuing" + e.toString());
+ }
+ }
+ }
+ final String test = cmd.getSimpleName();
+ LOG.info("[" + test + "] Summary of timings (ms): "
+ + Arrays.toString(timings));
+ Arrays.sort(timings);
+ long total = 0;
+ for (int i = 0; i < this.N; i++) {
+ total += timings[i];
+ }
+ LOG.info("[" + test + "]"
+ + "\tMin: " + timings[0] + "ms"
+ + "\tMax: " + timings[this.N - 1] + "ms"
+ + "\tAvg: " + (total / this.N) + "ms");
+ }
+
+ /*
+ * Run a mapreduce job. Run as many maps as asked-for clients.
+ * Before we start up the job, write out an input file with an instruction
+ * per client regarding which row it should start on.
+ * @param cmd Command to run.
+ * @throws IOException
+ */
+ private void doMapReduce(final Class<? extends Test> cmd) throws IOException,
+ InterruptedException, ClassNotFoundException {
+ Configuration conf = getConf();
+ Path inputDir = writeInputFile(conf);
+ conf.set(EvaluationMapTask.CMD_KEY, cmd.getName());
+ conf.set(EvaluationMapTask.PE_KEY, getClass().getName());
+ Job job = new Job(conf);
+ job.setJarByClass(PerformanceEvaluation.class);
+ job.setJobName("HBase Performance Evaluation");
+
+ job.setInputFormatClass(PeInputFormat.class);
+ PeInputFormat.setInputPaths(job, inputDir);
+
+ job.setOutputKeyClass(LongWritable.class);
+ job.setOutputValueClass(LongWritable.class);
+
+ job.setMapperClass(EvaluationMapTask.class);
+ job.setReducerClass(LongSumReducer.class);
+ job.setNumReduceTasks(1);
+
+ job.setOutputFormatClass(TextOutputFormat.class);
+ TextOutputFormat.setOutputPath(job, new Path(inputDir.getParent(), "outputs"));
+ TableMapReduceUtil.addDependencyJars(job);
+ TableMapReduceUtil.initCredentials(job);
+ job.waitForCompletion(true);
+ }
+
+ /*
+ * Write input file of offsets-per-client for the mapreduce job.
+ * @param c Configuration
+ * @return Directory that contains file written.
+ * @throws IOException
+ */
+ private Path writeInputFile(final Configuration c) throws IOException {
+ SimpleDateFormat formatter = new SimpleDateFormat("yyyyMMddHHmmss");
+ Path jobdir = new Path(PERF_EVAL_DIR, formatter.format(new Date()));
+ Path inputDir = new Path(jobdir, "inputs");
+
+ FileSystem fs = FileSystem.get(c);
+ fs.mkdirs(inputDir);
+ Path inputFile = new Path(inputDir, "input.txt");
+ PrintStream out = new PrintStream(fs.create(inputFile));
+ // Make input random.
+ Map<Integer, String> m = new TreeMap<Integer, String>();
+ Hash h = MurmurHash.getInstance();
+ int perClientRows = (this.R / this.N);
+ try {
+ for (int i = 0; i < 10; i++) {
+ for (int j = 0; j < N; j++) {
+ String s = "tableName=" + this.tableName +
+ ", startRow=" + ((j * perClientRows) + (i * (perClientRows/10))) +
+ ", perClientRunRows=" + (perClientRows / 10) +
+ ", totalRows=" + this.R +
+ ", clients=" + this.N +
+ ", flushCommits=" + this.flushCommits +
+ ", writeToWAL=" + this.writeToWAL +
+ ", useTags=" + this.useTags +
+ ", noOfTags=" + this.noOfTags;
+ int hash = h.hash(Bytes.toBytes(s));
+ m.put(hash, s);
+ }
+ }
+ for (Map.Entry<Integer, String> e: m.entrySet()) {
+ out.println(e.getValue());
+ }
+ } finally {
+ out.close();
+ }
+ return inputDir;
+ }
+
+ /**
+ * Describes a command.
+ */
+ static class CmdDescriptor {
+ private Class<? extends Test> cmdClass;
+ private String name;
+ private String description;
+
+ CmdDescriptor(Class<? extends Test> cmdClass, String name, String description) {
+ this.cmdClass = cmdClass;
+ this.name = name;
+ this.description = description;
+ }
+
+ public Class<? extends Test> getCmdClass() {
+ return cmdClass;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public String getDescription() {
+ return description;
+ }
+ }
+
+ /**
+ * Wraps up options passed to {@link org.apache.hadoop.hbase.rest.PerformanceEvaluation.Test
+ * tests}. This makes the reflection logic a little easier to understand...
+ */
+ static class TestOptions {
+ private int startRow;
+ private int perClientRunRows;
+ private int totalRows;
+ private int numClientThreads;
+ private TableName tableName;
+ private boolean flushCommits;
+ private boolean writeToWAL = true;
+ private boolean useTags = false;
+ private int noOfTags = 0;
+ private HConnection connection;
+
+ TestOptions() {
+ }
+
+ TestOptions(int startRow, int perClientRunRows, int totalRows, int numClientThreads,
+ TableName tableName, boolean flushCommits, boolean writeToWAL, boolean useTags,
+ int noOfTags, HConnection connection) {
+ this.startRow = startRow;
+ this.perClientRunRows = perClientRunRows;
+ this.totalRows = totalRows;
+ this.numClientThreads = numClientThreads;
+ this.tableName = tableName;
+ this.flushCommits = flushCommits;
+ this.writeToWAL = writeToWAL;
+ this.useTags = useTags;
+ this.noOfTags = noOfTags;
+ this.connection = connection;
+ }
+
+ public int getStartRow() {
+ return startRow;
+ }
+
+ public int getPerClientRunRows() {
+ return perClientRunRows;
+ }
+
+ public int getTotalRows() {
+ return totalRows;
+ }
+
+ public int getNumClientThreads() {
+ return numClientThreads;
+ }
+
+ public TableName getTableName() {
+ return tableName;
+ }
+
+ public boolean isFlushCommits() {
+ return flushCommits;
+ }
+
+ public boolean isWriteToWAL() {
+ return writeToWAL;
+ }
+
+ public HConnection getConnection() {
+ return connection;
+ }
+
+ public boolean isUseTags() {
+ return this.useTags;
+ }
+
+ public int getNumTags() {
+ return this.noOfTags;
+ }
+ }
+
+ /*
+ * A test.
+ * Subclass to particularize what happens per row.
+ */
+ static abstract class Test {
+ // The shared seed below makes it so that when Tests are all running in
+ // the one JVM, they each get a differently seeded Random.
+ private static final Random randomSeed =
+ new Random(System.currentTimeMillis());
+ private static long nextRandomSeed() {
+ return randomSeed.nextLong();
+ }
+ protected final Random rand = new Random(nextRandomSeed());
+
+ protected final int startRow;
+ protected final int perClientRunRows;
+ protected final int totalRows;
+ private final Status status;
+ protected TableName tableName;
+ protected HTableInterface table;
+ protected volatile Configuration conf;
+ protected boolean flushCommits;
+ protected boolean writeToWAL;
+ protected boolean useTags;
+ protected int noOfTags;
+ protected HConnection connection;
+
+ /**
+ * Note that all subclasses of this class must provide a public constructor
+ * that has the exact same list of arguments.
+ */
+ Test(final Configuration conf, final TestOptions options, final Status status) {
+ super();
+ this.startRow = options.getStartRow();
+ this.perClientRunRows = options.getPerClientRunRows();
+ this.totalRows = options.getTotalRows();
+ this.status = status;
+ this.tableName = options.getTableName();
+ this.table = null;
+ this.conf = conf;
+ this.flushCommits = options.isFlushCommits();
+ this.writeToWAL = options.isWriteToWAL();
+ this.useTags = options.isUseTags();
+ this.noOfTags = options.getNumTags();
+ this.connection = options.getConnection();
+ }
+
+ protected String generateStatus(final int sr, final int i, final int lr) {
+ return sr + "/" + i + "/" + lr;
+ }
+
+ protected int getReportingPeriod() {
+ int period = this.perClientRunRows / 10;
+ return period == 0? this.perClientRunRows: period;
+ }
+
+ void testSetup() throws IOException {
+ this.table = connection.getTable(tableName);
+ this.table.setAutoFlush(false, true);
+ }
+
+ void testTakedown() throws IOException {
+ if (flushCommits) {
+ this.table.flushCommits();
+ }
+ table.close();
+ }
+
+ /*
+ * Run test
+ * @return Elapsed time.
+ * @throws IOException
+ */
+ long test() throws IOException {
+ testSetup();
+ LOG.info("Timed test starting in thread " + Thread.currentThread().getName());
+ final long startTime = System.nanoTime();
+ try {
+ testTimed();
+ } finally {
+ testTakedown();
+ }
+ return (System.nanoTime() - startTime) / 1000000;
+ }
+
+ /**
+ * Provides an extension point for tests that don't want a per row invocation.
+ */
+ void testTimed() throws IOException {
+ int lastRow = this.startRow + this.perClientRunRows;
+ // Report on completion of 1/10th of total.
+ for (int i = this.startRow; i < lastRow; i++) {
+ testRow(i);
+ if (status != null && i > 0 && (i % getReportingPeriod()) == 0) {
+ status.setStatus(generateStatus(this.startRow, i, lastRow));
+ }
+ }
+ }
+
+ /*
+ * Test for individual row.
+ * @param i Row index.
+ */
+ abstract void testRow(final int i) throws IOException;
+ }
+
+ @SuppressWarnings("unused")
+ static class RandomSeekScanTest extends Test {
+ RandomSeekScanTest(Configuration conf, TestOptions options, Status status) {
+ super(conf, options, status);
+ }
+
+ @Override
+ void testRow(final int i) throws IOException {
+ Scan scan = new Scan(getRandomRow(this.rand, this.totalRows));
+ scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
+ scan.setFilter(new WhileMatchFilter(new PageFilter(120)));
+ ResultScanner s = this.table.getScanner(scan);
+ s.close();
+ }
+
+ @Override
+ protected int getReportingPeriod() {
+ int period = this.perClientRunRows / 100;
+ return period == 0? this.perClientRunRows: period;
+ }
+
+ }
+
+ @SuppressWarnings("unused")
+ static abstract class RandomScanWithRangeTest extends Test {
+ RandomScanWithRangeTest(Configuration conf, TestOptions options, Status status) {
+ super(conf, options, status);
+ }
+
+ @Override
+ void testRow(final int i) throws IOException {
+ Pair<byte[], byte[]> startAndStopRow = getStartAndStopRow();
+ Scan scan = new Scan(startAndStopRow.getFirst(), startAndStopRow.getSecond());
+ scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
+ ResultScanner s = this.table.getScanner(scan);
+ int count = 0;
+ for (Result rr = null; (rr = s.next()) != null;) {
+ count++;
+ }
+
+ if (i % 100 == 0) {
+ LOG.info(String.format("Scan for key range %s - %s returned %s rows",
+ Bytes.toString(startAndStopRow.getFirst()),
+ Bytes.toString(startAndStopRow.getSecond()), count));
+ }
+
+ s.close();
+ }
+
+ protected abstract Pair<byte[],byte[]> getStartAndStopRow();
+
+ protected Pair<byte[], byte[]> generateStartAndStopRows(int maxRange) {
+ int start = this.rand.nextInt(Integer.MAX_VALUE) % totalRows;
+ int stop = start + maxRange;
+ return new Pair<byte[],byte[]>(format(start), format(stop));
+ }
+
+ @Override
+ protected int getReportingPeriod() {
+ int period = this.perClientRunRows / 100;
+ return period == 0? this.perClientRunRows: period;
+ }
+ }
+
+ static class RandomScanWithRange10Test extends RandomScanWithRangeTest {
+ RandomScanWithRange10Test(Configuration conf, TestOptions options, Status status) {
+ super(conf, options, status);
+ }
+
+ @Override
+ protected Pair<byte[], byte[]> getStartAndStopRow() {
+ return generateStartAndStopRows(10);
+ }
+ }
+
+ static class RandomScanWithRange100Test extends RandomScanWithRangeTest {
+ RandomScanWithRange100Test(Configuration conf, TestOptions options, Status status) {
+ super(conf, options, status);
+ }
+
+ @Override
+ protected Pair<byte[], byte[]> getStartAndStopRow() {
+ return generateStartAndStopRows(100);
+ }
+ }
+
+ static class RandomScanWithRange1000Test extends RandomScanWithRangeTest {
+ RandomScanWithRange1000Test(Configuration conf, TestOptions options, Status status) {
+ super(conf, options, status);
+ }
+
+ @Override
+ protected Pair<byte[], byte[]> getStartAndStopRow() {
+ return generateStartAndStopRows(1000);
+ }
+ }
+
+ static class RandomScanWithRange10000Test extends RandomScanWithRangeTest {
+ RandomScanWithRange10000Test(Configuration conf, TestOptions options, Status status) {
+ super(conf, options, status);
+ }
+
+ @Override
+ protected Pair<byte[], byte[]> getStartAndStopRow() {
+ return generateStartAndStopRows(10000);
+ }
+ }
+
+ static class RandomReadTest extends Test {
+ RandomReadTest(Configuration conf, TestOptions options, Status status) {
+ super(conf, options, status);
+ }
+
+ @Override
+ void testRow(final int i) throws IOException {
+ Get get = new Get(getRandomRow(this.rand, this.totalRows));
+ get.addColumn(FAMILY_NAME, QUALIFIER_NAME);
+ this.table.get(get);
+ }
+
+ @Override
+ protected int getReportingPeriod() {
+ int period = this.perClientRunRows / 100;
+ return period == 0 ? this.perClientRunRows : period;
+ }
+
+ }
+
+ static class RandomWriteTest extends Test {
+ RandomWriteTest(Configuration conf, TestOptions options, Status status) {
+ super(conf, options, status);
+ }
+
+ @Override
+ void testRow(final int i) throws IOException {
+ byte[] row = getRandomRow(this.rand, this.totalRows);
+ Put put = new Put(row);
+ byte[] value = generateData(this.rand, ROW_LENGTH);
+ if (useTags) {
+ byte[] tag = generateData(this.rand, TAG_LENGTH);
+ Tag[] tags = new Tag[noOfTags];
+ for (int n = 0; n < noOfTags; n++) {
+ Tag t = new Tag((byte) n, tag);
+ tags[n] = t;
+ }
+ KeyValue kv = new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP,
+ value, tags);
+ put.add(kv);
+ } else {
+ put.add(FAMILY_NAME, QUALIFIER_NAME, value);
+ }
+ put.setDurability(writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
+ table.put(put);
+ }
+ }
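When usetags is set, each value is written as an explicit KeyValue carrying
noOfTags tags of TAG_LENGTH random bytes. As the usage text later notes,
this is only meaningful with HFile V3, the first HFile format that persists
cell tags.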
+
+ static class ScanTest extends Test {
+ private ResultScanner testScanner;
+
+ ScanTest(Configuration conf, TestOptions options, Status status) {
+ super(conf, options, status);
+ }
+
+ @Override
+ void testTakedown() throws IOException {
+ if (this.testScanner != null) {
+ this.testScanner.close();
+ }
+ super.testTakedown();
+ }
+
+
+ @Override
+ void testRow(final int i) throws IOException {
+ if (this.testScanner == null) {
+ Scan scan = new Scan(format(this.startRow));
+ scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
+ this.testScanner = table.getScanner(scan);
+ }
+ testScanner.next();
+ }
+
+ }
+
+ static class SequentialReadTest extends Test {
+ SequentialReadTest(Configuration conf, TestOptions options, Status status) {
+ super(conf, options, status);
+ }
+
+ @Override
+ void testRow(final int i) throws IOException {
+ Get get = new Get(format(i));
+ get.addColumn(FAMILY_NAME, QUALIFIER_NAME);
+ table.get(get);
+ }
+
+ }
+
+ static class SequentialWriteTest extends Test {
+
+ SequentialWriteTest(Configuration conf, TestOptions options, Status status) {
+ super(conf, options, status);
+ }
+
+ @Override
+ void testRow(final int i) throws IOException {
+ byte[] row = format(i);
+ Put put = new Put(row);
+ byte[] value = generateData(this.rand, ROW_LENGTH);
+ if (useTags) {
+ byte[] tag = generateData(this.rand, TAG_LENGTH);
+ Tag[] tags = new Tag[noOfTags];
+ for (int n = 0; n < noOfTags; n++) {
+ Tag t = new Tag((byte) n, tag);
+ tags[n] = t;
+ }
+ KeyValue kv = new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP,
+ value, tags);
+ put.add(kv);
+ } else {
+ put.add(FAMILY_NAME, QUALIFIER_NAME, value);
+ }
+ put.setDurability(writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
+ table.put(put);
+ }
+ }
+
+ static class FilteredScanTest extends Test {
+ protected static final Log LOG = LogFactory.getLog(FilteredScanTest.class.getName());
+
+ FilteredScanTest(Configuration conf, TestOptions options, Status status) {
+ super(conf, options, status);
+ }
+
+ @Override
+ void testRow(int i) throws IOException {
+ byte[] value = generateValue(this.rand);
+ Scan scan = constructScan(value);
+ ResultScanner scanner = null;
+ try {
+ scanner = this.table.getScanner(scan);
+ while (scanner.next() != null) {
+ }
+ } finally {
+ if (scanner != null) scanner.close();
+ }
+ }
+
+ protected Scan constructScan(byte[] valuePrefix) throws IOException {
+ Filter filter = new SingleColumnValueFilter(
+ FAMILY_NAME, QUALIFIER_NAME, CompareFilter.CompareOp.EQUAL,
+ new BinaryComparator(valuePrefix)
+ );
+ Scan scan = new Scan();
+ scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
+ scan.setFilter(filter);
+ return scan;
+ }
+ }
+
+ /**
+ * Formats the passed integer as a fixed-width, zero-padded decimal row key
+ * (the absolute value is used when the number is negative).
+ * @param number the row index to format
+ * @return zero-padded decimal key, DEFAULT_ROW_PREFIX_LENGTH + 10 bytes wide
+ */
+ public static byte [] format(final int number) {
+ byte [] b = new byte[DEFAULT_ROW_PREFIX_LENGTH + 10];
+ int d = Math.abs(number);
+ for (int i = b.length - 1; i >= 0; i--) {
+ b[i] = (byte)((d % 10) + '0');
+ d /= 10;
+ }
+ return b;
+ }
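A quick sketch of the key layout format() produces (illustrative only,
assuming DEFAULT_ROW_PREFIX_LENGTH is 0 so keys are plain ten-digit
strings):

    byte[] row = format(12345);
    // every byte is an ASCII digit, zero-padded on the left
    System.out.println(Bytes.toString(row));   // prints "0000012345"

Because keys are fixed-width and zero-padded, byte-wise sort order matches
numeric order, which is what lets the sequential and scan tests walk rows
in index order.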
+
+ public static byte[] generateData(final Random r, int length) {
+ byte [] b = new byte [length];
+ int i = 0;
+
+ for (i = 0; i < (length - 8); i += 8) {
+ b[i] = (byte) (65 + r.nextInt(26));
+ b[i+1] = b[i];
+ b[i+2] = b[i];
+ b[i+3] = b[i];
+ b[i+4] = b[i];
+ b[i+5] = b[i];
+ b[i+6] = b[i];
+ b[i+7] = b[i];
+ }
+
+ byte a = (byte) (65 + r.nextInt(26));
+ for (; i < length; i++) {
+ b[i] = a;
+ }
+ return b;
+ }
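generateData() emits deliberately repetitive values, one random letter
repeated across each 8-byte block, presumably so that runs with --compress
have something that actually compresses; generateValue() instead fills the
buffer from r.nextBytes(), yielding incompressible data for the filtered
scan. A sketch of the shape (the letters are random, the run structure is
not):

    byte[] v = generateData(new Random(), 20);
    // v looks like "AAAAAAAAQQQQQQQQMMMM": two 8-byte runs plus a
    // 4-byte tail filled with one more random letter.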
+
+ public static byte[] generateValue(final Random r) {
+ byte [] b = new byte [ROW_LENGTH];
+ r.nextBytes(b);
+ return b;
+ }
+
+ static byte [] getRandomRow(final Random random, final int totalRows) {
+ return format(random.nextInt(Integer.MAX_VALUE) % totalRows);
+ }
+
+ long runOneClient(final Class<? extends Test> cmd, final int startRow,
+ final int perClientRunRows, final int totalRows,
+ boolean flushCommits, boolean writeToWAL, boolean useTags, int noOfTags,
+ HConnection connection, final Status status)
+ throws IOException {
+ status.setStatus("Start " + cmd + " at offset " + startRow + " for " +
+ perClientRunRows + " rows");
+ long totalElapsedTime = 0;
+
+ TestOptions options = new TestOptions(startRow, perClientRunRows,
+ totalRows, N, tableName, flushCommits, writeToWAL, useTags, noOfTags, connection);
+ final Test t;
+ try {
+ Constructor<? extends Test> constructor = cmd.getDeclaredConstructor(
+ Configuration.class, TestOptions.class, Status.class);
+ t = constructor.newInstance(this.conf, options, status);
+ } catch (NoSuchMethodException e) {
+ throw new IllegalArgumentException("Invalid command class: " +
+ cmd.getName() + ". It does not provide a constructor as described by " +
+ "the javadoc comment. Available constructors are: " +
+ Arrays.toString(cmd.getConstructors()));
+ } catch (Exception e) {
+ throw new IllegalStateException("Failed to construct command class", e);
+ }
+ totalElapsedTime = t.test();
+
+ status.setStatus("Finished " + cmd + " in " + totalElapsedTime +
+ "ms at offset " + startRow + " for " + perClientRunRows + " rows");
+ return totalElapsedTime;
+ }
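Since runOneClient() instantiates the command reflectively, every Test
subclass must expose a (Configuration, TestOptions, Status) constructor;
get it wrong and the failure surfaces at runtime as the
IllegalArgumentException above rather than at compile time. A minimal
conforming skeleton (MyReadTest is a hypothetical name for illustration;
it would also need an entry in the commands map to be invocable):

    static class MyReadTest extends Test {
      MyReadTest(Configuration conf, TestOptions options, Status status) {
        super(conf, options, status);
      }
      @Override
      void testRow(final int i) throws IOException {
        // per-row work goes here
      }
    }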
+
+ private void runNIsOne(final Class<? extends Test> cmd) {
+ Status status = new Status() {
+ public void setStatus(String msg) throws IOException {
+ LOG.info(msg);
+ }
+ };
+
+ RemoteAdmin admin = null;
+ try {
+ Client client = new Client(cluster);
+ admin = new RemoteAdmin(client, getConf());
+ checkTable(admin);
+ runOneClient(cmd, 0, this.R, this.R, this.flushCommits, this.writeToWAL,
+ this.useTags, this.noOfTags, this.connection, status);
+ } catch (Exception e) {
+ LOG.error("Failed", e);
+ }
+ }
+
+ private void runTest(final Class<? extends Test> cmd) throws IOException,
+ InterruptedException, ClassNotFoundException {
+ if (N == 1) {
+ // If there is only one client and one HRegionServer, we assume nothing
+ // has been set up at all.
+ runNIsOne(cmd);
+ } else {
+ // Else, run
+ runNIsMoreThanOne(cmd);
+ }
+ }
+
+ protected void printUsage() {
+ printUsage(null);
+ }
+
+ protected void printUsage(final String message) {
+ if (message != null && message.length() > 0) {
+ System.err.println(message);
+ }
+ System.err.println("Usage: java " + this.getClass().getName() + " \\");
+ System.err.println(" [--nomapred] [--rows=ROWS] [--table=NAME] \\");
+ System.err.println(" [--compress=TYPE] [--blockEncoding=TYPE] [-D<property=value>]* <command> <nclients>");
+ System.err.println();
+ System.err.println("Options:");
+ System.err.println(" nomapred Run multiple clients using threads " +
+ "(rather than use mapreduce)");
+ System.err.println(" rows Rows each client runs. Default: One million");
+ System.err.println(" table Alternate table name. Default: 'TestTable'");
+ System.err.println(" compress Compression type to use (GZ, LZO, ...). Default: 'NONE'");
+ System.err.println(" flushCommits Whether to flush buffered client writes when each test finishes. Default: false");
+ System.err.println(" writeToWAL Set writeToWAL on puts. Default: true");
+ System.err.println(" presplit Create presplit table. Recommended for accurate perf analysis (see guide). Default: disabled");
+ System.err
+ .println(" inmemory Tries to keep the HFiles of the CF in memory as much as possible. Reads are not " +
+ "guaranteed to always be served from memory. Default: false");
+ System.err.println(" usetags Writes tags along with KVs. Use with HFile V3. Default: false");
+ System.err
+ .println(" numoftags Number of tags to write per cell. Effective only when usetags is true.");
+ System.err.println();
+ System.err.println(" Note: -D properties will be applied to the conf used. ");
+ System.err.println(" For example: ");
+ System.err.println(" -Dmapred.output.compress=true");
+ System.err.println(" -Dmapreduce.task.timeout=60000");
+ System.err.println();
+ System.err.println("Command:");
+ for (CmdDescriptor command : commands.values()) {
+ System.err.println(String.format(" %-15s %s", command.getName(), command.getDescription()));
+ }
+ System.err.println();
+ System.err.println("Args:");
+ System.err.println(" nclients Integer. Required. Total number of " +
+ "clients (and HRegionServers)");
+ System.err.println(" running: 1 <= value <= 500");
+ System.err.println("Examples:");
+ System.err.println(" To run a single evaluation client:");
+ System.err.println(" $ bin/hbase " + this.getClass().getName()
+ + " sequentialWrite 1");
+ }
+
+ private void getArgs(final int start, final String[] args) {
+ if (start + 1 > args.length) {
+ throw new IllegalArgumentException("must supply the number of clients");
+ }
+ N = Integer.parseInt(args[start]);
+ if (N < 1) {
+ throw new IllegalArgumentException("Number of clients must be at least 1");
+ }
+ // Set total number of rows to write.
+ R = R * N;
+ }
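Note the multiplication at the end: R arrives as the per-client row count
(--rows, default one million) and is scaled up to the cluster-wide total,
so --rows=1000000 with nclients=4 leaves R at 4,000,000. In the N == 1
case the single client simply runs all R rows; otherwise
runNIsMoreThanOne() presumably splits the range back across the N clients.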
+
+ @Override
+ public int run(String[] args) throws Exception {
+ // Process command-line args. TODO: Better cmd-line processing
+ // (but hopefully something not as painful as cli options).
+ int errCode = -1;
+ if (args.length < 1) {
+ printUsage();
+ return errCode;
+ }
+
+ try {
+ for (int i = 0; i < args.length; i++) {
+ String cmd = args[i];
+ if (cmd.equals("-h") || cmd.startsWith("--h")) {
+ printUsage();
+ errCode = 0;
+ break;
+ }
+
+ final String nmr = "--nomapred";
+ if (cmd.startsWith(nmr)) {
+ nomapred = true;
+ continue;
+ }
+
+ final String rows = "--rows=";
+ if (cmd.startsWith(rows)) {
+ R = Integer.parseInt(cmd.substring(rows.length()));
+ continue;
+ }
+
+ final String table = "--table=";
+ if (cmd.startsWith(table)) {
+ this.tableName = TableName.valueOf(cmd.substring(table.length()));
+ continue;
+ }
+
+ final String compress = "--compress=";
+ if (cmd.startsWith(compress)) {
+ this.compression = Compression.Algorithm.valueOf(cmd.substring(compress.length()));
+ continue;
+ }
+
+ final String blockEncoding = "--blockEncoding=";
+ if (cmd.startsWith(blockEncoding)) {
+ this.blockEncoding = DataBlockEncoding.valueOf(cmd.substring(blockEncoding.length()));
+ continue;
+ }
+
+ final String flushCommits = "--flushCommits=";
+ if (cmd.startsWith(flushCommits)) {
+ this.flushCommits = Boolean.parseBoolean(cmd.substring(flushCommits.length()));
+ continue;
+ }
+
+ final String writeToWAL = "--writeToWAL=";
+ if (cmd.startsWith(writeToWAL)) {
+ this.writeToWAL = Boolean.parseBoolean(cmd.substring(writeToWAL.length()));
+ continue;
+ }
+
+ final String presplit = "--presplit=";
+ if (cmd.startsWith(presplit)) {
+ this.presplitRegions = Integer.parseInt(cmd.substring(presplit.length()));
+ continue;
+ }
+
+ final String inMemory = "--inmemory=";
+ if (cmd.startsWith(inMemory)) {
+ this.inMemoryCF = Boolean.parseBoolean(cmd.substring(inMemory.length()));
+ continue;
+ }
+
+
+ final String useTags = "--usetags=";
+ if (cmd.startsWith(useTags)) {
+ this.useTags = Boolean.parseBoolean(cmd.substring(useTags.length()));
+ continue;
+ }
+
+ final String noOfTags = "--nooftags=";
+ if (cmd.startsWith(noOfTags)) {
+ this.noOfTags = Integer.parseInt(cmd.substring(noOfTags.length()));
+ continue;
+ }
+
+ final String host = "--host=";
+ if (cmd.startsWith(host)) {
+ cluster.add(cmd.substring(host.length()));
+ continue;
+ }
+
+ Class<? extends Test> cmdClass = determineCommandClass(cmd);
+ if (cmdClass != null) {
+ // Create the shared connection once, only after a command has been
+ // recognized, instead of on every pass through the argument loop.
+ this.connection = HConnectionManager.createConnection(getConf());
+ getArgs(i + 1, args);
+ if (cluster.isEmpty()) {
+ String s = conf.get("stargate.hostname", "localhost");
+ if (s.contains(":")) {
+ cluster.add(s);
+ } else {
+ cluster.add(s, conf.getInt("stargate.port", 8080));
+ }
+ }
+ runTest(cmdClass);
+ errCode = 0;
+ break;
+ }
+
+ printUsage();
+ break;
+ }
+ } catch (Exception e) {
+ LOG.error("Failed", e);
+ }
+
+ return errCode;
+ }
+
+ private Class<? extends Test> determineCommandClass(String cmd) {
+ CmdDescriptor descriptor = commands.get(cmd);
+ return descriptor != null ? descriptor.getCmdClass() : null;
+ }
+
+ /**
+ * @param args command-line arguments; see printUsage()
+ */
+ public static void main(final String[] args) throws Exception {
+ int res = ToolRunner.run(new PerformanceEvaluation(HBaseConfiguration.create()), args);
+ System.exit(res);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
new file mode 100644
index 0000000..adebc6b
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
@@ -0,0 +1,481 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.StringWriter;
+
+import javax.ws.rs.core.MediaType;
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.Marshaller;
+import javax.xml.bind.Unmarshaller;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.rest.client.Client;
+import org.apache.hadoop.hbase.rest.client.Cluster;
+import org.apache.hadoop.hbase.rest.client.Response;
+import org.apache.hadoop.hbase.rest.model.CellModel;
+import org.apache.hadoop.hbase.rest.model.CellSetModel;
+import org.apache.hadoop.hbase.rest.model.RowModel;
+import org.apache.hadoop.hbase.rest.provider.JacksonProvider;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+public class RowResourceBase {
+
+ protected static final String TABLE = "TestRowResource";
+ protected static final String CFA = "a";
+ protected static final String CFB = "b";
+ protected static final String COLUMN_1 = CFA + ":1";
+ protected static final String COLUMN_2 = CFB + ":2";
+ protected static final String COLUMN_3 = CFA + ":";
+ protected static final String ROW_1 = "testrow1";
+ protected static final String VALUE_1 = "testvalue1";
+ protected static final String ROW_2 = "testrow2";
+ protected static final String VALUE_2 = "testvalue2";
+ protected static final String ROW_3 = "testrow3";
+ protected static final String VALUE_3 = "testvalue3";
+ protected static final String ROW_4 = "testrow4";
+ protected static final String VALUE_4 = "testvalue4";
+
+ protected static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ protected static final HBaseRESTTestingUtility REST_TEST_UTIL =
+ new HBaseRESTTestingUtility();
+ protected static Client client;
+ protected static JAXBContext context;
+ protected static Marshaller xmlMarshaller;
+ protected static Unmarshaller xmlUnmarshaller;
+ protected static Configuration conf;
+ protected static ObjectMapper jsonMapper;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ conf = TEST_UTIL.getConfiguration();
+ TEST_UTIL.startMiniCluster(3);
+ REST_TEST_UTIL.startServletContainer(conf);
+ context = JAXBContext.newInstance(
+ CellModel.class,
+ CellSetModel.class,
+ RowModel.class);
+ xmlMarshaller = context.createMarshaller();
+ xmlUnmarshaller = context.createUnmarshaller();
+ jsonMapper = new JacksonProvider()
+ .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE);
+ client = new Client(new Cluster().add("localhost",
+ REST_TEST_UTIL.getServletPort()));
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ REST_TEST_UTIL.shutdownServletContainer();
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ @Before
+ public void beforeMethod() throws Exception {
+ HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+ if (admin.tableExists(TABLE)) {
+ TEST_UTIL.deleteTable(Bytes.toBytes(TABLE));
+ }
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
+ htd.addFamily(new HColumnDescriptor(CFA));
+ htd.addFamily(new HColumnDescriptor(CFB));
+ admin.createTable(htd);
+ }
+
+ @After
+ public void afterMethod() throws Exception {
+ HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+ if (admin.tableExists(TABLE)) {
+ TEST_UTIL.deleteTable(Bytes.toBytes(TABLE));
+ }
+ }
+
+ static Response putValuePB(String table, String row, String column,
+ String value) throws IOException {
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ path.append(table);
+ path.append('/');
+ path.append(row);
+ path.append('/');
+ path.append(column);
+ return putValuePB(path.toString(), table, row, column, value);
+ }
+
+ static Response putValuePB(String url, String table, String row,
+ String column, String value) throws IOException {
+ RowModel rowModel = new RowModel(row);
+ rowModel.addCell(new CellModel(Bytes.toBytes(column),
+ Bytes.toBytes(value)));
+ CellSetModel cellSetModel = new CellSetModel();
+ cellSetModel.addRow(rowModel);
+ Response response = client.put(url, Constants.MIMETYPE_PROTOBUF,
+ cellSetModel.createProtobufOutput());
+ Thread.yield();
+ return response;
+ }
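These helpers all target the standard REST row URI,
/<table>/<row>/<family:qualifier>, and differ only in the representation
carried in the request body. For example, putValuePB(TABLE, ROW_1,
COLUMN_1, VALUE_1) issues a PUT to /TestRowResource/testrow1/a:1 with a
protobuf-encoded CellSetModel payload; the XML and JSON variants below
send the same model marshalled through JAXB and Jackson respectively.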
+
+ protected static void checkValueXML(String url, String table, String row,
+ String column, String value) throws IOException, JAXBException {
+ Response response = getValueXML(url);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
+ CellSetModel cellSet = (CellSetModel)
+ xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
+ RowModel rowModel = cellSet.getRows().get(0);
+ CellModel cell = rowModel.getCells().get(0);
+ assertEquals(column, Bytes.toString(cell.getColumn()));
+ assertEquals(value, Bytes.toString(cell.getValue()));
+ }
+
+ protected static void checkValueXML(String table, String row, String column,
+ String value) throws IOException, JAXBException {
+ Response response = getValueXML(table, row, column);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
+ CellSetModel cellSet = (CellSetModel)
+ xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
+ RowModel rowModel = cellSet.getRows().get(0);
+ CellModel cell = rowModel.getCells().get(0);
+ assertEquals(column, Bytes.toString(cell.getColumn()));
+ assertEquals(value, Bytes.toString(cell.getValue()));
+ }
+
+ protected static Response getValuePB(String url) throws IOException {
+ Response response = client.get(url, Constants.MIMETYPE_PROTOBUF);
+ return response;
+ }
+
+ protected static Response putValueXML(String table, String row, String column,
+ String value) throws IOException, JAXBException {
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ path.append(table);
+ path.append('/');
+ path.append(row);
+ path.append('/');
+ path.append(column);
+ return putValueXML(path.toString(), table, row, column, value);
+ }
+
+ protected static Response putValueXML(String url, String table, String row,
+ String column, String value) throws IOException, JAXBException {
+ RowModel rowModel = new RowModel(row);
+ rowModel.addCell(new CellModel(Bytes.toBytes(column),
+ Bytes.toBytes(value)));
+ CellSetModel cellSetModel = new CellSetModel();
+ cellSetModel.addRow(rowModel);
+ StringWriter writer = new StringWriter();
+ xmlMarshaller.marshal(cellSetModel, writer);
+ Response response = client.put(url, Constants.MIMETYPE_XML,
+ Bytes.toBytes(writer.toString()));
+ Thread.yield();
+ return response;
+ }
+
+ protected static Response getValuePB(String table, String row, String column)
+ throws IOException {
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ path.append(table);
+ path.append('/');
+ path.append(row);
+ path.append('/');
+ path.append(column);
+ return getValuePB(path.toString());
+ }
+
+ protected static void checkValuePB(String table, String row, String column,
+ String value) throws IOException {
+ Response response = getValuePB(table, row, column);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
+ CellSetModel cellSet = new CellSetModel();
+ cellSet.getObjectFromMessage(response.getBody());
+ RowModel rowModel = cellSet.getRows().get(0);
+ CellModel cell = rowModel.getCells().get(0);
+ assertEquals(column, Bytes.toString(cell.getColumn()));
+ assertEquals(value, Bytes.toString(cell.getValue()));
+ }
+
+ protected static Response checkAndPutValuePB(String url, String table,
+ String row, String column, String valueToCheck, String valueToPut)
+ throws IOException {
+ RowModel rowModel = new RowModel(row);
+ rowModel.addCell(new CellModel(Bytes.toBytes(column),
+ Bytes.toBytes(valueToPut)));
+ rowModel.addCell(new CellModel(Bytes.toBytes(column),
+ Bytes.toBytes(valueToCheck)));
+ CellSetModel cellSetModel = new CellSetModel();
+ cellSetModel.addRow(rowModel);
+ Response response = client.put(url, Constants.MIMETYPE_PROTOBUF,
+ cellSetModel.createProtobufOutput());
+ Thread.yield();
+ return response;
+ }
+
+ protected static Response checkAndPutValuePB(String table, String row,
+ String column, String valueToCheck, String valueToPut) throws IOException {
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ path.append(table);
+ path.append('/');
+ path.append(row);
+ path.append("?check=put");
+ return checkAndPutValuePB(path.toString(), table, row, column,
+ valueToCheck, valueToPut);
+ }
+
+ protected static Response checkAndPutValueXML(String url, String table,
+ String row, String column, String valueToCheck, String valueToPut)
+ throws IOException, JAXBException {
+ RowModel rowModel = new RowModel(row);
+ rowModel.addCell(new CellModel(Bytes.toBytes(column),
+ Bytes.toBytes(valueToPut)));
+ rowModel.addCell(new CellModel(Bytes.toBytes(column),
+ Bytes.toBytes(valueToCheck)));
+ CellSetModel cellSetModel = new CellSetModel();
+ cellSetModel.addRow(rowModel);
+ StringWriter writer = new StringWriter();
+ xmlMarshaller.marshal(cellSetModel, writer);
+ Response response = client.put(url, Constants.MIMETYPE_XML,
+ Bytes.toBytes(writer.toString()));
+ Thread.yield();
+ return response;
+ }
+
+ protected static Response checkAndPutValueXML(String table, String row,
+ String column, String valueToCheck, String valueToPut)
+ throws IOException, JAXBException {
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ path.append(table);
+ path.append('/');
+ path.append(row);
+ path.append("?check=put");
+ return checkAndPutValueXML(path.toString(), table, row, column,
+ valueToCheck, valueToPut);
+ }
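Note the payload convention for the atomic variants: the cell to put comes
first and the expected current value is appended as the trailing cell of
the same row, while the ?check=put (or ?check=delete) query parameter asks
the server for check-and-mutate semantics; the REST server evidently
compares against that trailing cell before applying the mutation.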
+
+ protected static Response checkAndDeleteXML(String url, String table,
+ String row, String column, String valueToCheck)
+ throws IOException, JAXBException {
+ RowModel rowModel = new RowModel(row);
+ rowModel.addCell(new CellModel(Bytes.toBytes(column),
+ Bytes.toBytes(valueToCheck)));
+ CellSetModel cellSetModel = new CellSetModel();
+ cellSetModel.addRow(rowModel);
+ StringWriter writer = new StringWriter();
+ xmlMarshaller.marshal(cellSetModel, writer);
+ Response response = client.put(url, Constants.MIMETYPE_XML,
+ Bytes.toBytes(writer.toString()));
+ Thread.yield();
+ return response;
+ }
+
+ protected static Response checkAndDeleteXML(String table, String row,
+ String column, String valueToCheck) throws IOException, JAXBException {
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ path.append(table);
+ path.append('/');
+ path.append(row);
+ path.append("?check=delete");
+ return checkAndDeleteXML(path.toString(), table, row, column, valueToCheck);
+ }
+
+ protected static Response checkAndDeleteJson(String table, String row,
+ String column, String valueToCheck) throws IOException, JAXBException {
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ path.append(table);
+ path.append('/');
+ path.append(row);
+ path.append("?check=delete");
+ return checkAndDeleteJson(path.toString(), table, row, column, valueToCheck);
+ }
+
+ protected static Response checkAndDeleteJson(String url, String table,
+ String row, String column, String valueToCheck)
+ throws IOException, JAXBException {
+ RowModel rowModel = new RowModel(row);
+ rowModel.addCell(new CellModel(Bytes.toBytes(column),
+ Bytes.toBytes(valueToCheck)));
+ CellSetModel cellSetModel = new CellSetModel();
+ cellSetModel.addRow(rowModel);
+ String jsonString = jsonMapper.writeValueAsString(cellSetModel);
+ Response response = client.put(url, Constants.MIMETYPE_JSON,
+ Bytes.toBytes(jsonString));
+ Thread.yield();
+ return response;
+ }
+
+ protected static Response checkAndDeletePB(String table, String row,
+ String column, String value) throws IOException {
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ path.append(table);
+ path.append('/');
+ path.append(row);
+ path.append("?check=delete");
+ return checkAndDeleteValuePB(path.toString(), table, row, column, value);
+ }
+
+ protected static Response checkAndDeleteValuePB(String url, String table,
+ String row, String column, String valueToCheck)
+ throws IOException {
+ RowModel rowModel = new RowModel(row);
+ rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes
+ .toBytes(valueToCheck)));
+ CellSetModel cellSetModel = new CellSetModel();
+ cellSetModel.addRow(rowModel);
+ Response response = client.put(url, Constants.MIMETYPE_PROTOBUF,
+ cellSetModel.createProtobufOutput());
+ Thread.yield();
+ return response;
+ }
+
+ protected static Response getValueXML(String table, String startRow,
+ String endRow, String column) throws IOException {
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ path.append(table);
+ path.append('/');
+ path.append(startRow);
+ path.append(",");
+ path.append(endRow);
+ path.append('/');
+ path.append(column);
+ return getValueXML(path.toString());
+ }
+
+ protected static Response getValueXML(String url) throws IOException {
+ Response response = client.get(url, Constants.MIMETYPE_XML);
+ return response;
+ }
+
+ protected static Response getValueJson(String url) throws IOException {
+ Response response = client.get(url, Constants.MIMETYPE_JSON);
+ return response;
+ }
+
+ protected static Response deleteValue(String table, String row, String column)
+ throws IOException {
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ path.append(table);
+ path.append('/');
+ path.append(row);
+ path.append('/');
+ path.append(column);
+ Response response = client.delete(path.toString());
+ Thread.yield();
+ return response;
+ }
+
+ protected static Response getValueXML(String table, String row, String column)
+ throws IOException {
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ path.append(table);
+ path.append('/');
+ path.append(row);
+ path.append('/');
+ path.append(column);
+ return getValueXML(path.toString());
+ }
+
+ protected static Response deleteRow(String table, String row)
+ throws IOException {
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ path.append(table);
+ path.append('/');
+ path.append(row);
+ Response response = client.delete(path.toString());
+ Thread.yield();
+ return response;
+ }
+
+ protected static Response getValueJson(String table, String row,
+ String column) throws IOException {
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ path.append(table);
+ path.append('/');
+ path.append(row);
+ path.append('/');
+ path.append(column);
+ return getValueJson(path.toString());
+ }
+
+ protected static void checkValueJSON(String table, String row, String column,
+ String value) throws IOException, JAXBException {
+ Response response = getValueJson(table, row, column);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
+ ObjectMapper mapper = new JacksonProvider()
+ .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE);
+ CellSetModel cellSet = mapper.readValue(response.getBody(), CellSetModel.class);
+ RowModel rowModel = cellSet.getRows().get(0);
+ CellModel cell = rowModel.getCells().get(0);
+ assertEquals(column, Bytes.toString(cell.getColumn()));
+ assertEquals(value, Bytes.toString(cell.getValue()));
+ }
+
+ protected static Response putValueJson(String table, String row, String column,
+ String value) throws IOException, JAXBException {
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ path.append(table);
+ path.append('/');
+ path.append(row);
+ path.append('/');
+ path.append(column);
+ return putValueJson(path.toString(), table, row, column, value);
+ }
+
+ protected static Response putValueJson(String url, String table, String row, String column,
+ String value) throws IOException, JAXBException {
+ RowModel rowModel = new RowModel(row);
+ rowModel.addCell(new CellModel(Bytes.toBytes(column),
+ Bytes.toBytes(value)));
+ CellSetModel cellSetModel = new CellSetModel();
+ cellSetModel.addRow(rowModel);
+ String jsonString = jsonMapper.writeValueAsString(cellSetModel);
+ Response response = client.put(url, Constants.MIMETYPE_JSON,
+ Bytes.toBytes(jsonString));
+ Thread.yield();
+ return response;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java
new file mode 100644
index 0000000..89e1b20
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.rest.client.Response;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+
+@Category(MediumTests.class)
+public class TestDeleteRow extends RowResourceBase {
+
+ @Test
+ public void testDeleteNonExistentColumn() throws Exception {
+ Response response = putValueJson(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ assertEquals(200, response.getCode());
+
+ response = checkAndDeleteJson(TABLE, ROW_1, COLUMN_1, VALUE_2);
+ assertEquals(304, response.getCode());
+ assertEquals(200, getValueJson(TABLE, ROW_1, COLUMN_1).getCode());
+
+ response = checkAndDeleteJson(TABLE, ROW_2, COLUMN_1, VALUE_2);
+ assertEquals(304, response.getCode());
+ assertEquals(200, getValueJson(TABLE, ROW_1, COLUMN_1).getCode());
+
+ response = checkAndDeleteJson(TABLE, ROW_1, "dummy", VALUE_1);
+ assertEquals(400, response.getCode());
+ assertEquals(200, getValueJson(TABLE, ROW_1, COLUMN_1).getCode());
+
+ response = checkAndDeleteJson(TABLE, ROW_1, "dummy:test", VALUE_1);
+ assertEquals(404, response.getCode());
+ assertEquals(200, getValueJson(TABLE, ROW_1, COLUMN_1).getCode());
+
+ response = checkAndDeleteJson(TABLE, ROW_1, "a:test", VALUE_1);
+ assertEquals(304, response.getCode());
+ assertEquals(200, getValueJson(TABLE, ROW_1, COLUMN_1).getCode());
+ }
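As exercised above, check-and-delete distinguishes three failure shapes:
304 Not Modified when the column is addressable but the supplied value
does not match (including the case where the row itself is absent),
400 Bad Request when the column specifier is malformed (no
family:qualifier separator), and 404 Not Found when the named column
family does not exist.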
+
+ @Test
+ public void testDeleteXML() throws IOException, JAXBException {
+ Response response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ assertEquals(200, response.getCode());
+ response = putValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2);
+ assertEquals(200, response.getCode());
+ checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ checkValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2);
+
+ response = deleteValue(TABLE, ROW_1, COLUMN_1);
+ assertEquals(200, response.getCode());
+ response = getValueXML(TABLE, ROW_1, COLUMN_1);
+ assertEquals(404, response.getCode());
+ checkValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2);
+
+ response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ assertEquals(200, response.getCode());
+ response = checkAndDeletePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ assertEquals(200, response.getCode());
+ response = getValueXML(TABLE, ROW_1, COLUMN_1);
+ assertEquals(404, response.getCode());
+
+ response = deleteRow(TABLE, ROW_1);
+ assertEquals(200, response.getCode());
+ response = getValueXML(TABLE, ROW_1, COLUMN_1);
+ assertEquals(404, response.getCode());
+ response = getValueXML(TABLE, ROW_1, COLUMN_2);
+ assertEquals(404, response.getCode());
+
+ // Delete a row in a non-existent table
+ response = deleteValue("dummy", ROW_1, COLUMN_1);
+ assertEquals(404, response.getCode());
+
+ // Delete a non-existent column
+ response = deleteValue(TABLE, ROW_1, "dummy");
+ assertEquals(404, response.getCode());
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGZIPResponseWrapper.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGZIPResponseWrapper.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGZIPResponseWrapper.java
new file mode 100644
index 0000000..763765f
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGZIPResponseWrapper.java
@@ -0,0 +1,117 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+
+import javax.servlet.ServletOutputStream;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.rest.filter.GZIPResponseStream;
+import org.apache.hadoop.hbase.rest.filter.GZIPResponseWrapper;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestGZIPResponseWrapper {
+
+ private final HttpServletResponse response = mock(HttpServletResponse.class);
+ private final GZIPResponseWrapper wrapper = new GZIPResponseWrapper(response);
+
+ /**
+ * The wrapper should forward all headers except "content-length".
+ */
+ @Test
+ public void testHeader() throws IOException {
+ wrapper.setStatus(200);
+ verify(response).setStatus(200);
+ wrapper.addHeader("header", "header value");
+ verify(response).addHeader("header", "header value");
+ wrapper.addHeader("content-length", "header value2");
+ verify(response, never()).addHeader("content-length", "header value2");
+
+ wrapper.setIntHeader("header", 5);
+ verify(response).setIntHeader("header", 5);
+ wrapper.setIntHeader("content-length", 4);
+ verify(response, never()).setIntHeader("content-length", 4);
+
+ wrapper.setHeader("set-header", "new value");
+ verify(response).setHeader("set-header", "new value");
+ wrapper.setHeader("content-length", "content length value");
+ verify(response, never()).setHeader("content-length", "content length value");
+
+ wrapper.sendRedirect("location");
+ verify(response).sendRedirect("location");
+
+ wrapper.flushBuffer();
+ verify(response).flushBuffer();
+ }
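The content-length suppression pinned down above is the point of the
wrapper: once the body will be gzip-compressed on the way out, any
Content-Length the servlet computed for the uncompressed entity would be
wrong, so the wrapper swallows it, advertises Content-Encoding: gzip (see
testReset below), and leaves the container to frame the response itself,
e.g. with chunked transfer encoding.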
+
+ @Test
+ public void testResetBuffer() throws IOException {
+ when(response.isCommitted()).thenReturn(false);
+ ServletOutputStream out = mock(ServletOutputStream.class);
+ when(response.getOutputStream()).thenReturn(out);
+
+ ServletOutputStream servletOutput = wrapper.getOutputStream();
+ assertEquals(GZIPResponseStream.class, servletOutput.getClass());
+ wrapper.resetBuffer();
+ verify(response).setHeader("Content-Encoding", null);
+
+ when(response.isCommitted()).thenReturn(true);
+ servletOutput = wrapper.getOutputStream();
+ assertEquals(out.getClass(), servletOutput.getClass());
+ assertNotNull(wrapper.getWriter());
+ }
+
+ @Test
+ public void testReset() throws IOException {
+ when(response.isCommitted()).thenReturn(false);
+ ServletOutputStream out = mock(ServletOutputStream.class);
+ when(response.getOutputStream()).thenReturn(out);
+
+ ServletOutputStream servletOutput = wrapper.getOutputStream();
+ verify(response).addHeader("Content-Encoding", "gzip");
+ assertEquals(GZIPResponseStream.class, servletOutput.getClass());
+ wrapper.reset();
+ verify(response).setHeader("Content-Encoding", null);
+
+ when(response.isCommitted()).thenReturn(true);
+ servletOutput = wrapper.getOutputStream();
+ assertEquals(out.getClass(), servletOutput.getClass());
+ }
+
+ @Test
+ public void testSendError() throws IOException {
+ wrapper.sendError(404);
+ verify(response).sendError(404);
+
+ wrapper.sendError(404, "error message");
+ verify(response).sendError(404, "error message");
+ }
+
+}
[10/38] HBASE-12197 Move rest to it's on module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java
deleted file mode 100644
index 05ff7a3..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java
+++ /dev/null
@@ -1,3955 +0,0 @@
-// Generated by the protocol buffer compiler. DO NOT EDIT!
-// source: StorageClusterStatusMessage.proto
-
-package org.apache.hadoop.hbase.rest.protobuf.generated;
-
-public final class StorageClusterStatusMessage {
- private StorageClusterStatusMessage() {}
- public static void registerAllExtensions(
- com.google.protobuf.ExtensionRegistry registry) {
- }
- public interface StorageClusterStatusOrBuilder
- extends com.google.protobuf.MessageOrBuilder {
-
- // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
- *
- * <pre>
- * node status
- * </pre>
- */
- java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node>
- getLiveNodesList();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
- *
- * <pre>
- * node status
- * </pre>
- */
- org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node getLiveNodes(int index);
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
- *
- * <pre>
- * node status
- * </pre>
- */
- int getLiveNodesCount();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
- *
- * <pre>
- * node status
- * </pre>
- */
- java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder>
- getLiveNodesOrBuilderList();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
- *
- * <pre>
- * node status
- * </pre>
- */
- org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder getLiveNodesOrBuilder(
- int index);
-
- // repeated string deadNodes = 2;
- /**
- * <code>repeated string deadNodes = 2;</code>
- */
- java.util.List<java.lang.String>
- getDeadNodesList();
- /**
- * <code>repeated string deadNodes = 2;</code>
- */
- int getDeadNodesCount();
- /**
- * <code>repeated string deadNodes = 2;</code>
- */
- java.lang.String getDeadNodes(int index);
- /**
- * <code>repeated string deadNodes = 2;</code>
- */
- com.google.protobuf.ByteString
- getDeadNodesBytes(int index);
-
- // optional int32 regions = 3;
- /**
- * <code>optional int32 regions = 3;</code>
- *
- * <pre>
- * summary statistics
- * </pre>
- */
- boolean hasRegions();
- /**
- * <code>optional int32 regions = 3;</code>
- *
- * <pre>
- * summary statistics
- * </pre>
- */
- int getRegions();
-
- // optional int32 requests = 4;
- /**
- * <code>optional int32 requests = 4;</code>
- */
- boolean hasRequests();
- /**
- * <code>optional int32 requests = 4;</code>
- */
- int getRequests();
-
- // optional double averageLoad = 5;
- /**
- * <code>optional double averageLoad = 5;</code>
- */
- boolean hasAverageLoad();
- /**
- * <code>optional double averageLoad = 5;</code>
- */
- double getAverageLoad();
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus}
- */
- public static final class StorageClusterStatus extends
- com.google.protobuf.GeneratedMessage
- implements StorageClusterStatusOrBuilder {
- // Use StorageClusterStatus.newBuilder() to construct.
- private StorageClusterStatus(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private StorageClusterStatus(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final StorageClusterStatus defaultInstance;
- public static StorageClusterStatus getDefaultInstance() {
- return defaultInstance;
- }
-
- public StorageClusterStatus getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private StorageClusterStatus(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 10: {
- if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
- liveNodes_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node>();
- mutable_bitField0_ |= 0x00000001;
- }
- liveNodes_.add(input.readMessage(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.PARSER, extensionRegistry));
- break;
- }
- case 18: {
- if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
- deadNodes_ = new com.google.protobuf.LazyStringArrayList();
- mutable_bitField0_ |= 0x00000002;
- }
- deadNodes_.add(input.readBytes());
- break;
- }
- case 24: {
- bitField0_ |= 0x00000001;
- regions_ = input.readInt32();
- break;
- }
- case 32: {
- bitField0_ |= 0x00000002;
- requests_ = input.readInt32();
- break;
- }
- case 41: {
- bitField0_ |= 0x00000004;
- averageLoad_ = input.readDouble();
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
- liveNodes_ = java.util.Collections.unmodifiableList(liveNodes_);
- }
- if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
- deadNodes_ = new com.google.protobuf.UnmodifiableLazyStringList(deadNodes_);
- }
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.class, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Builder.class);
- }
-
- public static com.google.protobuf.Parser<StorageClusterStatus> PARSER =
- new com.google.protobuf.AbstractParser<StorageClusterStatus>() {
- public StorageClusterStatus parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new StorageClusterStatus(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<StorageClusterStatus> getParserForType() {
- return PARSER;
- }
-
- public interface RegionOrBuilder
- extends com.google.protobuf.MessageOrBuilder {
-
- // required bytes name = 1;
- /**
- * <code>required bytes name = 1;</code>
- */
- boolean hasName();
- /**
- * <code>required bytes name = 1;</code>
- */
- com.google.protobuf.ByteString getName();
-
- // optional int32 stores = 2;
- /**
- * <code>optional int32 stores = 2;</code>
- */
- boolean hasStores();
- /**
- * <code>optional int32 stores = 2;</code>
- */
- int getStores();
-
- // optional int32 storefiles = 3;
- /**
- * <code>optional int32 storefiles = 3;</code>
- */
- boolean hasStorefiles();
- /**
- * <code>optional int32 storefiles = 3;</code>
- */
- int getStorefiles();
-
- // optional int32 storefileSizeMB = 4;
- /**
- * <code>optional int32 storefileSizeMB = 4;</code>
- */
- boolean hasStorefileSizeMB();
- /**
- * <code>optional int32 storefileSizeMB = 4;</code>
- */
- int getStorefileSizeMB();
-
- // optional int32 memstoreSizeMB = 5;
- /**
- * <code>optional int32 memstoreSizeMB = 5;</code>
- */
- boolean hasMemstoreSizeMB();
- /**
- * <code>optional int32 memstoreSizeMB = 5;</code>
- */
- int getMemstoreSizeMB();
-
- // optional int32 storefileIndexSizeMB = 6;
- /**
- * <code>optional int32 storefileIndexSizeMB = 6;</code>
- */
- boolean hasStorefileIndexSizeMB();
- /**
- * <code>optional int32 storefileIndexSizeMB = 6;</code>
- */
- int getStorefileIndexSizeMB();
-
- // optional int64 readRequestsCount = 7;
- /**
- * <code>optional int64 readRequestsCount = 7;</code>
- */
- boolean hasReadRequestsCount();
- /**
- * <code>optional int64 readRequestsCount = 7;</code>
- */
- long getReadRequestsCount();
-
- // optional int64 writeRequestsCount = 8;
- /**
- * <code>optional int64 writeRequestsCount = 8;</code>
- */
- boolean hasWriteRequestsCount();
- /**
- * <code>optional int64 writeRequestsCount = 8;</code>
- */
- long getWriteRequestsCount();
-
- // optional int32 rootIndexSizeKB = 9;
- /**
- * <code>optional int32 rootIndexSizeKB = 9;</code>
- */
- boolean hasRootIndexSizeKB();
- /**
- * <code>optional int32 rootIndexSizeKB = 9;</code>
- */
- int getRootIndexSizeKB();
-
- // optional int32 totalStaticIndexSizeKB = 10;
- /**
- * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
- */
- boolean hasTotalStaticIndexSizeKB();
- /**
- * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
- */
- int getTotalStaticIndexSizeKB();
-
- // optional int32 totalStaticBloomSizeKB = 11;
- /**
- * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
- */
- boolean hasTotalStaticBloomSizeKB();
- /**
- * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
- */
- int getTotalStaticBloomSizeKB();
-
- // optional int64 totalCompactingKVs = 12;
- /**
- * <code>optional int64 totalCompactingKVs = 12;</code>
- */
- boolean hasTotalCompactingKVs();
- /**
- * <code>optional int64 totalCompactingKVs = 12;</code>
- */
- long getTotalCompactingKVs();
-
- // optional int64 currentCompactedKVs = 13;
- /**
- * <code>optional int64 currentCompactedKVs = 13;</code>
- */
- boolean hasCurrentCompactedKVs();
- /**
- * <code>optional int64 currentCompactedKVs = 13;</code>
- */
- long getCurrentCompactedKVs();
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region}
- */
- public static final class Region extends
- com.google.protobuf.GeneratedMessage
- implements RegionOrBuilder {
- // Use Region.newBuilder() to construct.
- private Region(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private Region(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final Region defaultInstance;
- public static Region getDefaultInstance() {
- return defaultInstance;
- }
-
- public Region getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private Region(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 10: {
- bitField0_ |= 0x00000001;
- name_ = input.readBytes();
- break;
- }
- case 16: {
- bitField0_ |= 0x00000002;
- stores_ = input.readInt32();
- break;
- }
- case 24: {
- bitField0_ |= 0x00000004;
- storefiles_ = input.readInt32();
- break;
- }
- case 32: {
- bitField0_ |= 0x00000008;
- storefileSizeMB_ = input.readInt32();
- break;
- }
- case 40: {
- bitField0_ |= 0x00000010;
- memstoreSizeMB_ = input.readInt32();
- break;
- }
- case 48: {
- bitField0_ |= 0x00000020;
- storefileIndexSizeMB_ = input.readInt32();
- break;
- }
- case 56: {
- bitField0_ |= 0x00000040;
- readRequestsCount_ = input.readInt64();
- break;
- }
- case 64: {
- bitField0_ |= 0x00000080;
- writeRequestsCount_ = input.readInt64();
- break;
- }
- case 72: {
- bitField0_ |= 0x00000100;
- rootIndexSizeKB_ = input.readInt32();
- break;
- }
- case 80: {
- bitField0_ |= 0x00000200;
- totalStaticIndexSizeKB_ = input.readInt32();
- break;
- }
- case 88: {
- bitField0_ |= 0x00000400;
- totalStaticBloomSizeKB_ = input.readInt32();
- break;
- }
- case 96: {
- bitField0_ |= 0x00000800;
- totalCompactingKVs_ = input.readInt64();
- break;
- }
- case 104: {
- bitField0_ |= 0x00001000;
- currentCompactedKVs_ = input.readInt64();
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.class, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder.class);
- }
-
- public static com.google.protobuf.Parser<Region> PARSER =
- new com.google.protobuf.AbstractParser<Region>() {
- public Region parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new Region(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<Region> getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- // required bytes name = 1;
- public static final int NAME_FIELD_NUMBER = 1;
- private com.google.protobuf.ByteString name_;
- /**
- * <code>required bytes name = 1;</code>
- */
- public boolean hasName() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>required bytes name = 1;</code>
- */
- public com.google.protobuf.ByteString getName() {
- return name_;
- }
-
- // optional int32 stores = 2;
- public static final int STORES_FIELD_NUMBER = 2;
- private int stores_;
- /**
- * <code>optional int32 stores = 2;</code>
- */
- public boolean hasStores() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional int32 stores = 2;</code>
- */
- public int getStores() {
- return stores_;
- }
-
- // optional int32 storefiles = 3;
- public static final int STOREFILES_FIELD_NUMBER = 3;
- private int storefiles_;
- /**
- * <code>optional int32 storefiles = 3;</code>
- */
- public boolean hasStorefiles() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
- }
- /**
- * <code>optional int32 storefiles = 3;</code>
- */
- public int getStorefiles() {
- return storefiles_;
- }
-
- // optional int32 storefileSizeMB = 4;
- public static final int STOREFILESIZEMB_FIELD_NUMBER = 4;
- private int storefileSizeMB_;
- /**
- * <code>optional int32 storefileSizeMB = 4;</code>
- */
- public boolean hasStorefileSizeMB() {
- return ((bitField0_ & 0x00000008) == 0x00000008);
- }
- /**
- * <code>optional int32 storefileSizeMB = 4;</code>
- */
- public int getStorefileSizeMB() {
- return storefileSizeMB_;
- }
-
- // optional int32 memstoreSizeMB = 5;
- public static final int MEMSTORESIZEMB_FIELD_NUMBER = 5;
- private int memstoreSizeMB_;
- /**
- * <code>optional int32 memstoreSizeMB = 5;</code>
- */
- public boolean hasMemstoreSizeMB() {
- return ((bitField0_ & 0x00000010) == 0x00000010);
- }
- /**
- * <code>optional int32 memstoreSizeMB = 5;</code>
- */
- public int getMemstoreSizeMB() {
- return memstoreSizeMB_;
- }
-
- // optional int32 storefileIndexSizeMB = 6;
- public static final int STOREFILEINDEXSIZEMB_FIELD_NUMBER = 6;
- private int storefileIndexSizeMB_;
- /**
- * <code>optional int32 storefileIndexSizeMB = 6;</code>
- */
- public boolean hasStorefileIndexSizeMB() {
- return ((bitField0_ & 0x00000020) == 0x00000020);
- }
- /**
- * <code>optional int32 storefileIndexSizeMB = 6;</code>
- */
- public int getStorefileIndexSizeMB() {
- return storefileIndexSizeMB_;
- }
-
- // optional int64 readRequestsCount = 7;
- public static final int READREQUESTSCOUNT_FIELD_NUMBER = 7;
- private long readRequestsCount_;
- /**
- * <code>optional int64 readRequestsCount = 7;</code>
- */
- public boolean hasReadRequestsCount() {
- return ((bitField0_ & 0x00000040) == 0x00000040);
- }
- /**
- * <code>optional int64 readRequestsCount = 7;</code>
- */
- public long getReadRequestsCount() {
- return readRequestsCount_;
- }
-
- // optional int64 writeRequestsCount = 8;
- public static final int WRITEREQUESTSCOUNT_FIELD_NUMBER = 8;
- private long writeRequestsCount_;
- /**
- * <code>optional int64 writeRequestsCount = 8;</code>
- */
- public boolean hasWriteRequestsCount() {
- return ((bitField0_ & 0x00000080) == 0x00000080);
- }
- /**
- * <code>optional int64 writeRequestsCount = 8;</code>
- */
- public long getWriteRequestsCount() {
- return writeRequestsCount_;
- }
-
- // optional int32 rootIndexSizeKB = 9;
- public static final int ROOTINDEXSIZEKB_FIELD_NUMBER = 9;
- private int rootIndexSizeKB_;
- /**
- * <code>optional int32 rootIndexSizeKB = 9;</code>
- */
- public boolean hasRootIndexSizeKB() {
- return ((bitField0_ & 0x00000100) == 0x00000100);
- }
- /**
- * <code>optional int32 rootIndexSizeKB = 9;</code>
- */
- public int getRootIndexSizeKB() {
- return rootIndexSizeKB_;
- }
-
- // optional int32 totalStaticIndexSizeKB = 10;
- public static final int TOTALSTATICINDEXSIZEKB_FIELD_NUMBER = 10;
- private int totalStaticIndexSizeKB_;
- /**
- * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
- */
- public boolean hasTotalStaticIndexSizeKB() {
- return ((bitField0_ & 0x00000200) == 0x00000200);
- }
- /**
- * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
- */
- public int getTotalStaticIndexSizeKB() {
- return totalStaticIndexSizeKB_;
- }
-
- // optional int32 totalStaticBloomSizeKB = 11;
- public static final int TOTALSTATICBLOOMSIZEKB_FIELD_NUMBER = 11;
- private int totalStaticBloomSizeKB_;
- /**
- * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
- */
- public boolean hasTotalStaticBloomSizeKB() {
- return ((bitField0_ & 0x00000400) == 0x00000400);
- }
- /**
- * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
- */
- public int getTotalStaticBloomSizeKB() {
- return totalStaticBloomSizeKB_;
- }
-
- // optional int64 totalCompactingKVs = 12;
- public static final int TOTALCOMPACTINGKVS_FIELD_NUMBER = 12;
- private long totalCompactingKVs_;
- /**
- * <code>optional int64 totalCompactingKVs = 12;</code>
- */
- public boolean hasTotalCompactingKVs() {
- return ((bitField0_ & 0x00000800) == 0x00000800);
- }
- /**
- * <code>optional int64 totalCompactingKVs = 12;</code>
- */
- public long getTotalCompactingKVs() {
- return totalCompactingKVs_;
- }
-
- // optional int64 currentCompactedKVs = 13;
- public static final int CURRENTCOMPACTEDKVS_FIELD_NUMBER = 13;
- private long currentCompactedKVs_;
- /**
- * <code>optional int64 currentCompactedKVs = 13;</code>
- */
- public boolean hasCurrentCompactedKVs() {
- return ((bitField0_ & 0x00001000) == 0x00001000);
- }
- /**
- * <code>optional int64 currentCompactedKVs = 13;</code>
- */
- public long getCurrentCompactedKVs() {
- return currentCompactedKVs_;
- }
-
- private void initFields() {
- name_ = com.google.protobuf.ByteString.EMPTY;
- stores_ = 0;
- storefiles_ = 0;
- storefileSizeMB_ = 0;
- memstoreSizeMB_ = 0;
- storefileIndexSizeMB_ = 0;
- readRequestsCount_ = 0L;
- writeRequestsCount_ = 0L;
- rootIndexSizeKB_ = 0;
- totalStaticIndexSizeKB_ = 0;
- totalStaticBloomSizeKB_ = 0;
- totalCompactingKVs_ = 0L;
- currentCompactedKVs_ = 0L;
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized != -1) return isInitialized == 1;
-
- if (!hasName()) {
- memoizedIsInitialized = 0;
- return false;
- }
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBytes(1, name_);
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeInt32(2, stores_);
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- output.writeInt32(3, storefiles_);
- }
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
- output.writeInt32(4, storefileSizeMB_);
- }
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
- output.writeInt32(5, memstoreSizeMB_);
- }
- if (((bitField0_ & 0x00000020) == 0x00000020)) {
- output.writeInt32(6, storefileIndexSizeMB_);
- }
- if (((bitField0_ & 0x00000040) == 0x00000040)) {
- output.writeInt64(7, readRequestsCount_);
- }
- if (((bitField0_ & 0x00000080) == 0x00000080)) {
- output.writeInt64(8, writeRequestsCount_);
- }
- if (((bitField0_ & 0x00000100) == 0x00000100)) {
- output.writeInt32(9, rootIndexSizeKB_);
- }
- if (((bitField0_ & 0x00000200) == 0x00000200)) {
- output.writeInt32(10, totalStaticIndexSizeKB_);
- }
- if (((bitField0_ & 0x00000400) == 0x00000400)) {
- output.writeInt32(11, totalStaticBloomSizeKB_);
- }
- if (((bitField0_ & 0x00000800) == 0x00000800)) {
- output.writeInt64(12, totalCompactingKVs_);
- }
- if (((bitField0_ & 0x00001000) == 0x00001000)) {
- output.writeInt64(13, currentCompactedKVs_);
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(1, name_);
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt32Size(2, stores_);
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt32Size(3, storefiles_);
- }
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt32Size(4, storefileSizeMB_);
- }
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt32Size(5, memstoreSizeMB_);
- }
- if (((bitField0_ & 0x00000020) == 0x00000020)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt32Size(6, storefileIndexSizeMB_);
- }
- if (((bitField0_ & 0x00000040) == 0x00000040)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt64Size(7, readRequestsCount_);
- }
- if (((bitField0_ & 0x00000080) == 0x00000080)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt64Size(8, writeRequestsCount_);
- }
- if (((bitField0_ & 0x00000100) == 0x00000100)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt32Size(9, rootIndexSizeKB_);
- }
- if (((bitField0_ & 0x00000200) == 0x00000200)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt32Size(10, totalStaticIndexSizeKB_);
- }
- if (((bitField0_ & 0x00000400) == 0x00000400)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt32Size(11, totalStaticBloomSizeKB_);
- }
- if (((bitField0_ & 0x00000800) == 0x00000800)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt64Size(12, totalCompactingKVs_);
- }
- if (((bitField0_ & 0x00001000) == 0x00001000)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt64Size(13, currentCompactedKVs_);
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.class, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder.class);
- }
-
- // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- name_ = com.google.protobuf.ByteString.EMPTY;
- bitField0_ = (bitField0_ & ~0x00000001);
- stores_ = 0;
- bitField0_ = (bitField0_ & ~0x00000002);
- storefiles_ = 0;
- bitField0_ = (bitField0_ & ~0x00000004);
- storefileSizeMB_ = 0;
- bitField0_ = (bitField0_ & ~0x00000008);
- memstoreSizeMB_ = 0;
- bitField0_ = (bitField0_ & ~0x00000010);
- storefileIndexSizeMB_ = 0;
- bitField0_ = (bitField0_ & ~0x00000020);
- readRequestsCount_ = 0L;
- bitField0_ = (bitField0_ & ~0x00000040);
- writeRequestsCount_ = 0L;
- bitField0_ = (bitField0_ & ~0x00000080);
- rootIndexSizeKB_ = 0;
- bitField0_ = (bitField0_ & ~0x00000100);
- totalStaticIndexSizeKB_ = 0;
- bitField0_ = (bitField0_ & ~0x00000200);
- totalStaticBloomSizeKB_ = 0;
- bitField0_ = (bitField0_ & ~0x00000400);
- totalCompactingKVs_ = 0L;
- bitField0_ = (bitField0_ & ~0x00000800);
- currentCompactedKVs_ = 0L;
- bitField0_ = (bitField0_ & ~0x00001000);
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getDefaultInstanceForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDefaultInstance();
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region build() {
- org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region buildPartial() {
- org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region result = new org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.name_ = name_;
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
- to_bitField0_ |= 0x00000002;
- }
- result.stores_ = stores_;
- if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
- to_bitField0_ |= 0x00000004;
- }
- result.storefiles_ = storefiles_;
- if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
- to_bitField0_ |= 0x00000008;
- }
- result.storefileSizeMB_ = storefileSizeMB_;
- if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
- to_bitField0_ |= 0x00000010;
- }
- result.memstoreSizeMB_ = memstoreSizeMB_;
- if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
- to_bitField0_ |= 0x00000020;
- }
- result.storefileIndexSizeMB_ = storefileIndexSizeMB_;
- if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
- to_bitField0_ |= 0x00000040;
- }
- result.readRequestsCount_ = readRequestsCount_;
- if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
- to_bitField0_ |= 0x00000080;
- }
- result.writeRequestsCount_ = writeRequestsCount_;
- if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
- to_bitField0_ |= 0x00000100;
- }
- result.rootIndexSizeKB_ = rootIndexSizeKB_;
- if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
- to_bitField0_ |= 0x00000200;
- }
- result.totalStaticIndexSizeKB_ = totalStaticIndexSizeKB_;
- if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
- to_bitField0_ |= 0x00000400;
- }
- result.totalStaticBloomSizeKB_ = totalStaticBloomSizeKB_;
- if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
- to_bitField0_ |= 0x00000800;
- }
- result.totalCompactingKVs_ = totalCompactingKVs_;
- if (((from_bitField0_ & 0x00001000) == 0x00001000)) {
- to_bitField0_ |= 0x00001000;
- }
- result.currentCompactedKVs_ = currentCompactedKVs_;
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region) {
- return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region other) {
- if (other == org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDefaultInstance()) return this;
- if (other.hasName()) {
- setName(other.getName());
- }
- if (other.hasStores()) {
- setStores(other.getStores());
- }
- if (other.hasStorefiles()) {
- setStorefiles(other.getStorefiles());
- }
- if (other.hasStorefileSizeMB()) {
- setStorefileSizeMB(other.getStorefileSizeMB());
- }
- if (other.hasMemstoreSizeMB()) {
- setMemstoreSizeMB(other.getMemstoreSizeMB());
- }
- if (other.hasStorefileIndexSizeMB()) {
- setStorefileIndexSizeMB(other.getStorefileIndexSizeMB());
- }
- if (other.hasReadRequestsCount()) {
- setReadRequestsCount(other.getReadRequestsCount());
- }
- if (other.hasWriteRequestsCount()) {
- setWriteRequestsCount(other.getWriteRequestsCount());
- }
- if (other.hasRootIndexSizeKB()) {
- setRootIndexSizeKB(other.getRootIndexSizeKB());
- }
- if (other.hasTotalStaticIndexSizeKB()) {
- setTotalStaticIndexSizeKB(other.getTotalStaticIndexSizeKB());
- }
- if (other.hasTotalStaticBloomSizeKB()) {
- setTotalStaticBloomSizeKB(other.getTotalStaticBloomSizeKB());
- }
- if (other.hasTotalCompactingKVs()) {
- setTotalCompactingKVs(other.getTotalCompactingKVs());
- }
- if (other.hasCurrentCompactedKVs()) {
- setCurrentCompactedKVs(other.getCurrentCompactedKVs());
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- if (!hasName()) {
-
- return false;
- }
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- // required bytes name = 1;
- private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY;
- /**
- * <code>required bytes name = 1;</code>
- */
- public boolean hasName() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>required bytes name = 1;</code>
- */
- public com.google.protobuf.ByteString getName() {
- return name_;
- }
- /**
- * <code>required bytes name = 1;</code>
- */
- public Builder setName(com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- name_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>required bytes name = 1;</code>
- */
- public Builder clearName() {
- bitField0_ = (bitField0_ & ~0x00000001);
- name_ = getDefaultInstance().getName();
- onChanged();
- return this;
- }
-
- // optional int32 stores = 2;
- private int stores_ ;
- /**
- * <code>optional int32 stores = 2;</code>
- */
- public boolean hasStores() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional int32 stores = 2;</code>
- */
- public int getStores() {
- return stores_;
- }
- /**
- * <code>optional int32 stores = 2;</code>
- */
- public Builder setStores(int value) {
- bitField0_ |= 0x00000002;
- stores_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional int32 stores = 2;</code>
- */
- public Builder clearStores() {
- bitField0_ = (bitField0_ & ~0x00000002);
- stores_ = 0;
- onChanged();
- return this;
- }
-
- // optional int32 storefiles = 3;
- private int storefiles_ ;
- /**
- * <code>optional int32 storefiles = 3;</code>
- */
- public boolean hasStorefiles() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
- }
- /**
- * <code>optional int32 storefiles = 3;</code>
- */
- public int getStorefiles() {
- return storefiles_;
- }
- /**
- * <code>optional int32 storefiles = 3;</code>
- */
- public Builder setStorefiles(int value) {
- bitField0_ |= 0x00000004;
- storefiles_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional int32 storefiles = 3;</code>
- */
- public Builder clearStorefiles() {
- bitField0_ = (bitField0_ & ~0x00000004);
- storefiles_ = 0;
- onChanged();
- return this;
- }
-
- // optional int32 storefileSizeMB = 4;
- private int storefileSizeMB_ ;
- /**
- * <code>optional int32 storefileSizeMB = 4;</code>
- */
- public boolean hasStorefileSizeMB() {
- return ((bitField0_ & 0x00000008) == 0x00000008);
- }
- /**
- * <code>optional int32 storefileSizeMB = 4;</code>
- */
- public int getStorefileSizeMB() {
- return storefileSizeMB_;
- }
- /**
- * <code>optional int32 storefileSizeMB = 4;</code>
- */
- public Builder setStorefileSizeMB(int value) {
- bitField0_ |= 0x00000008;
- storefileSizeMB_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional int32 storefileSizeMB = 4;</code>
- */
- public Builder clearStorefileSizeMB() {
- bitField0_ = (bitField0_ & ~0x00000008);
- storefileSizeMB_ = 0;
- onChanged();
- return this;
- }
-
- // optional int32 memstoreSizeMB = 5;
- private int memstoreSizeMB_ ;
- /**
- * <code>optional int32 memstoreSizeMB = 5;</code>
- */
- public boolean hasMemstoreSizeMB() {
- return ((bitField0_ & 0x00000010) == 0x00000010);
- }
- /**
- * <code>optional int32 memstoreSizeMB = 5;</code>
- */
- public int getMemstoreSizeMB() {
- return memstoreSizeMB_;
- }
- /**
- * <code>optional int32 memstoreSizeMB = 5;</code>
- */
- public Builder setMemstoreSizeMB(int value) {
- bitField0_ |= 0x00000010;
- memstoreSizeMB_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional int32 memstoreSizeMB = 5;</code>
- */
- public Builder clearMemstoreSizeMB() {
- bitField0_ = (bitField0_ & ~0x00000010);
- memstoreSizeMB_ = 0;
- onChanged();
- return this;
- }
-
- // optional int32 storefileIndexSizeMB = 6;
- private int storefileIndexSizeMB_ ;
- /**
- * <code>optional int32 storefileIndexSizeMB = 6;</code>
- */
- public boolean hasStorefileIndexSizeMB() {
- return ((bitField0_ & 0x00000020) == 0x00000020);
- }
- /**
- * <code>optional int32 storefileIndexSizeMB = 6;</code>
- */
- public int getStorefileIndexSizeMB() {
- return storefileIndexSizeMB_;
- }
- /**
- * <code>optional int32 storefileIndexSizeMB = 6;</code>
- */
- public Builder setStorefileIndexSizeMB(int value) {
- bitField0_ |= 0x00000020;
- storefileIndexSizeMB_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional int32 storefileIndexSizeMB = 6;</code>
- */
- public Builder clearStorefileIndexSizeMB() {
- bitField0_ = (bitField0_ & ~0x00000020);
- storefileIndexSizeMB_ = 0;
- onChanged();
- return this;
- }
-
- // optional int64 readRequestsCount = 7;
- private long readRequestsCount_ ;
- /**
- * <code>optional int64 readRequestsCount = 7;</code>
- */
- public boolean hasReadRequestsCount() {
- return ((bitField0_ & 0x00000040) == 0x00000040);
- }
- /**
- * <code>optional int64 readRequestsCount = 7;</code>
- */
- public long getReadRequestsCount() {
- return readRequestsCount_;
- }
- /**
- * <code>optional int64 readRequestsCount = 7;</code>
- */
- public Builder setReadRequestsCount(long value) {
- bitField0_ |= 0x00000040;
- readRequestsCount_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional int64 readRequestsCount = 7;</code>
- */
- public Builder clearReadRequestsCount() {
- bitField0_ = (bitField0_ & ~0x00000040);
- readRequestsCount_ = 0L;
- onChanged();
- return this;
- }
-
- // optional int64 writeRequestsCount = 8;
- private long writeRequestsCount_ ;
- /**
- * <code>optional int64 writeRequestsCount = 8;</code>
- */
- public boolean hasWriteRequestsCount() {
- return ((bitField0_ & 0x00000080) == 0x00000080);
- }
- /**
- * <code>optional int64 writeRequestsCount = 8;</code>
- */
- public long getWriteRequestsCount() {
- return writeRequestsCount_;
- }
- /**
- * <code>optional int64 writeRequestsCount = 8;</code>
- */
- public Builder setWriteRequestsCount(long value) {
- bitField0_ |= 0x00000080;
- writeRequestsCount_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional int64 writeRequestsCount = 8;</code>
- */
- public Builder clearWriteRequestsCount() {
- bitField0_ = (bitField0_ & ~0x00000080);
- writeRequestsCount_ = 0L;
- onChanged();
- return this;
- }
-
- // optional int32 rootIndexSizeKB = 9;
- private int rootIndexSizeKB_ ;
- /**
- * <code>optional int32 rootIndexSizeKB = 9;</code>
- */
- public boolean hasRootIndexSizeKB() {
- return ((bitField0_ & 0x00000100) == 0x00000100);
- }
- /**
- * <code>optional int32 rootIndexSizeKB = 9;</code>
- */
- public int getRootIndexSizeKB() {
- return rootIndexSizeKB_;
- }
- /**
- * <code>optional int32 rootIndexSizeKB = 9;</code>
- */
- public Builder setRootIndexSizeKB(int value) {
- bitField0_ |= 0x00000100;
- rootIndexSizeKB_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional int32 rootIndexSizeKB = 9;</code>
- */
- public Builder clearRootIndexSizeKB() {
- bitField0_ = (bitField0_ & ~0x00000100);
- rootIndexSizeKB_ = 0;
- onChanged();
- return this;
- }
-
- // optional int32 totalStaticIndexSizeKB = 10;
- private int totalStaticIndexSizeKB_ ;
- /**
- * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
- */
- public boolean hasTotalStaticIndexSizeKB() {
- return ((bitField0_ & 0x00000200) == 0x00000200);
- }
- /**
- * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
- */
- public int getTotalStaticIndexSizeKB() {
- return totalStaticIndexSizeKB_;
- }
- /**
- * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
- */
- public Builder setTotalStaticIndexSizeKB(int value) {
- bitField0_ |= 0x00000200;
- totalStaticIndexSizeKB_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
- */
- public Builder clearTotalStaticIndexSizeKB() {
- bitField0_ = (bitField0_ & ~0x00000200);
- totalStaticIndexSizeKB_ = 0;
- onChanged();
- return this;
- }
-
- // optional int32 totalStaticBloomSizeKB = 11;
- private int totalStaticBloomSizeKB_ ;
- /**
- * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
- */
- public boolean hasTotalStaticBloomSizeKB() {
- return ((bitField0_ & 0x00000400) == 0x00000400);
- }
- /**
- * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
- */
- public int getTotalStaticBloomSizeKB() {
- return totalStaticBloomSizeKB_;
- }
- /**
- * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
- */
- public Builder setTotalStaticBloomSizeKB(int value) {
- bitField0_ |= 0x00000400;
- totalStaticBloomSizeKB_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
- */
- public Builder clearTotalStaticBloomSizeKB() {
- bitField0_ = (bitField0_ & ~0x00000400);
- totalStaticBloomSizeKB_ = 0;
- onChanged();
- return this;
- }
-
- // optional int64 totalCompactingKVs = 12;
- private long totalCompactingKVs_ ;
- /**
- * <code>optional int64 totalCompactingKVs = 12;</code>
- */
- public boolean hasTotalCompactingKVs() {
- return ((bitField0_ & 0x00000800) == 0x00000800);
- }
- /**
- * <code>optional int64 totalCompactingKVs = 12;</code>
- */
- public long getTotalCompactingKVs() {
- return totalCompactingKVs_;
- }
- /**
- * <code>optional int64 totalCompactingKVs = 12;</code>
- */
- public Builder setTotalCompactingKVs(long value) {
- bitField0_ |= 0x00000800;
- totalCompactingKVs_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional int64 totalCompactingKVs = 12;</code>
- */
- public Builder clearTotalCompactingKVs() {
- bitField0_ = (bitField0_ & ~0x00000800);
- totalCompactingKVs_ = 0L;
- onChanged();
- return this;
- }
-
- // optional int64 currentCompactedKVs = 13;
- private long currentCompactedKVs_ ;
- /**
- * <code>optional int64 currentCompactedKVs = 13;</code>
- */
- public boolean hasCurrentCompactedKVs() {
- return ((bitField0_ & 0x00001000) == 0x00001000);
- }
- /**
- * <code>optional int64 currentCompactedKVs = 13;</code>
- */
- public long getCurrentCompactedKVs() {
- return currentCompactedKVs_;
- }
- /**
- * <code>optional int64 currentCompactedKVs = 13;</code>
- */
- public Builder setCurrentCompactedKVs(long value) {
- bitField0_ |= 0x00001000;
- currentCompactedKVs_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional int64 currentCompactedKVs = 13;</code>
- */
- public Builder clearCurrentCompactedKVs() {
- bitField0_ = (bitField0_ & ~0x00001000);
- currentCompactedKVs_ = 0L;
- onChanged();
- return this;
- }
-
- // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region)
- }
-
- static {
- defaultInstance = new Region(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region)
- }
-
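For orientation, the schema behind this generated class can be read straight off the field accessors and their javadoc above; the descriptor name (StorageClusterStatus_Region_descriptor) indicates Region is nested inside the StorageClusterStatus message. A reconstructed sketch of the relevant part of StorageClusterStatusMessage.proto (field names, numbers, labels, and types are taken from the generated code; only the layout is assumed):

  message Region {
    required bytes name = 1;
    optional int32 stores = 2;
    optional int32 storefiles = 3;
    optional int32 storefileSizeMB = 4;
    optional int32 memstoreSizeMB = 5;
    optional int32 storefileIndexSizeMB = 6;
    optional int64 readRequestsCount = 7;
    optional int64 writeRequestsCount = 8;
    optional int32 rootIndexSizeKB = 9;
    optional int32 totalStaticIndexSizeKB = 10;
    optional int32 totalStaticBloomSizeKB = 11;
    optional int64 totalCompactingKVs = 12;
    optional int64 currentCompactedKVs = 13;
  }

Since name is the only required field, isInitialized() above only has to check hasName().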
- public interface NodeOrBuilder
- extends com.google.protobuf.MessageOrBuilder {
-
- // required string name = 1;
- /**
- * <code>required string name = 1;</code>
- *
- * <pre>
- * name:port
- * </pre>
- */
- boolean hasName();
- /**
- * <code>required string name = 1;</code>
- *
- * <pre>
- * name:port
- * </pre>
- */
- java.lang.String getName();
- /**
- * <code>required string name = 1;</code>
- *
- * <pre>
- * name:port
- * </pre>
- */
- com.google.protobuf.ByteString
- getNameBytes();
-
- // optional int64 startCode = 2;
- /**
- * <code>optional int64 startCode = 2;</code>
- */
- boolean hasStartCode();
- /**
- * <code>optional int64 startCode = 2;</code>
- */
- long getStartCode();
-
- // optional int32 requests = 3;
- /**
- * <code>optional int32 requests = 3;</code>
- */
- boolean hasRequests();
- /**
- * <code>optional int32 requests = 3;</code>
- */
- int getRequests();
-
- // optional int32 heapSizeMB = 4;
- /**
- * <code>optional int32 heapSizeMB = 4;</code>
- */
- boolean hasHeapSizeMB();
- /**
- * <code>optional int32 heapSizeMB = 4;</code>
- */
- int getHeapSizeMB();
-
- // optional int32 maxHeapSizeMB = 5;
- /**
- * <code>optional int32 maxHeapSizeMB = 5;</code>
- */
- boolean hasMaxHeapSizeMB();
- /**
- * <code>optional int32 maxHeapSizeMB = 5;</code>
- */
- int getMaxHeapSizeMB();
-
- // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
- */
- java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region>
- getRegionsList();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
- */
- org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getRegions(int index);
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
- */
- int getRegionsCount();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
- */
- java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder>
- getRegionsOrBuilderList();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
- */
- org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder getRegionsOrBuilder(
- int index);
- }
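Likewise, the Node message backing this interface, reconstructed from the accessor signatures and comments above (proto layout assumed, field facts taken from the code):

  message Node {
    required string name = 1;  // name:port
    optional int64 startCode = 2;
    optional int32 requests = 3;
    optional int32 heapSizeMB = 4;
    optional int32 maxHeapSizeMB = 5;
    repeated Region regions = 6;
  }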
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node}
- */
- public static final class Node extends
- com.google.protobuf.GeneratedMessage
- implements NodeOrBuilder {
- // Use Node.newBuilder() to construct.
- private Node(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private Node(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final Node defaultInstance;
- public static Node getDefaultInstance() {
- return defaultInstance;
- }
-
- public Node getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private Node(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 10: {
- bitField0_ |= 0x00000001;
- name_ = input.readBytes();
- break;
- }
- case 16: {
- bitField0_ |= 0x00000002;
- startCode_ = input.readInt64();
- break;
- }
- case 24: {
- bitField0_ |= 0x00000004;
- requests_ = input.readInt32();
- break;
- }
- case 32: {
- bitField0_ |= 0x00000008;
- heapSizeMB_ = input.readInt32();
- break;
- }
- case 40: {
- bitField0_ |= 0x00000010;
- maxHeapSizeMB_ = input.readInt32();
- break;
- }
- case 50: {
- if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
- regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region>();
- mutable_bitField0_ |= 0x00000020;
- }
- regions_.add(input.readMessage(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.PARSER, extensionRegistry));
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
- regions_ = java.util.Collections.unmodifiableList(regions_);
- }
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.class, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder.class);
- }
-
- public static com.google.protobuf.Parser<Node> PARSER =
- new com.google.protobuf.AbstractParser<Node>() {
- public Node parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new Node(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<Node> getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- // required string name = 1;
- public static final int NAME_FIELD_NUMBER = 1;
- private java.lang.Object name_;
- /**
- * <code>required string name = 1;</code>
- *
- * <pre>
- * name:port
- * </pre>
- */
- public boolean hasName() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>required string name = 1;</code>
- *
- * <pre>
- * name:port
- * </pre>
- */
- public java.lang.String getName() {
- java.lang.Object ref = name_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- name_ = s;
- }
- return s;
- }
- }
- /**
- * <code>required string name = 1;</code>
- *
- * <pre>
- * name:port
- * </pre>
- */
- public com.google.protobuf.ByteString
- getNameBytes() {
- java.lang.Object ref = name_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- name_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- // optional int64 startCode = 2;
- public static final int STARTCODE_FIELD_NUMBER = 2;
- private long startCode_;
- /**
- * <code>optional int64 startCode = 2;</code>
- */
- public boolean hasStartCode() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional int64 startCode = 2;</code>
- */
- public long getStartCode() {
- return startCode_;
- }
-
- // optional int32 requests = 3;
- public static final int REQUESTS_FIELD_NUMBER = 3;
- private int requests_;
- /**
- * <code>optional int32 requests = 3;</code>
- */
- public boolean hasRequests() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
- }
- /**
- * <code>optional int32 requests = 3;</code>
- */
- public int getRequests() {
- return requests_;
- }
-
- // optional int32 heapSizeMB = 4;
- public static final int HEAPSIZEMB_FIELD_NUMBER = 4;
- private int heapSizeMB_;
- /**
- * <code>optional int32 heapSizeMB = 4;</code>
- */
- public boolean hasHeapSizeMB() {
- return ((bitField0_ & 0x00000008) == 0x00000008);
- }
- /**
- * <code>optional int32 heapSizeMB = 4;</code>
- */
- public int getHeapSizeMB() {
- return heapSizeMB_;
- }
-
- // optional int32 maxHeapSizeMB = 5;
- public static final int MAXHEAPSIZEMB_FIELD_NUMBER = 5;
- private int maxHeapSizeMB_;
- /**
- * <code>optional int32 maxHeapSizeMB = 5;</code>
- */
- public boolean hasMaxHeapSizeMB() {
- return ((bitField0_ & 0x00000010) == 0x00000010);
- }
- /**
- * <code>optional int32 maxHeapSizeMB = 5;</code>
- */
- public int getMaxHeapSizeMB() {
- return maxHeapSizeMB_;
- }
-
- // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
- public static final int REGIONS_FIELD_NUMBER = 6;
- private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> regions_;
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
- */
- public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> getRegionsList() {
- return regions_;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
- */
- public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder>
- getRegionsOrBuilderList() {
- return regions_;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
- */
- public int getRegionsCount() {
- return regions_.size();
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getRegions(int index) {
- return regions_.get(index);
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder getRegionsOrBuilder(
- int index) {
- return regions_.get(index);
- }
-
- private void initFields() {
- name_ = "";
- startCode_ = 0L;
- requests_ = 0;
- heapSizeMB_ = 0;
- maxHeapSizeMB_ = 0;
- regions_ = java.util.Collections.emptyList();
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized != -1) return isInitialized == 1;
-
- if (!hasName()) {
- memoizedIsInitialized = 0;
- return false;
- }
- for (int i = 0; i < getRegionsCount(); i++) {
- if (!getRegions(i).isInitialized()) {
- memoizedIsInitialized = 0;
- return false;
- }
- }
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBytes(1, getNameBytes());
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeInt64(2, startCode_);
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- output.writeInt32(3, requests_);
- }
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
- output.writeInt32(4, heapSizeMB_);
- }
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
- output.writeInt32(5, maxHeapSizeMB_);
- }
- for (int i = 0; i < regions_.size(); i++) {
- output.writeMessage(6, regions_.get(i));
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(1, getNameBytes());
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt64Size(2, startCode_);
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt32Size(3, requests_);
- }
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt32Size(4, heapSizeMB_);
- }
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt32Size(5, maxHeapSizeMB_);
- }
- for (int i = 0; i < regions_.size(); i++) {
- size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(6, regions_.get(i));
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.class, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder.class);
- }
-
- // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- getRegionsFieldBuilder();
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- name_ = "";
- bitField0_ = (bitField0_ & ~0x00000001);
- startCode_ = 0L;
- bitField0_ = (bitField0_ & ~0x00000002);
- requests_ = 0;
- bitField0_ = (bitField0_ & ~0x00000004);
- heapSizeMB_ = 0;
- bitField0_ = (bitField0_ & ~0x00000008);
- maxHeapSizeMB_ = 0;
- bitField0_ = (bitField0_ & ~0x00000010);
- if (regionsBuilder_ == null) {
- regions_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000020);
- } else {
- regionsBuilder_.clear();
- }
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node getDefaultInstanceForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDefaultInstance();
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node build() {
- org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node buildPartial() {
- org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node result = new org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.name_ = name_;
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
- to_bitField0_ |= 0x00000002;
- }
- result.startCode_ = startCode_;
- if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
- to_bitField0_ |= 0x00000004;
- }
- result.requests_ = requests_;
- if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
- to_bitField0_ |= 0x00000008;
- }
- result.heapSizeMB_ = heapSizeMB_;
- if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
- to_bitField0_ |= 0x00000010;
- }
- result.maxHeapSizeMB_ = maxHeapSizeMB_;
- if (regionsBuilder_ == null) {
- if (((bitField0_ & 0x00000020) == 0x00000020)) {
- regions_ = java.util.Collections.unmodifiableList(regions_);
- bitField0_ = (bitField0_ & ~0x00000020);
- }
- result.regions_ = regions_;
- } else {
- result.regions_ = regionsBuilder_.build();
- }
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node) {
- return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node other) {
- if (other == org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDefaultInstance()) return this;
- if (other.hasName()) {
- bitField0_ |= 0x00000001;
- name_ = other.name_;
- onChanged();
- }
- if (other.hasStartCode()) {
- setStartCode(other.getStartCode());
- }
- if (other.hasRequests()) {
- setRequests(other.getRequests());
- }
- if (other.hasHeapSizeMB()) {
- setHeapSizeMB(other.getHeapSizeMB());
- }
- if (other.hasMaxHeapSizeMB()) {
- setMaxHeapSizeMB(other.getMaxHeapSizeMB());
- }
- if (regionsBuilder_ == null) {
- if (!other.regions_.isEmpty()) {
- if (regions_.isEmpty()) {
- regions_ = other.regions_;
- bitField0_ = (bitField0_ & ~0x00000020);
- } else {
- ensureRegionsIsMutable();
- regions_.addAll(other.regions_);
- }
- onChanged();
- }
- } else {
- if (!other.regions_.isEmpty()) {
- if (regionsBuilder_.isEmpty()) {
- regionsBuilder_.dispose();
- regionsBuilder_ = null;
- regions_ = other.regions_;
- bitField0_ = (bitField0_ & ~0x00000
<TRUNCATED>
[33/38] HBASE-12197 Move rest to it's on module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/package.html
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/package.html b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/package.html
new file mode 100644
index 0000000..c21e129
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/package.html
@@ -0,0 +1,1660 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
+<html>
+
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<head />
+<body bgcolor="white">
+<h1>HBase REST</h1>
+This package provides a RESTful Web service front end for HBase.
+<p>
+
+<h2>Table Of Contents</h2>
+<ol>
+<li><a href="#deployment">Deployment</a></li>
+ <ol type="a">
+ <li><a href="#deployment_daemon">Daemon</a></li>
+ <li><a href="#deployment_servlet">Servlet</a></li>
+ </ol>
+<li><a href="#rest">Representational State Transfer</a></li>
+<li><a href="#identifiers">Resource Identifiers</a></li>
+<li><a href="#operations">Operations</a></li>
+ <ol type="a">
+ <li><a href="#operation_version">Query Software Version</a></li>
+ <li><a href="#operation_storage_cluster_version">Query Storage Cluster Version</a></li>
+ <li><a href="#operation_storage_cluster_status">Query Storage Cluster Status</a></li>
+ <li><a href="#operation_query_tables">Query Table List</a></li>
+ <li><a href="#operation_query_schema">Query Table Schema</a></li>
+ <li><a href="#operation_create_schema">Create Table Or Update Table Schema</a></li>
+ <li><a href="#operation_table_metadata">Query Table Metadata</a></li>
+ <li><a href="#operation_delete_table">Delete Table</a></li>
+ <li><a href="#operation_cell_query_single">Cell Query (Single Value)</a></li>
+ <li><a href="#operation_cell_query_multiple">Cell or Row Query (Multiple Values)</a></li>
+ <li><a href="#operation_cell_store_single">Cell Store (Single)</a></li>
+ <li><a href="#operation_cell_store_multiple">Cell Store (Multiple)</a></li>
+ <li><a href="#operation_delete">Row, Column, or Cell Delete</a></li>
+ <li><a href="#operation_scanner_create">Scanner Creation</a></li>
+ <li><a href="#operation_scanner_next">Scanner Get Next</a></li>
+ <li><a href="#operation_scanner_delete">Scanner Deletion</a></li>
+ <li><a href="#operation_stateless_scanner">Stateless scanner</a></li>
+ </ol>
+ <li><a href="#xmlschema">XML Schema</a></li>
+ <li><a href="#pbufschema">Protobufs Schema</a></li>
+</ol>
+
+<p>
+<a name="deployment">
+<h2>Deployment</h2>
+</a>
+<p>
+
+<p>
+<a name="deployment_daemon">
+<h3>Daemon</h3>
+</a>
+<p>
+HBase REST can run as a daemon which starts an embedded Jetty servlet container
+and deploys the servlet into it.
+<p>
+<ol>
+<li>Start the embedded Jetty servlet container:
+ <ul>
+ <li>In the foreground:
+ <blockquote>
+ <tt>
+ % ./bin/hbase rest start -p <<i>port</i>>
+ </tt>
+ </blockquote>
+ <p>
+ where <<i>port</i>> is optional, and is the port the connector should
+ listen on. (Default is 8080.)
+ </p>
+ </li>
+ </ul>
+</li>
+</ol>
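+<p>
+For example, to start the daemon in the foreground on port 8000 (the port
+assumed by most of the examples below):
+<p>
+<blockquote>
+<tt>
+% ./bin/hbase rest start -p 8000
+</tt>
+</blockquote>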
+
+<p>
+<a name="rest">
+<h2>Representational State Transfer</h2>
+</a>
+<p>
+
+The terms "representational state transfer" and "REST" were introduced in 2000
+in the
+<a href="http://www.ics.uci.edu/~fielding/pubs/dissertation/rest_arch_style.htm">
+doctoral dissertation of Roy Fielding</a>, one of the principal authors of the
+Hypertext Transfer Protocol (HTTP) specification.
+<p>
+A GET to an identifier requests a copy of the information in the supplied
+content type.
+<p>
+A PUT to an identifier replaces the information. The supplied content type
+determines how it is to be interpreted.
+<p>
+POST adds information.
+<p>
+DELETE eliminates information.
+<p>
+<center>
+<table width="90%">
+<tr><td><b>Database Operations</b></td>
+ <td><b>REST/HTTP Equivalents</b></td>
+ </tr>
+<tr><td colspan="2"> </td></tr>
+<tr><td>CREATE</td><td>PUT</td></tr>
+<tr><td>READ</td><td>GET</td></tr>
+<tr><td>UPDATE</td><td>POST (update) or PUT (replace)</td></tr>
+<tr><td>DELETE</td><td>DELETE</td></tr>
+</table>
+</center>
+
+<p>
+<a name="identifiers">
+<h2>Resource Identifiers</h2>
+</a>
+<p>
+<a href="http://www.rfc-editor.org/rfc/rfc3968.txt">RFC 3968</a> defines URL
+syntax:
+<p>
+<pre>
+scheme://user:pass@example.net:8080/path/to/file;type=foo?name=val#frag
+\_____/ \_______/\___________/\__/\______/\____/\______/\________/\___/
+ | | | | | | | | |
+ scheme userinfo hostname port path filename param query fragment
+ \________________________/
+ authority
+</pre>
+<p>
+HBase REST exposes HBase tables, rows, cells, and metadata as URL specified
+resources.
+<p>
+<b>NOTE:</b> The characters <tt>/</tt>, <tt>:</tt>, and <tt>,</tt> are reserved
+within row keys, column names, and column qualifiers. Clients must escape them,
+typically with URL (percent) encoding. For example, the key:
+<p>
+<pre>
+ http://www.google.com/
+</pre>
+<p>
+should first be encoded as:
+<p>
+<pre>
+ http%3A%2F%2Fwww.google.com%2F
+</pre>
+<p>
+to produce a path like:
+<pre>
+ /SomeTable/http%3A%2F%2Fwww.google.com%2F/someColumn:qualifier
+</pre>
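+<p>
+For example, a GET of the cell addressed by the path above (the table
+<tt>SomeTable</tt> and column <tt>someColumn:qualifier</tt> are hypothetical):
+<p>
+<blockquote>
+<tt>
+% curl http://localhost:8000/SomeTable/http%3A%2F%2Fwww.google.com%2F/someColumn:qualifier
+</tt>
+</blockquote>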
+<p>
+<h3>Addressing for cell or row query (GET)</h3>
+<p>
+<pre>
+ path := '/' <table>
+ '/' <row>
+ ( '/' ( <column> ( ':' <qualifier> )?
+ ( ',' <column> ( ':' <qualifier> )? )+ )?
+ ( '/' ( <start-timestamp> ',' )? <end-timestamp> )? )?
+ query := ( '?' 'v' '=' <num-versions> )?
+</pre>
+<p>
+
+<h3>Addressing for single value store (PUT)</h3>
+<p>
+Address with table, row, column (and optional qualifier), and optional timestamp.
+<p>
+<pre>
+ path := '/' <table> '/' <row> '/' <column> ( ':' <qualifier> )?
+ ( '/' <timestamp> )?
+</pre>
+<p>
+
+<h3>Addressing for multiple (batched) value store (PUT)</h3>
+<p>
+<pre>
+ path := '/' <table> '/' <false-row-key>
+</pre>
+<p>
+
+<h3>Addressing for row, column, or cell DELETE</h3>
+<p>
+<pre>
+ path := '/' <table>
+ '/' <row>
+ ( '/' <column> ( ':' <qualifier> )?
+ ( '/' <timestamp> )? )?
+</pre>
+<p>
+
+<h3>Addressing for table creation or schema update (PUT or POST), schema query
+(GET), or delete (DELETE)</h3>
+<p>
+<pre>
+ path := '/' <table> / 'schema'
+</pre>
+<p>
+
+<h3>Addressing for scanner creation (POST)</h3>
+<p>
+<pre>
+ path := '/' <table> '/' 'scanner'
+</pre>
+<p>
+
+<h3>Addressing for scanner next item (GET)</h3>
+<p>
+<pre>
+ path := '/' <table> '/' 'scanner' '/' <scanner-id>
+</pre>
+<p>
+
+<h3>Addressing for scanner deletion (DELETE)</h3>
+<p>
+<pre>
+ path := '/' <table> '/' 'scanner' '/' <scanner-id>
+</pre>
+<p>
+
+<p>
+<a name="operations">
+<h2>Operations</h2>
+</a>
+<p>
+
+<a name="operation_version">
+<h3>Query Software Version</h3>
+</a>
+<p>
+<pre>
+GET /version
+</pre>
+<p>
+Returns the software version.
+Set Accept header to <tt>text/plain</tt> for plain text output.
+Set Accept header to <tt>text/xml</tt> for XML reply.
+Set Accept header to <tt>application/json</tt> for JSON reply.
+Set Accept header to <tt>application/x-protobuf</tt> for protobufs.
+<p>
+If not successful, returns appropriate HTTP error status code.
+If successful, returns the software version.
+<p>
+Examples:
+<p>
+<blockquote>
+<tt>
+% curl http://localhost:8000/version<br>
+<br>
+HTTP/1.1 200 OK<br>
+Content-Length: 149<br>
+Cache-Control: no-cache<br>
+Content-Type: text/plain<br>
+<br>
+Stargate 0.0.1 [JVM: Sun Microsystems Inc. 1.6.0_13-11.3-b02] [OS: Linux 2.6.<br>
+18-128.1.6.el5.centos.plusxen amd64] [Jetty: 6.1.14] [Jersey: 1.1.0-ea]<br>
+<br>
+% curl -H "Accept: text/xml" http://localhost:8000/version<br>
+<br>
+HTTP/1.1 200 OK<br>
+Cache-Control: no-cache<br>
+Content-Type: text/xml<br>
+Content-Length: 212<br>
+<br>
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?><br>
+<Version Stargate="0.0.1" OS="Linux 2.6.18-128.1.6.el5.centos.plusxen amd64"<br>
+ JVM="Sun Microsystems Inc. 1.6.0_13-11.3-b02" Jetty="6.1.14" Jersey="1.1.0-e<br>
+a"/><br>
+<br>
+% curl -H "Accept: application/json" http://localhost:8000/version<br>
+<br>
+HTTP/1.1 200 OK<br>
+Cache-Control: no-cache<br>
+Content-Type: application/json<br>
+Transfer-Encoding: chunked<br>
+<br>
+{"@Stargate":"0.0.1","@OS":"Linux 2.6.18-128.1.6.el5.centos.plusxen amd64","@<br>
+JVM":"Sun Microsystems Inc. 1.6.0_13-11.3-b02","@Jetty":"6.1.14","@Jersey":"1<br>
+.1.0-ea"}<br>
+<br>
+% curl -H "Accept: application/x-protobuf" http://localhost:8000/version<br>
+<br>
+HTTP/1.1 200 OK<br>
+Content-Length: 113<br>
+Cache-Control: no-cache<br>
+Content-Type: application/x-protobuf<br>
+<br>
+000000 0a 05 30 2e 30 2e 31 12 27 53 75 6e 20 4d 69 63<br>
+000010 72 6f 73 79 73 74 65 6d 73 20 49 6e 63 2e 20 31<br>
+000020 2e 36 2e 30 5f 31 33 2d 31 31 2e 33 2d 62 30 32<br>
+000030 1a 2d 4c 69 6e 75 78 20 32 2e 36 2e 31 38 2d 31<br>
+000040 32 38 2e 31 2e 36 2e 65 6c 35 2e 63 65 6e 74 6f<br>
+000050 73 2e 70 6c 75 73 78 65 6e 20 61 6d 64 36 34 22<br>
+000060 06 36 2e 31 2e 31 34 2a 08 31 2e 31 2e 30 2d 65<br>
+000070 61<br>
+</tt>
+</blockquote>
+<p>
+
+<a name="operation_storage_cluster_version">
+<h3>Query Storage Cluster Version</h3>
+</a>
+<p>
+<pre>
+GET /version/cluster
+</pre>
+<p>
+Returns version information regarding the HBase cluster backing the Stargate instance.
+<p>
+Examples:
+<p>
+<blockquote>
+<tt>
+% curl http://localhost:8000/version/cluster<br>
+<br>
+HTTP/1.1 200 OK<br>
+Content-Length: 6<br>
+Cache-Control: no-cache<br>
+Content-Type: text/plain<br>
+<br>
+0.20.0<br>
+<br>
+% curl -H "Accept: text/xml" http://localhost:8000/version/cluster<br>
+<br>
+HTTP/1.1 200 OK<br>
+Cache-Control: no-cache<br>
+Content-Type: text/xml<br>
+Content-Length: 94<br>
+<br>
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?><br>
+<ClusterVersion>0.20.0</ClusterVersion><br>
+<br>
+% curl -H "Accept: application/json" http://localhost:8000/version/cluster<br>
+<br>
+HTTP/1.1 200 OK<br>
+Cache-Control: no-cache<br>
+Content-Type: application/json<br>
+Transfer-Encoding: chunked<br>
+<br>
+"0.20.0"<br>
+</tt>
+</blockquote>
+<p>
+
+<a name="operation_storage_cluster_status">
+<h3>Query Storage Cluster Status</h3>
+</a>
+<p>
+<pre>
+GET /status/cluster
+</pre>
+<p>
+Returns detailed status on the HBase cluster backing the Stargate instance.
+<p>
+Examples:
+<p>
+<blockquote>
+<tt>
+% curl http://localhost:8000/status/cluster<br>
+</tt>
+<pre>
+HTTP/1.1 200 OK
+Content-Length: 839
+Cache-Control: no-cache
+Content-Type: text/plain
+
+1 live servers, 0 dead servers, 13.0000 average load
+
+1 live servers
+ test:37154 1244960965781
+ requests=1, regions=13
+
+ urls,http|www.legacy.com|80|site=Legacy|aamsz=300x250||position=1|prod
+ =1,1244851990859
+ urls,http|weather.boston.com|80|LYNX.js,1244851990859
+ hbase:meta,,1
+ content,601292a839b95e50200d8f8767859864,1244869158156
+ content,9d7f3aeb2a5c1e2b45d690a91de3f23c,1244879698031
+ content,7f6d48830ef51d635e9a5b672e79a083,1244879698031
+ content,3ef16d776603bf9b9e775c9ceb64860f,1244869158156
+ urls,,1244851989250
+ urls,http|groups.google.com|80|groups|img|card_left.gif,1244851989250
+ content,deafed2f90f718d72caaf87bd6c27d04,1244870320343
+ content,bcf91ecf78ea72a33faccfb8e6b5d900,1244870320343
+ -ROOT-,,0
+ content,,1244851999187
+</pre>
+<tt>
+% curl -H "Accept: text/xml" http://localhost:8000/status/cluster<br>
+<br>
+HTTP/1.1 200 OK<br>
+Cache-Control: no-cache<br>
+Content-Type: text/xml<br>
+Content-Length: 1301<br>
+<br>
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?><br>
+<ClusterStatus requests="1" regions="13" averageLoad="13.0"><DeadNodes/><LiveN<br>
+odes><Node startCode="1244960965781" requests="1" name="test:37154"><Region na<br>
+me="dXJscyxodHRwfHd3dy5sZWdhY3kuY29tfDgwfHNpdGU9TGVnYWN5fGFhbXN6PTMwMHgyNTB8YX<br>
+JlYT1DSlDQaElDQUdPVFJJQlVORS4yMXx6b25lPUhvbWV8cG9zaXRpb249MXxwcm9kPTEsMTI0NDg1<br>
+MTk5MDg1OQ=="/><Region name="dXJscyxodHRwfHdlYXRoZXIuYm9zdG9uLmNvbXw4MHxMWU5YL<br>
+mpzLDEyNDQ4NTE5OTA4NTk="/><Region name="Lk1FVEEuLCwx"/><Region name="Y29udGVud<br>
+Cw2MDEyOTJhODM5Yjk1ZTUwMjAwZDhmODc2Nzg1OTg2NCwxMjQ0ODY5MTU4MTU2"/><Region name<br>
+="Y29udGVudCw5ZDdmM2FlYjJhNWMxZTJiNDVkNjkwYTkxZGUzZjIzYywxMjQ0ODc5Njk4MDMx"/><<br>
+Region name="Y29udGVudCw3ZjZkNDg4MzBlZjUxZDYzNWU5YTViNjcyZTc5YTA4MywxMjQ0ODc5N<br>
+jk4MDMx"/><Region name="Y29udGVudCwzZWYxNmQ3NzY2MDNiZjliOWU3NzVjOWNlYjY0ODYwZi<br>
+wxMjQ0ODY5MTU4MTU2"/><Region name="dXJscywsMTI0NDg1MTk4OTI1MA=="/><Region name<br>
+="dXJscyxodHRwfGdyb3Vwcy5nb29nbGUuY29tfDgwfGdyb3Vwc3xpbWd8Y2FyZF9sZWZ0LmdpZiwx<br>
+MjQ0ODUxOTg5MjUw"/><Region name="Y29udGVudCxkZWFmZWQyZjkwZjcxOGQ3MmNhYWY4N2JkN<br>
+mMyN2QwNCwxMjQ0ODcwMzIwMzQz"/><Region name="Y29udGVudCxiY2Y5MWVjZjc4ZWE3MmEzM2<br>
+ZhY2NmYjhlNmI1ZDkwMCwxMjQ0ODcwMzIwMzQz"/><Region name="LVJPT1QtLCww"/><Region<br>
+name="Y29udGVudCwsMTI0NDg1MTk5OTE4Nw=="/></Node></LiveNodes></ClusterStatus><br>
+<br>
+% curl -H "Accept: application/json" http://localhost:8000/status/cluster<br>
+<br>
+HTTP/1.1 200 OK<br>
+Cache-Control: no-cache<br>
+Content-Type: application/json<br>
+Transfer-Encoding: chunked<br>
+<br>
+{"@requests":"1","@regions":"13","@averageLoad":"13.0","DeadNodes":[],"LiveNod<br>
+es":{"Node":{"@startCode":"1244960965781","@requests":"1","@name":"test:37154"<br>
+,"Region":[{"@name":"dXJscyxodHRwfHd3dLmpzy5sZWdhY3kuY29tfDgwfHNpdGU9TGVnYWN5f<br>
+GFhbXN6PTMwMHgyNTB8YXJlYT1DSElDQUdPVFJJQlVORS4yMXx6b25lPUhvbWV8cG9zaXRpb249MXx<br>
+wcm9kPTEsMTI0NDg1MTk5MDg1OQ=="},{"@name":"dXJscyxodHRwfHdlYXRoZXIuYm9zdG9uLmNv<br>
+bXw4MHxMWU5YLmpzLDEyNDQ4NTE5OTA4NTk="},{"@name":"Lk1FVEEuLCwx"},{"@name":"Y29u<br>
+dGVudCw2MDEyOTJhODM5Yjk1ZTUwMjAwZDhmODc2Nzg1OTg2NCwxMjQ0ODY5MTU4MTU2"},{"@name<br>
+":"Y29udGVudCw5ZDdmM2FlYjJhNWMxZTJiNDVkNjkwYTkxZGUzZjIzYywxMjQ0ODc5Njk4MDMx"},<br>
+{"@name":"Y29udGVudCw3ZjZkNDg4MzBlZjUxZDYzNWU5YTViNjcyZTc5YTA4MywxMjQ0ODc5Njk4<br>
+MDMx"},{"@name":"Y29udGVudCwzZWYxNmQ3NzY2MDNiZjliOWU3NzVjOWNlYjY0ODYwZiwxMjQ0O<br>
+DY5MTU4MTU2"},{"@name":"dXJscywsMTI0NDg1MTk4OTI1MA=="},{"@name":"dXJscyxodHRwf<br>
+Gdyb3Vwcy5nb29nbGUuY29tfDgwfGdyb3Vwc3xpbWd8Y2FyZF9sZWZ0LmdpZiwxMjQ0ODUxOTg5MjU<br>
+w"},{"@name":"Y29udGVudCxkZWFmZWQyZjkwZjcxOGQ3MmNhYWY4N2JkNmMyN2QwNCwxMjQ0ODcw<br>
+MzIwMzQz"},{"@name":"Y29udGVudCxiY2Y5MWVjZjc4ZWE3MmEzM2ZhY2NmYjhlNmI1ZDkwMCwxM<br>
+jQ0ODcwMzIwMzQz"},{"@name":"LVJPT1QtLCww"},{"@name":"Y29udGVudCwsMTI0NDg1MTk5O<br>
+TE4Nw=="}]}}}<br>
+</tt>
+</blockquote>
+<p>
+
+<a name="operation_query_tables">
+<h3>Query Table List</h3>
+</a>
+<p>
+<pre>
+GET /
+</pre>
+<p>
+Retrieves the list of available tables.
+Set Accept header to <tt>text/plain</tt> for plain text output.
+Set Accept header to <tt>text/xml</tt> for XML reply.
+Set Accept header to <tt>application/json</tt> for JSON reply.
+Set Accept header to <tt>application/x-protobuf</tt> for protobufs.
+If not successful, returns appropriate HTTP error status code.
+If successful, returns the table list in the requested encoding.
+<p>
+Examples:
+<p>
+<blockquote>
+<tt>
+% curl http://localhost:8000/<br>
+<br>
+HTTP/1.1 200 OK<br>
+Content-Length: 13<br>
+Cache-Control: no-cache<br>
+Content-Type: text/plain<br>
+<br>
+content<br>
+urls<br>
+<br>
+% curl -H "Accept: text/xml" http://localhost:8000/<br>
+<br>
+HTTP/1.1 200 OK<br>
+Cache-Control: no-cache<br>
+Content-Type: text/xml<br>
+Content-Length: 121<br>
+<br>
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?><br>
+<TableList><table name="content"/><table name="urls"/></TableList><br>
+<br>
+% curl -H "Accept: application/json" http://localhost:8000/<br>
+<br>
+HTTP/1.1 200 OK<br>
+Cache-Control: no-cache<br>
+Content-Type: application/json<br>
+Transfer-Encoding: chunked<br>
+<br>
+{"table":[{"name":"content"},{"name":"urls"}]}<br>
+<br>
+% curl -H "Accept: application/x-protobuf" http://localhost:8000/<br>
+<br>
+HTTP/1.1 200 OK<br>
+Content-Length: 15<br>
+Cache-Control: no-cache<br>
+Content-Type: application/x-protobuf<br>
+<br>
+000000 0a 07 63 6f 6e 74 65 6e 74 0a 04 75 72 6c 73<br>
+</tt>
+</blockquote>
+<p>
+
+<a name="operation_query_schema">
+<h3>Query Table Schema</h3>
+</a>
+<p>
+<pre>
+GET /<table>/schema
+</pre>
+<p>
+Retrieves table schema.
+Set Accept header to <tt>text/plain</tt> for plain text output.
+Set Accept header to <tt>text/xml</tt> for XML reply.
+Set Accept header to <tt>application/json</tt> for JSON reply.
+Set Accept header to <tt>application/x-protobuf</tt> for protobufs.
+If not successful, returns appropriate HTTP error status code.
+If successful, returns the table schema in the requested encoding.
+<p>
+Examples:
+<p>
+<blockquote>
+<tt>
+% curl http://localhost:8000/content/schema<br>
+<br>
+HTTP/1.1 200 OK<br>
+Content-Length: 639<br>
+Cache-Control: no-cache<br>
+Content-Type: text/plain<br>
+<br>
+{ NAME=> 'content', IS_META => 'false', IS_ROOT => 'false', COLUMNS => [ { NA<br>
+ME => 'content', BLOCKSIZE => '65536', BLOOMFILTER => 'false', BLOCKCACHE => <br>
+'false', COMPRESSION => 'GZ', LENGTH => '2147483647', VERSIONS => '1', TTL =><br>
+'-1', IN_MEMORY => 'false' }, { NAME => 'info', BLOCKSIZE => '65536', BLOOMFI<br>
+LTER => 'false', BLOCKCACHE => 'false', COMPRESSION => 'NONE', LENGTH => '214<br>
+7483647', VERSIONS => '1', TTL => '-1', IN_MEMORY => 'false' }, { NAME => 'ur<br>
+l', BLOCKSIZE => '65536', BLOOMFILTER => 'false', BLOCKCACHE => 'false', COMP<br>
+RESSION => 'NONE', LENGTH => '2147483647', VERSIONS => '1', TTL => '-1', IN_<br>
+MEMORY => 'false' } ] }<br>
+<br>
+% curl -H "Accept: text/xml" http://localhost:8000/content/schema<br>
+<br>
+HTTP/1.1 200 OK<br>
+Cache-Control: no-cache<br>
+Content-Type: text/xml<br>
+Content-Length: 618<br>
+<br>
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?><br>
+<TableSchema name="content" IS_META="false" IS_ROOT="false"><ColumnSchema nam<br>
+e="content" BLOCKSIZE="65536" BLOOMFILTER="false" BLOCKCACHE="false" COMPRESS<br>
+ION="GZ" LENGTH="2147483647" VERSIONS="1" TTL="-1" IN_MEMORY="false"/><Column<br>
+Schema name="info" BLOCKSIZE="65536" BLOOMFILTER="false" BLOCKCACHE="false" C<br>
+OMPRESSION="NONE" LENGTH="2147483647" VERSIONS="1" TTL="-1" IN_MEMORY="false"<br>
+/><ColumnSchema name="url" BLOCKSIZE="65536" BLOOMFILTER="false"BLOCKCACHE="f<br>
+alse" COMPRESSION="NONE" LENGTH="2147483647" VERSIONS="1" TTL="-1" IN_MEMORY=<br>
+"false"/></TableSchema><br>
+<br>
+% curl -H "Accept: application/json" http://localhost:8000/content/schema<br>
+<br>
+HTTP/1.1 200 OK<br>
+Cache-Control: no-cache<br>
+Content-Type: application/json<br>
+Transfer-Encoding: chunked<br>
+<br>
+{"@name":"content","@IS_META":"false","@IS_ROOT":"false","ColumnSchema":[{"@n<br>
+ame":"content","@BLOCKSIZE":"65536","@BLOOMFILTER":"false","@BLOCKCACHE":"fal<br>
+se","@COMPRESSION":"GZ","@LENGTH":"2147483647","@VERSIONS":"1","@TTL":"-1","@<br>
+IN_MEMORY":"false"},{"@name":"info","@BLOCKSIZE":"65536","@BLOOMFILTER":"fals<br>
+e","@BLOCKCACHE":"false","@COMPRESSION":"NONE","@LENGTH":"2147483647","@VERSI<br>
+ONS":"1","@TTL":"-1","@IN_MEMORY":"false"},{"@name":"url","@BLOCKSIZE":"65536<br>
+","@BLOOMFILTER":"false","@BLOCKCACHE":"false","@COMPRESSION":"NONE","@LENGTH<br>
+":"2147483647","@VERSIONS":"1","@TTL":"-1","@IN_MEMORY":"false"}]}<br>
+<br>
+% curl -H "Accept: application/x-protobuf" http://localhost:8000/content/schema<br>
+<br>
+HTTP/1.1 200 OK<br>
+Content-Length: 563<br>
+Cache-Control: no-cache<br>
+Content-Type: application/x-protobuf<br>
+<br>
+000000 0a 07 63 6f 6e 74 65 6e 74 12 10 0a 07 49 53 5f<br>
+000010 4d 45 54 41 12 05 66 61 6c 73 65 12 10 0a 07 49<br>
+000020 53 5f 52 4f 4f 54 12 05 66 61 6c 73 65 1a a7 01<br>
+000030 12 12 0a 09 42 4c 4f 43 4b 53 49 5a 45 12 05 36<br>
+[...]<br>
+000230 4f 4e 45<br>
+</tt>
+</blockquote>
+<p>
+
+<a name="operation_create_schema">
+<h3>Create Table Or Update Table Schema</h3>
+</a>
+<p>
+<pre>
+PUT /<table>/schema
+
+POST /<table>/schema
+</pre>
+<p>
+Uploads table schema.
+PUT or POST creates table as necessary.
+PUT fully replaces schema.
+POST modifies schema (add or modify column family).
+Supply the full table schema for PUT or a well-formed schema fragment for POST
+in the desired encoding.
+Set Content-Type header to <tt>text/xml</tt> if the desired encoding is XML.
+Set Content-Type header to <tt>application/json</tt> if the desired encoding
+is JSON.
+Set Content-Type header to <tt>application/x-protobuf</tt> if the desired
+encoding is protobufs.
+If not successful, returns appropriate HTTP error status code.
+If successful, returns HTTP 200 status.
+<p>
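+Example:
+<p>
+A minimal sketch (the table name <tt>test</tt> and column family <tt>cf</tt>
+are placeholders; the XML follows the TableSchema format shown in the "Query
+Table Schema" section above):
+<p>
+<blockquote>
+<tt>
+% curl -X PUT -H "Content-Type: text/xml" -d '<TableSchema name="test"><ColumnSchema name="cf"/></TableSchema>' http://localhost:8000/test/schema<br>
+<br>
+HTTP/1.1 200 OK<br>
+Content-Length: 0<br>
+</tt>
+</blockquote>
+<p>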
+
+<a name="operation_table_metadata">
+<h3>Query Table Metadata</h3>
+</a>
+<p>
+<pre>
+GET /<table>/regions
+</pre>
+<p>
+Retrieves table region metadata.
+Set Accept header to <tt>text/plain</tt> for plain text output.
+Set Accept header to <tt>text/xml</tt> for XML reply.
+Set Accept header to <tt>application/json</tt> for JSON reply.
+Set Accept header to <tt>application/x-protobuf</tt> for protobufs.
+If not successful, returns appropriate HTTP error status code.
+If successful, returns the table region metadata in the requested encoding.
+<p>
+Examples:
+<p>
+<blockquote>
+<tt>
+% curl -H "Accept: text/xml" http://localhost:8000/content/regions<br>
+<br>
+HTTP/1.1 200 OK<br>
+Cache-Control: no-cache<br>
+Content-Type: text/xml<br>
+Content-Length: 1555<br>
+<br>
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?><br>
+<TableInfo name="content"><Region location="test:51025" endKey="M2VmMTZkNzc2Nj<br>
+AzYmY5YjllNzc1YzljZWI2NDg2MGY=" startKey="" id="1244851999187" name="content,,<br>
+1244851999187"/><Region location="test:51025" endKey="NjAxMjkyYTgzOWI5NWU1MDIw<br>
+MGQ4Zjg3Njc4NTk4NjQ=" startKey="M2VmMTZkNzc2NjAzYmY5YjllNzc1YzljZWI2NDg2MGY=" <br>
+id="1244869158156" name="content,3ef16d776603bf9b9e775c9ceb64860f,124486915815<br>
+6"/><Region location="test:51025" endKey="N2Y2ZDQ4ODMwZWY1MWQ2MzVlOWE1YjY3MmU3<br>
+OWEwODM=" startKey="NjAxMjkyYTgzOWI5NWU1MDIwMGQ4Zjg3Njc4NTk4NjQ=" id="12448691<br>
+58156" name="content,601292a839b95e50200d8f8767859864,1244869158156"/><Region<br>
+location="test:51025" endKey="OWQ3ZjNhZWIyYTVjMWUyYjQ1ZDY5MGE5MWRlM2YyM2M=" st<br>
+artKey="N2Y2ZDQ4ODMwZWY1MWQ2MzVlOWE1YjY3MmU3OWEwODM=" id="1244879698031" name=<br>
+"content,7f6d48830ef51d635e9a5b672e79a083,1244879698031"/><Region location="te<br>
+st:51025" endKey="YmNmOTFlY2Y3OGVhNzJhMzNmYWNjZmI4ZTZiNWQ5MDA=" startKey="OWQ3<br>
+ZjNhZWIyYTVjMWUyYjQ1ZDY5MGE5MWRlM2YyM2M=" id="1244879698031" name="content,9d7<br>
+f3aeb2a5c1e2b45d690a91de3f23c,1244879698031"/><Region location="test:51025" en<br>
+dKey="ZGVhZmVkMmY5MGY3MThkNzJjYWFmODdiZDZjMjdkMDQ=" startKey="YmNmOTFlY2Y3OGVh<br>
+NzJhMzNmYWNjZmI4ZTZiNWQ5MDA=" id="1244870320343" name="content,bcf91ecf78ea72a<br>
+33faccfb8e6b5d900,1244870320343"/><Region location="test:51025" endKey="" star<br>
+tKey="ZGVhZmVkMmY5MGY3MThkNzJjYWFmODdiZDZjMjdkMDQ=" id="1244870320343" name="c<br>
+ontent,deafed2f90f718d72caaf87bd6c27d04,1244870320343"/></TableInfo><br>
+<br>
+% curl -H "Accept: application/json" http://localhost:8000/content/regions<br>
+<br>
+HTTP/1.1 200 OK<br>
+Cache-Control: no-cache<br>
+Content-Type: application/json<br>
+Transfer-Encoding: chunked<br>
+<br>
+{"@name":"content","Region":[{"@location":"test:51025","@endKey":"M2VmMTZkNzc2<br>
+NjAzYmY5YjllNzc1YzljZWI2NDg2MGY=","@startKey":"","@id":"1244851999187","@name"<br>
+:"content,,1244851999187"},{"@location":"test:51025","@endKey":"NjAxMjkyYTgzOW<br>
+I5NWU1MDIwMGQ4Zjg3Njc4NTk4NjQ=","@startKey":"M2VmMTZkNzc2NjAzYmY5YjllNzc1YzljZ<br>
+WI2NDg2MGY=","@id":"1244869158156","@name":"content,3ef16d776603bf9b9e775c9ceb<br>
+64860f,1244869158156"},{"@location":"test:51025","@endKey":"N2Y2ZDQ4ODMwZWY1MW<br>
+Q2MzVlOWE1YjY3MmU3OWEwODM=","@startKey":"NjAxMjkyYTgzOWI5NWU1MDIwMGQ4Zjg3Njc4N<br>
+Tk4NjQ=","@id":"1244869158156","@name":"content,601292a839b95e50200d8f87678598<br>
+64,1244869158156"},{"@location":"test:51025","@endKey":"OWQ3ZjNhZWIyYTVjMWUyYj<br>
+Q1ZDY5MGE5MWRlM2YyM2M=","@startKey":"N2Y2ZDQ4ODMwZWY1MWQ2MzVlOWE1YjY3MmU3OWEwO<br>
+DM=","@id":"1244879698031","@name":"content,7f6d48830ef51d635e9a5b672e79a083,1<br>
+244879698031"},{"@location":"test:51025","@endKey":"YmNmOTFlY2Y3OGVhNzJhMzNmYW<br>
+NjZmI4ZTZiNWQ5MDA=","@startKey":"OWQ3ZjNhZWIyYTVjMWUyYjQ1ZDY5MGE5MWRlM2YyM2M="<br>
+,"@id":"1244879698031","@name":"content,9d7f3aeb2a5c1e2b45d690a91de3f23c,12448<br>
+79698031"},{"@location":"test:51025","@endKey":"ZGVhZmVkMmY5MGY3MThkNzJjYWFmOD<br>
+diZDZjMjdkMDQ=","@startKey":"YmNmOTFlY2Y3OGVhNzJhMzNmYWNjZmI4ZTZiNWQ5MDA=","@i<br>
+d":"1244870320343","@name":"content,bcf91ecf78ea72a33faccfb8e6b5d900,124487032<br>
+0343"},{"@location":"test:51025","@endKey":"","@startKey":"ZGVhZmVkMmY5MGY3MTh<br>
+kNzJjYWFmODdiZDZjMjdkMDQ=","@id":"1244870320343","@name":"content,deafed2f90f7<br>
+18d72caaf87bd6c27d04,1244870320343"}]}<br>
+<br>
+% curl -H "Accept: application/x-protobuf" http://localhost:8000/content/regions<br>
+<br>
+HTTP/1.1 200 OK<br>
+Content-Length: 961<br>
+Cache-Control: no-cache<br>
+Content-Type: application/x-protobuf<br>
+<br>
+000000 0a 07 63 6f 6e 74 65 6e 74 12 53 0a 16 63 6f 6e<br>
+000010 74 65 6e 74 2c 2c 31 32 34 34 38 35 31 39 39 39<br>
+000020 31 38 37 12 00 1a 20 33 65 66 31 36 64 37 37 36<br>
+000030 36 30 33 62 66 39 62 39 65 37 37 35 63 39 63 65<br>
+[...]<br>
+0003c0 35<br>
+</tt>
+</blockquote>
+<p>
+
+<a name="operation_delete_table">
+<h3>Delete Table</h3>
+</a>
+<p>
+<pre>
+DELETE /<table>/schema
+</pre>
+<p>
+Deletes a table.
+If not successful, returns appropriate HTTP error status code.
+If successful, returns HTTP 200 status.
+<p>
+NOTE: <tt>DELETE /<table></tt> will not work; use <tt>DELETE /<table>/schema</tt> instead.
+<p>
+Examples:
+<p>
+<blockquote>
+<tt>
+% telnet localhost 8000<br>
+DELETE http://localhost:8000/test/schema HTTP/1.0<br>
+<br>
+HTTP/1.1 200 OK<br>
+Content-Length: 0<br>
+</tt>
+</blockquote>
+<p>
+
+<a name="operation_cell_query_single">
+<h3>Cell Query (Single Value)</h3>
+</a>
+<p>
+<pre>
+GET /<table>/<row>/
+ <column> ( : <qualifier> )?
+ ( / <timestamp> )?
+</pre>
+<p>
+Retrieves one cell, with optional specification of timestamp.
+Set Accept header to <tt>text/xml</tt> for XML reply.
+Set Accept header to <tt>application/x-protobuf</tt> for protobufs.
+Set Accept header to <tt>application/octet-stream</tt> for binary.
+If not successful, returns appropriate HTTP error status code.
+If successful, returns HTTP 200 status and cell data in the response body in
+the requested encoding. If the encoding is binary, returns row, column, and
+timestamp in X headers: <tt>X-Row</tt>, <tt>X-Column</tt>, and
+<tt>X-Timestamp</tt>, respectively. Depending on the precision of the resource
+specification, some of the X-headers may be elided as redundant.
+<p>
+Examples:
+<p>
+<blockquote>
+<tt>
+% curl -H "Accept: text/xml" http://localhost:8000/content/00012614f7d43df6418523445a6787d6/content:raw<br>
+<br>
+HTTP/1.1 200 OK<br>
+Cache-Control: max-age=14400<br>
+Content-Type: text/xml<br>
+Content-Length: 521<br>
+<br>
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?><br>
+<CellSet><Row key="MDAwMTI2MTRmN2Q0M2RmNjQxODUyMzQ0NWE2Nzg3ZDY="><Cell timesta<br>
+mp="1244880122250" column="Y29udGVudDpyYXc=">PCFET0NUWVBFIEhUTUwgUFVCTElDICItL<br>
+y9JRVRGLy9EVEQgSFRNTCAyLjAvL0VOIj4KPGh0bWw+PGhlYWQ+Cjx0aXRsZT4zMDEgTW92ZWQgUGV<br>
+ybWFuZW50bHk8L3RpdGxlPgo8L2hlYWQ+PGJvZHk+CjxoMT5Nb3ZlZCBQZXJtYW5lbnRseTwvaDE+C<br>
+jxwPlRoZSBkb2N1bWVudCBoYXMgbW92ZWQgPGEgaHJlZj0iaHR0cDovL3R3aXR0ZXIuY29tL2R1bmN<br>
+hbnJpbGV5Ij5oZXJlPC9hPi48L3A+CjwvYm9keT48L2h0bWw+Cg==</Cell></Row></CellSet><br>
+<br>
+% curl -H "Accept: application/json" http://localhost:8000/content/00012614f7d43df6418523445a6787d6/content:raw<br>
+<br>
+HTTP/1.1 200 OK<br>
+Cache-Control: max-age=14400<br>
+Content-Type: application/json<br>
+Transfer-Encoding: chunked<br>
+<br>
+{"Row":{"@key":"MDAwMTI2MTRmN2Q0M2RmNjQxODUyMzQ0NWE2Nzg3ZDY=","Cell":{"@timest<br>
+amp":"1244880122250","@column":"Y29udGVudDpyYXc=","$":"PCFET0NUWVBFIEhUTUwgUFV<br>
+CTElDICItLy9JRVRGLy9EVEQgSFRNTCAyLjAvL0VOIj4KPGh0bWw+PGhlYWQ+Cjx0aXRsZT4zMDEgT<br>
+W92ZWQgUGVybWFuZW50bHk8L3RpdGxlPgo8L2hlYWQ+PGJvZHk+CjxoMT5Nb3ZlZCBQZXJtYW5lbnR<br>
+seTwvaDE+CjxwPlRoZSBkb2N1bWVudCBoYXMgbW92ZWQgPGEgaHJlZj0iaHR0cDovL3R3aXR0ZXIuY<br>
+29tL2R1bmNhbnJpbGV5Ij5oZXJlPC9hPi48L3A+CjwvYm9keT48L2h0bWw+Cg=="}}}<br>
+<br>
+% curl -H "Accept: application/x-protobuf" http://localhost:8000/content/00012614f7d43df6418523445a6787d6/content:raw<br>
+<br>
+HTTP/1.1 200 OK<br>
+Content-Length: 301<br>
+Cache-Control: max-age=14400<br>
+Content-Type: application/x-protobuf<br>
+<br>
+000000 0a aa 02 0a 20 30 30 30 31 32 36 31 34 66 37 64<br>
+000010 34 33 64 66 36 34 31 38 35 32 33 34 34 35 61 36<br>
+000020 37 38 37 64 36 12 85 02 12 0b 63 6f 6e 74 65 6e<br>
+000030 74 3a 72 61 77 18 8a e3 8c c5 9d 24 22 ee 01 3c<br>
+[...]<br>
+000120 62 6f 64 79 3e 3c 2f 68 74 6d 6c 3e 0a<br>
+<br>
+% curl -H "Accept: application/octet-stream" http://localhost:8000/content/00012614f7d43df6418523445a6787d6/content:raw<br>
+<br>
+HTTP/1.1 200 OK<br>
+Content-Length: 238<br>
+Cache-Control: max-age=14400<br>
+X-Timestamp: 1244880122250<br>
+Content-Type: application/octet-stream<br>
+<br>
+[...]<br>
+</tt>
+</blockquote>
+<p>
+
+<a name="operation_cell_query_multiple">
+<h3>Cell or Row Query (Multiple Values)</h3>
+</a>
+<p>
+<pre>
+GET /<table>/<row>
+ ( / ( <column> ( : <qualifier> )?
+ ( , <column> ( : <qualifier> )? )+ )?
+ ( / ( <start-timestamp> ',' )? <end-timestamp> )? )?
+ ( ?v= <num-versions> )?
+</pre>
+<p>
+Retrieves one or more cells from a full row, or one or more specified columns
+in the row, with optional filtering via timestamp, and an optional restriction
+on the maximum number of versions to return.
+Set Accept header to <tt>text/xml</tt> for XML reply.
+Set Accept header to <tt>application/json</tt> for JSON reply.
+Set Accept header to <tt>application/x-protobuf</tt> for protobufs.
+Set Accept header to <tt>application/octet-stream</tt> for binary.
+If not successful, returns appropriate HTTP error status code.
+If successful, returns row results in the requested encoding.
+<p>
+NOTE: If binary encoding is requested, only one cell can be returned, the
+first to match the resource specification. The row, column, and timestamp
+associated with the cell will be transmitted in X headers: <tt>X-Row</tt>,
+<tt>X-Column</tt>, and <tt>X-Timestamp</tt>, respectively. Depending on the
+precision of the resource specification, some of the X-headers may be elided
+as redundant.
+<p>
+<b>Suffix Globbing</b>
+<p>
+Multiple value queries of a row can optionally append a suffix glob on the row
+key. This is a restricted form of scanner which will return all values in all
+rows whose keys begin with the supplied prefix,
+for example:
+<p>
+<pre>
+ org.someorg.*
+ -> org.someorg.blog
+ -> org.someorg.home
+ -> org.someorg.www
+</pre>
+<p>
+Examples:
+<p>
+<blockquote>
+<tt>
+% curl -H "Accept: text/xml" http://localhost:8000/urls/https|ad.doubleclick.net|*<br>
+<br>
+HTTP/1.1 200 OK<br>
+Cache-Control: max-age=14400<br>
+Content-Type: text/xml<br>
+Transfer-Encoding: chunked<br>
+<br>
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?><br>
+<CellSet><Row key="aHR0cHx3d3cudGVsZWdyYXBoLmNvLnVrfDgwfG5ld3N8d29ybGRuZXdzfG5<br>
+vcnRoYW1lcmljYXx1c2F8NTQ5MTI4NHxBcm5vbGQtU2Nod2FyemVuZWdnZXItdW52ZWlscy1wYXBlc<br>
+mxlc3MtY2xhc3Nyb29tcy1wbGFuLmh0bWw="><Cell timestamp="1244701257843" column="a<br>
+W5mbzpjcmF3bGVyLTEyNDQ3MDEyNTc4NDM=">eyJpcCI6IjIwOC41MS4xMzcuOSIsIm1pbWV0eXBlI<br>
+joidGV4dC9odG1sO2NoYXJzZXQ9SVNPLT<br>
+[...]<br>
+</Cell><Cell timestamp="1244701513390" column="aW5mbzp1cmw=">aHR0cDovL3d3dy50Z<br>
+WxlZ3JhcGguY28udWs6ODAvdGVsZWdyYXBoL3RlbXBsYXRlL3ZlcjEtMC90ZW1wbGF0ZXMvZnJhZ21<br>
+lbnRzL2NvbW1vbi90bWdsQnJhbmRDU1MuanNw</Cell></Row></CellSet><br>
+<br>
+% curl -H "Accept: text/xml" http://localhost:8000/content/00012614f7d43df6418523445a6787d6<br>
+<br>
+HTTP/1.1 200 OK<br>
+Cache-Control: max-age=14400<br>
+Content-Type: text/xml<br>
+Content-Length: 1177<br>
+<br>
+<CellSet><Row key="MDAwMTI2MTRmN2Q0M2RmNjQxODUyMzQ0NWE2Nzg3ZDY="><Cell timesta<br>
+mp="1244880122250" column="Y29udGVudDpyYXc=">PCFET0NUWVBFIEhUTUwgUFVCTElDICItL<br>
+y9JRVRGLy9EVEQgSFRNTCAyLjAvL0VOIj4KPGh0bWw+PGhlYWQ+Cjx0aXRsZT4zMDEgTW92ZWQgUGV<br>
+ybWFuZW50bHk8L3RpdGxlPgo8L2hlYWQ+PGJvZHk+CjxoMT5Nb3ZlZCBQZXJtYW5lbnRseTwvaDE+C<br>
+jxwPlRoZSBkb2N1bWVudCBoYXMgbW92ZWQgPGEgaHJlZj0iaHR0cDovL3R3aXR0ZXIuY29tL2R1bmN<br>
+hbnJpbGV5Ij5oZXJlPC9hPi48L3A+CjwvYm9keT48L2h0bWw+Cg==</Cell><Cell timestamp="1<br>
+244880122250" column="aW5mbzpjcmF3bGVyLWh0dHB8d3d3LnR3aXR0ZXIuY29tfDgwfGR1bmNh<br>
+bnJpbGV5LTEyNDQ4ODAxMjIyNTA=">eyJpcCI6IjE2OC4xNDMuMTYyLjY4IiwibWltZXR5cGUiOiJ0<br>
+ZXh0L2h0bWw7IGNoYXJzZXQ9aXNvLTg4NTktMSIsInZpYSI6Imh0dHA6Ly93d3cuaW5xdWlzaXRyLm<br>
+NvbTo4MC8yNTkyNy90b3NoMC1hbmQtdGhlLWRlbWktbW9vcmUtbnNmdy1waWMvIn0=</Cell><Cell<br>
+timestamp="1244880122250" column="aW5mbzpsZW5ndGg=">MjM4</Cell><Cell timestamp<br>
+="1244880122250" column="aW5mbzptaW1ldHlwZQ==">dGV4dC9odG1sOyBjaGFyc2V0PWlzby0<br>
+4ODU5LTE=</Cell><Cell timestamp="1244880122250" column="dXJsOmh0dHB8d3d3LnR3aX<br>
+R0ZXIuY29tfDgwfGR1bmNhbnJpbGV5">aHR0cDovL3d3dy50d2l0dGVyLmNvbTo4MC9kdW5jYW5yaW<br>
+xleQ==</Cell></Row></CellSet><br>
+<br>
+% curl -H "Accept: application/json" http://localhost:8000/content/00012614f7d43df6418523445a6787d6<br>
+<br>
+HTTP/1.1 200 OK<br>
+Cache-Control: max-age=14400<br>
+Content-Type: application/json<br>
+Transfer-Encoding: chunked<br>
+<br>
+{"Row":{"@key":"MDAwMTI2MTRmN2Q0M2RmNjQxODUyMzQ0NWE2Nzg3ZDY=","Cell":[{"@times<br>
+tamp":"1244880122250","@column":"Y29udGVudDpyYXc=","$":"PCFET0NUWVBFIEhUTUwgUF<br>
+VCTElDICItLy9JRVRGLy9EVEQgSFRNTCAyLjAvL0VOIj4KPGh0bWw+PGhlYWQ+Cjx0aXRsZT4zMDEg<br>
+TW92ZWQgUGVybWFuZW50bHk8L3RpdGxlPgo8L2hlYWQ+PGJvZHk+CjxoMT5Nb3ZlZCBQZXJtYW5lbn<br>
+RseTwvaDE+CjxwPlRoZSBkb2N1bWVudCBoYXMgbW92ZWQgPGEgaHJlZj0iaHR0cDovL3R3aXR0ZXIu<br>
+Y29tL2R1bmNhbnJpbGV5Ij5oZXJlPC9hPi48L3A+CjwvYm9keT48L2h0bWw+Cg=="},{"@timestam<br>
+p":"1244880122250","@column":"aW5mbzpjcmF3bGVyLWh0dHB8d3d3LnR3aXR0ZXIuY29tfDgw<br>
+fGR1bmNhbnJpbGV5LTEyNDQ4ODAxMjIyNTA=","$":"eyJpcCI6IjE2OC4xNDMuMTYyLjY4IiwibWl<br>
+tZXR5cGUiOiJ0ZXh0L2h0bWw7IGNoYXJzZXQ9aXNvLTg4NTktMSIsInZpYSI6Imh0dHA6Ly93d3cua<br>
+W5xdWlzaXRyLmNvbTo4MC8yNTkyNy90b3NoMC1hbmQtdGhlLWRlbWktbW9vcmUtbnNmdy1waWMvIn0<br>
+="},{"@timestamp":"1244880122250","@column":"aW5mbzpsZW5ndGg=","$":"MjM4"},{"@<br>
+timestamp":"1244880122250","@column":"aW5mbzptaW1ldHlwZQ==","$":"dGV4dC9odG1sO<br>
+yBjaGFyc2V0PWlzby04ODU5LTE="},{"@timestamp":"1244880122250","@column":"dXJsOmh<br>
+0dHB8d3d3LnR3aXR0ZXIuY29tfDgwfGR1bmNhbnJpbGV5","$":"aHR0cDovL3d3dy50d2l0dGVyLm<br>
+NvbTo4MC9kdW5jYW5yaWxleQ=="}]}}<br>
+</tt>
+<p>
+NOTE: The cell value is given in JSON encoding as the value associated with the key "$".
+<p>
+<tt>
+% curl -H "Accept: application/x-protobuf" http://localhost:8000/content/00012614f7d43df6418523445a6787d6<br>
+<br>
+HTTP/1.1 200 OK<br>
+Content-Length: 692<br>
+Cache-Control: max-age=14400<br>
+Content-Type: application/x-protobuf<br>
+<br>
+000000 0a b1 05 0a 20 30 30 30 31 32 36 31 34 66 37 64<br>
+000010 34 33 64 66 36 34 31 38 35 32 33 34 34 35 61 36<br>
+000020 37 38 37 64 36 12 85 02 12 0b 63 6f 6e 74 65 6e<br>
+000030 74 3a 72 61 77 18 8a e3 8c c5 9d 24 22 ee 01 3c<br>
+[...]<br>
+0002b0 69 6c 65 79<br>
+</tt>
+</blockquote>
+<p>
+
+<a name="operation_cell_store_single">
+<h3>Cell Store (Single)</h3>
+</a>
+<p>
+<pre>
+PUT /<table>/<row>/<column>( : <qualifier> )? ( / <timestamp> )?
+
+POST /<table>/<row>/<column>( : <qualifier> )? ( / <timestamp> )?
+</pre>
+<p>
+Stores cell data into the specified location.
+If not successful, returns appropriate HTTP error status code.
+If successful, returns HTTP 200 status.
+Set Content-Type header to <tt>text/xml</tt> for XML encoding.
+Set Content-Type header to <tt>application/x-protobuf</tt> for protobufs encoding.
+Set Content-Type header to <tt>application/octet-stream</tt> for binary encoding.
+When using binary encoding, optionally, set X-Timestamp header to the desired
+timestamp.
+<p>
+PUT and POST operations are equivalent here: Specified addresses without
+existing data will create new values. Specified addresses with existing data
+will create new versions, overwriting an existing version if all of { row,
+column:qualifier, timestamp } match that of the existing value.
+<p>
+See "Cell Query (Single Value)" section for encoding examples.
+<p>
+Examples:
+<p>
+<blockquote>
+<tt>
+% curl -H "Content-Type: text/xml" --data '[...]' http://localhost:8000/test/testrow/test:testcolumn<br>
+<br>
+HTTP/1.1 200 OK<br>
+Content-Length: 0<br>
+</tt>
+</blockquote>
+<p>
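+A full request body for the XML encoding uses the same CellSet format shown in
+the query examples. A minimal sketch (the address reuses the hypothetical
+table, row, and column above; the Row key and Cell column are the base64
+encodings of <tt>testrow</tt> and <tt>test:testcolumn</tt>, and the cell value
+is a placeholder encoding of <tt>testvalue</tt>):
+<p>
+<blockquote>
+<tt>
+% curl -X PUT -H "Content-Type: text/xml" -d '<CellSet><Row key="dGVzdHJvdw=="><Cell column="dGVzdDp0ZXN0Y29sdW1u">dGVzdHZhbHVl</Cell></Row></CellSet>' http://localhost:8000/test/testrow/test:testcolumn<br>
+<br>
+HTTP/1.1 200 OK<br>
+Content-Length: 0<br>
+</tt>
+</blockquote>
+<p>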
+
+<a name="operation_cell_store_multiple">
+<h3>Cell Store (Multiple)</h3>
+</a>
+<p>
+<pre>
+PUT /<table>/<false-row-key>
+
+POST /<table>/<false-row-key>
+</pre>
+<p>
+Use a false row key. Row, column, and timestamp values in supplied cells
+override the specifications of the same on the path, allowing for posting of
+multiple values to a table in batch. If not successful, returns appropriate
+HTTP error status code. If successful, returns HTTP 200 status.
+Set Content-Type to <tt>text/xml</tt> for XML encoding.
+Set Content-Type header to <tt>application/x-protobuf</tt> for protobufs encoding.
+Supply commit data in the PUT or POST body.
+<p>
+PUT and POST operations are equivalent here: Specified addresses without
+existing data will create new values. Specified addresses with existing data
+will create new versions, overwriting an existing version if all of { row,
+column:qualifier, timestamp } match that of the existing value.
+<p>
+See "Cell or Row Query (Multiple Values)" for encoding examples.
+<p>
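+A minimal sketch of a batch store (the false row key <tt>fakerow</tt> is a
+placeholder and is overridden by the keys in the body; the base64 values are
+the encodings of the hypothetical rows <tt>testrow1</tt>/<tt>testrow2</tt>,
+column <tt>a:1</tt>, and values <tt>testvalue-a1</tt>/<tt>testvalue-a2</tt>):
+<p>
+<blockquote>
+<tt>
+% curl -X POST -H "Content-Type: text/xml" -d '<CellSet><Row key="dGVzdHJvdzE="><Cell column="YTox">dGVzdHZhbHVlLWEx</Cell></Row><Row key="dGVzdHJvdzI="><Cell column="YTox">dGVzdHZhbHVlLWEy</Cell></Row></CellSet>' http://localhost:8000/ExampleScanner/fakerow<br>
+<br>
+HTTP/1.1 200 OK<br>
+Content-Length: 0<br>
+</tt>
+</blockquote>
+<p>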
+
+<a name="operation_delete">
+<h3>Row, Column, or Cell Delete</h3>
+</a>
+<p>
+<pre>
+DELETE /<table>/<row>
+ ( / ( <column> ( : <qualifier> )?
+ ( / <timestamp> )? )?
+</pre>
+<p>
+Deletes an entire row, an entire column family, or specific cell(s), depending
+on how specific the data address is. If not successful, returns appropriate HTTP
+error status code. If successful, returns HTTP 200 status.
+<p>
+NOTE: <tt>DELETE /<table></tt> will not work.
+Use <tt>DELETE /<table>/schema</tt> instead.
+<p>
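+Example (the table, row, and column names are the hypothetical ones used in
+the "Cell Store (Single)" section above):
+<p>
+<blockquote>
+<tt>
+% curl -X DELETE http://localhost:8000/test/testrow/test:testcolumn<br>
+<br>
+HTTP/1.1 200 OK<br>
+Content-Length: 0<br>
+</tt>
+</blockquote>
+<p>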
+
+<a name="operation_scanner_create">
+<h3>Scanner Creation</h3>
+</a>
+<p>
+<pre>
+PUT /<table>/scanner
+
+POST /<table>/scanner
+</pre>
+<p>
+Allocates a new table scanner.
+If not successful, returns appropriate HTTP error status code.
+If successful, returns HTTP 201 status (created) and the URI which should be
+used to address the scanner, e.g.
+<p>
+<blockquote><tt>/<table>/scanner/112876541342014107c0fa92</tt></blockquote>
+<p>
+Set Content-Type to <tt>text/xml</tt> if supplying an XML scanner specification.
+Set Content-Type to <tt>application/protobuf</tt> if supplying a protobufs
+encoded specification.
+<p>
+Examples:
+<p>
+<blockquote>
+<tt>
+% curl -H "Content-Type: text/xml" -d '<Scanner batch="1"/>' http://localhost:8000/content/scanner<br>
+<br>
+HTTP/1.1 201 Created<br>
+Location: http://localhost:8000/content/scanner/12447063229213b1937<br>
+Content-Length: 0<br>
+</tt>
+</blockquote>
+<p>
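+A scanner specification may also restrict the row range, columns, and batch
+size. A sketch (the <tt>startRow</tt>/<tt>endRow</tt> values are the base64
+encodings of hypothetical row keys <tt>testrow1</tt> and <tt>testrow3</tt>;
+attribute names follow the Scanner element in the XML Schema section below):
+<p>
+<blockquote>
+<tt>
+% curl -H "Content-Type: text/xml" -d '<Scanner batch="10" startRow="dGVzdHJvdzE=" endRow="dGVzdHJvdzM="/>' http://localhost:8000/content/scanner<br>
+<br>
+HTTP/1.1 201 Created<br>
+Location: http://localhost:8000/content/scanner/[...]<br>
+Content-Length: 0<br>
+</tt>
+</blockquote>
+<p>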
+
+<a name="operation_scanner_next">
+<h3>Scanner Get Next</h3>
+</a>
+<p>
+<pre>
+GET /<table>/scanner/<scanner-id>
+</pre>
+<p>
+Returns the values of the next cells found by the scanner, up to the configured batch amount.
+Set Accept header to <tt>text/xml</tt> for XML encoding.
+Set Accept header to <tt>application/x-protobuf</tt> for protobufs encoding.
+Set Accept header to <tt>application/octet-stream</tt> for binary encoding.
+If not successful, returns appropriate HTTP error status code.
+If the request is successful but the scanner is exhausted, returns HTTP 204 status (no content).
+Otherwise, returns HTTP 200 status and row and cell data in the response body.
+See examples from the "Cell or Row Query (Multiple Values)" section.
+<p>
+NOTE: The binary encoding option returns only one cell regardless of the
+batching parameter supplied during scanner creation. The row, column, and
+timestamp associated with the cell are transmitted as X-headers:
+<tt>X-Row</tt>, <tt>X-Column</tt>, and <tt>X-Timestamp</tt> respectively.
+<p>
+Examples:
+<p>
+<blockquote>
+<tt>
+% curl -H "Content-Type: text/xml" http://localhost:8000/content/scanner/12447063229213b1937<br>
+<br>
+HTTP/1.1 200 OK<br>
+Cache-Control: no-cache<br>
+Content-Type: text/xml<br>
+Content-Length: 589<br>
+<br>
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?><br>
+<CellSet><Row key="MDAyMDFjMTAwNjk4ZGNkYjU5MDQxNTVkZGQ3OGRlZTk="><Cell timesta<br>
+mp="1244701281234" column="Y29udGVudDpyYXc=">PCFET0NUWVBFIEhUTUwgUFVCTElDICItL<br>
+y9JRVRGLy9EVEQgSFRNTCAyLjAvL0VOIj4KPGh0bWw+PGhlYWQ+Cjx0aXRsZT40MDQgTm90IEZvdW5<br>
+kPC90aXRsZT4KPC9oZWFkPjxib2R5Pgo8aDE+Tm90IEZvdW5kPC9oMT4KPHA+VGhlIHJlcXVlc3RlZ<br>
+CBVUkwgL3JvYm90cy50eHQgd2FzIG5vdCBmb3VuZCBvbiB0aGlzIHNlcnZlci48L3A+Cjxocj4KPGF<br>
+kZHJlc3M+QXBhY2hlLzIuMi4zIChSZWQgSGF0KSBTZXJ2ZXIgYXQgd3gubWduZXR3b3JrLmNvbSBQb<br>
+3J0IDgwPC9hZGRyZXNzPgo8L2JvZHk+PC9odG1sPgo=</Cell></Row></CellSet><br>
+<br>
+% curl -H "Content-Type: application/json" http://localhost:8000/content/scanner/12447063229213b1937<br>
+<br>
+HTTP/1.1 200 OK<br>
+Cache-Control: no-cache<br>
+Content-Type: application/json<br>
+Transfer-Encoding: chunked<br>
+<br>
+{"Row":{"@key":"MDAyMDFjMTAwNjk4ZGNkYjU5MDQxNTVkZGQ3OGRlZTk=","Cell":{"@timest<br>
+amp":"1244701281234","@column":"aW5mbzpjcmF3bGVyLWh0dHB8d3gubWduZXR3b3JrLmNvbX<br>
+w4MHxyb2JvdHMudHh0LTEyNDQ3MDEyODEyMzQ=","$":"eyJpcCI6IjE5OS4xOTMuMTAuMTAxIiwib<br>
+WltZXR5cGUiOiJ0ZXh0L2h0bWw7IGNoYXJzZXQ9aXNvLTg4NTktMSIsInZpYSI6Imh0dHA6Ly93eC5<br>
+tZ25ldHdvcmsuY29tOjgwL2pzL2N1cnJlbnRzaGFuZGxlci5qcyJ9"}}}<br>
+<br>
+% curl -H "Content-Type: application/x-protobuf" http://localhost:8000/content/scanner/12447063229213b1937<br>
+<br>
+HTTP/1.1 200 OK<br>
+Content-Length: 63<br>
+Cache-Control: no-cache<br>
+Content-Type: application/x-protobuf<br>
+<br>
+000000 0a 3d 0a 20 30 30 32 30 31 63 31 30 30 36 39 38<br>
+000010 64 63 64 62 35 39 30 34 31 35 35 64 64 64 37 38<br>
+000020 64 65 65 39 12 19 12 0b 69 6e 66 6f 3a 6c 65 6e<br>
+000030 67 74 68 18 d2 97 e9 ef 9c 24 22 03 32 39 30<br>
+<br>
+% curl -H "Content-Type: application/octet-stream" http://localhost:8000/content/scanner/12447063229213b1937<br>
+<br>
+HTTP/1.1 200 OK<br>
+Content-Length: 37<br>
+Cache-Control: no-cache<br>
+X-Column: dXJsOmh0dHB8d3gubWduZXR3b3JrLmNvbXw4MHxyb2JvdHMudHh0<br>
+X-Row: MDAyMDFjMTAwNjk4ZGNkYjU5MDQxNTVkZGQ3OGRlZTk=<br>
+X-Timestamp: 1244701281234<br>
+Content-Type: application/octet-stream<br>
+<br>
+000000 68 74 74 70 3a 2f 2f 77 78 2e 6d 67 6e 65 74 77<br>
+000010 6f 72 6b 2e 63 6f 6d 3a 38 30 2f 72 6f 62 6f 74<br>
+000020 73 2e 74 78 74<br>
+</tt>
+</blockquote>
+<p>
+
+<a name="operation_scanner_delete">
+<h3>Scanner Deletion</h3>
+</a>
+<p>
+<pre>
+DELETE /<table>/scanner/<scanner-id>
+</pre>
+<p>
+Deletes resources associated with the scanner. This is an optional action.
+Scanners will expire after some globally configurable interval has elapsed
+with no activity on the scanner. If not successful, returns appropriate HTTP
+error status code. If successful, returns HTTP status 200.
+<p>
+Examples:
+<p>
+<blockquote>
+<tt>
+% telnet localhost 8000<br>
+DELETE http://localhost:8000/content/scanner/12447063229213b1937 HTTP/1.0<br>
+<br>
+HTTP/1.1 200 OK<br>
+Content-Length: 0<br>
+</tt>
+</blockquote>
+<p>
+
+<a name="operation_stateless_scanner">
+ <h3>Stateless Scanner</h3>
+</a>
+<pre>
+ GET /<table>/<optional_row_prefix>*?<scan_parameters>
+</pre>
+<p align="justify">
+ The current scanner API expects clients to restart a scan from the beginning if the REST server
+ fails mid-scan. The stateless scanner stores no server-side state related to the scan operation;
+ all parameters are specified as query parameters.
+<p>
+<p>
+ The following are the scan parameters:
+ <ol>
+ <li>startrow - The start row for the scan.</li>
+ <li>endrow - The end row for the scan.</li>
+ <li>columns - The columns to scan.</li>
+ <li>starttime, endtime - To only retrieve columns within a specific range of version timestamps,
+ both start and end time must be specified.</li>
+ <li>maxversions - To limit the number of versions of each column to be returned.</li>
+ <li>batchsize - To limit the maximum number of values returned for each call to next().</li>
+ <li>limit - The number of rows to return in the scan operation.</li>
+ </ol>
+<p>
+<p>
+ More on the start row, end row, and limit parameters:
+ <ol>
+ <li>If start row, end row, and limit are not specified, the whole table is scanned.</li>
+ <li>If a start row and a limit (say N) are specified, the scan returns N rows starting
+ from the specified start row.</li>
+ <li>If only the limit parameter (say N) is specified, the scan returns N rows from the
+ start of the table.</li>
+ <li>If a limit and an end row are specified, the scan returns up to N rows from the start
+ of the table to the end row. If the end row is reached after only M rows (M < N),
+ then M rows are returned.</li>
+ <li>If start row, end row, and limit (say N) are specified, and N is less than the number
+ of rows between the start row and the end row (say M), then the N rows starting at the
+ start row are returned; if N > M, then M rows are returned.</li>
+ </ol>
+<p>
+<p><b>Examples</b><p>
+<p>
+<blockquote>
+<pre>
+Let's say we have a table named "ExampleScanner". In the HBase shell:
+>> scan 'ExampleScanner'
+
+ROW COLUMN+CELL
+testrow1 column=a:1, timestamp=1389900769772, value=testvalue-a1
+testrow1 column=b:1, timestamp=1389900780536, value=testvalue-b1
+testrow2 column=a:1, timestamp=1389900823877, value=testvalue-a2
+testrow2 column=b:1, timestamp=1389900818233, value=testvalue-b2
+testrow3 column=a:1, timestamp=1389900847336, value=testvalue-a3
+testrow3 column=b:1, timestamp=1389900856845, value=testvalue-b3
+</pre>
+<ul>
+<li>
+<pre>
+<b>Scanning the entire table in JSON</b>
+
+curl -H "Accept: application/json" https://localhost:8080/ExampleScanner/*
+</pre>
+<p>
+<tt>
+{"Row":[{"key":"dGVzdHJvdzE=","Cell":[{"column":"YTox","timestamp":1389900769772,<br>
+"$":"dGVzdHZhbHVlLWEx"},{"column":"Yjox","timestamp":1389900780536,"$":"dGVzdHZhbHVlLWIx"}]},<br>
+{"key":"dGVzdHJvdzI=","Cell":[{"column":"YTox","timestamp":1389900823877,"$":"dGVzdHZhbHVlLWEy"}<br>
+{"column":"Yjox","timestamp":1389900818233,"$":"dGVzdHZhbHVlLWIy"}]},{"key":"dGVzdHJvdzM=",<br>
+"Cell":[{"column":"YTox","timestamp":1389900847336,"$":"dGVzdHZhbHVlLWEz"},{"column":"Yjox",<br>
+"timestamp":1389900856845,"$":"dGVzdHZhbHVlLWIz"}]}]}<br>
+</tt>
+<p>
+</li>
+<li>
+<pre>
+<b>Scanning the entire table in XML</b>
+
+curl -H "Content-Type: text/xml" https://localhost:8080/ExampleScanner/*
+</pre>
+<p>
+<tt>
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?><CellSet><<br>
+Row key="dGVzdHJvdzE="><Cell column="YTox" timestamp="1389900769772">dGVzdHZhbHVlLWEx<<br>
+/Cell><Cell column="Yjox"timestamp="1389900780536">dGVzdHZhbHVlLWIx</Cell><<br>
+/Row><Row key="dGVzdHJvdzI="><Cell column="YTox" timestamp="1389900823877"><br>
+dGVzdHZhbHVlLWEy</Cell><Cell column="Yjox" timestamp="1389900818233">dGVzdHZhbHVlLWIy<<br>
+/Cell></Row><Row key="dGVzdHJvdzM="><Cell column="YTox" timestamp="1389900847336<br>
+">dGVzdHZhbHVlLWEz</Cell><Cell column="Yjox"timestamp="1389900856845"><br>
+dGVzdHZhbHVlLWIz</Cell></Row></CellSet><br>
+</tt>
+<p>
+</li>
+<li>
+<pre>
+<b>Scanning the entire table in binary</b>
+
+curl -H "Accept: application/protobuf" https://localhost:8080/ExampleScanner/*
+
+^@Ì
+B
+^Htestrow1^R^Z^R^Ca:1^XìÓªä¹("^Ltestvalue-a1^R^Z^R^Cb:1^Xø§«ä¹("^Ltestvalue-b1
+B
+^Htestrow2^R^Z^R^Ca:1^XÅúä¹("^Ltestvalue-a2^R^Z^R^Cb:1^X¹Îä¹("^Ltestvalue-b2
+B
+^Htestrow3^R^Z^R^Ca:1^X豯ä¹("^Ltestvalue-a3^R^Z^R^Cb:1^X<8d>ü¯ä¹("^Ltestvalue-b3
+</pre>
+</li>
+<li>
+<pre>
+<b>Scanning the first row of the table</b>
+
+curl -H "Content-Type: text/xml" https://localhost:8080/ExampleScanner/*?limit=1
+</pre>
+<p>
+<tt>
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?><CellSet><<br>
+Row key="dGVzdHJvdzE="><Cell column="YTox" timestamp="1389900769772"><br>
+dGVzdHZhbHVlLWEx</Cell><Cell column="Yjox" timestamp="1389900780536"><br>
+dGVzdHZhbHVlLWIx</Cell></Row></CellSet><br>
+</tt>
+<p>
+</li>
+<li>
+<pre>
+<b>Scanning a given column of the table</b>
+
+curl -H "Content-Type: text/xml" https://localhost:8080/ExampleScanner/*?columns=a:1
+</pre>
+<p>
+<tt>
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?><CellSet><<br>
+Row key="dGVzdHJvdzE="><Cell column="YTox" timestamp="1389900769772">dGVzdHZhbHVlLWEx<<br>
+/Cell></Row><Row key="dGVzdHJvdzI="><Cell column="YTox" timestamp=<br>
+"1389900823877">dGVzdHZhbHVlLWEy</Cell></Row><Row key="dGVzdHJvdzM="><<br>
+Cell column="YTox" timestamp="1389900847336">dGVzdHZhbHVlLWEz</Cell><<br>
+/Row></CellSet><br>
+</tt>
+<p>
+</li>
+<li>
+<pre>
+<b>Scanning more than one column of the table</b>
+
+curl -H "Content-Type: text/xml" https://localhost:8080/ExampleScanner/*?columns=a:1,b:1
+</pre>
+<p>
+<tt>
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?><CellSet><<br>
+Row key="dGVzdHJvdzE="><Cell column="YTox" timestamp="1389900769772"><br>
+dGVzdHZhbHVlLWEx</Cell><Cell column="Yjox" timestamp="1389900780536"><br>
+dGVzdHZhbHVlLWIx</Cell></Row><Row key="dGVzdHJvdzI="><<br>
+Cell column="YTox" timestamp="1389900823877">dGVzdHZhbHVlLWEy</Cell><<br>
+Cell column="Yjox"timestamp="1389900818233">dGVzdHZhbHVlLWIy</Cell><<br>
+/Row><Row key="dGVzdHJvdzM="><Cell column="YTox" timestamp="1389900847336"><br>
+dGVzdHZhbHVlLWEz</Cell><Cell column="Yjox" timestamp="1389900856845"><br>
+dGVzdHZhbHVlLWIz</Cell></Row></CellSet><br>
+</tt>
+<p>
+</li>
+<li>
+<pre>
+<b>Scanning the table with a start row and limit</b>
+
+curl -H "Content-Type: text/xml" https://localhost:8080/ExampleScanner/*?startrow=testrow1&limit=2
+</pre>
+<p>
+<tt>
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?><CellSet><<br>
+Row key="dGVzdHJvdzE="><Cell column="YTox" timestamp="1389900769772">dGVzdHZhbHVlLWEx<<br>
+/Cell><Cell column="Yjox"timestamp="1389900780536">dGVzdHZhbHVlLWIx</Cell><<br>
+/Row><Row key="dGVzdHJvdzI="><Cell column="YTox" timestamp="1389900823877"><br>
+dGVzdHZhbHVlLWEy</Cell><Cell column="Yjox"<br>
+timestamp="1389900818233">dGVzdHZhbHVlLWIy</Cell></Row></CellSet><br>
+</tt>
+<p>
+</li>
+<li>
+<pre>
+<b>Scanning with start and end time</b>
+
+curl -H "Content-Type: text/xml" https://localhost:8080/ExampleScanner/*?starttime=1389900769772&endtime=1389900800000
+</pre>
+<p>
+<tt>
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?><CellSet><<br>
+Row key="dGVzdHJvdzE="><Cell column="YTox" timestamp="1389900769772">dGVzdHZhbHVlLWEx<<br>
+/Cell><Cell column="Yjox"timestamp="1389900780536">dGVzdHZhbHVlLWIx</Cell><<br>
+/Row></CellSet><br>
+</tt>
+<p>
+</li>
+<li>
+<pre>
+<b>Scanning with row prefix</b>
+
+curl -H "Content-Type: text/xml" https://localhost:8080/ExampleScanner/test*
+</pre>
+<p>
+<tt>
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?><CellSet><<br>
+Row key="dGVzdHJvdzE="><Cell column="YTox" timestamp="1389900769772">dGVzdHZhbHVlLWEx<<br>
+/Cell><Cell column="Yjox"timestamp="1389900780536">dGVzdHZhbHVlLWIx</Cell><<br>
+/Row><Row key="dGVzdHJvdzI="><Cell column="YTox" timestamp="1389900823877"><br>
+dGVzdHZhbHVlLWEy</Cell><Cell column="Yjox" timestamp="1389900818233"><br>
+dGVzdHZhbHVlLWIy</Cell></Row><Row key="dGVzdHJvdzM="><<br>
+Cell column="YTox" timestamp="1389900847336">dGVzdHZhbHVlLWEz</Cell><<br>
+Cell column="Yjox"timestamp="1389900856845">dGVzdHZhbHVlLWIz</Cell><<br>
+/Row></CellSet><br>
+</tt>
+<p>
+</li>
+</ul>
+</blockquote>
+</p>
+
+<p>
+<a name="xmlschema">
+<h2>XML Schema</h2>
+</a>
+<p>
+<pre>
+<schema targetNamespace="StargateSchema" elementFormDefault="qualified"
+xmlns="http://www.w3.org/2001/XMLSchema" xmlns:tns="StargateSchema">
+
+ <element name="CellSet" type="tns:CellSet"></element>
+
+ <complexType name="CellSet">
+ <sequence>
+ <element name="row" type="tns:Row" maxOccurs="unbounded" minOccurs="1"></element>
+ </sequence>
+ </complexType>
+
+ <complexType name="Row">
+ <sequence>
+ <element name="key" type="base64Binary"></element>
+ <element name="cell" type="tns:Cell" maxOccurs="unbounded" minOccurs="1"></element>
+ </sequence>
+ </complexType>
+
+ <complexType name="Cell">
+ <sequence>
+ <element name="value" maxOccurs="1" minOccurs="1"><simpleType><restriction base="base64Binary"></restriction></simpleType></element>
+ </sequence>
+ <attribute name="column" type="base64Binary" />
+ <attribute name="timestamp" type="int" />
+ </complexType>
+
+ <element name="Version" type="tns:Version"></element>
+
+ <complexType name="Version">
+ <attribute name="Stargate" type="string"></attribute>
+ <attribute name="JVM" type="string"></attribute>
+ <attribute name="OS" type="string"></attribute>
+ <attribute name="Server" type="string"></attribute>
+ <attribute name="Jersey" type="string"></attribute>
+ </complexType>
+
+
+ <element name="TableList" type="tns:TableList"></element>
+
+ <complexType name="TableList">
+ <sequence>
+ <element name="table" type="tns:Table" maxOccurs="unbounded" minOccurs="1"></element>
+ </sequence>
+ </complexType>
+
+ <complexType name="Table">
+ <sequence>
+ <element name="name" type="string"></element>
+ </sequence>
+ </complexType>
+
+ <element name="TableInfo" type="tns:TableInfo"></element>
+
+ <complexType name="TableInfo">
+ <sequence>
+ <element name="region" type="tns:TableRegion" maxOccurs="unbounded" minOccurs="1"></element>
+ </sequence>
+ <attribute name="name" type="string"></attribute>
+ </complexType>
+
+ <complexType name="TableRegion">
+ <attribute name="name" type="string"></attribute>
+ <attribute name="id" type="int"></attribute>
+ <attribute name="startKey" type="base64Binary"></attribute>
+ <attribute name="endKey" type="base64Binary"></attribute>
+ <attribute name="location" type="string"></attribute>
+ </complexType>
+
+ <element name="TableSchema" type="tns:TableSchema"></element>
+
+ <complexType name="TableSchema">
+ <sequence>
+ <element name="column" type="tns:ColumnSchema" maxOccurs="unbounded" minOccurs="1"></element>
+ </sequence>
+ <attribute name="name" type="string"></attribute>
+ <anyAttribute></anyAttribute>
+ </complexType>
+
+ <complexType name="ColumnSchema">
+ <attribute name="name" type="string"></attribute>
+ <anyAttribute></anyAttribute>
+ </complexType>
+
+ <element name="Scanner" type="tns:Scanner"></element>
+
+ <complexType name="Scanner">
+ <attribute name="startRow" type="base64Binary"></attribute>
+ <attribute name="endRow" type="base64Binary"></attribute>
+ <attribute name="columns" type="base64Binary"></attribute>
+ <attribute name="batch" type="int"></attribute>
+ <attribute name="startTime" type="int"></attribute>
+ <attribute name="endTime" type="int"></attribute>
+ </complexType>
+
+ <element name="StorageClusterVersion"
+ type="tns:StorageClusterVersion">
+ </element>
+
+ <complexType name="StorageClusterVersion">
+ <attribute name="version" type="string"></attribute>
+ </complexType>
+
+ <element name="StorageClusterStatus"
+ type="tns:StorageClusterStatus">
+ </element>
+
+ <complexType name="StorageClusterStatus">
+ <sequence>
+ <element name="liveNode" type="tns:Node"
+ maxOccurs="unbounded" minOccurs="0">
+ </element>
+ <element name="deadNode" type="string" maxOccurs="unbounded"
+ minOccurs="0">
+ </element>
+ </sequence>
+ <attribute name="regions" type="int"></attribute>
+ <attribute name="requests" type="int"></attribute>
+ <attribute name="averageLoad" type="float"></attribute>
+ </complexType>
+
+ <complexType name="Node">
+ <sequence>
+ <element name="region" type="tns:Region" maxOccurs="unbounded" minOccurs="0"></element>
+ </sequence>
+ <attribute name="name" type="string"></attribute>
+ <attribute name="startCode" type="int"></attribute>
+ <attribute name="requests" type="int"></attribute>
+ <attribute name="heapSizeMB" type="int"></attribute>
+ <attribute name="maxHeapSizeMB" type="int"></attribute>
+ </complexType>
+
+ <complexType name="Region">
+ <attribute name="name" type="base64Binary"></attribute>
+ <attribute name="stores" type="int"></attribute>
+ <attribute name="storefiles" type="int"></attribute>
+ <attribute name="storefileSizeMB" type="int"></attribute>
+ <attribute name="memstoreSizeMB" type="int"></attribute>
+ <attribute name="storefileIndexSizeMB" type="int"></attribute>
+ </complexType>
+</schema>
+</pre>
+
+<p>
+<a name="pbufschema">
+<h2>Protobufs Schema</h2>
+</a>
+<p>
+<pre>
+message Version {
+ optional string stargateVersion = 1;
+ optional string jvmVersion = 2;
+ optional string osVersion = 3;
+ optional string serverVersion = 4;
+ optional string jerseyVersion = 5;
+}
+
+message StorageClusterStatus {
+ message Region {
+ required bytes name = 1;
+ optional int32 stores = 2;
+ optional int32 storefiles = 3;
+ optional int32 storefileSizeMB = 4;
+ optional int32 memstoreSizeMB = 5;
+ optional int32 storefileIndexSizeMB = 6;
+ }
+ message Node {
+ required string name = 1; // name:port
+ optional int64 startCode = 2;
+ optional int32 requests = 3;
+ optional int32 heapSizeMB = 4;
+ optional int32 maxHeapSizeMB = 5;
+ repeated Region regions = 6;
+ }
+ // node status
+ repeated Node liveNodes = 1;
+ repeated string deadNodes = 2;
+ // summary statistics
+ optional int32 regions = 3;
+ optional int32 requests = 4;
+ optional double averageLoad = 5;
+}
+
+message TableList {
+ repeated string name = 1;
+}
+
+message TableInfo {
+ required string name = 1;
+ message Region {
+ required string name = 1;
+ optional bytes startKey = 2;
+ optional bytes endKey = 3;
+ optional int64 id = 4;
+ optional string location = 5;
+ }
+ repeated Region regions = 2;
+}
+
+message TableSchema {
+ optional string name = 1;
+ message Attribute {
+ required string name = 1;
+ required string value = 2;
+ }
+ repeated Attribute attrs = 2;
+ repeated ColumnSchema columns = 3;
+ // optional helpful encodings of commonly used attributes
+ optional bool inMemory = 4;
+ optional bool readOnly = 5;
+}
+
+message ColumnSchema {
+ optional string name = 1;
+ message Attribute {
+ required string name = 1;
+ required string value = 2;
+ }
+ repeated Attribute attrs = 2;
+ // optional helpful encodings of commonly used attributes
+ optional int32 ttl = 3;
+ optional int32 maxVersions = 4;
+ optional string compression = 5;
+}
+
+message Cell {
+ optional bytes row = 1; // unused if Cell is in a CellSet
+ optional bytes column = 2;
+ optional int64 timestamp = 3;
+ optional bytes data = 4;
+}
+
+message CellSet {
+ message Row {
+ required bytes key = 1;
+ repeated Cell values = 2;
+ }
+ repeated Row rows = 1;
+}
+
+message Scanner {
+ optional bytes startRow = 1;
+ optional bytes endRow = 2;
+ repeated bytes columns = 3;
+ optional int32 batch = 4;
+ optional int64 startTime = 5;
+ optional int64 endTime = 6;
+}
+</pre>
+
+</body>
+</html>
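
The XML schema and protobuf messages above define the wire formats served by the REST gateway (protobuf responses are typically requested with the application/x-protobuf media type; treat that media type as an assumption here, since it is not part of this diff). As a minimal sketch, not part of this commit, the Cell message can be built and round-tripped with the generated CellMessage class added below; the row, column, timestamp, and value bytes are made-up placeholders:

import com.google.protobuf.ByteString;

import org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell;

public class CellRoundTripSketch {
  public static void main(String[] args) throws Exception {
    // All field values below are illustrative placeholders, not real data.
    Cell cell = Cell.newBuilder()
        .setRow(ByteString.copyFromUtf8("row1"))       // unused when the Cell sits inside a CellSet
        .setColumn(ByteString.copyFromUtf8("cf:qual"))
        .setTimestamp(1245219839331L)
        .setData(ByteString.copyFromUtf8("value"))
        .build();

    byte[] wire = cell.toByteArray();                  // standard protobuf serialization
    Cell decoded = Cell.parseFrom(wire);               // generated parser; see CellMessage.java below
    System.out.println(decoded.getTimestamp() == cell.getTimestamp());  // prints true
  }
}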
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellMessage.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellMessage.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellMessage.java
new file mode 100644
index 0000000..4c859e1
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellMessage.java
@@ -0,0 +1,731 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: CellMessage.proto
+
+package org.apache.hadoop.hbase.rest.protobuf.generated;
+
+public final class CellMessage {
+ private CellMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface CellOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional bytes row = 1;
+ /**
+ * <code>optional bytes row = 1;</code>
+ *
+ * <pre>
+ * unused if Cell is in a CellSet
+ * </pre>
+ */
+ boolean hasRow();
+ /**
+ * <code>optional bytes row = 1;</code>
+ *
+ * <pre>
+ * unused if Cell is in a CellSet
+ * </pre>
+ */
+ com.google.protobuf.ByteString getRow();
+
+ // optional bytes column = 2;
+ /**
+ * <code>optional bytes column = 2;</code>
+ */
+ boolean hasColumn();
+ /**
+ * <code>optional bytes column = 2;</code>
+ */
+ com.google.protobuf.ByteString getColumn();
+
+ // optional int64 timestamp = 3;
+ /**
+ * <code>optional int64 timestamp = 3;</code>
+ */
+ boolean hasTimestamp();
+ /**
+ * <code>optional int64 timestamp = 3;</code>
+ */
+ long getTimestamp();
+
+ // optional bytes data = 4;
+ /**
+ * <code>optional bytes data = 4;</code>
+ */
+ boolean hasData();
+ /**
+ * <code>optional bytes data = 4;</code>
+ */
+ com.google.protobuf.ByteString getData();
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.Cell}
+ */
+ public static final class Cell extends
+ com.google.protobuf.GeneratedMessage
+ implements CellOrBuilder {
+ // Use Cell.newBuilder() to construct.
+ private Cell(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Cell(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Cell defaultInstance;
+ public static Cell getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Cell getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Cell(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ row_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ column_ = input.readBytes();
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ timestamp_ = input.readInt64();
+ break;
+ }
+ case 34: {
+ bitField0_ |= 0x00000008;
+ data_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.class, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Cell> PARSER =
+ new com.google.protobuf.AbstractParser<Cell>() {
+ public Cell parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Cell(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Cell> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // optional bytes row = 1;
+ public static final int ROW_FIELD_NUMBER = 1;
+ private com.google.protobuf.ByteString row_;
+ /**
+ * <code>optional bytes row = 1;</code>
+ *
+ * <pre>
+ * unused if Cell is in a CellSet
+ * </pre>
+ */
+ public boolean hasRow() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional bytes row = 1;</code>
+ *
+ * <pre>
+ * unused if Cell is in a CellSet
+ * </pre>
+ */
+ public com.google.protobuf.ByteString getRow() {
+ return row_;
+ }
+
+ // optional bytes column = 2;
+ public static final int COLUMN_FIELD_NUMBER = 2;
+ private com.google.protobuf.ByteString column_;
+ /**
+ * <code>optional bytes column = 2;</code>
+ */
+ public boolean hasColumn() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional bytes column = 2;</code>
+ */
+ public com.google.protobuf.ByteString getColumn() {
+ return column_;
+ }
+
+ // optional int64 timestamp = 3;
+ public static final int TIMESTAMP_FIELD_NUMBER = 3;
+ private long timestamp_;
+ /**
+ * <code>optional int64 timestamp = 3;</code>
+ */
+ public boolean hasTimestamp() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional int64 timestamp = 3;</code>
+ */
+ public long getTimestamp() {
+ return timestamp_;
+ }
+
+ // optional bytes data = 4;
+ public static final int DATA_FIELD_NUMBER = 4;
+ private com.google.protobuf.ByteString data_;
+ /**
+ * <code>optional bytes data = 4;</code>
+ */
+ public boolean hasData() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional bytes data = 4;</code>
+ */
+ public com.google.protobuf.ByteString getData() {
+ return data_;
+ }
+
+ private void initFields() {
+ row_ = com.google.protobuf.ByteString.EMPTY;
+ column_ = com.google.protobuf.ByteString.EMPTY;
+ timestamp_ = 0L;
+ data_ = com.google.protobuf.ByteString.EMPTY;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, row_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, column_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeInt64(3, timestamp_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeBytes(4, data_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, row_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, column_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(3, timestamp_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(4, data_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.Cell}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.CellOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.class, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ row_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ column_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ timestamp_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ data_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell build() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell buildPartial() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell result = new org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.row_ = row_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.column_ = column_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.timestamp_ = timestamp_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.data_ = data_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell) {
+ return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell other) {
+ if (other == org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.getDefaultInstance()) return this;
+ if (other.hasRow()) {
+ setRow(other.getRow());
+ }
+ if (other.hasColumn()) {
+ setColumn(other.getColumn());
+ }
+ if (other.hasTimestamp()) {
+ setTimestamp(other.getTimestamp());
+ }
+ if (other.hasData()) {
+ setData(other.getData());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // optional bytes row = 1;
+ private com.google.protobuf.ByteString row_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * <code>optional bytes row = 1;</code>
+ *
+ * <pre>
+ * unused if Cell is in a CellSet
+ * </pre>
+ */
+ public boolean hasRow() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional bytes row = 1;</code>
+ *
+ * <pre>
+ * unused if Cell is in a CellSet
+ * </pre>
+ */
+ public com.google.protobuf.ByteString getRow() {
+ return row_;
+ }
+ /**
+ * <code>optional bytes row = 1;</code>
+ *
+ * <pre>
+ * unused if Cell is in a CellSet
+ * </pre>
+ */
+ public Builder setRow(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ row_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional bytes row = 1;</code>
+ *
+ * <pre>
+ * unused if Cell is in a CellSet
+ * </pre>
+ */
+ public Builder clearRow() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ row_ = getDefaultInstance().getRow();
+ onChanged();
+ return this;
+ }
+
+ // optional bytes column = 2;
+ private com.google.protobuf.ByteString column_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * <code>optional bytes column = 2;</code>
+ */
+ public boolean hasColumn() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional bytes column = 2;</code>
+ */
+ public com.google.protobuf.ByteString getColumn() {
+ return column_;
+ }
+ /**
+ * <code>optional bytes column = 2;</code>
+ */
+ public Builder setColumn(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ column_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional bytes column = 2;</code>
+ */
+ public Builder clearColumn() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ column_ = getDefaultInstance().getColumn();
+ onChanged();
+ return this;
+ }
+
+ // optional int64 timestamp = 3;
+ private long timestamp_ ;
+ /**
+ * <code>optional int64 timestamp = 3;</code>
+ */
+ public boolean hasTimestamp() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional int64 timestamp = 3;</code>
+ */
+ public long getTimestamp() {
+ return timestamp_;
+ }
+ /**
+ * <code>optional int64 timestamp = 3;</code>
+ */
+ public Builder setTimestamp(long value) {
+ bitField0_ |= 0x00000004;
+ timestamp_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int64 timestamp = 3;</code>
+ */
+ public Builder clearTimestamp() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ timestamp_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional bytes data = 4;
+ private com.google.protobuf.ByteString data_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * <code>optional bytes data = 4;</code>
+ */
+ public boolean hasData() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional bytes data = 4;</code>
+ */
+ public com.google.protobuf.ByteString getData() {
+ return data_;
+ }
+ /**
+ * <code>optional bytes data = 4;</code>
+ */
+ public Builder setData(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000008;
+ data_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional bytes data = 4;</code>
+ */
+ public Builder clearData() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ data_ = getDefaultInstance().getData();
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.Cell)
+ }
+
+ static {
+ defaultInstance = new Cell(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.Cell)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\021CellMessage.proto\022/org.apache.hadoop.h" +
+ "base.rest.protobuf.generated\"D\n\004Cell\022\013\n\003" +
+ "row\030\001 \001(\014\022\016\n\006column\030\002 \001(\014\022\021\n\ttimestamp\030\003" +
+ " \001(\003\022\014\n\004data\030\004 \001(\014"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_descriptor,
+ new java.lang.String[] { "Row", "Column", "Timestamp", "Data", });
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
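
Because this class is compiler-generated, its surface is the standard protobuf-java builder API. As a small hedged sketch, again not part of the commit, several Cells can be written length-prefixed onto one stream with writeDelimitedTo (a standard protobuf-java method, assumed available here) and read back with the parseDelimitedFrom method generated above; all values are made-up examples:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

import com.google.protobuf.ByteString;

import org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell;

public class CellStreamSketch {
  public static void main(String[] args) throws Exception {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    for (int i = 0; i < 3; i++) {
      Cell.newBuilder()
          .setColumn(ByteString.copyFromUtf8("cf:qual"))
          .setTimestamp(1000L + i)
          .setData(ByteString.copyFromUtf8("value-" + i))
          .build()
          .writeDelimitedTo(out);                      // length-prefixed framing, one message per record
    }
    ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
    Cell c;
    while ((c = Cell.parseDelimitedFrom(in)) != null) {  // returns null at clean end-of-stream
      System.out.println(c.getTimestamp() + " -> " + c.getData().toStringUtf8());
    }
  }
}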
[14/38] HBASE-12197 Move rest to its own module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellMessage.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellMessage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellMessage.java
deleted file mode 100644
index 4c859e1..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellMessage.java
+++ /dev/null
@@ -1,731 +0,0 @@
-// Generated by the protocol buffer compiler. DO NOT EDIT!
-// source: CellMessage.proto
-
-package org.apache.hadoop.hbase.rest.protobuf.generated;
-
-public final class CellMessage {
- private CellMessage() {}
- public static void registerAllExtensions(
- com.google.protobuf.ExtensionRegistry registry) {
- }
- public interface CellOrBuilder
- extends com.google.protobuf.MessageOrBuilder {
-
- // optional bytes row = 1;
- /**
- * <code>optional bytes row = 1;</code>
- *
- * <pre>
- * unused if Cell is in a CellSet
- * </pre>
- */
- boolean hasRow();
- /**
- * <code>optional bytes row = 1;</code>
- *
- * <pre>
- * unused if Cell is in a CellSet
- * </pre>
- */
- com.google.protobuf.ByteString getRow();
-
- // optional bytes column = 2;
- /**
- * <code>optional bytes column = 2;</code>
- */
- boolean hasColumn();
- /**
- * <code>optional bytes column = 2;</code>
- */
- com.google.protobuf.ByteString getColumn();
-
- // optional int64 timestamp = 3;
- /**
- * <code>optional int64 timestamp = 3;</code>
- */
- boolean hasTimestamp();
- /**
- * <code>optional int64 timestamp = 3;</code>
- */
- long getTimestamp();
-
- // optional bytes data = 4;
- /**
- * <code>optional bytes data = 4;</code>
- */
- boolean hasData();
- /**
- * <code>optional bytes data = 4;</code>
- */
- com.google.protobuf.ByteString getData();
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.Cell}
- */
- public static final class Cell extends
- com.google.protobuf.GeneratedMessage
- implements CellOrBuilder {
- // Use Cell.newBuilder() to construct.
- private Cell(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private Cell(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final Cell defaultInstance;
- public static Cell getDefaultInstance() {
- return defaultInstance;
- }
-
- public Cell getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private Cell(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 10: {
- bitField0_ |= 0x00000001;
- row_ = input.readBytes();
- break;
- }
- case 18: {
- bitField0_ |= 0x00000002;
- column_ = input.readBytes();
- break;
- }
- case 24: {
- bitField0_ |= 0x00000004;
- timestamp_ = input.readInt64();
- break;
- }
- case 34: {
- bitField0_ |= 0x00000008;
- data_ = input.readBytes();
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.class, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder.class);
- }
-
- public static com.google.protobuf.Parser<Cell> PARSER =
- new com.google.protobuf.AbstractParser<Cell>() {
- public Cell parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new Cell(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<Cell> getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- // optional bytes row = 1;
- public static final int ROW_FIELD_NUMBER = 1;
- private com.google.protobuf.ByteString row_;
- /**
- * <code>optional bytes row = 1;</code>
- *
- * <pre>
- * unused if Cell is in a CellSet
- * </pre>
- */
- public boolean hasRow() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional bytes row = 1;</code>
- *
- * <pre>
- * unused if Cell is in a CellSet
- * </pre>
- */
- public com.google.protobuf.ByteString getRow() {
- return row_;
- }
-
- // optional bytes column = 2;
- public static final int COLUMN_FIELD_NUMBER = 2;
- private com.google.protobuf.ByteString column_;
- /**
- * <code>optional bytes column = 2;</code>
- */
- public boolean hasColumn() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional bytes column = 2;</code>
- */
- public com.google.protobuf.ByteString getColumn() {
- return column_;
- }
-
- // optional int64 timestamp = 3;
- public static final int TIMESTAMP_FIELD_NUMBER = 3;
- private long timestamp_;
- /**
- * <code>optional int64 timestamp = 3;</code>
- */
- public boolean hasTimestamp() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
- }
- /**
- * <code>optional int64 timestamp = 3;</code>
- */
- public long getTimestamp() {
- return timestamp_;
- }
-
- // optional bytes data = 4;
- public static final int DATA_FIELD_NUMBER = 4;
- private com.google.protobuf.ByteString data_;
- /**
- * <code>optional bytes data = 4;</code>
- */
- public boolean hasData() {
- return ((bitField0_ & 0x00000008) == 0x00000008);
- }
- /**
- * <code>optional bytes data = 4;</code>
- */
- public com.google.protobuf.ByteString getData() {
- return data_;
- }
-
- private void initFields() {
- row_ = com.google.protobuf.ByteString.EMPTY;
- column_ = com.google.protobuf.ByteString.EMPTY;
- timestamp_ = 0L;
- data_ = com.google.protobuf.ByteString.EMPTY;
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized != -1) return isInitialized == 1;
-
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBytes(1, row_);
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeBytes(2, column_);
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- output.writeInt64(3, timestamp_);
- }
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
- output.writeBytes(4, data_);
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(1, row_);
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(2, column_);
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt64Size(3, timestamp_);
- }
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(4, data_);
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.Cell}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.CellOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.class, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder.class);
- }
-
- // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- row_ = com.google.protobuf.ByteString.EMPTY;
- bitField0_ = (bitField0_ & ~0x00000001);
- column_ = com.google.protobuf.ByteString.EMPTY;
- bitField0_ = (bitField0_ & ~0x00000002);
- timestamp_ = 0L;
- bitField0_ = (bitField0_ & ~0x00000004);
- data_ = com.google.protobuf.ByteString.EMPTY;
- bitField0_ = (bitField0_ & ~0x00000008);
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_descriptor;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell getDefaultInstanceForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.getDefaultInstance();
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell build() {
- org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell buildPartial() {
- org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell result = new org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.row_ = row_;
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
- to_bitField0_ |= 0x00000002;
- }
- result.column_ = column_;
- if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
- to_bitField0_ |= 0x00000004;
- }
- result.timestamp_ = timestamp_;
- if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
- to_bitField0_ |= 0x00000008;
- }
- result.data_ = data_;
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell) {
- return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell other) {
- if (other == org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.getDefaultInstance()) return this;
- if (other.hasRow()) {
- setRow(other.getRow());
- }
- if (other.hasColumn()) {
- setColumn(other.getColumn());
- }
- if (other.hasTimestamp()) {
- setTimestamp(other.getTimestamp());
- }
- if (other.hasData()) {
- setData(other.getData());
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- // optional bytes row = 1;
- private com.google.protobuf.ByteString row_ = com.google.protobuf.ByteString.EMPTY;
- /**
- * <code>optional bytes row = 1;</code>
- *
- * <pre>
- * unused if Cell is in a CellSet
- * </pre>
- */
- public boolean hasRow() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional bytes row = 1;</code>
- *
- * <pre>
- * unused if Cell is in a CellSet
- * </pre>
- */
- public com.google.protobuf.ByteString getRow() {
- return row_;
- }
- /**
- * <code>optional bytes row = 1;</code>
- *
- * <pre>
- * unused if Cell is in a CellSet
- * </pre>
- */
- public Builder setRow(com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- row_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional bytes row = 1;</code>
- *
- * <pre>
- * unused if Cell is in a CellSet
- * </pre>
- */
- public Builder clearRow() {
- bitField0_ = (bitField0_ & ~0x00000001);
- row_ = getDefaultInstance().getRow();
- onChanged();
- return this;
- }
-
- // optional bytes column = 2;
- private com.google.protobuf.ByteString column_ = com.google.protobuf.ByteString.EMPTY;
- /**
- * <code>optional bytes column = 2;</code>
- */
- public boolean hasColumn() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional bytes column = 2;</code>
- */
- public com.google.protobuf.ByteString getColumn() {
- return column_;
- }
- /**
- * <code>optional bytes column = 2;</code>
- */
- public Builder setColumn(com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000002;
- column_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional bytes column = 2;</code>
- */
- public Builder clearColumn() {
- bitField0_ = (bitField0_ & ~0x00000002);
- column_ = getDefaultInstance().getColumn();
- onChanged();
- return this;
- }
-
- // optional int64 timestamp = 3;
- private long timestamp_ ;
- /**
- * <code>optional int64 timestamp = 3;</code>
- */
- public boolean hasTimestamp() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
- }
- /**
- * <code>optional int64 timestamp = 3;</code>
- */
- public long getTimestamp() {
- return timestamp_;
- }
- /**
- * <code>optional int64 timestamp = 3;</code>
- */
- public Builder setTimestamp(long value) {
- bitField0_ |= 0x00000004;
- timestamp_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional int64 timestamp = 3;</code>
- */
- public Builder clearTimestamp() {
- bitField0_ = (bitField0_ & ~0x00000004);
- timestamp_ = 0L;
- onChanged();
- return this;
- }
-
- // optional bytes data = 4;
- private com.google.protobuf.ByteString data_ = com.google.protobuf.ByteString.EMPTY;
- /**
- * <code>optional bytes data = 4;</code>
- */
- public boolean hasData() {
- return ((bitField0_ & 0x00000008) == 0x00000008);
- }
- /**
- * <code>optional bytes data = 4;</code>
- */
- public com.google.protobuf.ByteString getData() {
- return data_;
- }
- /**
- * <code>optional bytes data = 4;</code>
- */
- public Builder setData(com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000008;
- data_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional bytes data = 4;</code>
- */
- public Builder clearData() {
- bitField0_ = (bitField0_ & ~0x00000008);
- data_ = getDefaultInstance().getData();
- onChanged();
- return this;
- }
-
- // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.Cell)
- }
-
- static {
- defaultInstance = new Cell(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.Cell)
- }
-
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_fieldAccessorTable;
-
- public static com.google.protobuf.Descriptors.FileDescriptor
- getDescriptor() {
- return descriptor;
- }
- private static com.google.protobuf.Descriptors.FileDescriptor
- descriptor;
- static {
- java.lang.String[] descriptorData = {
- "\n\021CellMessage.proto\022/org.apache.hadoop.h" +
- "base.rest.protobuf.generated\"D\n\004Cell\022\013\n\003" +
- "row\030\001 \001(\014\022\016\n\006column\030\002 \001(\014\022\021\n\ttimestamp\030\003" +
- " \001(\003\022\014\n\004data\030\004 \001(\014"
- };
- com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
- new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
- public com.google.protobuf.ExtensionRegistry assignDescriptors(
- com.google.protobuf.Descriptors.FileDescriptor root) {
- descriptor = root;
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_descriptor =
- getDescriptor().getMessageTypes().get(0);
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_descriptor,
- new java.lang.String[] { "Row", "Column", "Timestamp", "Data", });
- return null;
- }
- };
- com.google.protobuf.Descriptors.FileDescriptor
- .internalBuildGeneratedFileFrom(descriptorData,
- new com.google.protobuf.Descriptors.FileDescriptor[] {
- }, assigner);
- }
-
- // @@protoc_insertion_point(outer_class_scope)
-}
[05/38] HBASE-12197 Move rest to its own module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
deleted file mode 100644
index bd65bc4..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
+++ /dev/null
@@ -1,1520 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.rest;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.lang.reflect.Constructor;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Date;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.TreeMap;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.CompareFilter;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-import org.apache.hadoop.hbase.filter.WhileMatchFilter;
-import org.apache.hadoop.hbase.io.compress.Compression;
-import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-import org.apache.hadoop.hbase.rest.client.Client;
-import org.apache.hadoop.hbase.rest.client.Cluster;
-import org.apache.hadoop.hbase.rest.client.RemoteAdmin;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Hash;
-import org.apache.hadoop.hbase.util.MurmurHash;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.JobContext;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.RecordReader;
-import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
-import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
-import org.apache.hadoop.util.LineReader;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-/**
- * Script used for evaluating Stargate performance and scalability. Runs a SG
- * client that steps through one of a set of hardcoded tests or 'experiments'
- * (e.g. a random reads test, a random writes test, etc.). Pass on the
- * command-line which test to run and how many clients are participating in
- * this experiment. Run <code>java PerformanceEvaluation --help</code> to
- * obtain usage.
- *
- * <p>This class sets up and runs the evaluation programs described in
- * Section 7, <i>Performance Evaluation</i>, of the <a
- * href="http://labs.google.com/papers/bigtable.html">Bigtable</a>
- * paper, pages 8-10.
- *
- * <p>If the number of clients is > 1, we start up a MapReduce job. Each map task
- * runs an individual client. Each client processes about 1GB of data.
- */
-public class PerformanceEvaluation extends Configured implements Tool {
- protected static final Log LOG = LogFactory.getLog(PerformanceEvaluation.class.getName());
-
- private static final int DEFAULT_ROW_PREFIX_LENGTH = 16;
- private static final int ROW_LENGTH = 1000;
- private static final int TAG_LENGTH = 256;
- private static final int ONE_GB = 1024 * 1024 * 1000;
- private static final int ROWS_PER_GB = ONE_GB / ROW_LENGTH;
-
- public static final TableName TABLE_NAME = TableName.valueOf("TestTable");
- public static final byte [] FAMILY_NAME = Bytes.toBytes("info");
- public static final byte [] QUALIFIER_NAME = Bytes.toBytes("data");
- private TableName tableName = TABLE_NAME;
-
- protected HTableDescriptor TABLE_DESCRIPTOR;
- protected Map<String, CmdDescriptor> commands = new TreeMap<String, CmdDescriptor>();
- protected static Cluster cluster = new Cluster();
-
- volatile Configuration conf;
- private boolean nomapred = false;
- private int N = 1;
- private int R = ROWS_PER_GB;
- private Compression.Algorithm compression = Compression.Algorithm.NONE;
- private DataBlockEncoding blockEncoding = DataBlockEncoding.NONE;
- private boolean flushCommits = true;
- private boolean writeToWAL = true;
- private boolean inMemoryCF = false;
- private int presplitRegions = 0;
- private boolean useTags = false;
- private int noOfTags = 1;
- private HConnection connection;
-
- private static final Path PERF_EVAL_DIR = new Path("performance_evaluation");
- /**
- * Regex to parse lines in the input file passed to the MapReduce task.
- */
- public static final Pattern LINE_PATTERN =
- Pattern.compile("tableName=(\\w+),\\s+" +
- "startRow=(\\d+),\\s+" +
- "perClientRunRows=(\\d+),\\s+" +
- "totalRows=(\\d+),\\s+" +
- "clients=(\\d+),\\s+" +
- "flushCommits=(\\w+),\\s+" +
- "writeToWAL=(\\w+),\\s+" +
- "useTags=(\\w+),\\s+" +
- "noOfTags=(\\d+)");
-
- /**
- * Enum for map metrics. Keep it out here rather than inside the Map
- * inner-class so we can find associated properties.
- */
- protected static enum Counter {
- /** elapsed time */
- ELAPSED_TIME,
- /** number of rows */
- ROWS}
-
- /**
- * Constructor
- * @param c Configuration object
- */
- public PerformanceEvaluation(final Configuration c) {
- this.conf = c;
-
- addCommandDescriptor(RandomReadTest.class, "randomRead",
- "Run random read test");
- addCommandDescriptor(RandomSeekScanTest.class, "randomSeekScan",
- "Run random seek and scan 100 test");
- addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
- "Run random seek scan with both start and stop row (max 10 rows)");
- addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
- "Run random seek scan with both start and stop row (max 100 rows)");
- addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
- "Run random seek scan with both start and stop row (max 1000 rows)");
- addCommandDescriptor(RandomScanWithRange10000Test.class, "scanRange10000",
- "Run random seek scan with both start and stop row (max 10000 rows)");
- addCommandDescriptor(RandomWriteTest.class, "randomWrite",
- "Run random write test");
- addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
- "Run sequential read test");
- addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
- "Run sequential write test");
- addCommandDescriptor(ScanTest.class, "scan",
- "Run scan test (read every row)");
- addCommandDescriptor(FilteredScanTest.class, "filterScan",
- "Run scan test using a filter to find a specific row based on it's value (make sure to use --rows=20)");
- }
-
- protected void addCommandDescriptor(Class<? extends Test> cmdClass,
- String name, String description) {
- CmdDescriptor cmdDescriptor =
- new CmdDescriptor(cmdClass, name, description);
- commands.put(name, cmdDescriptor);
- }
-
- /**
- * Implementations can have their status set.
- */
- interface Status {
- /**
- * Sets status
- * @param msg status message
- * @throws IOException
- */
- void setStatus(final String msg) throws IOException;
- }
-
- /**
- * This class works as both the InputSplit of the Performance Evaluation
- * MapReduce InputFormat and the record value of its RecordReader.
- * Each map task reads exactly one record from a PeInputSplit; the record
- * value is the PeInputSplit itself.
- */
- public static class PeInputSplit extends InputSplit implements Writable {
- private TableName tableName = TABLE_NAME;
- private int startRow = 0;
- private int rows = 0;
- private int totalRows = 0;
- private int clients = 0;
- private boolean flushCommits = false;
- private boolean writeToWAL = true;
- private boolean useTags = false;
- private int noOfTags = 0;
-
- public PeInputSplit() {
- }
-
- public PeInputSplit(TableName tableName, int startRow, int rows, int totalRows, int clients,
- boolean flushCommits, boolean writeToWAL, boolean useTags, int noOfTags) {
- this.tableName = tableName;
- this.startRow = startRow;
- this.rows = rows;
- this.totalRows = totalRows;
- this.clients = clients;
- this.flushCommits = flushCommits;
- this.writeToWAL = writeToWAL;
- this.useTags = useTags;
- this.noOfTags = noOfTags;
- }
-
- @Override
- public void readFields(DataInput in) throws IOException {
- int tableNameLen = in.readInt();
- byte[] name = new byte[tableNameLen];
- in.readFully(name);
- this.tableName = TableName.valueOf(name);
- this.startRow = in.readInt();
- this.rows = in.readInt();
- this.totalRows = in.readInt();
- this.clients = in.readInt();
- this.flushCommits = in.readBoolean();
- this.writeToWAL = in.readBoolean();
- this.useTags = in.readBoolean();
- this.noOfTags = in.readInt();
- }
-
- @Override
- public void write(DataOutput out) throws IOException {
- byte[] name = this.tableName.toBytes();
- out.writeInt(name.length);
- out.write(name);
- out.writeInt(startRow);
- out.writeInt(rows);
- out.writeInt(totalRows);
- out.writeInt(clients);
- out.writeBoolean(flushCommits);
- out.writeBoolean(writeToWAL);
- out.writeBoolean(useTags);
- out.writeInt(noOfTags);
- }
-
- @Override
- public long getLength() throws IOException, InterruptedException {
- return 0;
- }
-
- @Override
- public String[] getLocations() throws IOException, InterruptedException {
- return new String[0];
- }
-
- public int getStartRow() {
- return startRow;
- }
-
- public TableName getTableName() {
- return tableName;
- }
-
- public int getRows() {
- return rows;
- }
-
- public int getTotalRows() {
- return totalRows;
- }
-
- public int getClients() {
- return clients;
- }
-
- public boolean isFlushCommits() {
- return flushCommits;
- }
-
- public boolean isWriteToWAL() {
- return writeToWAL;
- }
-
- public boolean isUseTags() {
- return useTags;
- }
-
- public int getNoOfTags() {
- return noOfTags;
- }
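-
- // Round-trip sketch (illustrative, not in the original source), using
- // org.apache.hadoop.io.DataOutputBuffer and DataInputBuffer:
- //   PeInputSplit split = new PeInputSplit(TABLE_NAME, 0, 1000, 10000, 10,
- //       true, true, false, 1);
- //   DataOutputBuffer out = new DataOutputBuffer();
- //   split.write(out);
- //   DataInputBuffer in = new DataInputBuffer();
- //   in.reset(out.getData(), out.getLength());
- //   PeInputSplit copy = new PeInputSplit();
- //   copy.readFields(in);  // copy now equals split field-for-field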
- }
-
- /**
- * InputFormat of the Performance Evaluation MapReduce job.
- * It extends FileInputFormat so it can reuse methods such as setInputPaths().
- */
- public static class PeInputFormat extends FileInputFormat<NullWritable, PeInputSplit> {
-
- @Override
- public List<InputSplit> getSplits(JobContext job) throws IOException {
- // generate splits
- List<InputSplit> splitList = new ArrayList<InputSplit>();
-
- for (FileStatus file: listStatus(job)) {
- if (file.isDir()) {
- continue;
- }
- Path path = file.getPath();
- FileSystem fs = path.getFileSystem(job.getConfiguration());
- FSDataInputStream fileIn = fs.open(path);
- LineReader in = new LineReader(fileIn, job.getConfiguration());
- int lineLen = 0;
- while(true) {
- Text lineText = new Text();
- lineLen = in.readLine(lineText);
- if(lineLen <= 0) {
- break;
- }
- Matcher m = LINE_PATTERN.matcher(lineText.toString());
- if((m != null) && m.matches()) {
- TableName tableName = TableName.valueOf(m.group(1));
- int startRow = Integer.parseInt(m.group(2));
- int rows = Integer.parseInt(m.group(3));
- int totalRows = Integer.parseInt(m.group(4));
- int clients = Integer.parseInt(m.group(5));
- boolean flushCommits = Boolean.parseBoolean(m.group(6));
- boolean writeToWAL = Boolean.parseBoolean(m.group(7));
- boolean useTags = Boolean.parseBoolean(m.group(8));
- int noOfTags = Integer.parseInt(m.group(9));
-
- LOG.debug("tableName=" + tableName +
- " split["+ splitList.size() + "] " +
- " startRow=" + startRow +
- " rows=" + rows +
- " totalRows=" + totalRows +
- " clients=" + clients +
- " flushCommits=" + flushCommits +
- " writeToWAL=" + writeToWAL +
- " useTags=" + useTags +
- " noOfTags=" + noOfTags);
-
- PeInputSplit newSplit =
- new PeInputSplit(tableName, startRow, rows, totalRows, clients,
- flushCommits, writeToWAL, useTags, noOfTags);
- splitList.add(newSplit);
- }
- }
- in.close();
- }
-
- LOG.info("Total # of splits: " + splitList.size());
- return splitList;
- }
-
- @Override
- public RecordReader<NullWritable, PeInputSplit> createRecordReader(InputSplit split,
- TaskAttemptContext context) {
- return new PeRecordReader();
- }
-
- public static class PeRecordReader extends RecordReader<NullWritable, PeInputSplit> {
- private boolean readOver = false;
- private PeInputSplit split = null;
- private NullWritable key = null;
- private PeInputSplit value = null;
-
- @Override
- public void initialize(InputSplit split, TaskAttemptContext context)
- throws IOException, InterruptedException {
- this.readOver = false;
- this.split = (PeInputSplit)split;
- }
-
- @Override
- public boolean nextKeyValue() throws IOException, InterruptedException {
- if(readOver) {
- return false;
- }
-
- key = NullWritable.get();
- value = (PeInputSplit)split;
-
- readOver = true;
- return true;
- }
-
- @Override
- public NullWritable getCurrentKey() throws IOException, InterruptedException {
- return key;
- }
-
- @Override
- public PeInputSplit getCurrentValue() throws IOException, InterruptedException {
- return value;
- }
-
- @Override
- public float getProgress() throws IOException, InterruptedException {
- if(readOver) {
- return 1.0f;
- } else {
- return 0.0f;
- }
- }
-
- @Override
- public void close() throws IOException {
- // do nothing
- }
- }
- }
-
- /**
- * MapReduce job that runs a performance evaluation client in each map task.
- */
- public static class EvaluationMapTask
- extends Mapper<NullWritable, PeInputSplit, LongWritable, LongWritable> {
-
- /** configuration parameter name that contains the command */
- public final static String CMD_KEY = "EvaluationMapTask.command";
- /** configuration parameter name that contains the PE impl */
- public static final String PE_KEY = "EvaluationMapTask.performanceEvalImpl";
-
- private Class<? extends Test> cmd;
- private PerformanceEvaluation pe;
-
- @Override
- protected void setup(Context context) throws IOException, InterruptedException {
- this.cmd = forName(context.getConfiguration().get(CMD_KEY), Test.class);
-
- // this is required so that extensions of PE are instantiated within the
- // map reduce task...
- Class<? extends PerformanceEvaluation> peClass =
- forName(context.getConfiguration().get(PE_KEY), PerformanceEvaluation.class);
- try {
- this.pe = peClass.getConstructor(Configuration.class)
- .newInstance(context.getConfiguration());
- } catch (Exception e) {
- throw new IllegalStateException("Could not instantiate PE instance", e);
- }
- }
-
- private <Type> Class<? extends Type> forName(String className, Class<Type> type) {
- Class<? extends Type> clazz = null;
- try {
- clazz = Class.forName(className).asSubclass(type);
- } catch (ClassNotFoundException e) {
- throw new IllegalStateException("Could not find class for name: " + className, e);
- }
- return clazz;
- }
-
- protected void map(NullWritable key, PeInputSplit value, final Context context)
- throws IOException, InterruptedException {
-
- Status status = new Status() {
- public void setStatus(String msg) {
- context.setStatus(msg);
- }
- };
-
- // Evaluation task
- pe.tableName = value.getTableName();
- long elapsedTime = this.pe.runOneClient(this.cmd, value.getStartRow(),
- value.getRows(), value.getTotalRows(),
- value.isFlushCommits(), value.isWriteToWAL(),
- value.isUseTags(), value.getNoOfTags(),
- HConnectionManager.createConnection(context.getConfiguration()), status);
- // Collect how much time the thing took. Report as map output and
- // to the ELAPSED_TIME counter.
- context.getCounter(Counter.ELAPSED_TIME).increment(elapsedTime);
- context.getCounter(Counter.ROWS).increment(value.rows);
- context.write(new LongWritable(value.startRow), new LongWritable(elapsedTime));
- context.progress();
- }
- }
-
- /*
- * If the table does not already exist, create it.
- * @param admin Remote admin to use for checking and creation.
- * @return True if the table exists once we are done.
- * @throws IOException
- */
- private boolean checkTable(RemoteAdmin admin) throws IOException {
- HTableDescriptor tableDescriptor = getTableDescriptor();
- if (this.presplitRegions > 0) {
- // presplit requested
- if (admin.isTableAvailable(tableDescriptor.getTableName().getName())) {
- admin.deleteTable(tableDescriptor.getTableName().getName());
- }
-
- byte[][] splits = getSplits();
- for (int i=0; i < splits.length; i++) {
- LOG.debug(" split " + i + ": " + Bytes.toStringBinary(splits[i]));
- }
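- // Note (not in the original source): RemoteAdmin.createTable(HTableDescriptor)
- // takes no split keys, so the split points computed above are only logged,
- // not applied to the new table.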
- admin.createTable(tableDescriptor);
- LOG.info ("Table created with " + this.presplitRegions + " splits");
- } else {
- boolean tableExists = admin.isTableAvailable(tableDescriptor.getTableName().getName());
- if (!tableExists) {
- admin.createTable(tableDescriptor);
- LOG.info("Table " + tableDescriptor + " created");
- }
- }
- boolean tableExists = admin.isTableAvailable(tableDescriptor.getTableName().getName());
- return tableExists;
- }
-
- protected HTableDescriptor getTableDescriptor() {
- if (TABLE_DESCRIPTOR == null) {
- TABLE_DESCRIPTOR = new HTableDescriptor(tableName);
- HColumnDescriptor family = new HColumnDescriptor(FAMILY_NAME);
- family.setDataBlockEncoding(blockEncoding);
- family.setCompressionType(compression);
- if (inMemoryCF) {
- family.setInMemory(true);
- }
- TABLE_DESCRIPTOR.addFamily(family);
- }
- return TABLE_DESCRIPTOR;
- }
-
- /**
- * Generates splits based on total number of rows and specified split regions
- *
- * @return splits : array of byte []
- */
- protected byte[][] getSplits() {
- if (this.presplitRegions == 0)
- return new byte [0][];
-
- int numSplitPoints = presplitRegions - 1;
- byte[][] splits = new byte[numSplitPoints][];
- int jump = this.R / this.presplitRegions;
- for (int i=0; i < numSplitPoints; i++) {
- int rowkey = jump * (1 + i);
- splits[i] = format(rowkey);
- }
- return splits;
- }
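-
- // Worked example (illustrative): with R = 1,000,000 and presplitRegions = 10,
- // jump = 100,000, so the nine split points are format(100000),
- // format(200000), ..., format(900000), yielding ten roughly equal regions.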
-
- /*
- * We're to run multiple clients concurrently. Set up a mapreduce job. Run
- * one map per client. Then run a single reduce to sum the elapsed times.
- * @param cmd Command to run.
- * @throws IOException
- */
- private void runNIsMoreThanOne(final Class<? extends Test> cmd)
- throws IOException, InterruptedException, ClassNotFoundException {
- RemoteAdmin remoteAdmin = new RemoteAdmin(new Client(cluster), getConf());
- checkTable(remoteAdmin);
- if (nomapred) {
- doMultipleClients(cmd);
- } else {
- doMapReduce(cmd);
- }
- }
-
- /*
- * Run all clients in this vm each to its own thread.
- * @param cmd Command to run.
- * @throws IOException
- */
- private void doMultipleClients(final Class<? extends Test> cmd) throws IOException {
- final List<Thread> threads = new ArrayList<Thread>(this.N);
- final long[] timings = new long[this.N];
- final int perClientRows = R/N;
- final TableName tableName = this.tableName;
- final DataBlockEncoding encoding = this.blockEncoding;
- final boolean flushCommits = this.flushCommits;
- final Compression.Algorithm compression = this.compression;
- final boolean writeToWal = this.writeToWAL;
- final int preSplitRegions = this.presplitRegions;
- final boolean useTags = this.useTags;
- final int numTags = this.noOfTags;
- final HConnection connection = HConnectionManager.createConnection(getConf());
- for (int i = 0; i < this.N; i++) {
- final int index = i;
- Thread t = new Thread ("TestClient-" + i) {
- @Override
- public void run() {
- super.run();
- PerformanceEvaluation pe = new PerformanceEvaluation(getConf());
- pe.tableName = tableName;
- pe.blockEncoding = encoding;
- pe.flushCommits = flushCommits;
- pe.compression = compression;
- pe.writeToWAL = writeToWal;
- pe.presplitRegions = preSplitRegions;
- pe.N = N;
- pe.connection = connection;
- pe.useTags = useTags;
- pe.noOfTags = numTags;
- try {
- long elapsedTime = pe.runOneClient(cmd, index * perClientRows,
- perClientRows, R,
- flushCommits, writeToWAL, useTags, noOfTags, connection, new Status() {
- public void setStatus(final String msg) throws IOException {
- LOG.info("client-" + getName() + " " + msg);
- }
- });
- timings[index] = elapsedTime;
- LOG.info("Finished " + getName() + " in " + elapsedTime +
- "ms writing " + perClientRows + " rows");
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- }
- };
- threads.add(t);
- }
- for (Thread t: threads) {
- t.start();
- }
- for (Thread t: threads) {
- while(t.isAlive()) {
- try {
- t.join();
- } catch (InterruptedException e) {
- LOG.debug("Interrupted, continuing" + e.toString());
- }
- }
- }
- final String test = cmd.getSimpleName();
- LOG.info("[" + test + "] Summary of timings (ms): "
- + Arrays.toString(timings));
- Arrays.sort(timings);
- long total = 0;
- for (int i = 0; i < this.N; i++) {
- total += timings[i];
- }
- LOG.info("[" + test + "]"
- + "\tMin: " + timings[0] + "ms"
- + "\tMax: " + timings[this.N - 1] + "ms"
- + "\tAvg: " + (total / this.N) + "ms");
- }
-
- /*
- * Run a mapreduce job. Run as many maps as asked-for clients.
- * Before we start up the job, write out an input file with an instruction
- * per client regarding which row each is to start on.
- * @param cmd Command to run.
- * @throws IOException
- */
- private void doMapReduce(final Class<? extends Test> cmd) throws IOException,
- InterruptedException, ClassNotFoundException {
- Configuration conf = getConf();
- Path inputDir = writeInputFile(conf);
- conf.set(EvaluationMapTask.CMD_KEY, cmd.getName());
- conf.set(EvaluationMapTask.PE_KEY, getClass().getName());
- Job job = new Job(conf);
- job.setJarByClass(PerformanceEvaluation.class);
- job.setJobName("HBase Performance Evaluation");
-
- job.setInputFormatClass(PeInputFormat.class);
- PeInputFormat.setInputPaths(job, inputDir);
-
- job.setOutputKeyClass(LongWritable.class);
- job.setOutputValueClass(LongWritable.class);
-
- job.setMapperClass(EvaluationMapTask.class);
- job.setReducerClass(LongSumReducer.class);
- job.setNumReduceTasks(1);
-
- job.setOutputFormatClass(TextOutputFormat.class);
- TextOutputFormat.setOutputPath(job, new Path(inputDir.getParent(), "outputs"));
- TableMapReduceUtil.addDependencyJars(job);
- TableMapReduceUtil.initCredentials(job);
- job.waitForCompletion(true);
- }
-
- /*
- * Write input file of offsets-per-client for the mapreduce job.
- * @param c Configuration
- * @return Directory that contains file written.
- * @throws IOException
- */
- private Path writeInputFile(final Configuration c) throws IOException {
- SimpleDateFormat formatter = new SimpleDateFormat("yyyyMMddHHmmss");
- Path jobdir = new Path(PERF_EVAL_DIR, formatter.format(new Date()));
- Path inputDir = new Path(jobdir, "inputs");
-
- FileSystem fs = FileSystem.get(c);
- fs.mkdirs(inputDir);
- Path inputFile = new Path(inputDir, "input.txt");
- PrintStream out = new PrintStream(fs.create(inputFile));
- // Make input random.
- Map<Integer, String> m = new TreeMap<Integer, String>();
- Hash h = MurmurHash.getInstance();
- int perClientRows = (this.R / this.N);
- try {
- for (int i = 0; i < 10; i++) {
- for (int j = 0; j < N; j++) {
- String s = "tableName=" + this.tableName +
- ", startRow=" + ((j * perClientRows) + (i * (perClientRows/10))) +
- ", perClientRunRows=" + (perClientRows / 10) +
- ", totalRows=" + this.R +
- ", clients=" + this.N +
- ", flushCommits=" + this.flushCommits +
- ", writeToWAL=" + this.writeToWAL +
- ", useTags=" + this.useTags +
- ", noOfTags=" + this.noOfTags;
- int hash = h.hash(Bytes.toBytes(s));
- m.put(hash, s);
- }
- }
- for (Map.Entry<Integer, String> e: m.entrySet()) {
- out.println(e.getValue());
- }
- } finally {
- out.close();
- }
- return inputDir;
- }
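-
- // Example (illustrative): with R = 1,000,000 and N = 10 the file holds 100
- // lines; client j = 1, chunk i = 1 produces
- //   tableName=TestTable, startRow=110000, perClientRunRows=10000,
- //   totalRows=1000000, clients=10, flushCommits=true, writeToWAL=true,
- //   useTags=false, noOfTags=1
- // The MurmurHash-keyed TreeMap randomizes line order before writing.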
-
- /**
- * Describes a command.
- */
- static class CmdDescriptor {
- private Class<? extends Test> cmdClass;
- private String name;
- private String description;
-
- CmdDescriptor(Class<? extends Test> cmdClass, String name, String description) {
- this.cmdClass = cmdClass;
- this.name = name;
- this.description = description;
- }
-
- public Class<? extends Test> getCmdClass() {
- return cmdClass;
- }
-
- public String getName() {
- return name;
- }
-
- public String getDescription() {
- return description;
- }
- }
-
- /**
- * Wraps up options passed to {@link org.apache.hadoop.hbase.PerformanceEvaluation.Test
- * tests}. This makes the reflection logic a little easier to understand...
- */
- static class TestOptions {
- private int startRow;
- private int perClientRunRows;
- private int totalRows;
- private int numClientThreads;
- private TableName tableName;
- private boolean flushCommits;
- private boolean writeToWAL = true;
- private boolean useTags = false;
- private int noOfTags = 0;
- private HConnection connection;
-
- TestOptions() {
- }
-
- TestOptions(int startRow, int perClientRunRows, int totalRows, int numClientThreads,
- TableName tableName, boolean flushCommits, boolean writeToWAL, boolean useTags,
- int noOfTags, HConnection connection) {
- this.startRow = startRow;
- this.perClientRunRows = perClientRunRows;
- this.totalRows = totalRows;
- this.numClientThreads = numClientThreads;
- this.tableName = tableName;
- this.flushCommits = flushCommits;
- this.writeToWAL = writeToWAL;
- this.useTags = useTags;
- this.noOfTags = noOfTags;
- this.connection = connection;
- }
-
- public int getStartRow() {
- return startRow;
- }
-
- public int getPerClientRunRows() {
- return perClientRunRows;
- }
-
- public int getTotalRows() {
- return totalRows;
- }
-
- public int getNumClientThreads() {
- return numClientThreads;
- }
-
- public TableName getTableName() {
- return tableName;
- }
-
- public boolean isFlushCommits() {
- return flushCommits;
- }
-
- public boolean isWriteToWAL() {
- return writeToWAL;
- }
-
- public HConnection getConnection() {
- return connection;
- }
-
- public boolean isUseTags() {
- return this.useTags;
- }
-
- public int getNumTags() {
- return this.noOfTags;
- }
- }
-
- /*
- * A test.
- * Subclass to particularize what happens per row.
- */
- static abstract class Test {
- // The below makes it so that when Tests are all running in the one
- // JVM, they each have a differently seeded Random.
- private static final Random randomSeed =
- new Random(System.currentTimeMillis());
- private static long nextRandomSeed() {
- return randomSeed.nextLong();
- }
- protected final Random rand = new Random(nextRandomSeed());
-
- protected final int startRow;
- protected final int perClientRunRows;
- protected final int totalRows;
- private final Status status;
- protected TableName tableName;
- protected HTableInterface table;
- protected volatile Configuration conf;
- protected boolean flushCommits;
- protected boolean writeToWAL;
- protected boolean useTags;
- protected int noOfTags;
- protected HConnection connection;
-
- /**
- * Note that all subclasses of this class must provide a public constructor
- * that has the exact same list of arguments.
- */
- Test(final Configuration conf, final TestOptions options, final Status status) {
- super();
- this.startRow = options.getStartRow();
- this.perClientRunRows = options.getPerClientRunRows();
- this.totalRows = options.getTotalRows();
- this.status = status;
- this.tableName = options.getTableName();
- this.table = null;
- this.conf = conf;
- this.flushCommits = options.isFlushCommits();
- this.writeToWAL = options.isWriteToWAL();
- this.useTags = options.isUseTags();
- this.noOfTags = options.getNumTags();
- this.connection = options.getConnection();
- }
-
- protected String generateStatus(final int sr, final int i, final int lr) {
- return sr + "/" + i + "/" + lr;
- }
-
- protected int getReportingPeriod() {
- int period = this.perClientRunRows / 10;
- return period == 0? this.perClientRunRows: period;
- }
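-
- // Illustrative: with perClientRunRows = 1,000,000 the status is refreshed
- // every 100,000 rows; if the integer division yields 0 we report at most
- // once per run.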
-
- void testSetup() throws IOException {
- this.table = connection.getTable(tableName);
- this.table.setAutoFlush(false, true);
- }
-
- void testTakedown() throws IOException {
- if (flushCommits) {
- this.table.flushCommits();
- }
- table.close();
- }
-
- /*
- * Run test.
- * @return Elapsed time in milliseconds.
- * @throws IOException
- */
- long test() throws IOException {
- testSetup();
- LOG.info("Timed test starting in thread " + Thread.currentThread().getName());
- final long startTime = System.nanoTime();
- try {
- testTimed();
- } finally {
- testTakedown();
- }
- return (System.nanoTime() - startTime) / 1000000;
- }
-
- /**
- * Provides an extension point for tests that don't want a per-row invocation.
- */
- void testTimed() throws IOException {
- int lastRow = this.startRow + this.perClientRunRows;
- // Report on completion of 1/10th of total.
- for (int i = this.startRow; i < lastRow; i++) {
- testRow(i);
- if (status != null && i > 0 && (i % getReportingPeriod()) == 0) {
- status.setStatus(generateStatus(this.startRow, i, lastRow));
- }
- }
- }
-
- /*
- * Test for individual row.
- * @param i Row index.
- */
- abstract void testRow(final int i) throws IOException;
- }
-
- @SuppressWarnings("unused")
- static class RandomSeekScanTest extends Test {
- RandomSeekScanTest(Configuration conf, TestOptions options, Status status) {
- super(conf, options, status);
- }
-
- @Override
- void testRow(final int i) throws IOException {
- Scan scan = new Scan(getRandomRow(this.rand, this.totalRows));
- scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
- scan.setFilter(new WhileMatchFilter(new PageFilter(120)));
- ResultScanner s = this.table.getScanner(scan);
- s.close();
- }
-
- @Override
- protected int getReportingPeriod() {
- int period = this.perClientRunRows / 100;
- return period == 0? this.perClientRunRows: period;
- }
-
- }
-
- @SuppressWarnings("unused")
- static abstract class RandomScanWithRangeTest extends Test {
- RandomScanWithRangeTest(Configuration conf, TestOptions options, Status status) {
- super(conf, options, status);
- }
-
- @Override
- void testRow(final int i) throws IOException {
- Pair<byte[], byte[]> startAndStopRow = getStartAndStopRow();
- Scan scan = new Scan(startAndStopRow.getFirst(), startAndStopRow.getSecond());
- scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
- ResultScanner s = this.table.getScanner(scan);
- int count = 0;
- for (Result rr = null; (rr = s.next()) != null;) {
- count++;
- }
-
- if (i % 100 == 0) {
- LOG.info(String.format("Scan for key range %s - %s returned %s rows",
- Bytes.toString(startAndStopRow.getFirst()),
- Bytes.toString(startAndStopRow.getSecond()), count));
- }
-
- s.close();
- }
-
- protected abstract Pair<byte[],byte[]> getStartAndStopRow();
-
- protected Pair<byte[], byte[]> generateStartAndStopRows(int maxRange) {
- int start = this.rand.nextInt(Integer.MAX_VALUE) % totalRows;
- int stop = start + maxRange;
- return new Pair<byte[],byte[]>(format(start), format(stop));
- }
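-
- // Illustrative: with totalRows = 1,000,000 and maxRange = 10, a draw of
- // start = 123456 gives the scan range [format(123456), format(123466)).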
-
- @Override
- protected int getReportingPeriod() {
- int period = this.perClientRunRows / 100;
- return period == 0? this.perClientRunRows: period;
- }
- }
-
- static class RandomScanWithRange10Test extends RandomScanWithRangeTest {
- RandomScanWithRange10Test(Configuration conf, TestOptions options, Status status) {
- super(conf, options, status);
- }
-
- @Override
- protected Pair<byte[], byte[]> getStartAndStopRow() {
- return generateStartAndStopRows(10);
- }
- }
-
- static class RandomScanWithRange100Test extends RandomScanWithRangeTest {
- RandomScanWithRange100Test(Configuration conf, TestOptions options, Status status) {
- super(conf, options, status);
- }
-
- @Override
- protected Pair<byte[], byte[]> getStartAndStopRow() {
- return generateStartAndStopRows(100);
- }
- }
-
- static class RandomScanWithRange1000Test extends RandomScanWithRangeTest {
- RandomScanWithRange1000Test(Configuration conf, TestOptions options, Status status) {
- super(conf, options, status);
- }
-
- @Override
- protected Pair<byte[], byte[]> getStartAndStopRow() {
- return generateStartAndStopRows(1000);
- }
- }
-
- static class RandomScanWithRange10000Test extends RandomScanWithRangeTest {
- RandomScanWithRange10000Test(Configuration conf, TestOptions options, Status status) {
- super(conf, options, status);
- }
-
- @Override
- protected Pair<byte[], byte[]> getStartAndStopRow() {
- return generateStartAndStopRows(10000);
- }
- }
-
- static class RandomReadTest extends Test {
- RandomReadTest(Configuration conf, TestOptions options, Status status) {
- super(conf, options, status);
- }
-
- @Override
- void testRow(final int i) throws IOException {
- Get get = new Get(getRandomRow(this.rand, this.totalRows));
- get.addColumn(FAMILY_NAME, QUALIFIER_NAME);
- this.table.get(get);
- }
-
- @Override
- protected int getReportingPeriod() {
- int period = this.perClientRunRows / 100;
- return period == 0? this.perClientRunRows: period;
- }
-
- }
-
- static class RandomWriteTest extends Test {
- RandomWriteTest(Configuration conf, TestOptions options, Status status) {
- super(conf, options, status);
- }
-
- @Override
- void testRow(final int i) throws IOException {
- byte[] row = getRandomRow(this.rand, this.totalRows);
- Put put = new Put(row);
- byte[] value = generateData(this.rand, ROW_LENGTH);
- if (useTags) {
- byte[] tag = generateData(this.rand, TAG_LENGTH);
- Tag[] tags = new Tag[noOfTags];
- for (int n = 0; n < noOfTags; n++) {
- Tag t = new Tag((byte) n, tag);
- tags[n] = t;
- }
- KeyValue kv = new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP,
- value, tags);
- put.add(kv);
- } else {
- put.add(FAMILY_NAME, QUALIFIER_NAME, value);
- }
- put.setDurability(writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
- table.put(put);
- }
- }
-
- static class ScanTest extends Test {
- private ResultScanner testScanner;
-
- ScanTest(Configuration conf, TestOptions options, Status status) {
- super(conf, options, status);
- }
-
- @Override
- void testTakedown() throws IOException {
- if (this.testScanner != null) {
- this.testScanner.close();
- }
- super.testTakedown();
- }
-
-
- @Override
- void testRow(final int i) throws IOException {
- if (this.testScanner == null) {
- Scan scan = new Scan(format(this.startRow));
- scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
- this.testScanner = table.getScanner(scan);
- }
- testScanner.next();
- }
-
- }
-
- static class SequentialReadTest extends Test {
- SequentialReadTest(Configuration conf, TestOptions options, Status status) {
- super(conf, options, status);
- }
-
- @Override
- void testRow(final int i) throws IOException {
- Get get = new Get(format(i));
- get.addColumn(FAMILY_NAME, QUALIFIER_NAME);
- table.get(get);
- }
-
- }
-
- static class SequentialWriteTest extends Test {
-
- SequentialWriteTest(Configuration conf, TestOptions options, Status status) {
- super(conf, options, status);
- }
-
- @Override
- void testRow(final int i) throws IOException {
- byte[] row = format(i);
- Put put = new Put(row);
- byte[] value = generateData(this.rand, ROW_LENGTH);
- if (useTags) {
- byte[] tag = generateData(this.rand, TAG_LENGTH);
- Tag[] tags = new Tag[noOfTags];
- for (int n = 0; n < noOfTags; n++) {
- Tag t = new Tag((byte) n, tag);
- tags[n] = t;
- }
- KeyValue kv = new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP,
- value, tags);
- put.add(kv);
- } else {
- put.add(FAMILY_NAME, QUALIFIER_NAME, value);
- }
- put.setDurability(writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
- table.put(put);
- }
- }
-
- static class FilteredScanTest extends Test {
- protected static final Log LOG = LogFactory.getLog(FilteredScanTest.class.getName());
-
- FilteredScanTest(Configuration conf, TestOptions options, Status status) {
- super(conf, options, status);
- }
-
- @Override
- void testRow(int i) throws IOException {
- byte[] value = generateValue(this.rand);
- Scan scan = constructScan(value);
- ResultScanner scanner = null;
- try {
- scanner = this.table.getScanner(scan);
- while (scanner.next() != null) {
- }
- } finally {
- if (scanner != null) scanner.close();
- }
- }
-
- protected Scan constructScan(byte[] valuePrefix) throws IOException {
- Filter filter = new SingleColumnValueFilter(
- FAMILY_NAME, QUALIFIER_NAME, CompareFilter.CompareOp.EQUAL,
- new BinaryComparator(valuePrefix)
- );
- Scan scan = new Scan();
- scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
- scan.setFilter(filter);
- return scan;
- }
- }
-
- /*
- * Format passed integer.
- * @param number
- * @return Zero-padded, fixed-width (DEFAULT_ROW_PREFIX_LENGTH + 10 bytes)
- * decimal version of the passed number (uses the absolute value if negative).
- */
- public static byte [] format(final int number) {
- byte [] b = new byte[DEFAULT_ROW_PREFIX_LENGTH + 10];
- int d = Math.abs(number);
- for (int i = b.length - 1; i >= 0; i--) {
- b[i] = (byte)((d % 10) + '0');
- d /= 10;
- }
- return b;
- }
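-
- // Example (illustrative): the output is DEFAULT_ROW_PREFIX_LENGTH + 10 = 26
- // bytes wide, so format(123) yields the ASCII bytes
- // "00000000000000000000000123".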
-
- public static byte[] generateData(final Random r, int length) {
- byte [] b = new byte [length];
- int i = 0;
-
- for(i = 0; i < (length-8); i += 8) {
- b[i] = (byte) (65 + r.nextInt(26));
- b[i+1] = b[i];
- b[i+2] = b[i];
- b[i+3] = b[i];
- b[i+4] = b[i];
- b[i+5] = b[i];
- b[i+6] = b[i];
- b[i+7] = b[i];
- }
-
- byte a = (byte) (65 + r.nextInt(26));
- for(; i < length; i++) {
- b[i] = a;
- }
- return b;
- }
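-
- // Illustrative: generateData(rand, 20) yields runs of eight identical ASCII
- // letters plus a uniform tail, e.g. "QQQQQQQQKKKKKKKKZZZZ": 8-byte runs are
- // written while i < length - 8, then the remainder is filled with one letter.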
-
- public static byte[] generateValue(final Random r) {
- byte [] b = new byte [ROW_LENGTH];
- r.nextBytes(b);
- return b;
- }
-
- static byte [] getRandomRow(final Random random, final int totalRows) {
- return format(random.nextInt(Integer.MAX_VALUE) % totalRows);
- }
-
- long runOneClient(final Class<? extends Test> cmd, final int startRow,
- final int perClientRunRows, final int totalRows,
- boolean flushCommits, boolean writeToWAL, boolean useTags, int noOfTags,
- HConnection connection, final Status status)
- throws IOException {
- status.setStatus("Start " + cmd + " at offset " + startRow + " for " +
- perClientRunRows + " rows");
- long totalElapsedTime = 0;
-
- TestOptions options = new TestOptions(startRow, perClientRunRows,
- totalRows, N, tableName, flushCommits, writeToWAL, useTags, noOfTags, connection);
- final Test t;
- try {
- Constructor<? extends Test> constructor = cmd.getDeclaredConstructor(
- Configuration.class, TestOptions.class, Status.class);
- t = constructor.newInstance(this.conf, options, status);
- } catch (NoSuchMethodException e) {
- throw new IllegalArgumentException("Invalid command class: " +
- cmd.getName() + ". It does not provide a constructor as described by" +
- "the javadoc comment. Available constructors are: " +
- Arrays.toString(cmd.getConstructors()));
- } catch (Exception e) {
- throw new IllegalStateException("Failed to construct command class", e);
- }
- totalElapsedTime = t.test();
-
- status.setStatus("Finished " + cmd + " in " + totalElapsedTime +
- "ms at offset " + startRow + " for " + perClientRunRows + " rows");
- return totalElapsedTime;
- }
-
- private void runNIsOne(final Class<? extends Test> cmd) {
- Status status = new Status() {
- public void setStatus(String msg) throws IOException {
- LOG.info(msg);
- }
- };
-
- RemoteAdmin admin = null;
- try {
- Client client = new Client(cluster);
- admin = new RemoteAdmin(client, getConf());
- checkTable(admin);
- runOneClient(cmd, 0, this.R, this.R, this.flushCommits, this.writeToWAL,
- this.useTags, this.noOfTags, this.connection, status);
- } catch (Exception e) {
- LOG.error("Failed", e);
- }
- }
-
- private void runTest(final Class<? extends Test> cmd) throws IOException,
- InterruptedException, ClassNotFoundException {
- if (N == 1) {
- // If there is only one client and one HRegionServer, we assume nothing
- // has been set up at all.
- runNIsOne(cmd);
- } else {
- // Else, run
- runNIsMoreThanOne(cmd);
- }
- }
-
- protected void printUsage() {
- printUsage(null);
- }
-
- protected void printUsage(final String message) {
- if (message != null && message.length() > 0) {
- System.err.println(message);
- }
- System.err.println("Usage: java " + this.getClass().getName() + " \\");
- System.err.println(" [--nomapred] [--rows=ROWS] [--table=NAME] \\");
- System.err.println(" [--compress=TYPE] [--blockEncoding=TYPE] [-D<property=value>]* <command> <nclients>");
- System.err.println();
- System.err.println("Options:");
- System.err.println(" nomapred Run multiple clients using threads " +
- "(rather than use mapreduce)");
- System.err.println(" rows Rows each client runs. Default: One million");
- System.err.println(" table Alternate table name. Default: 'TestTable'");
- System.err.println(" compress Compression type to use (GZ, LZO, ...). Default: 'NONE'");
- System.err.println(" flushCommits Used to determine if the test should flush the table. Default: false");
- System.err.println(" writeToWAL Set writeToWAL on puts. Default: True");
- System.err.println(" presplit Create presplit table. Recommended for accurate perf analysis (see guide). Default: disabled");
- System.err
- .println(" inmemory Tries to keep the HFiles of the CF inmemory as far as possible. Not " +
- "guaranteed that reads are always served from inmemory. Default: false");
- System.err.println(" usetags Writes tags along with KVs. Use with HFile V3. Default : false");
- System.err
- .println(" numoftags Specify the no of tags that would be needed. This works only if usetags is true.");
- System.err.println();
- System.err.println(" Note: -D properties will be applied to the conf used. ");
- System.err.println(" For example: ");
- System.err.println(" -Dmapred.output.compress=true");
- System.err.println(" -Dmapreduce.task.timeout=60000");
- System.err.println();
- System.err.println("Command:");
- for (CmdDescriptor command : commands.values()) {
- System.err.println(String.format(" %-15s %s", command.getName(), command.getDescription()));
- }
- System.err.println();
- System.err.println("Args:");
- System.err.println(" nclients Integer. Required. Total number of " +
- "clients (and HRegionServers)");
- System.err.println(" running: 1 <= value <= 500");
- System.err.println("Examples:");
- System.err.println(" To run a single evaluation client:");
- System.err.println(" $ bin/hbase " + this.getClass().getName()
- + " sequentialWrite 1");
- }
-
- private void getArgs(final int start, final String[] args) {
- if(start + 1 > args.length) {
- throw new IllegalArgumentException("must supply the number of clients");
- }
- N = Integer.parseInt(args[start]);
- if (N < 1) {
- throw new IllegalArgumentException("Number of clients must be > 1");
- }
- // Set total number of rows to write.
- R = R * N;
- }
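-
- // Example (illustrative): "--rows=100000 sequentialWrite 3" parses N = 3 and
- // scales R to 100000 * 3 = 300000 total rows, i.e. 100000 rows per client.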
-
- @Override
- public int run(String[] args) throws Exception {
- // Process command-line args. TODO: Better cmd-line processing
- // (but hopefully something not as painful as cli options).
- int errCode = -1;
- if (args.length < 1) {
- printUsage();
- return errCode;
- }
-
- try {
- for (int i = 0; i < args.length; i++) {
- String cmd = args[i];
- if (cmd.equals("-h") || cmd.startsWith("--h")) {
- printUsage();
- errCode = 0;
- break;
- }
-
- final String nmr = "--nomapred";
- if (cmd.startsWith(nmr)) {
- nomapred = true;
- continue;
- }
-
- final String rows = "--rows=";
- if (cmd.startsWith(rows)) {
- R = Integer.parseInt(cmd.substring(rows.length()));
- continue;
- }
-
- final String table = "--table=";
- if (cmd.startsWith(table)) {
- this.tableName = TableName.valueOf(cmd.substring(table.length()));
- continue;
- }
-
- final String compress = "--compress=";
- if (cmd.startsWith(compress)) {
- this.compression = Compression.Algorithm.valueOf(cmd.substring(compress.length()));
- continue;
- }
-
- final String blockEncoding = "--blockEncoding=";
- if (cmd.startsWith(blockEncoding)) {
- this.blockEncoding = DataBlockEncoding.valueOf(cmd.substring(blockEncoding.length()));
- continue;
- }
-
- final String flushCommits = "--flushCommits=";
- if (cmd.startsWith(flushCommits)) {
- this.flushCommits = Boolean.parseBoolean(cmd.substring(flushCommits.length()));
- continue;
- }
-
- final String writeToWAL = "--writeToWAL=";
- if (cmd.startsWith(writeToWAL)) {
- this.writeToWAL = Boolean.parseBoolean(cmd.substring(writeToWAL.length()));
- continue;
- }
-
- final String presplit = "--presplit=";
- if (cmd.startsWith(presplit)) {
- this.presplitRegions = Integer.parseInt(cmd.substring(presplit.length()));
- continue;
- }
-
- final String inMemory = "--inmemory=";
- if (cmd.startsWith(inMemory)) {
- this.inMemoryCF = Boolean.parseBoolean(cmd.substring(inMemory.length()));
- continue;
- }
-
- final String useTags = "--usetags=";
- if (cmd.startsWith(useTags)) {
- this.useTags = Boolean.parseBoolean(cmd.substring(useTags.length()));
- continue;
- }
-
- final String noOfTags = "--nooftags=";
- if (cmd.startsWith(noOfTags)) {
- this.noOfTags = Integer.parseInt(cmd.substring(noOfTags.length()));
- continue;
- }
-
- final String host = "--host=";
- if (cmd.startsWith(host)) {
- cluster.add(cmd.substring(host.length()));
- continue;
- }
-
- Class<? extends Test> cmdClass = determineCommandClass(cmd);
- if (cmdClass != null) {
- getArgs(i + 1, args);
- if (cluster.isEmpty()) {
- String s = conf.get("stargate.hostname", "localhost");
- if (s.contains(":")) {
- cluster.add(s);
- } else {
- cluster.add(s, conf.getInt("stargate.port", 8080));
- }
- }
- // Create the shared connection once, after all options are parsed.
- this.connection = HConnectionManager.createConnection(getConf());
- runTest(cmdClass);
- errCode = 0;
- break;
- }
-
- printUsage();
- break;
- }
- } catch (Exception e) {
- LOG.error("Failed", e);
- }
-
- return errCode;
- }
-
- private Class<? extends Test> determineCommandClass(String cmd) {
- CmdDescriptor descriptor = commands.get(cmd);
- return descriptor != null ? descriptor.getCmdClass() : null;
- }
-
- /**
- * @param args
- */
- public static void main(final String[] args) throws Exception {
- int res = ToolRunner.run(new PerformanceEvaluation(HBaseConfiguration.create()), args);
- System.exit(res);
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
deleted file mode 100644
index adebc6b..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
+++ /dev/null
@@ -1,481 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.rest;
-
-import static org.junit.Assert.assertEquals;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.StringWriter;
-
-import javax.ws.rs.core.MediaType;
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Marshaller;
-import javax.xml.bind.Unmarshaller;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.rest.client.Client;
-import org.apache.hadoop.hbase.rest.client.Cluster;
-import org.apache.hadoop.hbase.rest.client.Response;
-import org.apache.hadoop.hbase.rest.model.CellModel;
-import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
-import org.apache.hadoop.hbase.rest.provider.JacksonProvider;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-
-public class RowResourceBase {
-
- protected static final String TABLE = "TestRowResource";
- protected static final String CFA = "a";
- protected static final String CFB = "b";
- protected static final String COLUMN_1 = CFA + ":1";
- protected static final String COLUMN_2 = CFB + ":2";
- protected static final String COLUMN_3 = CFA + ":";
- protected static final String ROW_1 = "testrow1";
- protected static final String VALUE_1 = "testvalue1";
- protected static final String ROW_2 = "testrow2";
- protected static final String VALUE_2 = "testvalue2";
- protected static final String ROW_3 = "testrow3";
- protected static final String VALUE_3 = "testvalue3";
- protected static final String ROW_4 = "testrow4";
- protected static final String VALUE_4 = "testvalue4";
-
- protected static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
- protected static final HBaseRESTTestingUtility REST_TEST_UTIL =
- new HBaseRESTTestingUtility();
- protected static Client client;
- protected static JAXBContext context;
- protected static Marshaller xmlMarshaller;
- protected static Unmarshaller xmlUnmarshaller;
- protected static Configuration conf;
- protected static ObjectMapper jsonMapper;
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- conf = TEST_UTIL.getConfiguration();
- TEST_UTIL.startMiniCluster(3);
- REST_TEST_UTIL.startServletContainer(conf);
- context = JAXBContext.newInstance(
- CellModel.class,
- CellSetModel.class,
- RowModel.class);
- xmlMarshaller = context.createMarshaller();
- xmlUnmarshaller = context.createUnmarshaller();
- jsonMapper = new JacksonProvider()
- .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE);
- client = new Client(new Cluster().add("localhost",
- REST_TEST_UTIL.getServletPort()));
- }
-
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- REST_TEST_UTIL.shutdownServletContainer();
- TEST_UTIL.shutdownMiniCluster();
- }
-
- @Before
- public void beforeMethod() throws Exception {
- HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
- if (admin.tableExists(TABLE)) {
- TEST_UTIL.deleteTable(Bytes.toBytes(TABLE));
- }
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
- htd.addFamily(new HColumnDescriptor(CFA));
- htd.addFamily(new HColumnDescriptor(CFB));
- admin.createTable(htd);
- }
-
- @After
- public void afterMethod() throws Exception {
- HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
- if (admin.tableExists(TABLE)) {
- TEST_UTIL.deleteTable(Bytes.toBytes(TABLE));
- }
- }
-
- static Response putValuePB(String table, String row, String column,
- String value) throws IOException {
- StringBuilder path = new StringBuilder();
- path.append('/');
- path.append(table);
- path.append('/');
- path.append(row);
- path.append('/');
- path.append(column);
- return putValuePB(path.toString(), table, row, column, value);
- }
-
- static Response putValuePB(String url, String table, String row,
- String column, String value) throws IOException {
- RowModel rowModel = new RowModel(row);
- rowModel.addCell(new CellModel(Bytes.toBytes(column),
- Bytes.toBytes(value)));
- CellSetModel cellSetModel = new CellSetModel();
- cellSetModel.addRow(rowModel);
- Response response = client.put(url, Constants.MIMETYPE_PROTOBUF,
- cellSetModel.createProtobufOutput());
- Thread.yield();
- return response;
- }
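-
- // Example (illustrative): putValuePB("TestRowResource", "testrow1", "a:1",
- // "testvalue1") issues PUT /TestRowResource/testrow1/a:1 with a protobuf
- // CellSetModel body holding the single cell a:1 = testvalue1.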
-
- protected static void checkValueXML(String url, String table, String row,
- String column, String value) throws IOException, JAXBException {
- Response response = getValueXML(url);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
- CellSetModel cellSet = (CellSetModel)
- xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
- RowModel rowModel = cellSet.getRows().get(0);
- CellModel cell = rowModel.getCells().get(0);
- assertEquals(Bytes.toString(cell.getColumn()), column);
- assertEquals(Bytes.toString(cell.getValue()), value);
- }
-
- protected static void checkValueXML(String table, String row, String column,
- String value) throws IOException, JAXBException {
- Response response = getValueXML(table, row, column);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
- CellSetModel cellSet = (CellSetModel)
- xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
- RowModel rowModel = cellSet.getRows().get(0);
- CellModel cell = rowModel.getCells().get(0);
- assertEquals(Bytes.toString(cell.getColumn()), column);
- assertEquals(Bytes.toString(cell.getValue()), value);
- }
-
- protected static Response getValuePB(String url) throws IOException {
- Response response = client.get(url, Constants.MIMETYPE_PROTOBUF);
- return response;
- }
-
- protected static Response putValueXML(String table, String row, String column,
- String value) throws IOException, JAXBException {
- StringBuilder path = new StringBuilder();
- path.append('/');
- path.append(table);
- path.append('/');
- path.append(row);
- path.append('/');
- path.append(column);
- return putValueXML(path.toString(), table, row, column, value);
- }
-
- protected static Response putValueXML(String url, String table, String row,
- String column, String value) throws IOException, JAXBException {
- RowModel rowModel = new RowModel(row);
- rowModel.addCell(new CellModel(Bytes.toBytes(column),
- Bytes.toBytes(value)));
- CellSetModel cellSetModel = new CellSetModel();
- cellSetModel.addRow(rowModel);
- StringWriter writer = new StringWriter();
- xmlMarshaller.marshal(cellSetModel, writer);
- Response response = client.put(url, Constants.MIMETYPE_XML,
- Bytes.toBytes(writer.toString()));
- Thread.yield();
- return response;
- }
-
- protected static Response getValuePB(String table, String row, String column)
- throws IOException {
- StringBuilder path = new StringBuilder();
- path.append('/');
- path.append(table);
- path.append('/');
- path.append(row);
- path.append('/');
- path.append(column);
- return getValuePB(path.toString());
- }
-
- protected static void checkValuePB(String table, String row, String column,
- String value) throws IOException {
- Response response = getValuePB(table, row, column);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
- CellSetModel cellSet = new CellSetModel();
- cellSet.getObjectFromMessage(response.getBody());
- RowModel rowModel = cellSet.getRows().get(0);
- CellModel cell = rowModel.getCells().get(0);
- assertEquals(Bytes.toString(cell.getColumn()), column);
- assertEquals(Bytes.toString(cell.getValue()), value);
- }
-
- protected static Response checkAndPutValuePB(String url, String table,
- String row, String column, String valueToCheck, String valueToPut)
- throws IOException {
- RowModel rowModel = new RowModel(row);
- rowModel.addCell(new CellModel(Bytes.toBytes(column),
- Bytes.toBytes(valueToPut)));
- rowModel.addCell(new CellModel(Bytes.toBytes(column),
- Bytes.toBytes(valueToCheck)));
- CellSetModel cellSetModel = new CellSetModel();
- cellSetModel.addRow(rowModel);
- Response response = client.put(url, Constants.MIMETYPE_PROTOBUF,
- cellSetModel.createProtobufOutput());
- Thread.yield();
- return response;
- }
-
- protected static Response checkAndPutValuePB(String table, String row,
- String column, String valueToCheck, String valueToPut) throws IOException {
- StringBuilder path = new StringBuilder();
- path.append('/');
- path.append(table);
- path.append('/');
- path.append(row);
- path.append("?check=put");
- return checkAndPutValuePB(path.toString(), table, row, column,
- valueToCheck, valueToPut);
- }
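-
- // Illustrative: the convenience overload above issues
- //   PUT /TestRowResource/testrow1?check=put
- // with the new value added first and the value to check added last; the
- // REST check-and-put treats the row model's last cell as the check cell.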
-
- protected static Response checkAndPutValueXML(String url, String table,
- String row, String column, String valueToCheck, String valueToPut)
- throws IOException, JAXBException {
- RowModel rowModel = new RowModel(row);
- rowModel.addCell(new CellModel(Bytes.toBytes(column),
- Bytes.toBytes(valueToPut)));
- rowModel.addCell(new CellModel(Bytes.toBytes(column),
- Bytes.toBytes(valueToCheck)));
- CellSetModel cellSetModel = new CellSetModel();
- cellSetModel.addRow(rowModel);
- StringWriter writer = new StringWriter();
- xmlMarshaller.marshal(cellSetModel, writer);
- Response response = client.put(url, Constants.MIMETYPE_XML,
- Bytes.toBytes(writer.toString()));
- Thread.yield();
- return response;
- }
-
- protected static Response checkAndPutValueXML(String table, String row,
- String column, String valueToCheck, String valueToPut)
- throws IOException, JAXBException {
- StringBuilder path = new StringBuilder();
- path.append('/');
- path.append(table);
- path.append('/');
- path.append(row);
- path.append("?check=put");
- return checkAndPutValueXML(path.toString(), table, row, column,
- valueToCheck, valueToPut);
- }
-
- protected static Response checkAndDeleteXML(String url, String table,
- String row, String column, String valueToCheck)
- throws IOException, JAXBException {
- RowModel rowModel = new RowModel(row);
- rowModel.addCell(new CellModel(Bytes.toBytes(column),
- Bytes.toBytes(valueToCheck)));
- CellSetModel cellSetModel = new CellSetModel();
- cellSetModel.addRow(rowModel);
- StringWriter writer = new StringWriter();
- xmlMarshaller.marshal(cellSetModel, writer);
- Response response = client.put(url, Constants.MIMETYPE_XML,
- Bytes.toBytes(writer.toString()));
- Thread.yield();
- return response;
- }
-
- protected static Response checkAndDeleteXML(String table, String row,
- String column, String valueToCheck) throws IOException, JAXBException {
- StringBuilder path = new StringBuilder();
- path.append('/');
- path.append(table);
- path.append('/');
- path.append(row);
- path.append("?check=delete");
- return checkAndDeleteXML(path.toString(), table, row, column, valueToCheck);
- }
-
- protected static Response checkAndDeleteJson(String table, String row,
- String column, String valueToCheck) throws IOException, JAXBException {
- StringBuilder path = new StringBuilder();
- path.append('/');
- path.append(table);
- path.append('/');
- path.append(row);
- path.append("?check=delete");
- return checkAndDeleteJson(path.toString(), table, row, column, valueToCheck);
- }
-
- protected static Response checkAndDeleteJson(String url, String table,
- String row, String column, String valueToCheck)
- throws IOException, JAXBException {
- RowModel rowModel = new RowModel(row);
- rowModel.addCell(new CellModel(Bytes.toBytes(column),
- Bytes.toBytes(valueToCheck)));
- CellSetModel cellSetModel = new CellSetModel();
- cellSetModel.addRow(rowModel);
- String jsonString = jsonMapper.writeValueAsString(cellSetModel);
- Response response = client.put(url, Constants.MIMETYPE_JSON,
- Bytes.toBytes(jsonString));
- Thread.yield();
- return response;
- }
-
- protected static Response checkAndDeletePB(String table, String row,
- String column, String value) throws IOException {
- StringBuilder path = new StringBuilder();
- path.append('/');
- path.append(table);
- path.append('/');
- path.append(row);
- path.append("?check=delete");
- return checkAndDeleteValuePB(path.toString(), table, row, column, value);
- }
-
- protected static Response checkAndDeleteValuePB(String url, String table,
- String row, String column, String valueToCheck)
- throws IOException {
- RowModel rowModel = new RowModel(row);
- rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes
- .toBytes(valueToCheck)));
- CellSetModel cellSetModel = new CellSetModel();
- cellSetModel.addRow(rowModel);
- Response response = client.put(url, Constants.MIMETYPE_PROTOBUF,
- cellSetModel.createProtobufOutput());
- Thread.yield();
- return response;
- }
-
- protected static Response getValueXML(String table, String startRow,
- String endRow, String column) throws IOException {
- StringBuilder path = new StringBuilder();
- path.append('/');
- path.append(table);
- path.append('/');
- path.append(startRow);
- path.append(",");
- path.append(endRow);
- path.append('/');
- path.append(column);
- return getValueXML(path.toString());
- }
-
- protected static Response getValueXML(String url) throws IOException {
- Response response = client.get(url, Constants.MIMETYPE_XML);
- return response;
- }
-
- protected static Response getValueJson(String url) throws IOException {
- Response response = client.get(url, Constants.MIMETYPE_JSON);
- return response;
- }
-
- protected static Response deleteValue(String table, String row, String column)
- throws IOException {
- StringBuilder path = new StringBuilder();
- path.append('/');
- path.append(table);
- path.append('/');
- path.append(row);
- path.append('/');
- path.append(column);
- Response response = client.delete(path.toString());
- Thread.yield();
- return response;
- }
-
- protected static Response getValueXML(String table, String row, String column)
- throws IOException {
- StringBuilder path = new StringBuilder();
- path.append('/');
- path.append(table);
- path.append('/');
- path.append(row);
- path.append('/');
- path.append(column);
- return getValueXML(path.toString());
- }
-
- protected static Response deleteRow(String table, String row)
- throws IOException {
- StringBuilder path = new StringBuilder();
- path.append('/');
- path.append(table);
- path.append('/');
- path.append(row);
- Response response = client.delete(path.toString());
- Thread.yield();
- return response;
- }
-
- protected static Response getValueJson(String table, String row,
- String column) throws IOException {
- StringBuilder path = new StringBuilder();
- path.append('/');
- path.append(table);
- path.append('/');
- path.append(row);
- path.append('/');
- path.append(column);
- return getValueJson(path.toString());
- }
-
- protected static void checkValueJSON(String table, String row, String column,
- String value) throws IOException, JAXBException {
- Response response = getValueJson(table, row, column);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
- ObjectMapper mapper = new JacksonProvider()
- .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE);
- CellSetModel cellSet = mapper.readValue(response.getBody(), CellSetModel.class);
- RowModel rowModel = cellSet.getRows().get(0);
- CellModel cell = rowModel.getCells().get(0);
- assertEquals(Bytes.toString(cell.getColumn()), column);
- assertEquals(Bytes.toString(cell.getValue()), value);
- }
-
- protected static Response putValueJson(String table, String row, String column,
- String value) throws IOException, JAXBException {
- StringBuilder path = new StringBuilder();
- path.append('/');
- path.append(table);
- path.append('/');
- path.append(row);
- path.append('/');
- path.append(column);
- return putValueJson(path.toString(), table, row, column, value);
- }
-
- protected static Response putValueJson(String url, String table, String row, String column,
- String value) throws IOException, JAXBException {
- RowModel rowModel = new RowModel(row);
- rowModel.addCell(new CellModel(Bytes.toBytes(column),
- Bytes.toBytes(value)));
- CellSetModel cellSetModel = new CellSetModel();
- cellSetModel.addRow(rowModel);
- String jsonString = jsonMapper.writeValueAsString(cellSetModel);
- Response response = client.put(url, Constants.MIMETYPE_JSON,
- Bytes.toBytes(jsonString));
- Thread.yield();
- return response;
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java
deleted file mode 100644
index 89e1b20..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.rest;
-
-import static org.junit.Assert.assertEquals;
-
-import java.io.IOException;
-
-import javax.xml.bind.JAXBException;
-
-import org.apache.hadoop.hbase.MediumTests;
-import org.apache.hadoop.hbase.rest.client.Response;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-
-@Category(MediumTests.class)
-public class TestDeleteRow extends RowResourceBase {
-
- @Test
- public void testDeleteNonExistentColumn() throws Exception {
- Response response = putValueJson(TABLE, ROW_1, COLUMN_1, VALUE_1);
- assertEquals(response.getCode(), 200);
-
- response = checkAndDeleteJson(TABLE, ROW_1, COLUMN_1, VALUE_2);
- assertEquals(304, response.getCode());
- assertEquals(200, getValueJson(TABLE, ROW_1, COLUMN_1).getCode());
-
- response = checkAndDeleteJson(TABLE, ROW_2, COLUMN_1, VALUE_2);
- assertEquals(304, response.getCode());
- assertEquals(200, getValueJson(TABLE, ROW_1, COLUMN_1).getCode());
-
- response = checkAndDeleteJson(TABLE, ROW_1, "dummy", VALUE_1);
- assertEquals(400, response.getCode());
- assertEquals(200, getValueJson(TABLE, ROW_1, COLUMN_1).getCode());
-
- response = checkAndDeleteJson(TABLE, ROW_1, "dummy:test", VALUE_1);
- assertEquals(404, response.getCode());
- assertEquals(200, getValueJson(TABLE, ROW_1, COLUMN_1).getCode());
-
- response = checkAndDeleteJson(TABLE, ROW_1, "a:test", VALUE_1);
- assertEquals(304, response.getCode());
- assertEquals(200, getValueJson(TABLE, ROW_1, COLUMN_1).getCode());
- }
-
- @Test
- public void testDeleteXML() throws IOException, JAXBException {
- Response response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
- assertEquals(response.getCode(), 200);
- response = putValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2);
- assertEquals(response.getCode(), 200);
- checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
- checkValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2);
-
- response = deleteValue(TABLE, ROW_1, COLUMN_1);
- assertEquals(response.getCode(), 200);
- response = getValueXML(TABLE, ROW_1, COLUMN_1);
- assertEquals(response.getCode(), 404);
- checkValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2);
-
- response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
- assertEquals(response.getCode(), 200);
- response = checkAndDeletePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
- assertEquals(response.getCode(), 200);
- response = getValueXML(TABLE, ROW_1, COLUMN_1);
- assertEquals(response.getCode(), 404);
-
- response = deleteRow(TABLE, ROW_1);
- assertEquals(response.getCode(), 200);
- response = getValueXML(TABLE, ROW_1, COLUMN_1);
- assertEquals(response.getCode(), 404);
- response = getValueXML(TABLE, ROW_1, COLUMN_2);
- assertEquals(response.getCode(), 404);
-
- // Delete a row in a non-existent table
- response = deleteValue("dummy", ROW_1, COLUMN_1);
- assertEquals(response.getCode(), 404);
-
- // Delete a non-existent column
- response = deleteValue(TABLE, ROW_1, "dummy");
- assertEquals(response.getCode(), 404);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestGZIPResponseWrapper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestGZIPResponseWrapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestGZIPResponseWrapper.java
deleted file mode 100644
index 763765f..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestGZIPResponseWrapper.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.rest;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-import java.io.IOException;
-
-import javax.servlet.ServletOutputStream;
-import javax.servlet.http.HttpServletResponse;
-
-import org.apache.hadoop.hbase.SmallTests;
-import org.apache.hadoop.hbase.rest.filter.GZIPResponseStream;
-import org.apache.hadoop.hbase.rest.filter.GZIPResponseWrapper;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(SmallTests.class)
-public class TestGZIPResponseWrapper {
-
- private final HttpServletResponse response = mock(HttpServletResponse.class);
- private final GZIPResponseWrapper wrapper = new GZIPResponseWrapper(response);
-
- /**
- * wrapper should pass all headers through to the wrapped response except "content-length"
- */
- @Test
- public void testHeader() throws IOException {
- wrapper.setStatus(200);
- verify(response).setStatus(200);
- wrapper.addHeader("header", "header value");
- verify(response).addHeader("header", "header value");
- wrapper.addHeader("content-length", "header value2");
- verify(response, never()).addHeader("content-length", "header value2");
-
- wrapper.setIntHeader("header", 5);
- verify(response).setIntHeader("header", 5);
- wrapper.setIntHeader("content-length", 4);
- verify(response, never()).setIntHeader("content-length", 4);
-
- wrapper.setHeader("set-header", "new value");
- verify(response).setHeader("set-header", "new value");
- wrapper.setHeader("content-length", "content length value");
- verify(response, never()).setHeader("content-length", "content length value");
-
- wrapper.sendRedirect("location");
- verify(response).sendRedirect("location");
-
- wrapper.flushBuffer();
- verify(response).flushBuffer();
- }
-
- @Test
- public void testResetBuffer() throws IOException {
- when(response.isCommitted()).thenReturn(false);
- ServletOutputStream out = mock(ServletOutputStream.class);
- when(response.getOutputStream()).thenReturn(out);
-
- ServletOutputStream servletOutput = wrapper.getOutputStream();
- assertEquals(GZIPResponseStream.class, servletOutput.getClass());
- wrapper.resetBuffer();
- verify(response).setHeader("Content-Encoding", null);
-
- when(response.isCommitted()).thenReturn(true);
- servletOutput = wrapper.getOutputStream();
- assertEquals(out.getClass(), servletOutput.getClass());
- assertNotNull(wrapper.getWriter());
- }
-
- @Test
- public void testReset() throws IOException {
- when(response.isCommitted()).thenReturn(false);
- ServletOutputStream out = mock(ServletOutputStream.class);
- when(response.getOutputStream()).thenReturn(out);
-
- ServletOutputStream servletOutput = wrapper.getOutputStream();
- verify(response).addHeader("Content-Encoding", "gzip");
- assertEquals(GZIPResponseStream.class, servletOutput.getClass());
- wrapper.reset();
- verify(response).setHeader("Content-Encoding", null);
-
- when(response.isCommitted()).thenReturn(true);
- servletOutput = wrapper.getOutputStream();
- assertEquals(out.getClass(), servletOutput.getClass());
- }
-
- @Test
- public void testSendError() throws IOException {
- wrapper.sendError(404);
- verify(response).sendError(404);
-
- wrapper.sendError(404, "error message");
- verify(response).sendError(404, "error message");
- }
-
-}
[35/38] HBASE-12197 Move rest to it's on module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java
new file mode 100644
index 0000000..2cfea1b
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java
@@ -0,0 +1,147 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.filter;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+
+import javax.servlet.ServletOutputStream;
+import javax.servlet.http.HttpServletResponse;
+import javax.servlet.http.HttpServletResponseWrapper;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class GZIPResponseWrapper extends HttpServletResponseWrapper {
+ private HttpServletResponse response;
+ private ServletOutputStream os;
+ private PrintWriter writer;
+ private boolean compress = true;
+
+ public GZIPResponseWrapper(HttpServletResponse response) {
+ super(response);
+ this.response = response;
+ }
+
+ @Override
+ public void setStatus(int status) {
+ super.setStatus(status);
+ if (status < 200 || status >= 300) {
+ compress = false;
+ }
+ }
+
+ @Override
+ public void addHeader(String name, String value) {
+ if (!"content-length".equalsIgnoreCase(name)) {
+ super.addHeader(name, value);
+ }
+ }
+
+ @Override
+ public void setContentLength(int length) {
+ // do nothing
+ }
+
+ @Override
+ public void setIntHeader(String name, int value) {
+ if (!"content-length".equalsIgnoreCase(name)) {
+ super.setIntHeader(name, value);
+ }
+ }
+
+ @Override
+ public void setHeader(String name, String value) {
+ if (!"content-length".equalsIgnoreCase(name)) {
+ super.setHeader(name, value);
+ }
+ }
+
+ @Override
+ public void flushBuffer() throws IOException {
+ if (writer != null) {
+ writer.flush();
+ }
+ if (os != null && (os instanceof GZIPResponseStream)) {
+ ((GZIPResponseStream)os).finish();
+ } else {
+ getResponse().flushBuffer();
+ }
+ }
+
+ @Override
+ public void reset() {
+ super.reset();
+ if (os != null && (os instanceof GZIPResponseStream)) {
+ ((GZIPResponseStream)os).resetBuffer();
+ }
+ writer = null;
+ os = null;
+ compress = true;
+ }
+
+ @Override
+ public void resetBuffer() {
+ super.resetBuffer();
+ if (os != null && (os instanceof GZIPResponseStream)) {
+ ((GZIPResponseStream)os).resetBuffer();
+ }
+ writer = null;
+ os = null;
+ }
+
+ @Override
+ public void sendError(int status, String msg) throws IOException {
+ resetBuffer();
+ super.sendError(status, msg);
+ }
+
+ @Override
+ public void sendError(int status) throws IOException {
+ resetBuffer();
+ super.sendError(status);
+ }
+
+ @Override
+ public void sendRedirect(String location) throws IOException {
+ resetBuffer();
+ super.sendRedirect(location);
+ }
+
+ @Override
+ public ServletOutputStream getOutputStream() throws IOException {
+ if (os == null) {
+ if (!response.isCommitted() && compress) {
+ os = new GZIPResponseStream(response);
+ } else {
+ os = response.getOutputStream();
+ }
+ }
+ return os;
+ }
+
+ @Override
+ public PrintWriter getWriter() throws IOException {
+ if (writer == null) {
+ writer = new PrintWriter(getOutputStream());
+ }
+ return writer;
+ }
+}
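
The following is a minimal usage sketch, not part of this commit, showing how the wrapper above is meant to be driven (the method and variable names are invented): for a 2xx response the returned stream is a GZIPResponseStream, content-length headers are dropped because the compressed length differs from the original, and flushBuffer() finishes the gzip trailer.

    import java.io.IOException;

    import javax.servlet.ServletOutputStream;
    import javax.servlet.http.HttpServletResponse;

    import org.apache.hadoop.hbase.rest.filter.GZIPResponseWrapper;

    public class GZIPResponseWrapperSketch {
      static void writeCompressed(HttpServletResponse raw, byte[] body) throws IOException {
        GZIPResponseWrapper response = new GZIPResponseWrapper(raw);
        response.setStatus(200);                  // a non-2xx status disables compression
        response.setContentLength(body.length);   // ignored: the gzipped length differs
        ServletOutputStream out = response.getOutputStream(); // a GZIPResponseStream here
        out.write(body);
        response.flushBuffer();                   // finishes the gzip trailer
      }
    }
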
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java
new file mode 100644
index 0000000..4995b86
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java
@@ -0,0 +1,85 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.filter;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.StringTokenizer;
+
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
+public class GzipFilter implements Filter {
+ private Set<String> mimeTypes = new HashSet<String>();
+
+ @Override
+ public void init(FilterConfig filterConfig) throws ServletException {
+ String s = filterConfig.getInitParameter("mimeTypes");
+ if (s != null) {
+ StringTokenizer tok = new StringTokenizer(s, ",", false);
+ while (tok.hasMoreTokens()) {
+ mimeTypes.add(tok.nextToken());
+ }
+ }
+ }
+
+ @Override
+ public void destroy() {
+ }
+
+ @Override
+ public void doFilter(ServletRequest req, ServletResponse rsp,
+ FilterChain chain) throws IOException, ServletException {
+ HttpServletRequest request = (HttpServletRequest)req;
+ HttpServletResponse response = (HttpServletResponse)rsp;
+ String contentEncoding = request.getHeader("content-encoding");
+ String acceptEncoding = request.getHeader("accept-encoding");
+ String contentType = request.getHeader("content-type");
+ if ((contentEncoding != null) &&
+ (contentEncoding.toLowerCase().indexOf("gzip") > -1)) {
+ request = new GZIPRequestWrapper(request);
+ }
+ if (((acceptEncoding != null) &&
+ (acceptEncoding.toLowerCase().indexOf("gzip") > -1)) ||
+ ((contentType != null) && mimeTypes.contains(contentType))) {
+ response = new GZIPResponseWrapper(response);
+ }
+ chain.doFilter(request, response);
+ if (response instanceof GZIPResponseWrapper) {
+ OutputStream os = response.getOutputStream();
+ if (os instanceof GZIPResponseStream) {
+ ((GZIPResponseStream)os).finish();
+ }
+ }
+ }
+
+}
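
As a hedged illustration of how this filter could be wired up (assuming a Servlet 3.0+ container, which is not something this branch itself requires; the filter name, mime type list, and URL mapping below are invented): mimeTypes is a comma-separated init parameter, and per doFilter above the response is gzipped when the client accepts gzip or the request's content type appears in that set.

    import javax.servlet.FilterRegistration;
    import javax.servlet.ServletContext;

    public class GzipFilterSetupSketch {
      public static void register(ServletContext context) {
        FilterRegistration.Dynamic gzip =
            context.addFilter("gzip", "org.apache.hadoop.hbase.rest.filter.GzipFilter");
        // Also compress when the request's Content-Type matches one of these,
        // even if no Accept-Encoding header was sent.
        gzip.setInitParameter("mimeTypes", "text/xml,application/json");
        gzip.addMappingForUrlPatterns(null, true, "/*");
      }
    }
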
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java
new file mode 100644
index 0000000..349d352
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java
@@ -0,0 +1,207 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlValue;
+
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell;
+import org.codehaus.jackson.annotate.JsonProperty;
+
+/**
+ * Representation of a cell. A cell is a single value associated with a column
+ * and an optional qualifier, and either the timestamp when it was stored or
+ * the user-provided timestamp if one was explicitly supplied.
+ *
+ * <pre>
+ * <complexType name="Cell">
+ * <sequence>
+ * <element name="value" maxOccurs="1" minOccurs="1">
+ * <simpleType>
+ * <restriction base="base64Binary"/>
+ * </simpleType>
+ * </element>
+ * </sequence>
+ * <attribute name="column" type="base64Binary" />
+ * <attribute name="timestamp" type="int" />
+ * </complexType>
+ * </pre>
+ */
+@XmlRootElement(name="Cell")
+@XmlAccessorType(XmlAccessType.FIELD)
+@InterfaceAudience.Private
+public class CellModel implements ProtobufMessageHandler, Serializable {
+ private static final long serialVersionUID = 1L;
+
+ @JsonProperty("column")
+ @XmlAttribute
+ private byte[] column;
+
+ @JsonProperty("timestamp")
+ @XmlAttribute
+ private long timestamp = HConstants.LATEST_TIMESTAMP;
+
+ @JsonProperty("$")
+ @XmlValue
+ private byte[] value;
+
+ /**
+ * Default constructor
+ */
+ public CellModel() {}
+
+ /**
+ * Constructor
+ * @param column
+ * @param value
+ */
+ public CellModel(byte[] column, byte[] value) {
+ this(column, HConstants.LATEST_TIMESTAMP, value);
+ }
+
+ /**
+ * Constructor
+ * @param column
+ * @param qualifier
+ * @param value
+ */
+ public CellModel(byte[] column, byte[] qualifier, byte[] value) {
+ this(column, qualifier, HConstants.LATEST_TIMESTAMP, value);
+ }
+
+ /**
+ * Constructor from KeyValue
+ * @param kv
+ */
+ public CellModel(KeyValue kv) {
+ this(kv.getFamily(), kv.getQualifier(), kv.getTimestamp(), kv.getValue());
+ }
+
+ /**
+ * Constructor
+ * @param column
+ * @param timestamp
+ * @param value
+ */
+ public CellModel(byte[] column, long timestamp, byte[] value) {
+ this.column = column;
+ this.timestamp = timestamp;
+ this.value = value;
+ }
+
+ /**
+ * Constructor
+ * @param column
+ * @param qualifier
+ * @param timestamp
+ * @param value
+ */
+ public CellModel(byte[] column, byte[] qualifier, long timestamp,
+ byte[] value) {
+ this.column = KeyValue.makeColumn(column, qualifier);
+ this.timestamp = timestamp;
+ this.value = value;
+ }
+
+ /**
+ * @return the column
+ */
+ public byte[] getColumn() {
+ return column;
+ }
+
+ /**
+ * @param column the column to set
+ */
+ public void setColumn(byte[] column) {
+ this.column = column;
+ }
+
+ /**
+ * @return true if the timestamp property has been specified by the
+ * user
+ */
+ public boolean hasUserTimestamp() {
+ return timestamp != HConstants.LATEST_TIMESTAMP;
+ }
+
+ /**
+ * @return the timestamp
+ */
+ public long getTimestamp() {
+ return timestamp;
+ }
+
+ /**
+ * @param timestamp the timestamp to set
+ */
+ public void setTimestamp(long timestamp) {
+ this.timestamp = timestamp;
+ }
+
+ /**
+ * @return the value
+ */
+ public byte[] getValue() {
+ return value;
+ }
+
+ /**
+ * @param value the value to set
+ */
+ public void setValue(byte[] value) {
+ this.value = value;
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ Cell.Builder builder = Cell.newBuilder();
+ builder.setColumn(ByteStringer.wrap(getColumn()));
+ builder.setData(ByteStringer.wrap(getValue()));
+ if (hasUserTimestamp()) {
+ builder.setTimestamp(getTimestamp());
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException {
+ Cell.Builder builder = Cell.newBuilder();
+ builder.mergeFrom(message);
+ setColumn(builder.getColumn().toByteArray());
+ setValue(builder.getData().toByteArray());
+ if (builder.hasTimestamp()) {
+ setTimestamp(builder.getTimestamp());
+ }
+ return this;
+ }
+}
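
A small sketch of the protobuf round trip this class implements (not part of the commit; the family, qualifier, timestamp, and value below are made up):

    import org.apache.hadoop.hbase.rest.model.CellModel;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CellModelSketch {
      public static void main(String[] args) throws Exception {
        CellModel cell = new CellModel(Bytes.toBytes("cf"), Bytes.toBytes("qual"),
            1245219839331L, Bytes.toBytes("value"));
        byte[] wire = cell.createProtobufOutput();
        CellModel copy = new CellModel();
        copy.getObjectFromMessage(wire);
        // A user-supplied timestamp survives the round trip; LATEST_TIMESTAMP
        // would not be serialized at all.
        System.out.println(copy.hasUserTimestamp() + " " + copy.getTimestamp());
      }
    }
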
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java
new file mode 100644
index 0000000..094da36
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java
@@ -0,0 +1,152 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell;
+import org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet;
+
+/**
+ * Representation of a grouping of cells. May contain cells from more than
+ * one row. Encapsulates RowModel and CellModel models.
+ *
+ * <pre>
+ * <complexType name="CellSet">
+ * <sequence>
+ * <element name="row" type="tns:Row" maxOccurs="unbounded"
+ * minOccurs="1"></element>
+ * </sequence>
+ * </complexType>
+ *
+ * <complexType name="Row">
+ * <sequence>
+ * <element name="key" type="base64Binary"></element>
+ * <element name="cell" type="tns:Cell"
+ * maxOccurs="unbounded" minOccurs="1"></element>
+ * </sequence>
+ * </complexType>
+ *
+ * <complexType name="Cell">
+ * <sequence>
+ * <element name="value" maxOccurs="1" minOccurs="1">
+ * <simpleType>
+ * <restriction base="base64Binary"/>
+ * </simpleType>
+ * </element>
+ * </sequence>
+ * <attribute name="column" type="base64Binary" />
+ * <attribute name="timestamp" type="int" />
+ * </complexType>
+ * </pre>
+ */
+@XmlRootElement(name="CellSet")
+@XmlAccessorType(XmlAccessType.FIELD)
+@InterfaceAudience.Private
+public class CellSetModel implements Serializable, ProtobufMessageHandler {
+
+ private static final long serialVersionUID = 1L;
+
+ @XmlElement(name="Row")
+ private List<RowModel> rows;
+
+ /**
+ * Constructor
+ */
+ public CellSetModel() {
+ this.rows = new ArrayList<RowModel>();
+ }
+
+ /**
+ * @param rows the rows
+ */
+ public CellSetModel(List<RowModel> rows) {
+ super();
+ this.rows = rows;
+ }
+
+ /**
+ * Add a row to this cell set
+ * @param row the row
+ */
+ public void addRow(RowModel row) {
+ rows.add(row);
+ }
+
+ /**
+ * @return the rows
+ */
+ public List<RowModel> getRows() {
+ return rows;
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ CellSet.Builder builder = CellSet.newBuilder();
+ for (RowModel row: getRows()) {
+ CellSet.Row.Builder rowBuilder = CellSet.Row.newBuilder();
+ rowBuilder.setKey(ByteStringer.wrap(row.getKey()));
+ for (CellModel cell: row.getCells()) {
+ Cell.Builder cellBuilder = Cell.newBuilder();
+ cellBuilder.setColumn(ByteStringer.wrap(cell.getColumn()));
+ cellBuilder.setData(ByteStringer.wrap(cell.getValue()));
+ if (cell.hasUserTimestamp()) {
+ cellBuilder.setTimestamp(cell.getTimestamp());
+ }
+ rowBuilder.addValues(cellBuilder);
+ }
+ builder.addRows(rowBuilder);
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException {
+ CellSet.Builder builder = CellSet.newBuilder();
+ builder.mergeFrom(message);
+ for (CellSet.Row row: builder.getRowsList()) {
+ RowModel rowModel = new RowModel(row.getKey().toByteArray());
+ for (Cell cell: row.getValuesList()) {
+ long timestamp = HConstants.LATEST_TIMESTAMP;
+ if (cell.hasTimestamp()) {
+ timestamp = cell.getTimestamp();
+ }
+ rowModel.addCell(
+ new CellModel(cell.getColumn().toByteArray(), timestamp,
+ cell.getData().toByteArray()));
+ }
+ addRow(rowModel);
+ }
+ return this;
+ }
+}
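
For orientation, a brief sketch (assumed usage, not from the commit) of assembling the CellSet/Row/Cell hierarchy described above and round-tripping it through the CellSet protobuf message; the row key and column name are invented:

    import org.apache.hadoop.hbase.rest.model.CellModel;
    import org.apache.hadoop.hbase.rest.model.CellSetModel;
    import org.apache.hadoop.hbase.rest.model.RowModel;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CellSetModelSketch {
      public static void main(String[] args) throws Exception {
        RowModel row = new RowModel("testrow1");
        row.addCell(new CellModel(Bytes.toBytes("a:1"), Bytes.toBytes("v1")));
        CellSetModel cellSet = new CellSetModel();
        cellSet.addRow(row);
        byte[] wire = cellSet.createProtobufOutput();   // CellSet protobuf bytes
        CellSetModel copy = new CellSetModel();
        copy.getObjectFromMessage(wire);                // rebuilds rows and cells
        System.out.println(copy.getRows().get(0).getCells().size()); // 1
      }
    }
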
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java
new file mode 100644
index 0000000..ba0eed8
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java
@@ -0,0 +1,241 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.Serializable;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import javax.xml.bind.annotation.XmlAnyAttribute;
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.namespace.QName;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.codehaus.jackson.annotate.JsonAnyGetter;
+import org.codehaus.jackson.annotate.JsonAnySetter;
+
+/**
+ * Representation of a column family schema.
+ *
+ * <pre>
+ * <complexType name="ColumnSchema">
+ * <attribute name="name" type="string"></attribute>
+ * <anyAttribute></anyAttribute>
+ * </complexType>
+ * </pre>
+ */
+@XmlRootElement(name="ColumnSchema")
+@InterfaceAudience.Private
+public class ColumnSchemaModel implements Serializable {
+ private static final long serialVersionUID = 1L;
+ private static QName BLOCKCACHE = new QName(HColumnDescriptor.BLOCKCACHE);
+ private static QName BLOCKSIZE = new QName(HColumnDescriptor.BLOCKSIZE);
+ private static QName BLOOMFILTER = new QName(HColumnDescriptor.BLOOMFILTER);
+ private static QName COMPRESSION = new QName(HColumnDescriptor.COMPRESSION);
+ private static QName IN_MEMORY = new QName(HConstants.IN_MEMORY);
+ private static QName TTL = new QName(HColumnDescriptor.TTL);
+ private static QName VERSIONS = new QName(HConstants.VERSIONS);
+
+ private String name;
+ private Map<QName,Object> attrs = new LinkedHashMap<QName,Object>();
+
+ /**
+ * Default constructor
+ */
+ public ColumnSchemaModel() {}
+
+ /**
+ * Add an attribute to the column family schema
+ * @param name the attribute name
+ * @param value the attribute value
+ */
+ @JsonAnySetter
+ public void addAttribute(String name, Object value) {
+ attrs.put(new QName(name), value);
+ }
+
+ /**
+ * @param name the attribute name
+ * @return the attribute value
+ */
+ public String getAttribute(String name) {
+ Object o = attrs.get(new QName(name));
+ return o != null ? o.toString(): null;
+ }
+
+ /**
+ * @return the column name
+ */
+ @XmlAttribute
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * @return the map for holding unspecified (user) attributes
+ */
+ @XmlAnyAttribute
+ @JsonAnyGetter
+ public Map<QName,Object> getAny() {
+ return attrs;
+ }
+
+ /**
+ * @param name the table name
+ */
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("{ NAME => '");
+ sb.append(name);
+ sb.append('\'');
+ for (Map.Entry<QName,Object> e: attrs.entrySet()) {
+ sb.append(", ");
+ sb.append(e.getKey().getLocalPart());
+ sb.append(" => '");
+ sb.append(e.getValue().toString());
+ sb.append('\'');
+ }
+ sb.append(" }");
+ return sb.toString();
+ }
+
+ // getters and setters for common schema attributes
+
+ // cannot be standard bean type getters and setters, otherwise this would
+ // confuse JAXB
+
+ /**
+ * @return true if the BLOCKCACHE attribute is present and true
+ */
+ public boolean __getBlockcache() {
+ Object o = attrs.get(BLOCKCACHE);
+ return o != null ?
+ Boolean.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKCACHE;
+ }
+
+ /**
+ * @return the value of the BLOCKSIZE attribute or its default if it is unset
+ */
+ public int __getBlocksize() {
+ Object o = attrs.get(BLOCKSIZE);
+ return o != null ?
+ Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKSIZE;
+ }
+
+ /**
+ * @return the value of the BLOOMFILTER attribute or its default if unset
+ */
+ public String __getBloomfilter() {
+ Object o = attrs.get(BLOOMFILTER);
+ return o != null ? o.toString() : HColumnDescriptor.DEFAULT_BLOOMFILTER;
+ }
+
+ /**
+ * @return the value of the COMPRESSION attribute or its default if unset
+ */
+ public String __getCompression() {
+ Object o = attrs.get(COMPRESSION);
+ return o != null ? o.toString() : HColumnDescriptor.DEFAULT_COMPRESSION;
+ }
+
+ /**
+ * @return true if the IN_MEMORY attribute is present and true
+ */
+ public boolean __getInMemory() {
+ Object o = attrs.get(IN_MEMORY);
+ return o != null ?
+ Boolean.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_IN_MEMORY;
+ }
+
+ /**
+ * @return the value of the TTL attribute or its default if it is unset
+ */
+ public int __getTTL() {
+ Object o = attrs.get(TTL);
+ return o != null ?
+ Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_TTL;
+ }
+
+ /**
+ * @return the value of the VERSIONS attribute or its default if it is unset
+ */
+ public int __getVersions() {
+ Object o = attrs.get(VERSIONS);
+ return o != null ?
+ Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_VERSIONS;
+ }
+
+ /**
+ * @param value the desired value of the BLOCKSIZE attribute
+ */
+ public void __setBlocksize(int value) {
+ attrs.put(BLOCKSIZE, Integer.toString(value));
+ }
+
+ /**
+ * @param value the desired value of the BLOCKCACHE attribute
+ */
+ public void __setBlockcache(boolean value) {
+ attrs.put(BLOCKCACHE, Boolean.toString(value));
+ }
+
+ public void __setBloomfilter(String value) {
+ attrs.put(BLOOMFILTER, value);
+ }
+
+ /**
+ * @param value the desired value of the COMPRESSION attribute
+ */
+ public void __setCompression(String value) {
+ attrs.put(COMPRESSION, value);
+ }
+
+ /**
+ * @param value the desired value of the IN_MEMORY attribute
+ */
+ public void __setInMemory(boolean value) {
+ attrs.put(IN_MEMORY, Boolean.toString(value));
+ }
+
+ /**
+ * @param value the desired value of the TTL attribute
+ */
+ public void __setTTL(int value) {
+ attrs.put(TTL, Integer.toString(value));
+ }
+
+ /**
+ * @param value the desired value of the VERSIONS attribute
+ */
+ public void __setVersions(int value) {
+ attrs.put(VERSIONS, Integer.toString(value));
+ }
+}
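
A minimal sketch of populating this model (assumed usage; the family name and attribute values are invented). Well-known attributes go through the double-underscore accessors, which avoid the bean-style names that would confuse JAXB, while arbitrary attributes use addAttribute:

    import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel;

    public class ColumnSchemaModelSketch {
      public static void main(String[] args) {
        ColumnSchemaModel family = new ColumnSchemaModel();
        family.setName("info");
        family.__setVersions(3);                 // stored as the VERSIONS attribute
        family.__setCompression("GZ");
        family.addAttribute("MY_CUSTOM", "x");   // unspecified attributes are kept too
        // Prints roughly: { NAME => 'info', VERSIONS => '3', COMPRESSION => 'GZ', ... }
        System.out.println(family);
      }
    }
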
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java
new file mode 100644
index 0000000..596c754
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java
@@ -0,0 +1,151 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+import org.codehaus.jackson.annotate.JsonProperty;
+
+/**
+ * Representation of a row. A row is a related set of cells, grouped by common
+ * row key. RowModels do not appear in results by themselves. They are always
+ * encapsulated within CellSetModels.
+ *
+ * <pre>
+ * <complexType name="Row">
+ * <sequence>
+ * <element name="key" type="base64Binary"></element>
+ * <element name="cell" type="tns:Cell"
+ * maxOccurs="unbounded" minOccurs="1"></element>
+ * </sequence>
+ * </complexType>
+ * </pre>
+ */
+@XmlRootElement(name="Row")
+@XmlAccessorType(XmlAccessType.FIELD)
+@InterfaceAudience.Private
+public class RowModel implements ProtobufMessageHandler, Serializable {
+ private static final long serialVersionUID = 1L;
+
+ @JsonProperty("key")
+ @XmlAttribute
+ private byte[] key;
+
+ @JsonProperty("Cell")
+ @XmlElement(name="Cell")
+ private List<CellModel> cells = new ArrayList<CellModel>();
+
+
+ /**
+ * Default constructor
+ */
+ public RowModel() { }
+
+ /**
+ * Constructor
+ * @param key the row key
+ */
+ public RowModel(final String key) {
+ this(key.getBytes());
+ }
+
+ /**
+ * Constructor
+ * @param key the row key
+ */
+ public RowModel(final byte[] key) {
+ this.key = key;
+ cells = new ArrayList<CellModel>();
+ }
+
+ /**
+ * Constructor
+ * @param key the row key
+ * @param cells the cells
+ */
+ public RowModel(final String key, final List<CellModel> cells) {
+ this(key.getBytes(), cells);
+ }
+
+ /**
+ * Constructor
+ * @param key the row key
+ * @param cells the cells
+ */
+ public RowModel(final byte[] key, final List<CellModel> cells) {
+ this.key = key;
+ this.cells = cells;
+ }
+
+ /**
+ * Adds a cell to the list of cells for this row
+ * @param cell the cell
+ */
+ public void addCell(CellModel cell) {
+ cells.add(cell);
+ }
+
+ /**
+ * @return the row key
+ */
+ public byte[] getKey() {
+ return key;
+ }
+
+ /**
+ * @param key the row key
+ */
+ public void setKey(byte[] key) {
+ this.key = key;
+ }
+
+ /**
+ * @return the cells
+ */
+ public List<CellModel> getCells() {
+ return cells;
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ // there is no standalone row protobuf message
+ throw new UnsupportedOperationException(
+ "no protobuf equivalent to RowModel");
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException {
+ // there is no standalone row protobuf message
+ throw new UnsupportedOperationException(
+ "no protobuf equivalent to RowModel");
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
new file mode 100644
index 0000000..2ffdd4f
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
@@ -0,0 +1,852 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
+import org.apache.hadoop.hbase.filter.BitComparator;
+import org.apache.hadoop.hbase.filter.ByteArrayComparable;
+import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
+import org.apache.hadoop.hbase.filter.ColumnPaginationFilter;
+import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
+import org.apache.hadoop.hbase.filter.ColumnRangeFilter;
+import org.apache.hadoop.hbase.filter.CompareFilter;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.DependentColumnFilter;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
+import org.apache.hadoop.hbase.filter.InclusiveStopFilter;
+import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
+import org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter;
+import org.apache.hadoop.hbase.filter.NullComparator;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.filter.PrefixFilter;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.hbase.filter.RandomRowFilter;
+import org.apache.hadoop.hbase.filter.RegexStringComparator;
+import org.apache.hadoop.hbase.filter.RowFilter;
+import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter;
+import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
+import org.apache.hadoop.hbase.filter.SkipFilter;
+import org.apache.hadoop.hbase.filter.SubstringComparator;
+import org.apache.hadoop.hbase.filter.TimestampsFilter;
+import org.apache.hadoop.hbase.filter.ValueFilter;
+import org.apache.hadoop.hbase.filter.WhileMatchFilter;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner;
+import org.apache.hadoop.hbase.security.visibility.Authorizations;
+import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import com.google.protobuf.ByteString;
+import com.sun.jersey.api.json.JSONConfiguration;
+import com.sun.jersey.api.json.JSONJAXBContext;
+import com.sun.jersey.api.json.JSONMarshaller;
+import com.sun.jersey.api.json.JSONUnmarshaller;
+
+/**
+ * A representation of Scanner parameters.
+ *
+ * <pre>
+ * <complexType name="Scanner">
+ * <sequence>
+ * <element name="column" type="base64Binary" minOccurs="0" maxOccurs="unbounded"/>
+ * <element name="filter" type="string" minOccurs="0" maxOccurs="1"></element>
+ * </sequence>
+ * <attribute name="startRow" type="base64Binary"></attribute>
+ * <attribute name="endRow" type="base64Binary"></attribute>
+ * <attribute name="batch" type="int"></attribute>
+ * <attribute name="caching" type="int"></attribute>
+ * <attribute name="startTime" type="int"></attribute>
+ * <attribute name="endTime" type="int"></attribute>
+ * <attribute name="maxVersions" type="int"></attribute>
+ * </complexType>
+ * </pre>
+ */
+@XmlRootElement(name="Scanner")
+@InterfaceAudience.Private
+public class ScannerModel implements ProtobufMessageHandler, Serializable {
+
+ private static final long serialVersionUID = 1L;
+
+ private byte[] startRow = HConstants.EMPTY_START_ROW;
+ private byte[] endRow = HConstants.EMPTY_END_ROW;
+ private List<byte[]> columns = new ArrayList<byte[]>();
+ private int batch = Integer.MAX_VALUE;
+ private long startTime = 0;
+ private long endTime = Long.MAX_VALUE;
+ private String filter = null;
+ private int maxVersions = Integer.MAX_VALUE;
+ private int caching = -1;
+ private List<String> labels = new ArrayList<String>();
+ private boolean cacheBlocks = true;
+
+ @XmlRootElement
+ static class FilterModel {
+
+ @XmlRootElement
+ static class ByteArrayComparableModel {
+ @XmlAttribute public String type;
+ @XmlAttribute public String value;
+ @XmlAttribute public String op;
+
+ static enum ComparatorType {
+ BinaryComparator,
+ BinaryPrefixComparator,
+ BitComparator,
+ NullComparator,
+ RegexStringComparator,
+ SubstringComparator
+ }
+
+ public ByteArrayComparableModel() { }
+
+ public ByteArrayComparableModel(
+ ByteArrayComparable comparator) {
+ String typeName = comparator.getClass().getSimpleName();
+ ComparatorType type = ComparatorType.valueOf(typeName);
+ this.type = typeName;
+ switch (type) {
+ case BinaryComparator:
+ case BinaryPrefixComparator:
+ this.value = Base64.encodeBytes(comparator.getValue());
+ break;
+ case BitComparator:
+ this.value = Base64.encodeBytes(comparator.getValue());
+ this.op = ((BitComparator)comparator).getOperator().toString();
+ break;
+ case NullComparator:
+ break;
+ case RegexStringComparator:
+ case SubstringComparator:
+ this.value = Bytes.toString(comparator.getValue());
+ break;
+ default:
+ throw new RuntimeException("unhandled filter type: " + type);
+ }
+ }
+
+ public ByteArrayComparable build() {
+ ByteArrayComparable comparator;
+ switch (ComparatorType.valueOf(type)) {
+ case BinaryComparator:
+ comparator = new BinaryComparator(Base64.decode(value));
+ break;
+ case BinaryPrefixComparator:
+ comparator = new BinaryPrefixComparator(Base64.decode(value));
+ break;
+ case BitComparator:
+ comparator = new BitComparator(Base64.decode(value),
+ BitComparator.BitwiseOp.valueOf(op));
+ break;
+ case NullComparator:
+ comparator = new NullComparator();
+ break;
+ case RegexStringComparator:
+ comparator = new RegexStringComparator(value);
+ break;
+ case SubstringComparator:
+ comparator = new SubstringComparator(value);
+ break;
+ default:
+ throw new RuntimeException("unhandled comparator type: " + type);
+ }
+ return comparator;
+ }
+
+ }
+
+ // A grab bag of fields; it would have been a union if this were C.
+ // These are null by default and will only be serialized if set (non null).
+ @XmlAttribute public String type;
+ @XmlAttribute public String op;
+ @XmlElement ByteArrayComparableModel comparator;
+ @XmlAttribute public String value;
+ @XmlElement public List<FilterModel> filters;
+ @XmlAttribute public Integer limit;
+ @XmlAttribute public Integer offset;
+ @XmlAttribute public String family;
+ @XmlAttribute public String qualifier;
+ @XmlAttribute public Boolean ifMissing;
+ @XmlAttribute public Boolean latestVersion;
+ @XmlAttribute public String minColumn;
+ @XmlAttribute public Boolean minColumnInclusive;
+ @XmlAttribute public String maxColumn;
+ @XmlAttribute public Boolean maxColumnInclusive;
+ @XmlAttribute public Boolean dropDependentColumn;
+ @XmlAttribute public Float chance;
+ @XmlElement public List<String> prefixes;
+ @XmlElement public List<Long> timestamps;
+
+ static enum FilterType {
+ ColumnCountGetFilter,
+ ColumnPaginationFilter,
+ ColumnPrefixFilter,
+ ColumnRangeFilter,
+ DependentColumnFilter,
+ FamilyFilter,
+ FilterList,
+ FirstKeyOnlyFilter,
+ InclusiveStopFilter,
+ KeyOnlyFilter,
+ MultipleColumnPrefixFilter,
+ PageFilter,
+ PrefixFilter,
+ QualifierFilter,
+ RandomRowFilter,
+ RowFilter,
+ SingleColumnValueExcludeFilter,
+ SingleColumnValueFilter,
+ SkipFilter,
+ TimestampsFilter,
+ ValueFilter,
+ WhileMatchFilter
+ }
+
+ public FilterModel() { }
+
+ public FilterModel(Filter filter) {
+ String typeName = filter.getClass().getSimpleName();
+ FilterType type = FilterType.valueOf(typeName);
+ this.type = typeName;
+ switch (type) {
+ case ColumnCountGetFilter:
+ this.limit = ((ColumnCountGetFilter)filter).getLimit();
+ break;
+ case ColumnPaginationFilter:
+ this.limit = ((ColumnPaginationFilter)filter).getLimit();
+ this.offset = ((ColumnPaginationFilter)filter).getOffset();
+ break;
+ case ColumnPrefixFilter:
+ this.value = Base64.encodeBytes(((ColumnPrefixFilter)filter).getPrefix());
+ break;
+ case ColumnRangeFilter:
+ this.minColumn = Base64.encodeBytes(((ColumnRangeFilter)filter).getMinColumn());
+ this.minColumnInclusive = ((ColumnRangeFilter)filter).getMinColumnInclusive();
+ this.maxColumn = Base64.encodeBytes(((ColumnRangeFilter)filter).getMaxColumn());
+ this.maxColumnInclusive = ((ColumnRangeFilter)filter).getMaxColumnInclusive();
+ break;
+ case DependentColumnFilter: {
+ DependentColumnFilter dcf = (DependentColumnFilter)filter;
+ this.family = Base64.encodeBytes(dcf.getFamily());
+ byte[] qualifier = dcf.getQualifier();
+ if (qualifier != null) {
+ this.qualifier = Base64.encodeBytes(qualifier);
+ }
+ this.op = dcf.getOperator().toString();
+ this.comparator = new ByteArrayComparableModel(dcf.getComparator());
+ this.dropDependentColumn = dcf.dropDependentColumn();
+ } break;
+ case FilterList:
+ this.op = ((FilterList)filter).getOperator().toString();
+ this.filters = new ArrayList<FilterModel>();
+ for (Filter child: ((FilterList)filter).getFilters()) {
+ this.filters.add(new FilterModel(child));
+ }
+ break;
+ case FirstKeyOnlyFilter:
+ case KeyOnlyFilter:
+ break;
+ case InclusiveStopFilter:
+ this.value =
+ Base64.encodeBytes(((InclusiveStopFilter)filter).getStopRowKey());
+ break;
+ case MultipleColumnPrefixFilter:
+ this.prefixes = new ArrayList<String>();
+ for (byte[] prefix: ((MultipleColumnPrefixFilter)filter).getPrefix()) {
+ this.prefixes.add(Base64.encodeBytes(prefix));
+ }
+ break;
+ case PageFilter:
+ this.value = Long.toString(((PageFilter)filter).getPageSize());
+ break;
+ case PrefixFilter:
+ this.value = Base64.encodeBytes(((PrefixFilter)filter).getPrefix());
+ break;
+ case FamilyFilter:
+ case QualifierFilter:
+ case RowFilter:
+ case ValueFilter:
+ this.op = ((CompareFilter)filter).getOperator().toString();
+ this.comparator =
+ new ByteArrayComparableModel(
+ ((CompareFilter)filter).getComparator());
+ break;
+ case RandomRowFilter:
+ this.chance = ((RandomRowFilter)filter).getChance();
+ break;
+ case SingleColumnValueExcludeFilter:
+ case SingleColumnValueFilter: {
+ SingleColumnValueFilter scvf = (SingleColumnValueFilter) filter;
+ this.family = Base64.encodeBytes(scvf.getFamily());
+ byte[] qualifier = scvf.getQualifier();
+ if (qualifier != null) {
+ this.qualifier = Base64.encodeBytes(qualifier);
+ }
+ this.op = scvf.getOperator().toString();
+ this.comparator =
+ new ByteArrayComparableModel(scvf.getComparator());
+ if (scvf.getFilterIfMissing()) {
+ this.ifMissing = true;
+ }
+ if (scvf.getLatestVersionOnly()) {
+ this.latestVersion = true;
+ }
+ } break;
+ case SkipFilter:
+ this.filters = new ArrayList<FilterModel>();
+ this.filters.add(new FilterModel(((SkipFilter)filter).getFilter()));
+ break;
+ case TimestampsFilter:
+ this.timestamps = ((TimestampsFilter)filter).getTimestamps();
+ break;
+ case WhileMatchFilter:
+ this.filters = new ArrayList<FilterModel>();
+ this.filters.add(
+ new FilterModel(((WhileMatchFilter)filter).getFilter()));
+ break;
+ default:
+ throw new RuntimeException("unhandled filter type " + type);
+ }
+ }
+
+ public Filter build() {
+ Filter filter;
+ switch (FilterType.valueOf(type)) {
+ case ColumnCountGetFilter:
+ filter = new ColumnCountGetFilter(limit);
+ break;
+ case ColumnPaginationFilter:
+ filter = new ColumnPaginationFilter(limit, offset);
+ break;
+ case ColumnPrefixFilter:
+ filter = new ColumnPrefixFilter(Base64.decode(value));
+ break;
+ case ColumnRangeFilter:
+ filter = new ColumnRangeFilter(Base64.decode(minColumn),
+ minColumnInclusive, Base64.decode(maxColumn),
+ maxColumnInclusive);
+ break;
+ case DependentColumnFilter:
+ filter = new DependentColumnFilter(Base64.decode(family),
+ qualifier != null ? Base64.decode(qualifier) : null,
+ dropDependentColumn, CompareOp.valueOf(op), comparator.build());
+ break;
+ case FamilyFilter:
+ filter = new FamilyFilter(CompareOp.valueOf(op), comparator.build());
+ break;
+ case FilterList: {
+ List<Filter> list = new ArrayList<Filter>();
+ for (FilterModel model: filters) {
+ list.add(model.build());
+ }
+ filter = new FilterList(FilterList.Operator.valueOf(op), list);
+ } break;
+ case FirstKeyOnlyFilter:
+ filter = new FirstKeyOnlyFilter();
+ break;
+ case InclusiveStopFilter:
+ filter = new InclusiveStopFilter(Base64.decode(value));
+ break;
+ case KeyOnlyFilter:
+ filter = new KeyOnlyFilter();
+ break;
+ case MultipleColumnPrefixFilter: {
+ byte[][] values = new byte[prefixes.size()][];
+ for (int i = 0; i < prefixes.size(); i++) {
+ values[i] = Base64.decode(prefixes.get(i));
+ }
+ filter = new MultipleColumnPrefixFilter(values);
+ } break;
+ case PageFilter:
+ filter = new PageFilter(Long.valueOf(value));
+ break;
+ case PrefixFilter:
+ filter = new PrefixFilter(Base64.decode(value));
+ break;
+ case QualifierFilter:
+ filter = new QualifierFilter(CompareOp.valueOf(op), comparator.build());
+ break;
+ case RandomRowFilter:
+ filter = new RandomRowFilter(chance);
+ break;
+ case RowFilter:
+ filter = new RowFilter(CompareOp.valueOf(op), comparator.build());
+ break;
+ case SingleColumnValueFilter:
+ filter = new SingleColumnValueFilter(Base64.decode(family),
+ qualifier != null ? Base64.decode(qualifier) : null,
+ CompareOp.valueOf(op), comparator.build());
+ if (ifMissing != null) {
+ ((SingleColumnValueFilter)filter).setFilterIfMissing(ifMissing);
+ }
+ if (latestVersion != null) {
+ ((SingleColumnValueFilter)filter).setLatestVersionOnly(latestVersion);
+ }
+ break;
+ case SingleColumnValueExcludeFilter:
+ filter = new SingleColumnValueExcludeFilter(Base64.decode(family),
+ qualifier != null ? Base64.decode(qualifier) : null,
+ CompareOp.valueOf(op), comparator.build());
+ if (ifMissing != null) {
+ ((SingleColumnValueExcludeFilter)filter).setFilterIfMissing(ifMissing);
+ }
+ if (latestVersion != null) {
+ ((SingleColumnValueExcludeFilter)filter).setLatestVersionOnly(latestVersion);
+ }
+ break;
+ case SkipFilter:
+ filter = new SkipFilter(filters.get(0).build());
+ break;
+ case TimestampsFilter:
+ filter = new TimestampsFilter(timestamps);
+ break;
+ case ValueFilter:
+ filter = new ValueFilter(CompareOp.valueOf(op), comparator.build());
+ break;
+ case WhileMatchFilter:
+ filter = new WhileMatchFilter(filters.get(0).build());
+ break;
+ default:
+ throw new RuntimeException("unhandled filter type: " + type);
+ }
+ return filter;
+ }
+
+ }
+
+ /**
+ * @param s the JSON representation of the filter
+ * @return the filter
+ * @throws Exception
+ */
+ public static Filter buildFilter(String s) throws Exception {
+ JSONJAXBContext context =
+ new JSONJAXBContext(JSONConfiguration.natural().build(),
+ FilterModel.class);
+ JSONUnmarshaller unmarshaller = context.createJSONUnmarshaller();
+ FilterModel model = unmarshaller.unmarshalFromJSON(new StringReader(s),
+ FilterModel.class);
+ return model.build();
+ }
+
+ /**
+ * @param filter the filter
+ * @return the JSON representation of the filter
+ * @throws Exception
+ */
+ public static String stringifyFilter(final Filter filter) throws Exception {
+ JSONJAXBContext context =
+ new JSONJAXBContext(JSONConfiguration.natural().build(),
+ FilterModel.class);
+ JSONMarshaller marshaller = context.createJSONMarshaller();
+ StringWriter writer = new StringWriter();
+ marshaller.marshallToJSON(new FilterModel(filter), writer);
+ return writer.toString();
+ }
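+
+ // Illustrative only (these example values are not in the original source):
+ // for a filter such as new PrefixFilter(Bytes.toBytes("testrow")),
+ // stringifyFilter produces JSON along the lines of
+ //   {"type":"PrefixFilter","value":"dGVzdHJvdw=="}
+ // (the prefix is Base64-encoded), and buildFilter accepts the same string back.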
+
+ private static final byte[] COLUMN_DIVIDER = Bytes.toBytes(":");
+
+ /**
+ * @param scan the scan specification
+ * @return a ScannerModel representation of the scan
+ * @throws Exception
+ */
+ public static ScannerModel fromScan(Scan scan) throws Exception {
+ ScannerModel model = new ScannerModel();
+ model.setStartRow(scan.getStartRow());
+ model.setEndRow(scan.getStopRow());
+ Map<byte [], NavigableSet<byte []>> families = scan.getFamilyMap();
+ if (families != null) {
+ for (Map.Entry<byte [], NavigableSet<byte []>> entry : families.entrySet()) {
+ if (entry.getValue() != null) {
+ for (byte[] qualifier: entry.getValue()) {
+ model.addColumn(Bytes.add(entry.getKey(), COLUMN_DIVIDER, qualifier));
+ }
+ } else {
+ model.addColumn(entry.getKey());
+ }
+ }
+ }
+ model.setStartTime(scan.getTimeRange().getMin());
+ model.setEndTime(scan.getTimeRange().getMax());
+ int caching = scan.getCaching();
+ if (caching > 0) {
+ model.setCaching(caching);
+ }
+ int batch = scan.getBatch();
+ if (batch > 0) {
+ model.setBatch(batch);
+ }
+ int maxVersions = scan.getMaxVersions();
+ if (maxVersions > 0) {
+ model.setMaxVersions(maxVersions);
+ }
+ Filter filter = scan.getFilter();
+ if (filter != null) {
+ model.setFilter(stringifyFilter(filter));
+ }
+ // Add the visibility labels if found in the attributes
+ Authorizations authorizations = scan.getAuthorizations();
+ if (authorizations != null) {
+ List<String> labels = authorizations.getLabels();
+ for (String label : labels) {
+ model.addLabel(label);
+ }
+ }
+ return model;
+ }
+
+ /**
+ * Default constructor
+ */
+ public ScannerModel() {}
+
+ /**
+ * Constructor
+ * @param startRow the start key of the row-range
+ * @param endRow the end key of the row-range
+ * @param columns the columns to scan
+ * @param batch the number of values to return in batch
+ * @param caching the number of rows that the scanner will fetch at once
+ * @param endTime the upper bound on timestamps of values of interest
+ * (values with timestamps later than this are excluded)
+ * @param maxVersions the maximum number of versions to return
+ * @param filter a filter specification
+ */
+ public ScannerModel(byte[] startRow, byte[] endRow, List<byte[]> columns,
+ int batch, int caching, long endTime, int maxVersions, String filter) {
+ super();
+ this.startRow = startRow;
+ this.endRow = endRow;
+ this.columns = columns;
+ this.batch = batch;
+ this.caching = caching;
+ this.endTime = endTime;
+ this.maxVersions = maxVersions;
+ this.filter = filter;
+ }
+
+ /**
+ * Constructor
+ * @param startRow the start key of the row-range
+ * @param endRow the end key of the row-range
+ * @param columns the columns to scan
+ * @param batch the number of values to return in batch
+ * @param caching the number of rows that the scanner will fetch at once
+ * @param startTime the lower bound on timestamps of values of interest
+ * (values with timestamps earlier than this are excluded)
+ * @param endTime the upper bound on timestamps of values of interest
+ * (values with timestamps later than this are excluded)
+ * @param filter a filter specification
+ */
+ public ScannerModel(byte[] startRow, byte[] endRow, List<byte[]> columns,
+ int batch, int caching, long startTime, long endTime, String filter) {
+ super();
+ this.startRow = startRow;
+ this.endRow = endRow;
+ this.columns = columns;
+ this.batch = batch;
+ this.caching = caching;
+ this.startTime = startTime;
+ this.endTime = endTime;
+ this.filter = filter;
+ }
+
+ /**
+ * Add a column to the column set
+ * @param column the column name, as family(:qualifier)?
+ */
+ public void addColumn(byte[] column) {
+ columns.add(column);
+ }
+
+ /**
+ * Add a visibility label to the scan
+ */
+ public void addLabel(String label) {
+ labels.add(label);
+ }
+
+ /**
+ * @return true if a start row was specified
+ */
+ public boolean hasStartRow() {
+ return !Bytes.equals(startRow, HConstants.EMPTY_START_ROW);
+ }
+
+ /**
+ * @return start row
+ */
+ @XmlAttribute
+ public byte[] getStartRow() {
+ return startRow;
+ }
+
+ /**
+ * @return true if an end row was specified
+ */
+ public boolean hasEndRow() {
+ return !Bytes.equals(endRow, HConstants.EMPTY_END_ROW);
+ }
+
+ /**
+ * @return end row
+ */
+ @XmlAttribute
+ public byte[] getEndRow() {
+ return endRow;
+ }
+
+ /**
+ * @return list of columns of interest in column:qualifier format, or empty for all
+ */
+ @XmlElement(name="column")
+ public List<byte[]> getColumns() {
+ return columns;
+ }
+
+ @XmlElement(name="labels")
+ public List<String> getLabels() {
+ return labels;
+ }
+
+ /**
+ * @return the number of cells to return in batch
+ */
+ @XmlAttribute
+ public int getBatch() {
+ return batch;
+ }
+
+ /**
+ * @return the number of rows that the scanner will fetch at once
+ */
+ @XmlAttribute
+ public int getCaching() {
+ return caching;
+ }
+
+ /**
+ * @return true if HFile blocks should be cached on the servers for this scan, false otherwise
+ */
+ @XmlAttribute
+ public boolean getCacheBlocks() {
+ return cacheBlocks;
+ }
+
+ /**
+ * @return the lower bound on timestamps of items of interest
+ */
+ @XmlAttribute
+ public long getStartTime() {
+ return startTime;
+ }
+
+ /**
+ * @return the upper bound on timestamps of items of interest
+ */
+ @XmlAttribute
+ public long getEndTime() {
+ return endTime;
+ }
+
+ /**
+ * @return maximum number of versions to return
+ */
+ @XmlAttribute
+ public int getMaxVersions() {
+ return maxVersions;
+ }
+
+ /**
+ * @return the filter specification
+ */
+ @XmlElement
+ public String getFilter() {
+ return filter;
+ }
+
+ /**
+ * @param startRow start row
+ */
+ public void setStartRow(byte[] startRow) {
+ this.startRow = startRow;
+ }
+
+ /**
+ * @param endRow end row
+ */
+ public void setEndRow(byte[] endRow) {
+ this.endRow = endRow;
+ }
+
+ /**
+ * @param columns list of columns of interest in column:qualifier format, or empty for all
+ */
+ public void setColumns(List<byte[]> columns) {
+ this.columns = columns;
+ }
+
+ /**
+ * @param batch the number of cells to return in batch
+ */
+ public void setBatch(int batch) {
+ this.batch = batch;
+ }
+
+ /**
+ * @param caching the number of rows to fetch at once
+ */
+ public void setCaching(int caching) {
+ this.caching = caching;
+ }
+
+ /**
+ * @param value true if HFile blocks should be cached on the servers for this scan, false otherwise
+ */
+ public void setCacheBlocks(boolean value) {
+ this.cacheBlocks = value;
+ }
+
+ /**
+ * @param maxVersions maximum number of versions to return
+ */
+ public void setMaxVersions(int maxVersions) {
+ this.maxVersions = maxVersions;
+ }
+
+ /**
+ * @param startTime the lower bound on timestamps of values of interest
+ */
+ public void setStartTime(long startTime) {
+ this.startTime = startTime;
+ }
+
+ /**
+ * @param endTime the upper bound on timestamps of values of interest
+ */
+ public void setEndTime(long endTime) {
+ this.endTime = endTime;
+ }
+
+ /**
+ * @param filter the filter specification
+ */
+ public void setFilter(String filter) {
+ this.filter = filter;
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ Scanner.Builder builder = Scanner.newBuilder();
+ if (!Bytes.equals(startRow, HConstants.EMPTY_START_ROW)) {
+ builder.setStartRow(ByteStringer.wrap(startRow));
+ }
+ if (!Bytes.equals(endRow, HConstants.EMPTY_END_ROW)) {
+ builder.setEndRow(ByteStringer.wrap(endRow));
+ }
+ for (byte[] column: columns) {
+ builder.addColumns(ByteStringer.wrap(column));
+ }
+ if (startTime != 0) {
+ builder.setStartTime(startTime);
+ }
+ if (endTime != 0) {
+ builder.setEndTime(endTime);
+ }
+ builder.setBatch(getBatch());
+ if (caching > 0) {
+ builder.setCaching(caching);
+ }
+ builder.setMaxVersions(maxVersions);
+ if (filter != null) {
+ builder.setFilter(filter);
+ }
+ if (labels != null && labels.size() > 0) {
+ for (String label : labels)
+ builder.addLabels(label);
+ }
+ builder.setCacheBlocks(cacheBlocks);
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException {
+ Scanner.Builder builder = Scanner.newBuilder();
+ builder.mergeFrom(message);
+ if (builder.hasStartRow()) {
+ startRow = builder.getStartRow().toByteArray();
+ }
+ if (builder.hasEndRow()) {
+ endRow = builder.getEndRow().toByteArray();
+ }
+ for (ByteString column: builder.getColumnsList()) {
+ addColumn(column.toByteArray());
+ }
+ if (builder.hasBatch()) {
+ batch = builder.getBatch();
+ }
+ if (builder.hasCaching()) {
+ caching = builder.getCaching();
+ }
+ if (builder.hasStartTime()) {
+ startTime = builder.getStartTime();
+ }
+ if (builder.hasEndTime()) {
+ endTime = builder.getEndTime();
+ }
+ if (builder.hasMaxVersions()) {
+ maxVersions = builder.getMaxVersions();
+ }
+ if (builder.hasFilter()) {
+ filter = builder.getFilter();
+ }
+ if (builder.getLabelsList() != null) {
+ List<String> labels = builder.getLabelsList();
+ for(String label : labels) {
+ addLabel(label);
+ }
+ }
+ if (builder.hasCacheBlocks()) {
+ this.cacheBlocks = builder.getCacheBlocks();
+ }
+ return this;
+ }
+
+}
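
For readers following this refactor, here is a minimal sketch of driving the
relocated ScannerModel end to end: converting a client-side Scan and
round-tripping its filter through the JSON representation. This is not part
of the patch; the row keys, column family, and page size are made-up values.

// Sketch only, not part of HBASE-12197. Assumes an HBase 0.98 classpath with
// the new hbase-rest module; all row keys and column names are hypothetical.
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.rest.model.ScannerModel;
import org.apache.hadoop.hbase.util.Bytes;

public class ScannerModelSketch {
  public static void main(String[] args) throws Exception {
    Scan scan = new Scan();
    scan.setStartRow(Bytes.toBytes("row-000"));
    scan.setStopRow(Bytes.toBytes("row-999"));
    scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
    scan.setFilter(new PageFilter(25));

    // fromScan() copies rows, columns, time range, caching, batch and
    // max versions, and stringifies the filter into JSON.
    ScannerModel model = ScannerModel.fromScan(scan);

    // The filter travels as JSON text and can be rebuilt on the other side.
    String json = model.getFilter();
    Filter rebuilt = ScannerModel.buildFilter(json);
    System.out.println(json + " -> " + rebuilt.getClass().getSimpleName());
  }
}
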
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java
new file mode 100644
index 0000000..3b044e7
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java
@@ -0,0 +1,790 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlElementWrapper;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Representation of the status of a storage cluster:
+ * <p>
+ * <ul>
+ * <li>regions: the total number of regions served by the cluster</li>
+ * <li>requests: the total number of requests per second handled by the
+ * cluster in the last reporting interval</li>
+ * <li>averageLoad: the average load of the region servers in the cluster</li>
+ * <li>liveNodes: detailed status of the live region servers</li>
+ * <li>deadNodes: the names of region servers declared dead</li>
+ * </ul>
+ *
+ * <pre>
+ * <complexType name="StorageClusterStatus">
+ * <sequence>
+ * <element name="liveNode" type="tns:Node"
+ * maxOccurs="unbounded" minOccurs="0">
+ * </element>
+ * <element name="deadNode" type="string" maxOccurs="unbounded"
+ * minOccurs="0">
+ * </element>
+ * </sequence>
+ * <attribute name="regions" type="int"></attribute>
+ * <attribute name="requests" type="int"></attribute>
+ * <attribute name="averageLoad" type="float"></attribute>
+ * </complexType>
+ *
+ * <complexType name="Node">
+ * <sequence>
+ * <element name="region" type="tns:Region"
+ * maxOccurs="unbounded" minOccurs="0"></element>
+ * </sequence>
+ * <attribute name="name" type="string"></attribute>
+ * <attribute name="startCode" type="int"></attribute>
+ * <attribute name="requests" type="int"></attribute>
+ * <attribute name="heapSizeMB" type="int"></attribute>
+ * <attribute name="maxHeapSizeMB" type="int"></attribute>
+ * </complexType>
+ *
+ * <complexType name="Region">
+ * <attribute name="name" type="base64Binary"></attribute>
+ * <attribute name="stores" type="int"></attribute>
+ * <attribute name="storefiles" type="int"></attribute>
+ * <attribute name="storefileSizeMB" type="int"></attribute>
+ * <attribute name="memstoreSizeMB" type="int"></attribute>
+ * <attribute name="storefileIndexSizeMB" type="int"></attribute>
+ * <attribute name="readRequestsCount" type="int"></attribute>
+ * <attribute name="writeRequestsCount" type="int"></attribute>
+ * <attribute name="rootIndexSizeKB" type="int"></attribute>
+ * <attribute name="totalStaticIndexSizeKB" type="int"></attribute>
+ * <attribute name="totalStaticBloomSizeKB" type="int"></attribute>
+ * <attribute name="totalCompactingKVs" type="int"></attribute>
+ * <attribute name="currentCompactedKVs" type="int"></attribute>
+ * </complexType>
+ * </pre>
+ */
+@XmlRootElement(name="ClusterStatus")
+@InterfaceAudience.Private
+public class StorageClusterStatusModel
+ implements Serializable, ProtobufMessageHandler {
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * Represents a region server.
+ */
+ public static class Node implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * Represents a region hosted on a region server.
+ */
+ public static class Region {
+ private byte[] name;
+ private int stores;
+ private int storefiles;
+ private int storefileSizeMB;
+ private int memstoreSizeMB;
+ private int storefileIndexSizeMB;
+ private long readRequestsCount;
+ private long writeRequestsCount;
+ private int rootIndexSizeKB;
+ private int totalStaticIndexSizeKB;
+ private int totalStaticBloomSizeKB;
+ private long totalCompactingKVs;
+ private long currentCompactedKVs;
+
+ /**
+ * Default constructor
+ */
+ public Region() {
+ }
+
+ /**
+ * Constructor
+ * @param name the region name
+ */
+ public Region(byte[] name) {
+ this.name = name;
+ }
+
+ /**
+ * Constructor
+ * @param name the region name
+ * @param stores the number of stores
+ * @param storefiles the number of store files
+ * @param storefileSizeMB total size of store files, in MB
+ * @param memstoreSizeMB total size of memstore, in MB
+ * @param storefileIndexSizeMB total size of store file indexes, in MB
+ */
+ public Region(byte[] name, int stores, int storefiles,
+ int storefileSizeMB, int memstoreSizeMB, int storefileIndexSizeMB,
+ long readRequestsCount, long writeRequestsCount, int rootIndexSizeKB,
+ int totalStaticIndexSizeKB, int totalStaticBloomSizeKB,
+ long totalCompactingKVs, long currentCompactedKVs) {
+ this.name = name;
+ this.stores = stores;
+ this.storefiles = storefiles;
+ this.storefileSizeMB = storefileSizeMB;
+ this.memstoreSizeMB = memstoreSizeMB;
+ this.storefileIndexSizeMB = storefileIndexSizeMB;
+ this.readRequestsCount = readRequestsCount;
+ this.writeRequestsCount = writeRequestsCount;
+ this.rootIndexSizeKB = rootIndexSizeKB;
+ this.totalStaticIndexSizeKB = totalStaticIndexSizeKB;
+ this.totalStaticBloomSizeKB = totalStaticBloomSizeKB;
+ this.totalCompactingKVs = totalCompactingKVs;
+ this.currentCompactedKVs = currentCompactedKVs;
+ }
+
+ /**
+ * @return the region name
+ */
+ @XmlAttribute
+ public byte[] getName() {
+ return name;
+ }
+
+ /**
+ * @return the number of stores
+ */
+ @XmlAttribute
+ public int getStores() {
+ return stores;
+ }
+
+ /**
+ * @return the number of store files
+ */
+ @XmlAttribute
+ public int getStorefiles() {
+ return storefiles;
+ }
+
+ /**
+ * @return the total size of store files, in MB
+ */
+ @XmlAttribute
+ public int getStorefileSizeMB() {
+ return storefileSizeMB;
+ }
+
+ /**
+ * @return memstore size, in MB
+ */
+ @XmlAttribute
+ public int getMemstoreSizeMB() {
+ return memstoreSizeMB;
+ }
+
+ /**
+ * @return the total size of store file indexes, in MB
+ */
+ @XmlAttribute
+ public int getStorefileIndexSizeMB() {
+ return storefileIndexSizeMB;
+ }
+
+ /**
+ * @return the current total read requests made to region
+ */
+ @XmlAttribute
+ public long getReadRequestsCount() {
+ return readRequestsCount;
+ }
+
+ /**
+ * @return the current total write requests made to region
+ */
+ @XmlAttribute
+ public long getWriteRequestsCount() {
+ return writeRequestsCount;
+ }
+
+ /**
+ * @return The current total size of root-level indexes for the region, in KB.
+ */
+ @XmlAttribute
+ public int getRootIndexSizeKB() {
+ return rootIndexSizeKB;
+ }
+
+ /**
+ * @return The total size of static index, in KB
+ */
+ @XmlAttribute
+ public int getTotalStaticIndexSizeKB() {
+ return totalStaticIndexSizeKB;
+ }
+
+ /**
+ * @return The total size of static bloom, in KB
+ */
+ @XmlAttribute
+ public int getTotalStaticBloomSizeKB() {
+ return totalStaticBloomSizeKB;
+ }
+
+ /**
+ * @return The total number of compacting key-values
+ */
+ @XmlAttribute
+ public long getTotalCompactingKVs() {
+ return totalCompactingKVs;
+ }
+
+ /**
+ * @return The number of current compacted key-values
+ */
+ @XmlAttribute
+ public long getCurrentCompactedKVs() {
+ return currentCompactedKVs;
+ }
+
+ /**
+ * @param readRequestsCount The current total read requests made to region
+ */
+ public void setReadRequestsCount(long readRequestsCount) {
+ this.readRequestsCount = readRequestsCount;
+ }
+
+ /**
+ * @param rootIndexSizeKB The current total size of root-level indexes
+ * for the region, in KB
+ */
+ public void setRootIndexSizeKB(int rootIndexSizeKB) {
+ this.rootIndexSizeKB = rootIndexSizeKB;
+ }
+
+ /**
+ * @param writeRequestsCount The current total write requests made to region
+ */
+ public void setWriteRequestsCount(long writeRequestsCount) {
+ this.writeRequestsCount = writeRequestsCount;
+ }
+
+ /**
+ * @param currentCompactedKVs The completed count of key values
+ * in currently running compaction
+ */
+ public void setCurrentCompactedKVs(long currentCompactedKVs) {
+ this.currentCompactedKVs = currentCompactedKVs;
+ }
+
+ /**
+ * @param totalCompactingKVs The total compacting key values
+ * in currently running compaction
+ */
+ public void setTotalCompactingKVs(long totalCompactingKVs) {
+ this.totalCompactingKVs = totalCompactingKVs;
+ }
+
+ /**
+ * @param totalStaticBloomSizeKB The total size of all Bloom filter blocks,
+ * not just loaded into the block cache, in KB.
+ */
+ public void setTotalStaticBloomSizeKB(int totalStaticBloomSizeKB) {
+ this.totalStaticBloomSizeKB = totalStaticBloomSizeKB;
+ }
+
+ /**
+ * @param totalStaticIndexSizeKB The total size of all index blocks,
+ * not just the root level, in KB.
+ */
+ public void setTotalStaticIndexSizeKB(int totalStaticIndexSizeKB) {
+ this.totalStaticIndexSizeKB = totalStaticIndexSizeKB;
+ }
+
+ /**
+ * @param name the region name
+ */
+ public void setName(byte[] name) {
+ this.name = name;
+ }
+
+ /**
+ * @param stores the number of stores
+ */
+ public void setStores(int stores) {
+ this.stores = stores;
+ }
+
+ /**
+ * @param storefiles the number of store files
+ */
+ public void setStorefiles(int storefiles) {
+ this.storefiles = storefiles;
+ }
+
+ /**
+ * @param storefileSizeMB total size of store files, in MB
+ */
+ public void setStorefileSizeMB(int storefileSizeMB) {
+ this.storefileSizeMB = storefileSizeMB;
+ }
+
+ /**
+ * @param memstoreSizeMB memstore size, in MB
+ */
+ public void setMemstoreSizeMB(int memstoreSizeMB) {
+ this.memstoreSizeMB = memstoreSizeMB;
+ }
+
+ /**
+ * @param storefileIndexSizeMB total size of store file indexes, in MB
+ */
+ public void setStorefileIndexSizeMB(int storefileIndexSizeMB) {
+ this.storefileIndexSizeMB = storefileIndexSizeMB;
+ }
+ }
+
+ private String name;
+ private long startCode;
+ private int requests;
+ private int heapSizeMB;
+ private int maxHeapSizeMB;
+ private List<Region> regions = new ArrayList<Region>();
+
+ /**
+ * Add a region and its load metrics to the list of regions served by this node
+ * @param name the region name
+ */
+ public void addRegion(byte[] name, int stores, int storefiles,
+ int storefileSizeMB, int memstoreSizeMB, int storefileIndexSizeMB,
+ long readRequestsCount, long writeRequestsCount, int rootIndexSizeKB,
+ int totalStaticIndexSizeKB, int totalStaticBloomSizeKB,
+ long totalCompactingKVs, long currentCompactedKVs) {
+ regions.add(new Region(name, stores, storefiles, storefileSizeMB,
+ memstoreSizeMB, storefileIndexSizeMB, readRequestsCount,
+ writeRequestsCount, rootIndexSizeKB, totalStaticIndexSizeKB,
+ totalStaticBloomSizeKB, totalCompactingKVs, currentCompactedKVs));
+ }
+
+ /**
+ * @param index the index
+ * @return the region
+ */
+ public Region getRegion(int index) {
+ return regions.get(index);
+ }
+
+ /**
+ * Default constructor
+ */
+ public Node() {}
+
+ /**
+ * Constructor
+ * @param name the region server name
+ * @param startCode the region server's start code
+ */
+ public Node(String name, long startCode) {
+ this.name = name;
+ this.startCode = startCode;
+ }
+
+ /**
+ * @return the region server's name
+ */
+ @XmlAttribute
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * @return the region server's start code
+ */
+ @XmlAttribute
+ public long getStartCode() {
+ return startCode;
+ }
+
+ /**
+ * @return the current heap size, in MB
+ */
+ @XmlAttribute
+ public int getHeapSizeMB() {
+ return heapSizeMB;
+ }
+
+ /**
+ * @return the maximum heap size, in MB
+ */
+ @XmlAttribute
+ public int getMaxHeapSizeMB() {
+ return maxHeapSizeMB;
+ }
+
+ /**
+ * @return the list of regions served by the region server
+ */
+ @XmlElement(name="Region")
+ public List<Region> getRegions() {
+ return regions;
+ }
+
+ /**
+ * @return the number of requests per second processed by the region server
+ */
+ @XmlAttribute
+ public int getRequests() {
+ return requests;
+ }
+
+ /**
+ * @param name the region server's hostname
+ */
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ /**
+ * @param startCode the region server's start code
+ */
+ public void setStartCode(long startCode) {
+ this.startCode = startCode;
+ }
+
+ /**
+ * @param heapSizeMB the current heap size, in MB
+ */
+ public void setHeapSizeMB(int heapSizeMB) {
+ this.heapSizeMB = heapSizeMB;
+ }
+
+ /**
+ * @param maxHeapSizeMB the maximum heap size, in MB
+ */
+ public void setMaxHeapSizeMB(int maxHeapSizeMB) {
+ this.maxHeapSizeMB = maxHeapSizeMB;
+ }
+
+ /**
+ * @param regions a list of regions served by the region server
+ */
+ public void setRegions(List<Region> regions) {
+ this.regions = regions;
+ }
+
+ /**
+ * @param requests the number of requests per second processed by the
+ * region server
+ */
+ public void setRequests(int requests) {
+ this.requests = requests;
+ }
+ }
+
+ private List<Node> liveNodes = new ArrayList<Node>();
+ private List<String> deadNodes = new ArrayList<String>();
+ private int regions;
+ private int requests;
+ private double averageLoad;
+
+ /**
+ * Add a live node to the cluster representation.
+ * @param name the region server name
+ * @param startCode the region server's start code
+ * @param heapSizeMB the current heap size, in MB
+ * @param maxHeapSizeMB the maximum heap size, in MB
+ */
+ public Node addLiveNode(String name, long startCode, int heapSizeMB, int maxHeapSizeMB) {
+ Node node = new Node(name, startCode);
+ node.setHeapSizeMB(heapSizeMB);
+ node.setMaxHeapSizeMB(maxHeapSizeMB);
+ liveNodes.add(node);
+ return node;
+ }
+
+ /**
+ * @param index the index
+ * @return the region server model
+ */
+ public Node getLiveNode(int index) {
+ return liveNodes.get(index);
+ }
+
+ /**
+ * Add a dead node to the cluster representation.
+ * @param node the dead region server's name
+ */
+ public void addDeadNode(String node) {
+ deadNodes.add(node);
+ }
+
+ /**
+ * @param index the index
+ * @return the dead region server's name
+ */
+ public String getDeadNode(int index) {
+ return deadNodes.get(index);
+ }
+
+ /**
+ * Default constructor
+ */
+ public StorageClusterStatusModel() {
+ }
+
+ /**
+ * @return the list of live nodes
+ */
+ @XmlElement(name = "Node")
+ @XmlElementWrapper(name = "LiveNodes")
+ public List<Node> getLiveNodes() {
+ return liveNodes;
+ }
+
+ /**
+ * @return the list of dead nodes
+ */
+ @XmlElement(name = "Node")
+ @XmlElementWrapper(name = "DeadNodes")
+ public List<String> getDeadNodes() {
+ return deadNodes;
+ }
+
+ /**
+ * @return the total number of regions served by the cluster
+ */
+ @XmlAttribute
+ public int getRegions() {
+ return regions;
+ }
+
+ /**
+ * @return the total number of requests per second handled by the cluster in
+ * the last reporting interval
+ */
+ @XmlAttribute
+ public int getRequests() {
+ return requests;
+ }
+
+ /**
+ * @return the average load of the region servers in the cluster
+ */
+ @XmlAttribute
+ public double getAverageLoad() {
+ return averageLoad;
+ }
+
+ /**
+ * @param nodes the list of live node models
+ */
+ public void setLiveNodes(List<Node> nodes) {
+ this.liveNodes = nodes;
+ }
+
+ /**
+ * @param nodes the list of dead node names
+ */
+ public void setDeadNodes(List<String> nodes) {
+ this.deadNodes = nodes;
+ }
+
+ /**
+ * @param regions the total number of regions served by the cluster
+ */
+ public void setRegions(int regions) {
+ this.regions = regions;
+ }
+
+ /**
+ * @param requests the total number of requests per second handled by the
+ * cluster
+ */
+ public void setRequests(int requests) {
+ this.requests = requests;
+ }
+
+ /**
+ * @param averageLoad the average load of region servers in the cluster
+ */
+ public void setAverageLoad(double averageLoad) {
+ this.averageLoad = averageLoad;
+ }
+
+ /*
+ * (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append(String.format("%d live servers, %d dead servers, " +
+ "%.4f average load%n%n", liveNodes.size(), deadNodes.size(),
+ averageLoad));
+ if (!liveNodes.isEmpty()) {
+ sb.append(liveNodes.size());
+ sb.append(" live servers\n");
+ for (Node node: liveNodes) {
+ sb.append(" ");
+ sb.append(node.name);
+ sb.append(' ');
+ sb.append(node.startCode);
+ sb.append("\n requests=");
+ sb.append(node.requests);
+ sb.append(", regions=");
+ sb.append(node.regions.size());
+ sb.append("\n heapSizeMB=");
+ sb.append(node.heapSizeMB);
+ sb.append("\n maxHeapSizeMB=");
+ sb.append(node.maxHeapSizeMB);
+ sb.append("\n\n");
+ for (Node.Region region: node.regions) {
+ sb.append(" ");
+ sb.append(Bytes.toString(region.name));
+ sb.append("\n stores=");
+ sb.append(region.stores);
+ sb.append("\n storefiless=");
+ sb.append(region.storefiles);
+ sb.append("\n storefileSizeMB=");
+ sb.append(region.storefileSizeMB);
+ sb.append("\n memstoreSizeMB=");
+ sb.append(region.memstoreSizeMB);
+ sb.append("\n storefileIndexSizeMB=");
+ sb.append(region.storefileIndexSizeMB);
+ sb.append("\n readRequestsCount=");
+ sb.append(region.readRequestsCount);
+ sb.append("\n writeRequestsCount=");
+ sb.append(region.writeRequestsCount);
+ sb.append("\n rootIndexSizeKB=");
+ sb.append(region.rootIndexSizeKB);
+ sb.append("\n totalStaticIndexSizeKB=");
+ sb.append(region.totalStaticIndexSizeKB);
+ sb.append("\n totalStaticBloomSizeKB=");
+ sb.append(region.totalStaticBloomSizeKB);
+ sb.append("\n totalCompactingKVs=");
+ sb.append(region.totalCompactingKVs);
+ sb.append("\n currentCompactedKVs=");
+ sb.append(region.currentCompactedKVs);
+ sb.append('\n');
+ }
+ sb.append('\n');
+ }
+ }
+ if (!deadNodes.isEmpty()) {
+ sb.append('\n');
+ sb.append(deadNodes.size());
+ sb.append(" dead servers\n");
+ for (String node: deadNodes) {
+ sb.append(" ");
+ sb.append(node);
+ sb.append('\n');
+ }
+ }
+ return sb.toString();
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ StorageClusterStatus.Builder builder = StorageClusterStatus.newBuilder();
+ builder.setRegions(regions);
+ builder.setRequests(requests);
+ builder.setAverageLoad(averageLoad);
+ for (Node node: liveNodes) {
+ StorageClusterStatus.Node.Builder nodeBuilder =
+ StorageClusterStatus.Node.newBuilder();
+ nodeBuilder.setName(node.name);
+ nodeBuilder.setStartCode(node.startCode);
+ nodeBuilder.setRequests(node.requests);
+ nodeBuilder.setHeapSizeMB(node.heapSizeMB);
+ nodeBuilder.setMaxHeapSizeMB(node.maxHeapSizeMB);
+ for (Node.Region region: node.regions) {
+ StorageClusterStatus.Region.Builder regionBuilder =
+ StorageClusterStatus.Region.newBuilder();
+ regionBuilder.setName(ByteStringer.wrap(region.name));
+ regionBuilder.setStores(region.stores);
+ regionBuilder.setStorefiles(region.storefiles);
+ regionBuilder.setStorefileSizeMB(region.storefileSizeMB);
+ regionBuilder.setMemstoreSizeMB(region.memstoreSizeMB);
+ regionBuilder.setStorefileIndexSizeMB(region.storefileIndexSizeMB);
+ regionBuilder.setReadRequestsCount(region.readRequestsCount);
+ regionBuilder.setWriteRequestsCount(region.writeRequestsCount);
+ regionBuilder.setRootIndexSizeKB(region.rootIndexSizeKB);
+ regionBuilder.setTotalStaticIndexSizeKB(region.totalStaticIndexSizeKB);
+ regionBuilder.setTotalStaticBloomSizeKB(region.totalStaticBloomSizeKB);
+ regionBuilder.setTotalCompactingKVs(region.totalCompactingKVs);
+ regionBuilder.setCurrentCompactedKVs(region.currentCompactedKVs);
+ nodeBuilder.addRegions(regionBuilder);
+ }
+ builder.addLiveNodes(nodeBuilder);
+ }
+ for (String node: deadNodes) {
+ builder.addDeadNodes(node);
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException {
+ StorageClusterStatus.Builder builder = StorageClusterStatus.newBuilder();
+ builder.mergeFrom(message);
+ if (builder.hasRegions()) {
+ regions = builder.getRegions();
+ }
+ if (builder.hasRequests()) {
+ requests = builder.getRequests();
+ }
+ if (builder.hasAverageLoad()) {
+ averageLoad = builder.getAverageLoad();
+ }
+ for (StorageClusterStatus.Node node: builder.getLiveNodesList()) {
+ long startCode = node.hasStartCode() ? node.getStartCode() : -1;
+ StorageClusterStatusModel.Node nodeModel =
+ addLiveNode(node.getName(), startCode, node.getHeapSizeMB(),
+ node.getMaxHeapSizeMB());
+ int requests = node.hasRequests() ? node.getRequests() : 0;
+ nodeModel.setRequests(requests);
+ for (StorageClusterStatus.Region region: node.getRegionsList()) {
+ nodeModel.addRegion(
+ region.getName().toByteArray(),
+ region.getStores(),
+ region.getStorefiles(),
+ region.getStorefileSizeMB(),
+ region.getMemstoreSizeMB(),
+ region.getStorefileIndexSizeMB(),
+ region.getReadRequestsCount(),
+ region.getWriteRequestsCount(),
+ region.getRootIndexSizeKB(),
+ region.getTotalStaticIndexSizeKB(),
+ region.getTotalStaticBloomSizeKB(),
+ region.getTotalCompactingKVs(),
+ region.getCurrentCompactedKVs());
+ }
+ }
+ for (String node: builder.getDeadNodesList()) {
+ addDeadNode(node);
+ }
+ return this;
+ }
+}
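
A quick sketch of the protobuf round trip the model above implements. This is
not part of the patch; the server names, start code, and sizes are invented
for illustration.

// Sketch only, not part of HBASE-12197; hostnames and numbers are made up.
import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;

public class ClusterStatusSketch {
  public static void main(String[] args) throws Exception {
    StorageClusterStatusModel status = new StorageClusterStatusModel();
    status.setRegions(2);
    status.setRequests(0);
    status.setAverageLoad(1.0);
    status.addLiveNode("rs1.example.com", 1245219839331L, 128, 1024);
    status.addDeadNode("rs2.example.com");

    // Serialize to the StorageClusterStatus protobuf and parse it back.
    byte[] pb = status.createProtobufOutput();
    StorageClusterStatusModel copy = new StorageClusterStatusModel();
    copy.getObjectFromMessage(pb);
    System.out.print(copy);  // human-readable summary from toString()
  }
}
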
[21/38] HBASE-12197 Move rest to its own module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java
new file mode 100644
index 0000000..2e55181
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java
@@ -0,0 +1,508 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.DataInputStream;
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.ws.rs.core.MediaType;
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.Unmarshaller;
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.parsers.SAXParserFactory;
+import javax.xml.stream.XMLStreamException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.rest.client.Client;
+import org.apache.hadoop.hbase.rest.client.Cluster;
+import org.apache.hadoop.hbase.rest.client.Response;
+import org.apache.hadoop.hbase.rest.model.CellModel;
+import org.apache.hadoop.hbase.rest.model.CellSetModel;
+import org.apache.hadoop.hbase.rest.model.RowModel;
+import org.apache.hadoop.hbase.rest.provider.JacksonProvider;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.codehaus.jackson.JsonFactory;
+import org.codehaus.jackson.JsonParser;
+import org.codehaus.jackson.JsonToken;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.xml.sax.InputSource;
+import org.xml.sax.XMLReader;
+
+@Category(MediumTests.class)
+public class TestTableScan {
+
+ private static final String TABLE = "TestScanResource";
+ private static final String CFA = "a";
+ private static final String CFB = "b";
+ private static final String COLUMN_1 = CFA + ":1";
+ private static final String COLUMN_2 = CFB + ":2";
+ private static Client client;
+ private static int expectedRows1;
+ private static int expectedRows2;
+ private static Configuration conf;
+
+ private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private static final HBaseRESTTestingUtility REST_TEST_UTIL =
+ new HBaseRESTTestingUtility();
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ conf = TEST_UTIL.getConfiguration();
+ TEST_UTIL.startMiniCluster();
+ REST_TEST_UTIL.startServletContainer(conf);
+ client = new Client(new Cluster().add("localhost",
+ REST_TEST_UTIL.getServletPort()));
+ HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+ if (!admin.tableExists(TABLE)) {
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
+ htd.addFamily(new HColumnDescriptor(CFA));
+ htd.addFamily(new HColumnDescriptor(CFB));
+ admin.createTable(htd);
+ expectedRows1 = TestScannerResource.insertData(conf, TABLE, COLUMN_1, 1.0);
+ expectedRows2 = TestScannerResource.insertData(conf, TABLE, COLUMN_2, 0.5);
+ }
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ TEST_UTIL.getHBaseAdmin().disableTable(TABLE);
+ TEST_UTIL.getHBaseAdmin().deleteTable(TABLE);
+ REST_TEST_UTIL.shutdownServletContainer();
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ @Test
+ public void testSimpleScannerXML() throws IOException, JAXBException, XMLStreamException {
+ // Test scanning particular columns
+ StringBuilder builder = new StringBuilder();
+ builder.append("/*");
+ builder.append("?");
+ builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
+ builder.append("&");
+ builder.append(Constants.SCAN_LIMIT + "=10");
+ Response response = client.get("/" + TABLE + builder.toString(),
+ Constants.MIMETYPE_XML);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
+ JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class);
+ Unmarshaller ush = ctx.createUnmarshaller();
+ CellSetModel model = (CellSetModel) ush.unmarshal(response.getStream());
+ int count = TestScannerResource.countCellSet(model);
+ assertEquals(10, count);
+ checkRowsNotNull(model);
+
+ //Test with no limit.
+ builder = new StringBuilder();
+ builder.append("/*");
+ builder.append("?");
+ builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
+ response = client.get("/" + TABLE + builder.toString(),
+ Constants.MIMETYPE_XML);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
+ model = (CellSetModel) ush.unmarshal(response.getStream());
+ count = TestScannerResource.countCellSet(model);
+ assertEquals(expectedRows1, count);
+ checkRowsNotNull(model);
+
+ //Test with start and end row.
+ builder = new StringBuilder();
+ builder.append("/*");
+ builder.append("?");
+ builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
+ builder.append("&");
+ builder.append(Constants.SCAN_START_ROW + "=aaa");
+ builder.append("&");
+ builder.append(Constants.SCAN_END_ROW + "=aay");
+ response = client.get("/" + TABLE + builder.toString(),
+ Constants.MIMETYPE_XML);
+ assertEquals(200, response.getCode());
+ model = (CellSetModel) ush.unmarshal(response.getStream());
+ count = TestScannerResource.countCellSet(model);
+ RowModel startRow = model.getRows().get(0);
+ assertEquals("aaa", Bytes.toString(startRow.getKey()));
+ RowModel endRow = model.getRows().get(model.getRows().size() - 1);
+ assertEquals("aax", Bytes.toString(endRow.getKey()));
+ assertEquals(24, count);
+ checkRowsNotNull(model);
+
+ //Test with start row and limit.
+ builder = new StringBuilder();
+ builder.append("/*");
+ builder.append("?");
+ builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
+ builder.append("&");
+ builder.append(Constants.SCAN_START_ROW + "=aaa");
+ builder.append("&");
+ builder.append(Constants.SCAN_LIMIT + "=15");
+ response = client.get("/" + TABLE + builder.toString(),
+ Constants.MIMETYPE_XML);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
+ model = (CellSetModel) ush.unmarshal(response.getStream());
+ startRow = model.getRows().get(0);
+ assertEquals("aaa", Bytes.toString(startRow.getKey()));
+ count = TestScannerResource.countCellSet(model);
+ assertEquals(15, count);
+ checkRowsNotNull(model);
+
+ }
+
+ @Test
+ public void testSimpleScannerJson() throws IOException, JAXBException {
+ // Test scanning particular columns with limit.
+ StringBuilder builder = new StringBuilder();
+ builder.append("/*");
+ builder.append("?");
+ builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
+ builder.append("&");
+ builder.append(Constants.SCAN_LIMIT + "=20");
+ Response response = client.get("/" + TABLE + builder.toString(),
+ Constants.MIMETYPE_JSON);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
+ ObjectMapper mapper = new JacksonProvider()
+ .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE);
+ CellSetModel model = mapper.readValue(response.getStream(), CellSetModel.class);
+ int count = TestScannerResource.countCellSet(model);
+ assertEquals(20, count);
+ checkRowsNotNull(model);
+
+ //Test scanning with no limit.
+ builder = new StringBuilder();
+ builder.append("/*");
+ builder.append("?");
+ builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_2);
+ response = client.get("/" + TABLE + builder.toString(),
+ Constants.MIMETYPE_JSON);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
+ model = mapper.readValue(response.getStream(), CellSetModel.class);
+ count = TestScannerResource.countCellSet(model);
+ assertEquals(expectedRows2, count);
+ checkRowsNotNull(model);
+
+ //Test with start row and end row.
+ builder = new StringBuilder();
+ builder.append("/*");
+ builder.append("?");
+ builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
+ builder.append("&");
+ builder.append(Constants.SCAN_START_ROW + "=aaa");
+ builder.append("&");
+ builder.append(Constants.SCAN_END_ROW + "=aay");
+ response = client.get("/" + TABLE + builder.toString(),
+ Constants.MIMETYPE_JSON);
+ assertEquals(200, response.getCode());
+ model = mapper.readValue(response.getStream(), CellSetModel.class);
+ RowModel startRow = model.getRows().get(0);
+ assertEquals("aaa", Bytes.toString(startRow.getKey()));
+ RowModel endRow = model.getRows().get(model.getRows().size() - 1);
+ assertEquals("aax", Bytes.toString(endRow.getKey()));
+ count = TestScannerResource.countCellSet(model);
+ assertEquals(24, count);
+ checkRowsNotNull(model);
+ }
+
+ /**
+ * An example of scanning using a listener registered on the XML unmarshaller.
+ * @throws Exception if the scan request or unmarshalling fails
+ */
+ @Test
+ public void testScanUsingListenerUnmarshallerXML() throws Exception {
+ StringBuilder builder = new StringBuilder();
+ builder.append("/*");
+ builder.append("?");
+ builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
+ builder.append("&");
+ builder.append(Constants.SCAN_LIMIT + "=10");
+ Response response = client.get("/" + TABLE + builder.toString(),
+ Constants.MIMETYPE_XML);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
+ JAXBContext context = JAXBContext.newInstance(ClientSideCellSetModel.class, RowModel.class,
+ CellModel.class);
+ Unmarshaller unmarshaller = context.createUnmarshaller();
+
+ final ClientSideCellSetModel.Listener listener = new ClientSideCellSetModel.Listener() {
+ @Override
+ public void handleRowModel(ClientSideCellSetModel helper, RowModel row) {
+ assertTrue(row.getKey() != null);
+ assertTrue(row.getCells().size() > 0);
+ }
+ };
+
+ // install the callback on all ClientSideCellSetModel instances
+ unmarshaller.setListener(new Unmarshaller.Listener() {
+ public void beforeUnmarshal(Object target, Object parent) {
+ if (target instanceof ClientSideCellSetModel) {
+ ((ClientSideCellSetModel) target).setCellSetModelListener(listener);
+ }
+ }
+
+ public void afterUnmarshal(Object target, Object parent) {
+ if (target instanceof ClientSideCellSetModel) {
+ ((ClientSideCellSetModel) target).setCellSetModelListener(null);
+ }
+ }
+ });
+
+ // create a new XML parser
+ SAXParserFactory factory = SAXParserFactory.newInstance();
+ factory.setNamespaceAware(true);
+ XMLReader reader = factory.newSAXParser().getXMLReader();
+ reader.setContentHandler(unmarshaller.getUnmarshallerHandler());
+ assertFalse(ClientSideCellSetModel.listenerInvoked);
+ reader.parse(new InputSource(response.getStream()));
+ assertTrue(ClientSideCellSetModel.listenerInvoked);
+
+ }
+
+ @Test
+ public void testStreamingJSON() throws Exception {
+ // Test scanning particular columns with limit.
+ StringBuilder builder = new StringBuilder();
+ builder.append("/*");
+ builder.append("?");
+ builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
+ builder.append("&");
+ builder.append(Constants.SCAN_LIMIT + "=20");
+ Response response = client.get("/" + TABLE + builder.toString(),
+ Constants.MIMETYPE_JSON);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
+ ObjectMapper mapper = new JacksonProvider()
+ .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE);
+ CellSetModel model = mapper.readValue(response.getStream(), CellSetModel.class);
+ int count = TestScannerResource.countCellSet(model);
+ assertEquals(20, count);
+ checkRowsNotNull(model);
+
+ //Test scanning with no limit.
+ builder = new StringBuilder();
+ builder.append("/*");
+ builder.append("?");
+ builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_2);
+ response = client.get("/" + TABLE + builder.toString(),
+ Constants.MIMETYPE_JSON);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
+ model = mapper.readValue(response.getStream(), CellSetModel.class);
+ count = TestScannerResource.countCellSet(model);
+ assertEquals(expectedRows2, count);
+ checkRowsNotNull(model);
+
+ //Test with start row and end row.
+ builder = new StringBuilder();
+ builder.append("/*");
+ builder.append("?");
+ builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
+ builder.append("&");
+ builder.append(Constants.SCAN_START_ROW + "=aaa");
+ builder.append("&");
+ builder.append(Constants.SCAN_END_ROW + "=aay");
+ response = client.get("/" + TABLE + builder.toString(),
+ Constants.MIMETYPE_JSON);
+ assertEquals(200, response.getCode());
+
+ count = 0;
+ JsonFactory jfactory = new JsonFactory(mapper);
+ JsonParser jParser = jfactory.createJsonParser(response.getStream());
+ boolean found = false;
+ while (jParser.nextToken() != JsonToken.END_OBJECT) {
+ if(jParser.getCurrentToken() == JsonToken.START_OBJECT && found) {
+ RowModel row = jParser.readValueAs(RowModel.class);
+ assertNotNull(row.getKey());
+ for (int i = 0; i < row.getCells().size(); i++) {
+ if (count == 0) {
+ assertEquals("aaa", Bytes.toString(row.getKey()));
+ }
+ if (count == 23) {
+ assertEquals("aax", Bytes.toString(row.getKey()));
+ }
+ count++;
+ }
+ jParser.skipChildren();
+ } else {
+ found = jParser.getCurrentToken() == JsonToken.START_ARRAY;
+ }
+ }
+ assertEquals(24, count);
+ }
+
+ @Test
+ public void testSimpleScannerProtobuf() throws Exception {
+ StringBuilder builder = new StringBuilder();
+ builder.append("/*");
+ builder.append("?");
+ builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
+ builder.append("&");
+ builder.append(Constants.SCAN_LIMIT + "=15");
+ Response response = client.get("/" + TABLE + builder.toString(),
+ Constants.MIMETYPE_PROTOBUF);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
+ int rowCount = readProtobufStream(response.getStream());
+ assertEquals(15, rowCount);
+
+ //Test with start row and end row.
+ builder = new StringBuilder();
+ builder.append("/*");
+ builder.append("?");
+ builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
+ builder.append("&");
+ builder.append(Constants.SCAN_START_ROW + "=aaa");
+ builder.append("&");
+ builder.append(Constants.SCAN_END_ROW + "=aay");
+ response = client.get("/" + TABLE + builder.toString(),
+ Constants.MIMETYPE_PROTOBUF);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
+ rowCount = readProtobufStream(response.getStream());
+ assertEquals(24, rowCount);
+ }
+
+ private void checkRowsNotNull(CellSetModel model) {
+ for (RowModel row: model.getRows()) {
+ assertTrue(row.getKey() != null);
+ assertTrue(row.getCells().size() > 0);
+ }
+ }
+
+ /**
+ * Read protobuf stream.
+ * @param inputStream the input stream
+ * @return The number of rows in the cell set model.
+ * @throws IOException Signals that an I/O exception has occurred.
+ */
+ public int readProtobufStream(InputStream inputStream) throws IOException{
+ DataInputStream stream = new DataInputStream(inputStream);
+ CellSetModel model = null;
+ int rowCount = 0;
+ try {
+ while (true) {
+ byte[] lengthBytes = new byte[2];
+ int readBytes = stream.read(lengthBytes);
+ if (readBytes == -1) {
+ break;
+ }
+ assertEquals(2, readBytes);
+ int length = Bytes.toShort(lengthBytes);
+ byte[] cellset = new byte[length];
+ stream.read(cellset);
+ model = new CellSetModel();
+ model.getObjectFromMessage(cellset);
+ checkRowsNotNull(model);
+ rowCount = rowCount + TestScannerResource.countCellSet(model);
+ }
+ } catch (EOFException exp) {
+ exp.printStackTrace();
+ } finally {
+ stream.close();
+ }
+ return rowCount;
+ }
+
+ @Test
+ public void testScanningUnknownColumnJson() throws IOException, JAXBException {
+ // Test scanning particular columns with limit.
+ StringBuilder builder = new StringBuilder();
+ builder.append("/*");
+ builder.append("?");
+ builder.append(Constants.SCAN_COLUMN + "=a:test");
+ Response response = client.get("/" + TABLE + builder.toString(),
+ Constants.MIMETYPE_JSON);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
+ ObjectMapper mapper = new JacksonProvider().locateMapper(CellSetModel.class,
+ MediaType.APPLICATION_JSON_TYPE);
+ CellSetModel model = mapper.readValue(response.getStream(), CellSetModel.class);
+ int count = TestScannerResource.countCellSet(model);
+ assertEquals(0, count);
+ }
+
+ /**
+ * A client-side counterpart of the cell set model. It mimics CellSetModel and invokes
+ * a listener so user-defined operations can be performed on each unmarshalled row model.
+ */
+ @XmlRootElement(name = "CellSet")
+ @XmlAccessorType(XmlAccessType.FIELD)
+ public static class ClientSideCellSetModel implements Serializable {
+
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * This list is not a real list; instead it will notify a listener whenever JAXB has
+ * unmarshalled the next row.
+ */
+ @XmlElement(name="Row")
+ private List<RowModel> row;
+
+ static boolean listenerInvoked = false;
+
+ /**
+ * Install a listener for row model on this object. If l is null, the listener
+ * is removed again.
+ */
+ public void setCellSetModelListener(final Listener l) {
+ row = (l == null) ? null : new ArrayList<RowModel>() {
+ private static final long serialVersionUID = 1L;
+
+ public boolean add(RowModel o) {
+ l.handleRowModel(ClientSideCellSetModel.this, o);
+ listenerInvoked = true;
+ return false;
+ }
+ };
+ }
+
+ /**
+ * This listener is invoked every time a new row model is unmarshalled.
+ */
+ public static interface Listener {
+ void handleRowModel(ClientSideCellSetModel helper, RowModel rowModel);
+ }
+ }
+}
+
+
+
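The stateless scanner URIs exercised above all follow one pattern. Here is a
sketch of the same call made through the REST client outside the test harness.
It is not part of the patch: the gateway address is an assumption, and the
literal parameter names ("column", "startrow", "endrow", "limit") are what the
Constants fields used above resolve to in 0.98.

// Sketch only, not part of HBASE-12197. Assumes a REST gateway on localhost:8080.
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;

public class StatelessScanSketch {
  public static void main(String[] args) throws Exception {
    Client client = new Client(new Cluster().add("localhost", 8080));
    // GET /<table>/*?column=<family>:<qualifier>&startrow=...&endrow=...&limit=...
    Response response = client.get(
        "/TestScanResource/*?column=a:1&startrow=aaa&endrow=aay&limit=15",
        "application/json");
    System.out.println(response.getCode());  // expect 200 with a CellSet body
  }
}
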
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java
new file mode 100644
index 0000000..ef68084
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java
@@ -0,0 +1,179 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.rest.client.Client;
+import org.apache.hadoop.hbase.rest.client.Cluster;
+import org.apache.hadoop.hbase.rest.client.Response;
+import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel;
+import org.apache.hadoop.hbase.rest.model.VersionModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import static org.junit.Assert.*;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.sun.jersey.spi.container.servlet.ServletContainer;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestVersionResource {
+ private static final Log LOG = LogFactory.getLog(TestVersionResource.class);
+
+ private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private static final HBaseRESTTestingUtility REST_TEST_UTIL =
+ new HBaseRESTTestingUtility();
+ private static Client client;
+ private static JAXBContext context;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ TEST_UTIL.startMiniCluster();
+ REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
+ client = new Client(new Cluster().add("localhost",
+ REST_TEST_UTIL.getServletPort()));
+ context = JAXBContext.newInstance(
+ VersionModel.class,
+ StorageClusterVersionModel.class);
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ REST_TEST_UTIL.shutdownServletContainer();
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ private static void validate(VersionModel model) {
+ assertNotNull(model);
+ assertNotNull(model.getRESTVersion());
+ assertEquals(model.getRESTVersion(), RESTServlet.VERSION_STRING);
+ String osVersion = model.getOSVersion();
+ assertNotNull(osVersion);
+ assertTrue(osVersion.contains(System.getProperty("os.name")));
+ assertTrue(osVersion.contains(System.getProperty("os.version")));
+ assertTrue(osVersion.contains(System.getProperty("os.arch")));
+ String jvmVersion = model.getJVMVersion();
+ assertNotNull(jvmVersion);
+ assertTrue(jvmVersion.contains(System.getProperty("java.vm.vendor")));
+ assertTrue(jvmVersion.contains(System.getProperty("java.version")));
+ assertTrue(jvmVersion.contains(System.getProperty("java.vm.version")));
+ assertNotNull(model.getServerVersion());
+ String jerseyVersion = model.getJerseyVersion();
+ assertNotNull(jerseyVersion);
+ assertEquals(jerseyVersion, ServletContainer.class.getPackage()
+ .getImplementationVersion());
+ }
+
+ @Test
+ public void testGetStargateVersionText() throws IOException {
+ Response response = client.get("/version", Constants.MIMETYPE_TEXT);
+ assertTrue(response.getCode() == 200);
+ assertEquals(Constants.MIMETYPE_TEXT, response.getHeader("content-type"));
+ String body = Bytes.toString(response.getBody());
+ assertTrue(body.length() > 0);
+ assertTrue(body.contains(RESTServlet.VERSION_STRING));
+ assertTrue(body.contains(System.getProperty("java.vm.vendor")));
+ assertTrue(body.contains(System.getProperty("java.version")));
+ assertTrue(body.contains(System.getProperty("java.vm.version")));
+ assertTrue(body.contains(System.getProperty("os.name")));
+ assertTrue(body.contains(System.getProperty("os.version")));
+ assertTrue(body.contains(System.getProperty("os.arch")));
+ assertTrue(body.contains(ServletContainer.class.getPackage()
+ .getImplementationVersion()));
+ }
+
+ @Test
+ public void testGetStargateVersionXML() throws IOException, JAXBException {
+ Response response = client.get("/version", Constants.MIMETYPE_XML);
+ assertTrue(response.getCode() == 200);
+ assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
+ VersionModel model = (VersionModel)
+ context.createUnmarshaller().unmarshal(
+ new ByteArrayInputStream(response.getBody()));
+ validate(model);
+ LOG.info("success retrieving Stargate version as XML");
+ }
+
+ @Test
+ public void testGetStargateVersionJSON() throws IOException {
+ Response response = client.get("/version", Constants.MIMETYPE_JSON);
+ assertTrue(response.getCode() == 200);
+ assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
+ }
+
+ @Test
+ public void testGetStargateVersionPB() throws IOException {
+ Response response = client.get("/version", Constants.MIMETYPE_PROTOBUF);
+ assertTrue(response.getCode() == 200);
+ assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
+ VersionModel model = new VersionModel();
+ model.getObjectFromMessage(response.getBody());
+ validate(model);
+ response = client.get("/version", Constants.MIMETYPE_PROTOBUF_IETF);
+ assertTrue(response.getCode() == 200);
+ assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type"));
+ model = new VersionModel();
+ model.getObjectFromMessage(response.getBody());
+ validate(model);
+ }
+
+ @Test
+ public void testGetStorageClusterVersionText() throws IOException {
+ Response response = client.get("/version/cluster", Constants.MIMETYPE_TEXT);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_TEXT, response.getHeader("content-type"));
+ }
+
+ @Test
+ public void testGetStorageClusterVersionXML() throws IOException,
+ JAXBException {
+ Response response = client.get("/version/cluster",Constants.MIMETYPE_XML);
+ assertTrue(response.getCode() == 200);
+ assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
+ StorageClusterVersionModel clusterVersionModel =
+ (StorageClusterVersionModel)
+ context.createUnmarshaller().unmarshal(
+ new ByteArrayInputStream(response.getBody()));
+ assertNotNull(clusterVersionModel);
+ assertNotNull(clusterVersionModel.getVersion());
+ LOG.info("success retrieving storage cluster version as XML");
+ }
+
+ @Test
+ public void testGetStorageClusterVersionJSON() throws IOException {
+ Response response = client.get("/version/cluster", Constants.MIMETYPE_JSON);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
+ }
+
+}
+
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java
new file mode 100644
index 0000000..a5e5b93
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.client;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Tests {@link RemoteAdmin} retries.
+ */
+@Category(SmallTests.class)
+public class TestRemoteAdminRetries {
+
+ private static final int SLEEP_TIME = 50;
+ private static final int RETRIES = 3;
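+ // RETRIES attempts are separated by RETRIES - 1 sleeps, so a call that exhausts its
+ // retries should take at least MAX_TIME milliseconds.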
+ private static final long MAX_TIME = SLEEP_TIME * (RETRIES - 1);
+
+ private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+ private RemoteAdmin remoteAdmin;
+ private Client client;
+
+ @Before
+ public void setup() throws Exception {
+ client = mock(Client.class);
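+ // The REST client treats 509 as a retryable status, so every stubbed call below forces a retry.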
+ Response response = new Response(509);
+ when(client.get(anyString(), anyString())).thenReturn(response);
+ when(client.delete(anyString())).thenReturn(response);
+ when(client.put(anyString(), anyString(), any(byte[].class))).thenReturn(response);
+ when(client.post(anyString(), anyString(), any(byte[].class))).thenReturn(response);
+ Configuration configuration = TEST_UTIL.getConfiguration();
+
+ configuration.setInt("hbase.rest.client.max.retries", RETRIES);
+ configuration.setInt("hbase.rest.client.sleep", SLEEP_TIME);
+
+ remoteAdmin = new RemoteAdmin(client, TEST_UTIL.getConfiguration(), "MyTable");
+ }
+
+ @Test
+ public void testFailingGetRestVersion() throws Exception {
+ testTimedOutGetCall(new CallExecutor() {
+ @Override
+ public void run() throws Exception {
+ remoteAdmin.getRestVersion();
+ }
+ });
+ }
+
+ @Test
+ public void testFailingGetClusterStatus() throws Exception {
+ testTimedOutGetCall(new CallExecutor() {
+ @Override
+ public void run() throws Exception {
+ remoteAdmin.getClusterStatus();
+ }
+ });
+ }
+
+ @Test
+ public void testFailingGetClusterVersion() throws Exception {
+ testTimedOutGetCall(new CallExecutor() {
+ @Override
+ public void run() throws Exception {
+ remoteAdmin.getClusterVersion();
+ }
+ });
+ }
+
+ @Test
+ public void testFailingGetTableAvailable() throws Exception {
+ testTimedOutCall(new CallExecutor() {
+ @Override
+ public void run() throws Exception {
+ remoteAdmin.isTableAvailable(Bytes.toBytes("TestTable"));
+ }
+ });
+ }
+
+ @Test
+ @SuppressWarnings("deprecation")
+ public void testFailingCreateTable() throws Exception {
+ testTimedOutCall(new CallExecutor() {
+ @Override
+ public void run() throws Exception {
+ remoteAdmin.createTable(new HTableDescriptor(Bytes.toBytes("TestTable")));
+ }
+ });
+ verify(client, times(RETRIES)).put(anyString(), anyString(), any(byte[].class));
+ }
+
+ @Test
+ public void testFailingDeleteTable() throws Exception {
+ testTimedOutCall(new CallExecutor() {
+ @Override
+ public void run() throws Exception {
+ remoteAdmin.deleteTable("TestTable");
+ }
+ });
+ verify(client, times(RETRIES)).delete(anyString());
+ }
+
+ @Test
+ public void testFailingGetTableList() throws Exception {
+ testTimedOutGetCall(new CallExecutor() {
+ @Override
+ public void run() throws Exception {
+ remoteAdmin.getTableList();
+ }
+ });
+ }
+
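+ // Helpers below run the call, expect the retry-exhausted IOException, and check that at least MAX_TIME elapsed.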
+ private void testTimedOutGetCall(CallExecutor callExecutor) throws Exception {
+ testTimedOutCall(callExecutor);
+ verify(client, times(RETRIES)).get(anyString(), anyString());
+ }
+
+ private void testTimedOutCall(CallExecutor callExecutor) throws Exception {
+ long start = System.currentTimeMillis();
+ try {
+ callExecutor.run();
+ fail("should be timeout exception!");
+ } catch (IOException e) {
+ assertTrue(Pattern.matches(".*MyTable.*timed out", e.toString()));
+ }
+ assertTrue((System.currentTimeMillis() - start) > MAX_TIME);
+ }
+
+ private interface CallExecutor {
+ void run() throws Exception;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java
new file mode 100644
index 0000000..547dfab
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.client;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Tests {@link RemoteHTable} retries.
+ */
+@Category(SmallTests.class)
+public class TestRemoteHTableRetries {
+
+ private static final int SLEEP_TIME = 50;
+ private static final int RETRIES = 3;
+ private static final long MAX_TIME = SLEEP_TIME * (RETRIES - 1);
+
+ private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+ private static final byte[] ROW_1 = Bytes.toBytes("testrow1");
+ private static final byte[] COLUMN_1 = Bytes.toBytes("a");
+ private static final byte[] QUALIFIER_1 = Bytes.toBytes("1");
+ private static final byte[] VALUE_1 = Bytes.toBytes("testvalue1");
+
+ private Client client;
+ private RemoteHTable remoteTable;
+
+ @Before
+ public void setup() throws Exception {
+ client = mock(Client.class);
+ Response response = new Response(509);
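+ // Returning 509 from every verb makes each operation retry until it gives up.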
+ when(client.get(anyString(), anyString())).thenReturn(response);
+ when(client.delete(anyString())).thenReturn(response);
+ when(client.put(anyString(), anyString(), any(byte[].class))).thenReturn(
+ response);
+ when(client.post(anyString(), anyString(), any(byte[].class))).thenReturn(
+ response);
+
+ Configuration configuration = TEST_UTIL.getConfiguration();
+ configuration.setInt("hbase.rest.client.max.retries", RETRIES);
+ configuration.setInt("hbase.rest.client.sleep", SLEEP_TIME);
+
+ remoteTable = new RemoteHTable(client, TEST_UTIL.getConfiguration(),
+ "MyTable");
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ remoteTable.close();
+ }
+
+ @Test
+ public void testDelete() throws Exception {
+ testTimedOutCall(new CallExecutor() {
+ @Override
+ public void run() throws Exception {
+ Delete delete = new Delete(Bytes.toBytes("delete"));
+ remoteTable.delete(delete);
+ }
+ });
+ verify(client, times(RETRIES)).delete(anyString());
+ }
+
+ @Test
+ public void testGet() throws Exception {
+ testTimedOutGetCall(new CallExecutor() {
+ @Override
+ public void run() throws Exception {
+ remoteTable.get(new Get(Bytes.toBytes("Get")));
+ }
+ });
+ }
+
+ @Test
+ public void testSingleRowPut() throws Exception {
+ testTimedOutCall(new CallExecutor() {
+ @Override
+ public void run() throws Exception {
+ remoteTable.put(new Put(Bytes.toBytes("Row")));
+ }
+ });
+ verify(client, times(RETRIES)).put(anyString(), anyString(), any(byte[].class));
+ }
+
+ @Test
+ public void testMultiRowPut() throws Exception {
+ testTimedOutCall(new CallExecutor() {
+ @Override
+ public void run() throws Exception {
+ Put[] puts = { new Put(Bytes.toBytes("Row1")),
+ new Put(Bytes.toBytes("Row2")) };
+ remoteTable.put(Arrays.asList(puts));
+ }
+ });
+ verify(client, times(RETRIES)).put(anyString(), anyString(), any(byte[].class));
+ }
+
+ @Test
+ public void testGetScanner() throws Exception {
+ testTimedOutCall(new CallExecutor() {
+ @Override
+ public void run() throws Exception {
+ remoteTable.getScanner(new Scan());
+ }
+ });
+ verify(client, times(RETRIES)).post(anyString(), anyString(), any(byte[].class));
+ }
+
+ @Test
+ public void testCheckAndPut() throws Exception {
+ testTimedOutCall(new CallExecutor() {
+ @Override
+ public void run() throws Exception {
+ Put put = new Put(ROW_1);
+ put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
+ remoteTable.checkAndPut(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_1, put);
+ }
+ });
+ verify(client, times(RETRIES)).put(anyString(), anyString(), any(byte[].class));
+ }
+
+ @Test
+ public void testCheckAndDelete() throws Exception {
+ testTimedOutCall(new CallExecutor() {
+ @Override
+ public void run() throws Exception {
+ Put put = new Put(ROW_1);
+ put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
+ Delete delete = new Delete(ROW_1);
+ remoteTable.checkAndDelete(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_1, delete);
+ }
+ });
+ }
+
+ private void testTimedOutGetCall(CallExecutor callExecutor) throws Exception {
+ testTimedOutCall(callExecutor);
+ verify(client, times(RETRIES)).get(anyString(), anyString());
+ }
+
+ private void testTimedOutCall(CallExecutor callExecutor) throws Exception {
+ long start = System.currentTimeMillis();
+ try {
+ callExecutor.run();
+ fail("should be timeout exception!");
+ } catch (IOException e) {
+ assertTrue(Pattern.matches(".*request timed out", e.toString()));
+ }
+ assertTrue((System.currentTimeMillis() - start) > MAX_TIME);
+ }
+
+ private interface CallExecutor {
+ void run() throws Exception;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
new file mode 100644
index 0000000..76fb800
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
@@ -0,0 +1,538 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.client;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.commons.httpclient.Header;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.rest.HBaseRESTTestingUtility;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestRemoteTable {
+ private static final String TABLE = "TestRemoteTable";
+ private static final byte[] ROW_1 = Bytes.toBytes("testrow1");
+ private static final byte[] ROW_2 = Bytes.toBytes("testrow2");
+ private static final byte[] ROW_3 = Bytes.toBytes("testrow3");
+ private static final byte[] ROW_4 = Bytes.toBytes("testrow4");
+ private static final byte[] COLUMN_1 = Bytes.toBytes("a");
+ private static final byte[] COLUMN_2 = Bytes.toBytes("b");
+ private static final byte[] COLUMN_3 = Bytes.toBytes("c");
+ private static final byte[] QUALIFIER_1 = Bytes.toBytes("1");
+ private static final byte[] QUALIFIER_2 = Bytes.toBytes("2");
+ private static final byte[] VALUE_1 = Bytes.toBytes("testvalue1");
+ private static final byte[] VALUE_2 = Bytes.toBytes("testvalue2");
+
+ private static final long ONE_HOUR = 60 * 60 * 1000;
+ private static final long TS_2 = System.currentTimeMillis();
+ private static final long TS_1 = TS_2 - ONE_HOUR;
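+ // Two timestamps an hour apart let the tests below exercise timestamp, time-range and maxVersions queries.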
+
+ private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private static final HBaseRESTTestingUtility REST_TEST_UTIL =
+ new HBaseRESTTestingUtility();
+ private RemoteHTable remoteTable;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ TEST_UTIL.startMiniCluster();
+ REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
+ }
+
+ @Before
+ public void before() throws Exception {
+ HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+ if (admin.tableExists(TABLE)) {
+ if (admin.isTableEnabled(TABLE)) {
+ admin.disableTable(TABLE);
+ }
+ admin.deleteTable(TABLE);
+ }
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
+ htd.addFamily(new HColumnDescriptor(COLUMN_1).setMaxVersions(3));
+ htd.addFamily(new HColumnDescriptor(COLUMN_2).setMaxVersions(3));
+ htd.addFamily(new HColumnDescriptor(COLUMN_3).setMaxVersions(3));
+ admin.createTable(htd);
+ HTable table = null;
+ try {
+ table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
+ Put put = new Put(ROW_1);
+ put.add(COLUMN_1, QUALIFIER_1, TS_2, VALUE_1);
+ table.put(put);
+ put = new Put(ROW_2);
+ put.add(COLUMN_1, QUALIFIER_1, TS_1, VALUE_1);
+ put.add(COLUMN_1, QUALIFIER_1, TS_2, VALUE_2);
+ put.add(COLUMN_2, QUALIFIER_2, TS_2, VALUE_2);
+ table.put(put);
+ table.flushCommits();
+ } finally {
+ if (table != null) {
+ table.close();
+ }
+ }
+ remoteTable = new RemoteHTable(
+ new Client(new Cluster().add("localhost",
+ REST_TEST_UTIL.getServletPort())),
+ TEST_UTIL.getConfiguration(), TABLE);
+ }
+
+ @After
+ public void after() throws Exception {
+ remoteTable.close();
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ REST_TEST_UTIL.shutdownServletContainer();
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ @Test
+ public void testGetTableDescriptor() throws IOException {
+ HTable table = null;
+ try {
+ table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
+ HTableDescriptor local = table.getTableDescriptor();
+ assertEquals(remoteTable.getTableDescriptor(), local);
+ } finally {
+ if (table != null) {
+ table.close();
+ }
+ }
+ }
+
+ @Test
+ public void testGet() throws IOException {
+ Get get = new Get(ROW_1);
+ Result result = remoteTable.get(get);
+ byte[] value1 = result.getValue(COLUMN_1, QUALIFIER_1);
+ byte[] value2 = result.getValue(COLUMN_2, QUALIFIER_2);
+ assertNotNull(value1);
+ assertTrue(Bytes.equals(VALUE_1, value1));
+ assertNull(value2);
+
+ get = new Get(ROW_1);
+ get.addFamily(COLUMN_3);
+ result = remoteTable.get(get);
+ value1 = result.getValue(COLUMN_1, QUALIFIER_1);
+ value2 = result.getValue(COLUMN_2, QUALIFIER_2);
+ assertNull(value1);
+ assertNull(value2);
+
+ get = new Get(ROW_1);
+ get.addColumn(COLUMN_1, QUALIFIER_1);
+ get.addColumn(COLUMN_2, QUALIFIER_2);
+ result = remoteTable.get(get);
+ value1 = result.getValue(COLUMN_1, QUALIFIER_1);
+ value2 = result.getValue(COLUMN_2, QUALIFIER_2);
+ assertNotNull(value1);
+ assertTrue(Bytes.equals(VALUE_1, value1));
+ assertNull(value2);
+
+ get = new Get(ROW_2);
+ result = remoteTable.get(get);
+ value1 = result.getValue(COLUMN_1, QUALIFIER_1);
+ value2 = result.getValue(COLUMN_2, QUALIFIER_2);
+ assertNotNull(value1);
+ assertTrue(Bytes.equals(VALUE_2, value1)); // @TS_2
+ assertNotNull(value2);
+ assertTrue(Bytes.equals(VALUE_2, value2));
+
+ get = new Get(ROW_2);
+ get.addFamily(COLUMN_1);
+ result = remoteTable.get(get);
+ value1 = result.getValue(COLUMN_1, QUALIFIER_1);
+ value2 = result.getValue(COLUMN_2, QUALIFIER_2);
+ assertNotNull(value1);
+ assertTrue(Bytes.equals(VALUE_2, value1)); // @TS_2
+ assertNull(value2);
+
+ get = new Get(ROW_2);
+ get.addColumn(COLUMN_1, QUALIFIER_1);
+ get.addColumn(COLUMN_2, QUALIFIER_2);
+ result = remoteTable.get(get);
+ value1 = result.getValue(COLUMN_1, QUALIFIER_1);
+ value2 = result.getValue(COLUMN_2, QUALIFIER_2);
+ assertNotNull(value1);
+ assertTrue(Bytes.equals(VALUE_2, value1)); // @TS_2
+ assertNotNull(value2);
+ assertTrue(Bytes.equals(VALUE_2, value2));
+
+ // test timestamp
+
+ get = new Get(ROW_2);
+ get.addFamily(COLUMN_1);
+ get.addFamily(COLUMN_2);
+ get.setTimeStamp(TS_1);
+ result = remoteTable.get(get);
+ value1 = result.getValue(COLUMN_1, QUALIFIER_1);
+ value2 = result.getValue(COLUMN_2, QUALIFIER_2);
+ assertNotNull(value1);
+ assertTrue(Bytes.equals(VALUE_1, value1)); // @TS_1
+ assertNull(value2);
+
+ // test timerange
+
+ get = new Get(ROW_2);
+ get.addFamily(COLUMN_1);
+ get.addFamily(COLUMN_2);
+ get.setTimeRange(0, TS_1 + 1);
+ result = remoteTable.get(get);
+ value1 = result.getValue(COLUMN_1, QUALIFIER_1);
+ value2 = result.getValue(COLUMN_2, QUALIFIER_2);
+ assertNotNull(value1);
+ assertTrue(Bytes.equals(VALUE_1, value1)); // @TS_1
+ assertNull(value2);
+
+ // test maxVersions
+
+ get = new Get(ROW_2);
+ get.addFamily(COLUMN_1);
+ get.setMaxVersions(2);
+ result = remoteTable.get(get);
+ int count = 0;
+ for (Cell kv: result.listCells()) {
+ if (CellUtil.matchingFamily(kv, COLUMN_1) && TS_1 == kv.getTimestamp()) {
+ assertTrue(CellUtil.matchingValue(kv, VALUE_1)); // @TS_1
+ count++;
+ }
+ if (CellUtil.matchingFamily(kv, COLUMN_1) && TS_2 == kv.getTimestamp()) {
+ assertTrue(CellUtil.matchingValue(kv, VALUE_2)); // @TS_2
+ count++;
+ }
+ }
+ assertEquals(2, count);
+ }
+
+ @Test
+ public void testMultiGet() throws Exception {
+ ArrayList<Get> gets = new ArrayList<Get>();
+ gets.add(new Get(ROW_1));
+ gets.add(new Get(ROW_2));
+ Result[] results = remoteTable.get(gets);
+ assertNotNull(results);
+ assertEquals(2, results.length);
+ assertEquals(1, results[0].size());
+ assertEquals(2, results[1].size());
+
+ // Test versions
+ gets = new ArrayList<Get>();
+ Get g = new Get(ROW_1);
+ g.setMaxVersions(3);
+ gets.add(g);
+ gets.add(new Get(ROW_2));
+ results = remoteTable.get(gets);
+ assertNotNull(results);
+ assertEquals(2, results.length);
+ assertEquals(1, results[0].size());
+ assertEquals(3, results[1].size());
+
+ // 404: rows that do not exist are omitted from the returned results
+ gets = new ArrayList<Get>();
+ gets.add(new Get(Bytes.toBytes("RESALLYREALLYNOTTHERE")));
+ results = remoteTable.get(gets);
+ assertNotNull(results);
+ assertEquals(0, results.length);
+
+ gets = new ArrayList<Get>();
+ gets.add(new Get(Bytes.toBytes("RESALLYREALLYNOTTHERE")));
+ gets.add(new Get(ROW_1));
+ gets.add(new Get(ROW_2));
+ results = remoteTable.get(gets);
+ assertNotNull(results);
+ assertEquals(2, results.length);
+ }
+
+ @Test
+ public void testPut() throws IOException {
+ Put put = new Put(ROW_3);
+ put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
+ remoteTable.put(put);
+
+ Get get = new Get(ROW_3);
+ get.addFamily(COLUMN_1);
+ Result result = remoteTable.get(get);
+ byte[] value = result.getValue(COLUMN_1, QUALIFIER_1);
+ assertNotNull(value);
+ assertTrue(Bytes.equals(VALUE_1, value));
+
+ // multiput
+
+ List<Put> puts = new ArrayList<Put>();
+ put = new Put(ROW_3);
+ put.add(COLUMN_2, QUALIFIER_2, VALUE_2);
+ puts.add(put);
+ put = new Put(ROW_4);
+ put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
+ puts.add(put);
+ put = new Put(ROW_4);
+ put.add(COLUMN_2, QUALIFIER_2, VALUE_2);
+ puts.add(put);
+ remoteTable.put(puts);
+
+ get = new Get(ROW_3);
+ get.addFamily(COLUMN_2);
+ result = remoteTable.get(get);
+ value = result.getValue(COLUMN_2, QUALIFIER_2);
+ assertNotNull(value);
+ assertTrue(Bytes.equals(VALUE_2, value));
+ get = new Get(ROW_4);
+ result = remoteTable.get(get);
+ value = result.getValue(COLUMN_1, QUALIFIER_1);
+ assertNotNull(value);
+ assertTrue(Bytes.equals(VALUE_1, value));
+ value = result.getValue(COLUMN_2, QUALIFIER_2);
+ assertNotNull(value);
+ assertTrue(Bytes.equals(VALUE_2, value));
+
+ assertTrue(Bytes.equals(Bytes.toBytes("TestRemoteTable"), remoteTable.getTableName()));
+ }
+
+ @Test
+ public void testDelete() throws IOException {
+ Put put = new Put(ROW_3);
+ put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
+ put.add(COLUMN_2, QUALIFIER_2, VALUE_2);
+ remoteTable.put(put);
+
+ Get get = new Get(ROW_3);
+ get.addFamily(COLUMN_1);
+ get.addFamily(COLUMN_2);
+ Result result = remoteTable.get(get);
+ byte[] value1 = result.getValue(COLUMN_1, QUALIFIER_1);
+ byte[] value2 = result.getValue(COLUMN_2, QUALIFIER_2);
+ assertNotNull(value1);
+ assertTrue(Bytes.equals(VALUE_1, value1));
+ assertNotNull(value2);
+ assertTrue(Bytes.equals(VALUE_2, value2));
+
+ Delete delete = new Delete(ROW_3);
+ delete.deleteColumn(COLUMN_2, QUALIFIER_2);
+ remoteTable.delete(delete);
+
+ get = new Get(ROW_3);
+ get.addFamily(COLUMN_1);
+ get.addFamily(COLUMN_2);
+ result = remoteTable.get(get);
+ value1 = result.getValue(COLUMN_1, QUALIFIER_1);
+ value2 = result.getValue(COLUMN_2, QUALIFIER_2);
+ assertNotNull(value1);
+ assertTrue(Bytes.equals(VALUE_1, value1));
+ assertNull(value2);
+
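+ // A full-row delete at timestamp 1 should remove nothing: every cell is newer.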
+ delete = new Delete(ROW_3);
+ delete.setTimestamp(1L);
+ remoteTable.delete(delete);
+
+ get = new Get(ROW_3);
+ get.addFamily(COLUMN_1);
+ get.addFamily(COLUMN_2);
+ result = remoteTable.get(get);
+ value1 = result.getValue(COLUMN_1, QUALIFIER_1);
+ value2 = result.getValue(COLUMN_2, QUALIFIER_2);
+ assertNotNull(value1);
+ assertTrue(Bytes.equals(VALUE_1, value1));
+ assertNull(value2);
+
+ delete = new Delete(ROW_3);
+ remoteTable.delete(delete);
+
+ get = new Get(ROW_3);
+ get.addFamily(COLUMN_1);
+ get.addFamily(COLUMN_2);
+ result = remoteTable.get(get);
+ value1 = result.getValue(COLUMN_1, QUALIFIER_1);
+ value2 = result.getValue(COLUMN_2, QUALIFIER_2);
+ assertNull(value1);
+ assertNull(value2);
+ }
+
+ /**
+ * Test RemoteHTable.Scanner
+ */
+ @Test
+ public void testScanner() throws IOException {
+ List<Put> puts = new ArrayList<Put>();
+ Put put = new Put(ROW_1);
+ put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
+ puts.add(put);
+ put = new Put(ROW_2);
+ put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
+ puts.add(put);
+ put = new Put(ROW_3);
+ put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
+ puts.add(put);
+ put = new Put(ROW_4);
+ put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
+ puts.add(put);
+ remoteTable.put(puts);
+
+ ResultScanner scanner = remoteTable.getScanner(new Scan());
+
+ Result[] results = scanner.next(1);
+ assertNotNull(results);
+ assertEquals(1, results.length);
+ assertTrue(Bytes.equals(ROW_1, results[0].getRow()));
+
+ Result result = scanner.next();
+ assertNotNull(result);
+ assertTrue(Bytes.equals(ROW_2, result.getRow()));
+
+ results = scanner.next(2);
+ assertNotNull(results);
+ assertEquals(2, results.length);
+ assertTrue(Bytes.equals(ROW_3, results[0].getRow()));
+ assertTrue(Bytes.equals(ROW_4, results[1].getRow()));
+
+ results = scanner.next(1);
+ assertNull(results);
+ scanner.close();
+
+ scanner = remoteTable.getScanner(COLUMN_1);
+ results = scanner.next(4);
+ assertNotNull(results);
+ assertEquals(4, results.length);
+ assertTrue(Bytes.equals(ROW_1, results[0].getRow()));
+ assertTrue(Bytes.equals(ROW_2, results[1].getRow()));
+ assertTrue(Bytes.equals(ROW_3, results[2].getRow()));
+ assertTrue(Bytes.equals(ROW_4, results[3].getRow()));
+
+ scanner.close();
+
+ scanner = remoteTable.getScanner(COLUMN_1, QUALIFIER_1);
+ results = scanner.next(4);
+ assertNotNull(results);
+ assertEquals(4, results.length);
+ assertTrue(Bytes.equals(ROW_1, results[0].getRow()));
+ assertTrue(Bytes.equals(ROW_2, results[1].getRow()));
+ assertTrue(Bytes.equals(ROW_3, results[2].getRow()));
+ assertTrue(Bytes.equals(ROW_4, results[3].getRow()));
+ scanner.close();
+ assertTrue(remoteTable.isAutoFlush());
+ }
+
+ @Test
+ public void testCheckAndDelete() throws IOException {
+ Get get = new Get(ROW_1);
+ Result result = remoteTable.get(get);
+ byte[] value1 = result.getValue(COLUMN_1, QUALIFIER_1);
+ byte[] value2 = result.getValue(COLUMN_2, QUALIFIER_2);
+ assertNotNull(value1);
+ assertTrue(Bytes.equals(VALUE_1, value1));
+ assertNull(value2);
+ assertTrue(remoteTable.exists(get));
+ assertEquals(1, remoteTable.exists(Collections.singletonList(get)).length);
+ Delete delete = new Delete(ROW_1);
+
+ remoteTable.checkAndDelete(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_1, delete);
+ assertFalse(remoteTable.exists(get));
+
+ Put put = new Put(ROW_1);
+ put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
+ remoteTable.put(put);
+
+ assertTrue(remoteTable.checkAndPut(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_1,
+ put));
+ assertFalse(remoteTable.checkAndPut(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_2,
+ put));
+ }
+
+ /**
+ * Test the RemoteHTable.Scanner iterator method.
+ */
+ @Test
+ public void testIteratorScanner() throws IOException {
+ List<Put> puts = new ArrayList<Put>();
+ Put put = new Put(ROW_1);
+ put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
+ puts.add(put);
+ put = new Put(ROW_2);
+ put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
+ puts.add(put);
+ put = new Put(ROW_3);
+ put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
+ puts.add(put);
+ put = new Put(ROW_4);
+ put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
+ puts.add(put);
+ remoteTable.put(puts);
+
+ ResultScanner scanner = remoteTable.getScanner(new Scan());
+ Iterator<Result> iterator = scanner.iterator();
+ assertTrue(iterator.hasNext());
+ int counter = 0;
+ while (iterator.hasNext()) {
+ iterator.next();
+ counter++;
+ }
+ assertEquals(4, counter);
+ }
+
+ /**
+ * Test some methods of the Response class.
+ */
+ @Test
+ public void testResponse() {
+ Response response = new Response(200);
+ assertEquals(200, response.getCode());
+ Header[] headers = new Header[2];
+ headers[0] = new Header("header1", "value1");
+ headers[1] = new Header("header2", "value2");
+ response = new Response(200, headers);
+ assertEquals("value1", response.getHeader("header1"));
+ assertFalse(response.hasBody());
+ response.setCode(404);
+ assertEquals(404, response.getCode());
+ headers = new Header[2];
+ headers[0] = new Header("header1", "value1.1");
+ headers[1] = new Header("header2", "value2");
+ response.setHeaders(headers);
+ assertEquals("value1.1", response.getHeader("header1"));
+ response.setBody(Bytes.toBytes("body"));
+ assertTrue(response.hasBody());
+ }
+
+}
+
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java
new file mode 100644
index 0000000..170dfab
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java
@@ -0,0 +1,84 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.StringWriter;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import com.sun.jersey.api.json.JSONJAXBContext;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import junit.framework.TestCase;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestCellModel extends TestModelBase<CellModel> {
+
+ private static final long TIMESTAMP = 1245219839331L;
+ private static final byte[] COLUMN = Bytes.toBytes("testcolumn");
+ private static final byte[] VALUE = Bytes.toBytes("testvalue");
+
+ public TestCellModel() throws Exception {
+ super(CellModel.class);
+ AS_XML =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><Cell " +
+ "column=\"dGVzdGNvbHVtbg==\" timestamp=\"1245219839331\">dGVzdHZhbHVl</Cell>";
+ AS_PB =
+ "Egp0ZXN0Y29sdW1uGOO6i+eeJCIJdGVzdHZhbHVl";
+
+ AS_JSON =
+ "{\"column\":\"dGVzdGNvbHVtbg==\",\"timestamp\":1245219839331,\"$\":\"dGVzdHZhbHVl\"}";
+ }
+
+ protected CellModel buildTestModel() {
+ CellModel model = new CellModel();
+ model.setColumn(COLUMN);
+ model.setTimestamp(TIMESTAMP);
+ model.setValue(VALUE);
+ return model;
+ }
+
+ protected void checkModel(CellModel model) {
+ assertTrue(Bytes.equals(model.getColumn(), COLUMN));
+ assertTrue(Bytes.equals(model.getValue(), VALUE));
+ assertTrue(model.hasUserTimestamp());
+ assertEquals(TIMESTAMP, model.getTimestamp());
+ }
+
+ public void testBuildModel() throws Exception {
+ checkModel(buildTestModel());
+ }
+
+ public void testFromXML() throws Exception {
+ checkModel(fromXML(AS_XML));
+ }
+
+ public void testFromPB() throws Exception {
+ checkModel(fromPB(AS_PB));
+ }
+
+}
+
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java
new file mode 100644
index 0000000..716da14
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java
@@ -0,0 +1,146 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.Iterator;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import junit.framework.TestCase;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestCellSetModel extends TestModelBase<CellSetModel> {
+
+ private static final byte[] ROW1 = Bytes.toBytes("testrow1");
+ private static final byte[] COLUMN1 = Bytes.toBytes("testcolumn1");
+ private static final byte[] VALUE1 = Bytes.toBytes("testvalue1");
+ private static final long TIMESTAMP1 = 1245219839331L;
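+ // Note: ROW2 reuses the key "testrow1" so the model matches the XML/PB/JSON fixtures below.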
+ private static final byte[] ROW2 = Bytes.toBytes("testrow1");
+ private static final byte[] COLUMN2 = Bytes.toBytes("testcolumn2");
+ private static final byte[] VALUE2 = Bytes.toBytes("testvalue2");
+ private static final long TIMESTAMP2 = 1245239813319L;
+ private static final byte[] COLUMN3 = Bytes.toBytes("testcolumn3");
+ private static final byte[] VALUE3 = Bytes.toBytes("testvalue3");
+ private static final long TIMESTAMP3 = 1245393318192L;
+
+ public TestCellSetModel() throws Exception {
+ super(CellSetModel.class);
+
+ AS_PB =
+ "CiwKCHRlc3Ryb3cxEiASC3Rlc3Rjb2x1bW4xGOO6i+eeJCIKdGVzdHZhbHVlMQpOCgh0ZXN0cm93" +
+ "MRIgEgt0ZXN0Y29sdW1uMhjHyc7wniQiCnRlc3R2YWx1ZTISIBILdGVzdGNvbHVtbjMYsOLnuZ8k" +
+ "Igp0ZXN0dmFsdWUz";
+
+ AS_XML =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><CellSet>" +
+ "<Row key=\"dGVzdHJvdzE=\"><Cell column=\"dGVzdGNvbHVtbjE=\" timestamp=\"1245219839331\">" +
+ "dGVzdHZhbHVlMQ==</Cell></Row><Row key=\"dGVzdHJvdzE=\">" +
+ "<Cell column=\"dGVzdGNvbHVtbjI=\" timestamp=\"1245239813319\">" +
+ "dGVzdHZhbHVlMg==</Cell>" +
+ "<Cell column=\"dGVzdGNvbHVtbjM=\" timestamp=\"1245393318192\">dGVzdHZhbHVlMw==</Cell>" +
+ "</Row></CellSet>";
+
+ AS_JSON =
+ "{\"Row\":[{\"key\":\"dGVzdHJvdzE=\"," +
+ "\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjE=\",\"timestamp\":1245219839331," +
+ "\"$\":\"dGVzdHZhbHVlMQ==\"}]},{\"key\":\"dGVzdHJvdzE=\"," +
+ "\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjI=\",\"timestamp\":1245239813319," +
+ "\"$\":\"dGVzdHZhbHVlMg==\"},{\"column\":\"dGVzdGNvbHVtbjM=\"," +
+ "\"timestamp\":1245393318192,\"$\":\"dGVzdHZhbHVlMw==\"}]}]}";
+ }
+
+ protected CellSetModel buildTestModel() {
+ CellSetModel model = new CellSetModel();
+ RowModel row;
+ row = new RowModel();
+ row.setKey(ROW1);
+ row.addCell(new CellModel(COLUMN1, TIMESTAMP1, VALUE1));
+ model.addRow(row);
+ row = new RowModel();
+ row.setKey(ROW2);
+ row.addCell(new CellModel(COLUMN2, TIMESTAMP2, VALUE2));
+ row.addCell(new CellModel(COLUMN3, TIMESTAMP3, VALUE3));
+ model.addRow(row);
+ return model;
+ }
+
+ protected void checkModel(CellSetModel model) {
+ Iterator<RowModel> rows = model.getRows().iterator();
+ RowModel row = rows.next();
+ assertTrue(Bytes.equals(ROW1, row.getKey()));
+ Iterator<CellModel> cells = row.getCells().iterator();
+ CellModel cell = cells.next();
+ assertTrue(Bytes.equals(COLUMN1, cell.getColumn()));
+ assertTrue(Bytes.equals(VALUE1, cell.getValue()));
+ assertTrue(cell.hasUserTimestamp());
+ assertEquals(TIMESTAMP1, cell.getTimestamp());
+ assertFalse(cells.hasNext());
+ row = rows.next();
+ assertTrue(Bytes.equals(ROW2, row.getKey()));
+ cells = row.getCells().iterator();
+ cell = cells.next();
+ assertTrue(Bytes.equals(COLUMN2, cell.getColumn()));
+ assertTrue(Bytes.equals(VALUE2, cell.getValue()));
+ assertTrue(cell.hasUserTimestamp());
+ assertEquals(TIMESTAMP2, cell.getTimestamp());
+ cell = cells.next();
+ assertTrue(Bytes.equals(COLUMN3, cell.getColumn()));
+ assertTrue(Bytes.equals(VALUE3, cell.getValue()));
+ assertTrue(cell.hasUserTimestamp());
+ assertEquals(TIMESTAMP3, cell.getTimestamp());
+ assertFalse(cells.hasNext());
+ }
+
+ public void testBuildModel() throws Exception {
+ checkModel(buildTestModel());
+ }
+
+ public void testFromXML() throws Exception {
+ checkModel(fromXML(AS_XML));
+ }
+
+ public void testFromPB() throws Exception {
+ checkModel(fromPB(AS_PB));
+ }
+
+}
+
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java
new file mode 100644
index 0000000..15e1652
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java
@@ -0,0 +1,86 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.StringReader;
+import java.io.StringWriter;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import junit.framework.TestCase;
+import org.apache.hadoop.hbase.SmallTests;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestColumnSchemaModel extends TestModelBase<ColumnSchemaModel> {
+
+ protected static final String COLUMN_NAME = "testcolumn";
+ protected static final boolean BLOCKCACHE = true;
+ protected static final int BLOCKSIZE = 16384;
+ protected static final String BLOOMFILTER = "NONE";
+ protected static final String COMPRESSION = "GZ";
+ protected static final boolean IN_MEMORY = false;
+ protected static final int TTL = 86400;
+ protected static final int VERSIONS = 1;
+
+ public TestColumnSchemaModel() throws Exception {
+ super(ColumnSchemaModel.class);
+ AS_XML =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><ColumnSchema " +
+ "name=\"testcolumn\" BLOCKSIZE=\"16384\" BLOOMFILTER=\"NONE\" BLOCKCACHE=\"true\" " +
+ "COMPRESSION=\"GZ\" VERSIONS=\"1\" TTL=\"86400\" IN_MEMORY=\"false\"/>";
+
+ AS_JSON =
+ "{\"name\":\"testcolumn\",\"BLOCKSIZE\":\"16384\",\"BLOOMFILTER\":\"NONE\"," +
+ "\"BLOCKCACHE\":\"true\",\"COMPRESSION\":\"GZ\",\"VERSIONS\":\"1\"," +
+ "\"TTL\":\"86400\",\"IN_MEMORY\":\"false\"}";
+ }
+
+ protected ColumnSchemaModel buildTestModel() {
+ ColumnSchemaModel model = new ColumnSchemaModel();
+ model.setName(COLUMN_NAME);
+ model.__setBlocksize(BLOCKSIZE);
+ model.__setBloomfilter(BLOOMFILTER);
+ model.__setBlockcache(BLOCKCACHE);
+ model.__setCompression(COMPRESSION);
+ model.__setVersions(VERSIONS);
+ model.__setTTL(TTL);
+ model.__setInMemory(IN_MEMORY);
+ return model;
+ }
+
+ protected void checkModel(ColumnSchemaModel model) {
+ assertEquals(COLUMN_NAME, model.getName());
+ assertEquals(BLOCKCACHE, model.__getBlockcache());
+ assertEquals(BLOCKSIZE, model.__getBlocksize());
+ assertEquals(BLOOMFILTER, model.__getBloomfilter());
+ assertTrue(model.__getCompression().equalsIgnoreCase(COMPRESSION));
+ assertEquals(IN_MEMORY, model.__getInMemory());
+ assertEquals(TTL, model.__getTTL());
+ assertEquals(VERSIONS, model.__getVersions());
+ }
+
+ public void testFromPB() throws Exception {
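+ // Overridden as a no-op: the constructor defines no AS_PB fixture for ColumnSchemaModel.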
+ }
+}
+
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestModelBase.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestModelBase.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestModelBase.java
new file mode 100644
index 0000000..500d924
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestModelBase.java
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import junit.framework.TestCase;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.rest.provider.JAXBContextResolver;
+import org.apache.hadoop.hbase.util.Base64;
+import org.codehaus.jackson.jaxrs.JacksonJaxbJsonProvider;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.node.ObjectNode;
+import org.junit.experimental.categories.Category;
+
+import javax.ws.rs.core.MediaType;
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.StringWriter;
+
+@Category(SmallTests.class)
+public abstract class TestModelBase<T> extends TestCase {
+
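+ // Each concrete subclass supplies the same test model serialized as XML, base64-encoded
+ // protobuf and JSON; the test* methods below round-trip those fixtures through the model class.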
+ protected String AS_XML;
+
+ protected String AS_PB;
+
+ protected String AS_JSON;
+
+ protected JAXBContext context;
+
+ protected Class<?> clazz;
+
+ protected ObjectMapper mapper;
+
+ protected TestModelBase(Class<?> clazz) throws Exception {
+ super();
+ this.clazz = clazz;
+ context = new JAXBContextResolver().getContext(clazz);
+ mapper = new JacksonJaxbJsonProvider().locateMapper(clazz,
+ MediaType.APPLICATION_JSON_TYPE);
+ }
+
+ protected abstract T buildTestModel();
+
+ @SuppressWarnings("unused")
+ protected String toXML(T model) throws JAXBException {
+ StringWriter writer = new StringWriter();
+ context.createMarshaller().marshal(model, writer);
+ return writer.toString();
+ }
+
+ protected String toJSON(T model) throws JAXBException, IOException {
+ StringWriter writer = new StringWriter();
+ mapper.writeValue(writer, model);
+// Original JSON marshaller; uncomment it (and comment out the mapper call above) to verify
+// backward compatibility:
+// ((JSONJAXBContext)context).createJSONMarshaller().marshallToJSON(model, writer);
+ return writer.toString();
+ }
+
+ public T fromJSON(String json) throws JAXBException, IOException {
+ return (T) mapper.readValue(json, clazz);
+ }
+
+ public T fromXML(String xml) throws JAXBException {
+ return (T) context.createUnmarshaller().unmarshal(new StringReader(xml));
+ }
+
+ @SuppressWarnings("unused")
+ protected byte[] toPB(ProtobufMessageHandler model) {
+ return model.createProtobufOutput();
+ }
+
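+ // Instantiates the model reflectively and parses the decoded protobuf through its
+ // getObjectFromMessage method.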
+ protected T fromPB(String pb) throws Exception {
+ return (T) clazz.getMethod("getObjectFromMessage", byte[].class).invoke(
+ clazz.newInstance(), Base64.decode(pb));
+ }
+
+ protected abstract void checkModel(T model);
+
+ public void testBuildModel() throws Exception {
+ checkModel(buildTestModel());
+ }
+
+ public void testFromPB() throws Exception {
+ checkModel(fromPB(AS_PB));
+ }
+
+ public void testFromXML() throws Exception {
+ checkModel(fromXML(AS_XML));
+ }
+
+ public void testToXML() throws Exception {
+ assertEquals(AS_XML, toXML(buildTestModel()));
+ }
+
+ public void testToJSON() throws Exception {
+ try {
+ ObjectNode expObj = mapper.readValue(AS_JSON, ObjectNode.class);
+ ObjectNode actObj = mapper.readValue(toJSON(buildTestModel()), ObjectNode.class);
+ assertEquals(expObj, actObj);
+ } catch(Exception e) {
+ assertEquals(AS_JSON, toJSON(buildTestModel()));
+ }
+ }
+
+ public void testFromJSON() throws Exception {
+ checkModel(fromJSON(AS_JSON));
+ }
+}
+
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java
new file mode 100644
index 0000000..e0068c8
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java
@@ -0,0 +1,79 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.Iterator;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import junit.framework.TestCase;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestRowModel extends TestModelBase<RowModel> {
+
+ private static final byte[] ROW1 = Bytes.toBytes("testrow1");
+ private static final byte[] COLUMN1 = Bytes.toBytes("testcolumn1");
+ private static final byte[] VALUE1 = Bytes.toBytes("testvalue1");
+ private static final long TIMESTAMP1 = 1245219839331L;
+
+ private JAXBContext context;
+
+ public TestRowModel() throws Exception {
+ super(RowModel.class);
+ AS_XML =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><Row key=\"dGVzdHJvdzE=\">" +
+ "<Cell column=\"dGVzdGNvbHVtbjE=\" timestamp=\"1245219839331\">dGVzdHZhbHVlMQ==</Cell></Row>";
+
+ AS_JSON =
+ "{\"key\":\"dGVzdHJvdzE=\",\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjE=\"," +
+ "\"timestamp\":1245219839331,\"$\":\"dGVzdHZhbHVlMQ==\"}]}";
+ }
+
+ protected RowModel buildTestModel() {
+ RowModel model = new RowModel();
+ model.setKey(ROW1);
+ model.addCell(new CellModel(COLUMN1, TIMESTAMP1, VALUE1));
+ return model;
+ }
+
+ protected void checkModel(RowModel model) {
+ assertTrue(Bytes.equals(ROW1, model.getKey()));
+ Iterator<CellModel> cells = model.getCells().iterator();
+ CellModel cell = cells.next();
+ assertTrue(Bytes.equals(COLUMN1, cell.getColumn()));
+ assertTrue(Bytes.equals(VALUE1, cell.getValue()));
+ assertTrue(cell.hasUserTimestamp());
+ assertEquals(TIMESTAMP1, cell.getTimestamp());
+ assertFalse(cells.hasNext());
+ }
+
+ @Override
+ public void testFromPB() throws Exception {
+ // Do nothing: RowModel has no PB representation.
+ }
+}
+
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java
new file mode 100644
index 0000000..988872e
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java
@@ -0,0 +1,109 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestScannerModel extends TestModelBase<ScannerModel> {
+ private static final String PRIVATE = "private";
+ private static final String PUBLIC = "public";
+ private static final byte[] START_ROW = Bytes.toBytes("abracadabra");
+ private static final byte[] END_ROW = Bytes.toBytes("zzyzx");
+ private static final byte[] COLUMN1 = Bytes.toBytes("column1");
+ private static final byte[] COLUMN2 = Bytes.toBytes("column2:foo");
+ private static final long START_TIME = 1245219839331L;
+ private static final long END_TIME = 1245393318192L;
+ private static final int CACHING = 1000;
+ private static final int BATCH = 100;
+ private static final boolean CACHE_BLOCKS = false;
+
+ public TestScannerModel() throws Exception {
+ super(ScannerModel.class);
+ AS_XML = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>"
+ + "<Scanner batch=\"100\" cacheBlocks=\"false\" caching=\"1000\" endRow=\"enp5eng=\" "
+ + "endTime=\"1245393318192\" maxVersions=\"2147483647\" startRow=\"YWJyYWNhZGFicmE=\" "
+ + "startTime=\"1245219839331\">"
+ + "<column>Y29sdW1uMQ==</column><column>Y29sdW1uMjpmb28=</column>"
+ + "<labels>private</labels><labels>public</labels>"
+ + "</Scanner>";
+
+ AS_JSON = "{\"batch\":100,\"caching\":1000,\"cacheBlocks\":false,\"endRow\":\"enp5eng=\","
+ + "\"endTime\":1245393318192,\"maxVersions\":2147483647,\"startRow\":\"YWJyYWNhZGFicmE=\","
+ + "\"startTime\":1245219839331,\"column\":[\"Y29sdW1uMQ==\",\"Y29sdW1uMjpmb28=\"],"
+ +"\"labels\":[\"private\",\"public\"]"
+ +"}";
+
+ AS_PB = "CgthYnJhY2FkYWJyYRIFenp5engaB2NvbHVtbjEaC2NvbHVtbjI6Zm9vIGQo47qL554kMLDi57mf"
+ + "JDj/////B0joB1IHcHJpdmF0ZVIGcHVibGljWAA=";
+ }
+
+ protected ScannerModel buildTestModel() {
+ ScannerModel model = new ScannerModel();
+ model.setStartRow(START_ROW);
+ model.setEndRow(END_ROW);
+ model.addColumn(COLUMN1);
+ model.addColumn(COLUMN2);
+ model.setStartTime(START_TIME);
+ model.setEndTime(END_TIME);
+ model.setBatch(BATCH);
+ model.setCaching(CACHING);
+ model.addLabel(PRIVATE);
+ model.addLabel(PUBLIC);
+ model.setCacheBlocks(CACHE_BLOCKS);
+ return model;
+ }
+
+ protected void checkModel(ScannerModel model) {
+ assertTrue(Bytes.equals(model.getStartRow(), START_ROW));
+ assertTrue(Bytes.equals(model.getEndRow(), END_ROW));
+ boolean foundCol1 = false, foundCol2 = false;
+ for (byte[] column : model.getColumns()) {
+ if (Bytes.equals(column, COLUMN1)) {
+ foundCol1 = true;
+ } else if (Bytes.equals(column, COLUMN2)) {
+ foundCol2 = true;
+ }
+ }
+ assertTrue(foundCol1);
+ assertTrue(foundCol2);
+ assertEquals(START_TIME, model.getStartTime());
+ assertEquals(END_TIME, model.getEndTime());
+ assertEquals(BATCH, model.getBatch());
+ assertEquals(CACHING, model.getCaching());
+ assertEquals(CACHE_BLOCKS, model.getCacheBlocks());
+ boolean foundLabel1 = false;
+ boolean foundLabel2 = false;
+ if (model.getLabels() != null && model.getLabels().size() > 0) {
+ for (String label : model.getLabels()) {
+ if (label.equals(PRIVATE)) {
+ foundLabel1 = true;
+ } else if (label.equals(PUBLIC)) {
+ foundLabel2 = true;
+ }
+ }
+ assertTrue(foundLabel1);
+ assertTrue(foundLabel2);
+ }
+ }
+
+}
[17/38] HBASE-12197 Move rest to its own module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
deleted file mode 100644
index fbede44..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
+++ /dev/null
@@ -1,825 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.client;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Append;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.Increment;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Row;
-import org.apache.hadoop.hbase.client.RowMutations;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.coprocessor.Batch;
-import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.io.TimeRange;
-import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-import org.apache.hadoop.hbase.rest.Constants;
-import org.apache.hadoop.hbase.rest.model.CellModel;
-import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
-import org.apache.hadoop.hbase.rest.model.ScannerModel;
-import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.util.StringUtils;
-
-import com.google.protobuf.Descriptors;
-import com.google.protobuf.Message;
-import com.google.protobuf.Service;
-import com.google.protobuf.ServiceException;
-
-/**
- * HTable interface to remote tables accessed via REST gateway
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class RemoteHTable implements HTableInterface {
-
- private static final Log LOG = LogFactory.getLog(RemoteHTable.class);
-
- final Client client;
- final Configuration conf;
- final byte[] name;
- final int maxRetries;
- final long sleepTime;
-
- @SuppressWarnings("rawtypes")
- protected String buildRowSpec(final byte[] row, final Map familyMap,
- final long startTime, final long endTime, final int maxVersions) {
- StringBuffer sb = new StringBuffer();
- sb.append('/');
- sb.append(Bytes.toStringBinary(name));
- sb.append('/');
- sb.append(Bytes.toStringBinary(row));
- Set families = familyMap.entrySet();
- if (families != null) {
- Iterator i = familyMap.entrySet().iterator();
- sb.append('/');
- while (i.hasNext()) {
- Map.Entry e = (Map.Entry)i.next();
- Collection quals = (Collection)e.getValue();
- if (quals == null || quals.isEmpty()) {
- // this is an unqualified family. append the family name and NO ':'
- sb.append(Bytes.toStringBinary((byte[])e.getKey()));
- } else {
- Iterator ii = quals.iterator();
- while (ii.hasNext()) {
- sb.append(Bytes.toStringBinary((byte[])e.getKey()));
- sb.append(':');
- Object o = ii.next();
- // Puts use byte[] but Deletes use KeyValue
- if (o instanceof byte[]) {
- sb.append(Bytes.toStringBinary((byte[])o));
- } else if (o instanceof KeyValue) {
- sb.append(Bytes.toStringBinary(((KeyValue)o).getQualifier()));
- } else {
- throw new RuntimeException("object type not handled");
- }
- if (ii.hasNext()) {
- sb.append(',');
- }
- }
- }
- if (i.hasNext()) {
- sb.append(',');
- }
- }
- }
- if (startTime >= 0 && endTime != Long.MAX_VALUE) {
- sb.append('/');
- sb.append(startTime);
- if (startTime != endTime) {
- sb.append(',');
- sb.append(endTime);
- }
- } else if (endTime != Long.MAX_VALUE) {
- sb.append('/');
- sb.append(endTime);
- }
- if (maxVersions > 1) {
- sb.append("?v=");
- sb.append(maxVersions);
- }
- return sb.toString();
- }
-
- protected String buildMultiRowSpec(final byte[][] rows, int maxVersions) {
- StringBuilder sb = new StringBuilder();
- sb.append('/');
- sb.append(Bytes.toStringBinary(name));
- sb.append("/multiget/");
- if (rows == null || rows.length == 0) {
- return sb.toString();
- }
- sb.append("?");
- for(int i=0; i<rows.length; i++) {
- byte[] rk = rows[i];
- if (i != 0) {
- sb.append('&');
- }
- sb.append("row=");
- sb.append(Bytes.toStringBinary(rk));
- }
- sb.append("&v=");
- sb.append(maxVersions);
-
- return sb.toString();
- }
-
- protected Result[] buildResultFromModel(final CellSetModel model) {
- List<Result> results = new ArrayList<Result>();
- for (RowModel row: model.getRows()) {
- List<Cell> kvs = new ArrayList<Cell>();
- for (CellModel cell: row.getCells()) {
- byte[][] split = KeyValue.parseColumn(cell.getColumn());
- byte[] column = split[0];
- byte[] qualifier = null;
- if (split.length == 1) {
- qualifier = HConstants.EMPTY_BYTE_ARRAY;
- } else if (split.length == 2) {
- qualifier = split[1];
- } else {
- throw new IllegalArgumentException("Invalid familyAndQualifier provided.");
- }
- kvs.add(new KeyValue(row.getKey(), column, qualifier,
- cell.getTimestamp(), cell.getValue()));
- }
- results.add(Result.create(kvs));
- }
- return results.toArray(new Result[results.size()]);
- }
-
- protected CellSetModel buildModelFromPut(Put put) {
- RowModel row = new RowModel(put.getRow());
- long ts = put.getTimeStamp();
- for (List<Cell> cells: put.getFamilyCellMap().values()) {
- for (Cell cell: cells) {
- KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
- row.addCell(new CellModel(kv.getFamily(), kv.getQualifier(),
- ts != HConstants.LATEST_TIMESTAMP ? ts : kv.getTimestamp(),
- kv.getValue()));
- }
- }
- CellSetModel model = new CellSetModel();
- model.addRow(row);
- return model;
- }
-
- /**
-  * Constructor
-  * @param client the REST client instance
-  * @param name the table name
-  */
- public RemoteHTable(Client client, String name) {
- this(client, HBaseConfiguration.create(), Bytes.toBytes(name));
- }
-
- /**
-  * Constructor
-  * @param client the REST client instance
-  * @param conf the HBase configuration
-  * @param name the table name
-  */
- public RemoteHTable(Client client, Configuration conf, String name) {
- this(client, conf, Bytes.toBytes(name));
- }
-
- /**
-  * Constructor
-  * @param client the REST client instance
-  * @param conf the HBase configuration
-  * @param name the table name
-  */
- public RemoteHTable(Client client, Configuration conf, byte[] name) {
- this.client = client;
- this.conf = conf;
- this.name = name;
- this.maxRetries = conf.getInt("hbase.rest.client.max.retries", 10);
- this.sleepTime = conf.getLong("hbase.rest.client.sleep", 1000);
- }
-
- public byte[] getTableName() {
- return name.clone();
- }
-
- @Override
- public TableName getName() {
- return TableName.valueOf(name);
- }
-
- public Configuration getConfiguration() {
- return conf;
- }
-
- public HTableDescriptor getTableDescriptor() throws IOException {
- StringBuilder sb = new StringBuilder();
- sb.append('/');
- sb.append(Bytes.toStringBinary(name));
- sb.append('/');
- sb.append("schema");
- for (int i = 0; i < maxRetries; i++) {
- Response response = client.get(sb.toString(), Constants.MIMETYPE_PROTOBUF);
- int code = response.getCode();
- switch (code) {
- case 200:
- TableSchemaModel schema = new TableSchemaModel();
- schema.getObjectFromMessage(response.getBody());
- return schema.getTableDescriptor();
- case 509:
- try {
- Thread.sleep(sleepTime);
- } catch (InterruptedException e) { }
- break;
- default:
- throw new IOException("schema request returned " + code);
- }
- }
- throw new IOException("schema request timed out");
- }
-
- public void close() throws IOException {
- client.shutdown();
- }
-
- public Result get(Get get) throws IOException {
- TimeRange range = get.getTimeRange();
- String spec = buildRowSpec(get.getRow(), get.getFamilyMap(),
- range.getMin(), range.getMax(), get.getMaxVersions());
- if (get.getFilter() != null) {
- LOG.warn("filters not supported on gets");
- }
- Result[] results = getResults(spec);
- if (results.length > 0) {
- if (results.length > 1) {
- LOG.warn("too many results for get (" + results.length + ")");
- }
- return results[0];
- } else {
- return new Result();
- }
- }
-
- public Result[] get(List<Get> gets) throws IOException {
- byte[][] rows = new byte[gets.size()][];
- int maxVersions = 1;
- int count = 0;
-
- for(Get g:gets) {
-
- if ( count == 0 ) {
- maxVersions = g.getMaxVersions();
- } else if (g.getMaxVersions() != maxVersions) {
- LOG.warn("MaxVersions on Gets do not match, using the first in the list ("+maxVersions+")");
- }
-
- if (g.getFilter() != null) {
- LOG.warn("filters not supported on gets");
- }
-
- rows[count] = g.getRow();
- count ++;
- }
-
- String spec = buildMultiRowSpec(rows, maxVersions);
-
- return getResults(spec);
- }
-
- private Result[] getResults(String spec) throws IOException {
- for (int i = 0; i < maxRetries; i++) {
- Response response = client.get(spec, Constants.MIMETYPE_PROTOBUF);
- int code = response.getCode();
- switch (code) {
- case 200:
- CellSetModel model = new CellSetModel();
- model.getObjectFromMessage(response.getBody());
- Result[] results = buildResultFromModel(model);
- if ( results.length > 0) {
- return results;
- }
- // fall through
- case 404:
- return new Result[0];
-
- case 509:
- try {
- Thread.sleep(sleepTime);
- } catch (InterruptedException e) { }
- break;
- default:
- throw new IOException("get request returned " + code);
- }
- }
- throw new IOException("get request timed out");
- }
-
- public boolean exists(Get get) throws IOException {
- LOG.warn("exists() is really get(), just use get()");
- Result result = get(get);
- return (result != null && !(result.isEmpty()));
- }
-
- /**
- * exists(List) is really a list of get() calls. Just use get().
- * @param gets list of Get to test for the existence
- */
- public Boolean[] exists(List<Get> gets) throws IOException {
- LOG.warn("exists(List<Get>) is really list of get() calls, just use get()");
- Boolean[] results = new Boolean[gets.size()];
- for (int i = 0; i < results.length; i++) {
- results[i] = exists(gets.get(i));
- }
- return results;
- }
-
- public void put(Put put) throws IOException {
- CellSetModel model = buildModelFromPut(put);
- StringBuilder sb = new StringBuilder();
- sb.append('/');
- sb.append(Bytes.toStringBinary(name));
- sb.append('/');
- sb.append(Bytes.toStringBinary(put.getRow()));
- for (int i = 0; i < maxRetries; i++) {
- Response response = client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF,
- model.createProtobufOutput());
- int code = response.getCode();
- switch (code) {
- case 200:
- return;
- case 509:
- try {
- Thread.sleep(sleepTime);
- } catch (InterruptedException e) { }
- break;
- default:
- throw new IOException("put request failed with " + code);
- }
- }
- throw new IOException("put request timed out");
- }
-
- public void put(List<Put> puts) throws IOException {
- // this is a trick: The gateway accepts multiple rows in a cell set and
- // ignores the row specification in the URI
-
- // separate puts by row
- TreeMap<byte[],List<Cell>> map =
- new TreeMap<byte[],List<Cell>>(Bytes.BYTES_COMPARATOR);
- for (Put put: puts) {
- byte[] row = put.getRow();
- List<Cell> cells = map.get(row);
- if (cells == null) {
- cells = new ArrayList<Cell>();
- map.put(row, cells);
- }
- for (List<Cell> l: put.getFamilyCellMap().values()) {
- cells.addAll(l);
- }
- }
-
- // build the cell set
- CellSetModel model = new CellSetModel();
- for (Map.Entry<byte[], List<Cell>> e: map.entrySet()) {
- RowModel row = new RowModel(e.getKey());
- for (Cell cell: e.getValue()) {
- KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
- row.addCell(new CellModel(kv));
- }
- model.addRow(row);
- }
-
- // build path for multiput
- StringBuilder sb = new StringBuilder();
- sb.append('/');
- sb.append(Bytes.toStringBinary(name));
- sb.append("/$multiput"); // can be any nonexistent row
- for (int i = 0; i < maxRetries; i++) {
- Response response = client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF,
- model.createProtobufOutput());
- int code = response.getCode();
- switch (code) {
- case 200:
- return;
- case 509:
- try {
- Thread.sleep(sleepTime);
- } catch (InterruptedException e) { }
- break;
- default:
- throw new IOException("multiput request failed with " + code);
- }
- }
- throw new IOException("multiput request timed out");
- }
-
- public void delete(Delete delete) throws IOException {
- String spec = buildRowSpec(delete.getRow(), delete.getFamilyCellMap(),
- delete.getTimeStamp(), delete.getTimeStamp(), 1);
- for (int i = 0; i < maxRetries; i++) {
- Response response = client.delete(spec);
- int code = response.getCode();
- switch (code) {
- case 200:
- return;
- case 509:
- try {
- Thread.sleep(sleepTime);
- } catch (InterruptedException e) { }
- break;
- default:
- throw new IOException("delete request failed with " + code);
- }
- }
- throw new IOException("delete request timed out");
- }
-
- public void delete(List<Delete> deletes) throws IOException {
- for (Delete delete: deletes) {
- delete(delete);
- }
- }
-
- public void flushCommits() throws IOException {
- // no-op
- }
-
- class Scanner implements ResultScanner {
-
- String uri;
-
- public Scanner(Scan scan) throws IOException {
- ScannerModel model;
- try {
- model = ScannerModel.fromScan(scan);
- } catch (Exception e) {
- throw new IOException(e);
- }
- StringBuffer sb = new StringBuffer();
- sb.append('/');
- sb.append(Bytes.toStringBinary(name));
- sb.append('/');
- sb.append("scanner");
- for (int i = 0; i < maxRetries; i++) {
- Response response = client.post(sb.toString(),
- Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
- int code = response.getCode();
- switch (code) {
- case 201:
- uri = response.getLocation();
- return;
- case 509:
- try {
- Thread.sleep(sleepTime);
- } catch (InterruptedException e) { }
- break;
- default:
- throw new IOException("scan request failed with " + code);
- }
- }
- throw new IOException("scan request timed out");
- }
-
- @Override
- public Result[] next(int nbRows) throws IOException {
- StringBuilder sb = new StringBuilder(uri);
- sb.append("?n=");
- sb.append(nbRows);
- for (int i = 0; i < maxRetries; i++) {
- Response response = client.get(sb.toString(),
- Constants.MIMETYPE_PROTOBUF);
- int code = response.getCode();
- switch (code) {
- case 200:
- CellSetModel model = new CellSetModel();
- model.getObjectFromMessage(response.getBody());
- return buildResultFromModel(model);
- case 204:
- case 206:
- return null;
- case 509:
- try {
- Thread.sleep(sleepTime);
- } catch (InterruptedException e) { }
- break;
- default:
- throw new IOException("scanner.next request failed with " + code);
- }
- }
- throw new IOException("scanner.next request timed out");
- }
-
- @Override
- public Result next() throws IOException {
- Result[] results = next(1);
- if (results == null || results.length < 1) {
- return null;
- }
- return results[0];
- }
-
- class Iter implements Iterator<Result> {
-
- Result cache;
-
- public Iter() {
- try {
- cache = Scanner.this.next();
- } catch (IOException e) {
- LOG.warn(StringUtils.stringifyException(e));
- }
- }
-
- @Override
- public boolean hasNext() {
- return cache != null;
- }
-
- @Override
- public Result next() {
- Result result = cache;
- try {
- cache = Scanner.this.next();
- } catch (IOException e) {
- LOG.warn(StringUtils.stringifyException(e));
- cache = null;
- }
- return result;
- }
-
- @Override
- public void remove() {
- throw new RuntimeException("remove() not supported");
- }
-
- }
-
- @Override
- public Iterator<Result> iterator() {
- return new Iter();
- }
-
- @Override
- public void close() {
- try {
- client.delete(uri);
- } catch (IOException e) {
- LOG.warn(StringUtils.stringifyException(e));
- }
- }
-
- }
-
- public ResultScanner getScanner(Scan scan) throws IOException {
- return new Scanner(scan);
- }
-
- public ResultScanner getScanner(byte[] family) throws IOException {
- Scan scan = new Scan();
- scan.addFamily(family);
- return new Scanner(scan);
- }
-
- public ResultScanner getScanner(byte[] family, byte[] qualifier)
- throws IOException {
- Scan scan = new Scan();
- scan.addColumn(family, qualifier);
- return new Scanner(scan);
- }
-
- public boolean isAutoFlush() {
- return true;
- }
-
- public Result getRowOrBefore(byte[] row, byte[] family) throws IOException {
- throw new IOException("getRowOrBefore not supported");
- }
-
- public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
- byte[] value, Put put) throws IOException {
- // column used to check the value
- put.add(new KeyValue(row, family, qualifier, value));
-
- CellSetModel model = buildModelFromPut(put);
- StringBuilder sb = new StringBuilder();
- sb.append('/');
- sb.append(Bytes.toStringBinary(name));
- sb.append('/');
- sb.append(Bytes.toStringBinary(put.getRow()));
- sb.append("?check=put");
-
- for (int i = 0; i < maxRetries; i++) {
- Response response = client.put(sb.toString(),
- Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
- int code = response.getCode();
- switch (code) {
- case 200:
- return true;
- case 304: // NOT-MODIFIED
- return false;
- case 509:
- try {
- Thread.sleep(sleepTime);
- } catch (final InterruptedException e) {
- }
- break;
- default:
- throw new IOException("checkAndPut request failed with " + code);
- }
- }
- throw new IOException("checkAndPut request timed out");
- }
-
- public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
- byte[] value, Delete delete) throws IOException {
- Put put = new Put(row);
- // column used to check the value
- put.add(new KeyValue(row, family, qualifier, value));
- CellSetModel model = buildModelFromPut(put);
- StringBuilder sb = new StringBuilder();
- sb.append('/');
- sb.append(Bytes.toStringBinary(name));
- sb.append('/');
- sb.append(Bytes.toStringBinary(row));
- sb.append("?check=delete");
-
- for (int i = 0; i < maxRetries; i++) {
- Response response = client.put(sb.toString(),
- Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
- int code = response.getCode();
- switch (code) {
- case 200:
- return true;
- case 304: // NOT-MODIFIED
- return false;
- case 509:
- try {
- Thread.sleep(sleepTime);
- } catch (final InterruptedException e) {
- }
- break;
- default:
- throw new IOException("checkAndDelete request failed with " + code);
- }
- }
- throw new IOException("checkAndDelete request timed out");
- }
-
- public Result increment(Increment increment) throws IOException {
- throw new IOException("Increment not supported");
- }
-
- public Result append(Append append) throws IOException {
- throw new IOException("Append not supported");
- }
-
- public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
- long amount) throws IOException {
- throw new IOException("incrementColumnValue not supported");
- }
-
- public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
- long amount, Durability durability) throws IOException {
- throw new IOException("incrementColumnValue not supported");
- }
-
- @Override
- public void batch(List<? extends Row> actions, Object[] results) throws IOException {
- throw new IOException("batch not supported");
- }
-
- @Override
- public Object[] batch(List<? extends Row> actions) throws IOException {
- throw new IOException("batch not supported");
- }
-
- @Override
- public <R> void batchCallback(List<? extends Row> actions, Object[] results,
- Batch.Callback<R> callback) throws IOException, InterruptedException {
- throw new IOException("batchCallback not supported");
- }
-
- @Override
- public <R> Object[] batchCallback(List<? extends Row> actions, Batch.Callback<R> callback)
- throws IOException, InterruptedException {
- throw new IOException("batchCallback not supported");
- }
-
- @Override
- public CoprocessorRpcChannel coprocessorService(byte[] row) {
- throw new UnsupportedOperationException("coprocessorService not implemented");
- }
-
- @Override
- public <T extends Service, R> Map<byte[], R> coprocessorService(Class<T> service,
- byte[] startKey, byte[] endKey, Batch.Call<T, R> callable)
- throws ServiceException, Throwable {
- throw new UnsupportedOperationException("coprocessorService not implemented");
- }
-
- @Override
- public <T extends Service, R> void coprocessorService(Class<T> service,
- byte[] startKey, byte[] endKey, Batch.Call<T, R> callable, Batch.Callback<R> callback)
- throws ServiceException, Throwable {
- throw new UnsupportedOperationException("coprocessorService not implemented");
- }
-
- @Override
- public void mutateRow(RowMutations rm) throws IOException {
- throw new IOException("atomicMutation not supported");
- }
-
- @Override
- public void setAutoFlush(boolean autoFlush) {
- throw new UnsupportedOperationException("setAutoFlush not implemented");
- }
-
- @Override
- public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) {
- throw new UnsupportedOperationException("setAutoFlush not implemented");
- }
-
- @Override
- public void setAutoFlushTo(boolean autoFlush) {
- throw new UnsupportedOperationException("setAutoFlushTo not implemented");
- }
-
- @Override
- public long getWriteBufferSize() {
- throw new UnsupportedOperationException("getWriteBufferSize not implemented");
- }
-
- @Override
- public void setWriteBufferSize(long writeBufferSize) throws IOException {
- throw new IOException("setWriteBufferSize not supported");
- }
-
- @Override
- public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
- long amount, boolean writeToWAL) throws IOException {
- throw new IOException("incrementColumnValue not supported");
- }
-
- @Override
- public <R extends Message> Map<byte[], R> batchCoprocessorService(
- Descriptors.MethodDescriptor method, Message request,
- byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable {
- throw new UnsupportedOperationException("batchCoprocessorService not implemented");
- }
-
- @Override
- public <R extends Message> void batchCoprocessorService(
- Descriptors.MethodDescriptor method, Message request,
- byte[] startKey, byte[] endKey, R responsePrototype, Callback<R> callback)
- throws ServiceException, Throwable {
- throw new UnsupportedOperationException("batchCoprocessorService not implemented");
- }
-
- @Override
- public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp,
- byte[] value, RowMutations mutation) throws IOException {
- throw new UnsupportedOperationException("checkAndMutate not implemented");
- }
-}
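For context, RemoteHTable is driven through the REST client like any other
HTableInterface: construct a Client against a running gateway, wrap it in a
RemoteHTable, and issue normal Get/Put/Scan calls. A minimal usage sketch
(the host, port, table name, and row key below are assumptions for
illustration, not part of this patch):

    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.rest.client.Client;
    import org.apache.hadoop.hbase.rest.client.Cluster;
    import org.apache.hadoop.hbase.rest.client.RemoteHTable;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RemoteHTableExample {
      public static void main(String[] args) throws Exception {
        // Point the REST client at a running gateway (host/port assumed).
        Client client = new Client(new Cluster().add("localhost", 8080));
        RemoteHTable table = new RemoteHTable(client, "mytable");
        try {
          Result result = table.get(new Get(Bytes.toBytes("row1")));
          if (!result.isEmpty()) {
            System.out.println(Bytes.toStringBinary(result.getRow()));
          }
        } finally {
          table.close(); // shuts down the underlying REST client
        }
      }
    }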
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java
deleted file mode 100644
index 871b646..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.client;
-
-import java.io.InputStream;
-
-import org.apache.commons.httpclient.Header;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-
-/**
- * The HTTP result code, response headers, and body of an HTTP response.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class Response {
- private int code;
- private Header[] headers;
- private byte[] body;
- private InputStream stream;
-
- /**
- * Constructor
- * @param code the HTTP response code
- */
- public Response(int code) {
- this(code, null, null);
- }
-
- /**
- * Constructor
- * @param code the HTTP response code
- * @param headers the HTTP response headers
- */
- public Response(int code, Header[] headers) {
- this(code, headers, null);
- }
-
- /**
- * Constructor
- * @param code the HTTP response code
- * @param headers the HTTP response headers
- * @param body the response body, can be null
- */
- public Response(int code, Header[] headers, byte[] body) {
- this.code = code;
- this.headers = headers;
- this.body = body;
- }
-
- /**
- * Constructor
- * @param code the HTTP response code
- * @param headers the HTTP response headers
- * @param body the response body, can be null
- * @param in the InputStream, if the response had one
- */
- public Response(int code, Header[] headers, byte[] body, InputStream in) {
- this.code = code;
- this.headers = headers;
- this.body = body;
- this.stream = in;
- }
-
- /**
- * @return the HTTP response code
- */
- public int getCode() {
- return code;
- }
-
- /**
- * Gets the input stream instance.
- *
- * @return an instance of InputStream class.
- */
- public InputStream getStream(){
- return this.stream;
- }
-
- /**
- * @return the HTTP response headers
- */
- public Header[] getHeaders() {
- return headers;
- }
-
- public String getHeader(String key) {
- for (Header header: headers) {
- if (header.getName().equalsIgnoreCase(key)) {
- return header.getValue();
- }
- }
- return null;
- }
-
- /**
- * @return the value of the Location header
- */
- public String getLocation() {
- return getHeader("Location");
- }
-
- /**
- * @return true if a response body was sent
- */
- public boolean hasBody() {
- return body != null;
- }
-
- /**
- * @return the HTTP response body
- */
- public byte[] getBody() {
- return body;
- }
-
- /**
- * @param code the HTTP response code
- */
- public void setCode(int code) {
- this.code = code;
- }
-
- /**
- * @param headers the HTTP response headers
- */
- public void setHeaders(Header[] headers) {
- this.headers = headers;
- }
-
- /**
- * @param body the response body
- */
- public void setBody(byte[] body) {
- this.body = body;
- }
-}
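The Response accessors above are what the retry loops in RemoteHTable key on:
getCode() selects the switch arm, while getBody() and getLocation() carry the
payload. A condensed sketch of that pattern, under the assumption that a
configured Client and resource path are supplied by the caller:

    import java.io.IOException;

    import org.apache.hadoop.hbase.rest.Constants;
    import org.apache.hadoop.hbase.rest.client.Client;
    import org.apache.hadoop.hbase.rest.client.Response;

    public class ResponseHandling {
      // Fetch a protobuf resource, retrying while the gateway reports 509 (busy).
      static byte[] fetch(Client client, String path)
          throws IOException, InterruptedException {
        for (int i = 0; i < 10; i++) {
          Response response = client.get(path, Constants.MIMETYPE_PROTOBUF);
          switch (response.getCode()) {
          case 200:
            return response.getBody();
          case 509:
            Thread.sleep(1000); // gateway busy; back off and retry
            break;
          default:
            throw new IOException("request failed with " + response.getCode());
          }
        }
        throw new IOException("request timed out");
      }
    }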
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java
deleted file mode 100644
index 6d68cdd..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.rest.filter;
-
-import static org.apache.hadoop.hbase.rest.Constants.REST_AUTHENTICATION_PRINCIPAL;
-import static org.apache.hadoop.hbase.rest.Constants.REST_DNS_INTERFACE;
-import static org.apache.hadoop.hbase.rest.Constants.REST_DNS_NAMESERVER;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.Properties;
-
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.util.Strings;
-import org.apache.hadoop.net.DNS;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
-
-public class AuthFilter extends AuthenticationFilter {
- private static final Log LOG = LogFactory.getLog(AuthFilter.class);
- private static final String REST_PREFIX = "hbase.rest.authentication.";
- private static final int REST_PREFIX_LEN = REST_PREFIX.length();
-
- /**
- * Returns the configuration to be used by the authentication filter
- * to initialize the authentication handler.
- *
- * This filter retrieves all HBase configuration properties and passes those
- * starting with REST_PREFIX to the authentication handler. This makes it
- * possible to plug in different authentication handlers.
- */
- @Override
- protected Properties getConfiguration(
- String configPrefix, FilterConfig filterConfig) throws ServletException {
- Properties props = super.getConfiguration(configPrefix, filterConfig);
- // Set the cookie path to root '/' so it is used for all resources.
- props.setProperty(AuthenticationFilter.COOKIE_PATH, "/");
-
- Configuration conf = HBaseConfiguration.create();
- for (Map.Entry<String, String> entry : conf) {
- String name = entry.getKey();
- if (name.startsWith(REST_PREFIX)) {
- String value = entry.getValue();
- if(name.equals(REST_AUTHENTICATION_PRINCIPAL)) {
- try {
- String machineName = Strings.domainNamePointerToHostName(
- DNS.getDefaultHost(conf.get(REST_DNS_INTERFACE, "default"),
- conf.get(REST_DNS_NAMESERVER, "default")));
- value = SecurityUtil.getServerPrincipal(value, machineName);
- } catch (IOException ie) {
- throw new ServletException("Failed to retrieve server principal", ie);
- }
- }
- LOG.debug("Setting property " + name + "=" + value);
- name = name.substring(REST_PREFIX_LEN);
- props.setProperty(name, value);
- }
- }
- return props;
- }
-}
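The effect of getConfiguration() above is a simple prefix strip: any HBase
property under "hbase.rest.authentication." reaches the Hadoop authentication
handler with the prefix removed. A small sketch of the mapping (the property
value below is illustrative, not mandated by this patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class AuthFilterPrefixDemo {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // What an operator sets in hbase-site.xml...
        conf.set("hbase.rest.authentication.type", "kerberos");
        // ...is what AuthFilter forwards to the handler as just "type".
        String name = "hbase.rest.authentication.type";
        String stripped = name.substring("hbase.rest.authentication.".length());
        System.out.println(stripped + "=" + conf.get(name)); // type=kerberos
      }
    }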
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java
deleted file mode 100644
index 02957e9..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.filter;
-
-import java.io.IOException;
-import java.util.zip.GZIPInputStream;
-
-import javax.servlet.ServletInputStream;
-import javax.servlet.http.HttpServletRequest;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-@InterfaceAudience.Private
-public class GZIPRequestStream extends ServletInputStream
-{
- private GZIPInputStream in;
-
- public GZIPRequestStream(HttpServletRequest request) throws IOException {
- this.in = new GZIPInputStream(request.getInputStream());
- }
-
- @Override
- public int read() throws IOException {
- return in.read();
- }
-
- @Override
- public int read(byte[] b) throws IOException {
- return in.read(b);
- }
-
- @Override
- public int read(byte[] b, int off, int len) throws IOException {
- return in.read(b, off, len);
- }
-
- @Override
- public void close() throws IOException {
- in.close();
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java
deleted file mode 100644
index 361e442..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.filter;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStreamReader;
-
-import javax.servlet.ServletInputStream;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletRequestWrapper;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-@InterfaceAudience.Private
-public class GZIPRequestWrapper extends HttpServletRequestWrapper {
- private ServletInputStream is;
- private BufferedReader reader;
-
- public GZIPRequestWrapper(HttpServletRequest request) throws IOException {
- super(request);
- this.is = new GZIPRequestStream(request);
- this.reader = new BufferedReader(new InputStreamReader(this.is));
- }
-
- @Override
- public ServletInputStream getInputStream() throws IOException {
- return is;
- }
-
- @Override
- public BufferedReader getReader() throws IOException {
- return reader;
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java
deleted file mode 100644
index cc74f9c..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.filter;
-
-import java.io.IOException;
-import java.util.zip.GZIPOutputStream;
-
-import javax.servlet.ServletOutputStream;
-import javax.servlet.http.HttpServletResponse;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-@InterfaceAudience.Private
-public class GZIPResponseStream extends ServletOutputStream
-{
- private HttpServletResponse response;
- private GZIPOutputStream out;
-
- public GZIPResponseStream(HttpServletResponse response) throws IOException {
- this.response = response;
- this.out = new GZIPOutputStream(response.getOutputStream());
- response.addHeader("Content-Encoding", "gzip");
- }
-
- public void resetBuffer() {
- if (out != null && !response.isCommitted()) {
- response.setHeader("Content-Encoding", null);
- }
- out = null;
- }
-
- @Override
- public void write(int b) throws IOException {
- out.write(b);
- }
-
- @Override
- public void write(byte[] b) throws IOException {
- out.write(b);
- }
-
- @Override
- public void write(byte[] b, int off, int len) throws IOException {
- out.write(b, off, len);
- }
-
- @Override
- public void close() throws IOException {
- finish();
- out.close();
- }
-
- @Override
- public void flush() throws IOException {
- out.flush();
- }
-
- public void finish() throws IOException {
- out.finish();
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java
deleted file mode 100644
index 2cfea1b..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.filter;
-
-import java.io.IOException;
-import java.io.PrintWriter;
-
-import javax.servlet.ServletOutputStream;
-import javax.servlet.http.HttpServletResponse;
-import javax.servlet.http.HttpServletResponseWrapper;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-@InterfaceAudience.Private
-public class GZIPResponseWrapper extends HttpServletResponseWrapper {
- private HttpServletResponse response;
- private ServletOutputStream os;
- private PrintWriter writer;
- private boolean compress = true;
-
- public GZIPResponseWrapper(HttpServletResponse response) {
- super(response);
- this.response = response;
- }
-
- @Override
- public void setStatus(int status) {
- super.setStatus(status);
- if (status < 200 || status >= 300) {
- compress = false;
- }
- }
-
- @Override
- public void addHeader(String name, String value) {
- if (!"content-length".equalsIgnoreCase(name)) {
- super.addHeader(name, value);
- }
- }
-
- @Override
- public void setContentLength(int length) {
- // do nothing
- }
-
- @Override
- public void setIntHeader(String name, int value) {
- if (!"content-length".equalsIgnoreCase(name)) {
- super.setIntHeader(name, value);
- }
- }
-
- @Override
- public void setHeader(String name, String value) {
- if (!"content-length".equalsIgnoreCase(name)) {
- super.setHeader(name, value);
- }
- }
-
- @Override
- public void flushBuffer() throws IOException {
- if (writer != null) {
- writer.flush();
- }
- if (os != null && (os instanceof GZIPResponseStream)) {
- ((GZIPResponseStream)os).finish();
- } else {
- getResponse().flushBuffer();
- }
- }
-
- @Override
- public void reset() {
- super.reset();
- if (os != null && (os instanceof GZIPResponseStream)) {
- ((GZIPResponseStream)os).resetBuffer();
- }
- writer = null;
- os = null;
- compress = true;
- }
-
- @Override
- public void resetBuffer() {
- super.resetBuffer();
- if (os != null && (os instanceof GZIPResponseStream)) {
- ((GZIPResponseStream)os).resetBuffer();
- }
- writer = null;
- os = null;
- }
-
- @Override
- public void sendError(int status, String msg) throws IOException {
- resetBuffer();
- super.sendError(status, msg);
- }
-
- @Override
- public void sendError(int status) throws IOException {
- resetBuffer();
- super.sendError(status);
- }
-
- @Override
- public void sendRedirect(String location) throws IOException {
- resetBuffer();
- super.sendRedirect(location);
- }
-
- @Override
- public ServletOutputStream getOutputStream() throws IOException {
- if (os == null) {
- if (!response.isCommitted() && compress) {
- os = (ServletOutputStream)new GZIPResponseStream(response);
- } else {
- os = response.getOutputStream();
- }
- }
- return os;
- }
-
- @Override
- public PrintWriter getWriter() throws IOException {
- if (writer == null) {
- writer = new PrintWriter(getOutputStream());
- }
- return writer;
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java
deleted file mode 100644
index 4995b86..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.filter;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.StringTokenizer;
-
-import javax.servlet.Filter;
-import javax.servlet.FilterChain;
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
-public class GzipFilter implements Filter {
- private Set<String> mimeTypes = new HashSet<String>();
-
- @Override
- public void init(FilterConfig filterConfig) throws ServletException {
- String s = filterConfig.getInitParameter("mimeTypes");
- if (s != null) {
- StringTokenizer tok = new StringTokenizer(s, ",", false);
- while (tok.hasMoreTokens()) {
- mimeTypes.add(tok.nextToken());
- }
- }
- }
-
- @Override
- public void destroy() {
- }
-
- @Override
- public void doFilter(ServletRequest req, ServletResponse rsp,
- FilterChain chain) throws IOException, ServletException {
- HttpServletRequest request = (HttpServletRequest)req;
- HttpServletResponse response = (HttpServletResponse)rsp;
- String contentEncoding = request.getHeader("content-encoding");
- String acceptEncoding = request.getHeader("accept-encoding");
- String contentType = request.getHeader("content-type");
- if ((contentEncoding != null) &&
- (contentEncoding.toLowerCase().indexOf("gzip") > -1)) {
- request = new GZIPRequestWrapper(request);
- }
- if (((acceptEncoding != null) &&
- (acceptEncoding.toLowerCase().indexOf("gzip") > -1)) ||
- ((contentType != null) && mimeTypes.contains(contentType))) {
- response = new GZIPResponseWrapper(response);
- }
- chain.doFilter(request, response);
- if (response instanceof GZIPResponseWrapper) {
- OutputStream os = response.getOutputStream();
- if (os instanceof GZIPResponseStream) {
- ((GZIPResponseStream)os).finish();
- }
- }
- }
-
-}
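Client side, the filter above responds to standard HTTP content negotiation:
send "Accept-Encoding: gzip" and unwrap the response with GZIPInputStream.
A plain-JDK sketch (the gateway URL and endpoint are assumptions for
illustration):

    import java.io.InputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.util.zip.GZIPInputStream;

    public class GzipClientExample {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:8080/version"); // assumed gateway
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("Accept-Encoding", "gzip");
        InputStream in = conn.getInputStream();
        // Only unwrap if the server actually compressed the response.
        if ("gzip".equalsIgnoreCase(conn.getContentEncoding())) {
          in = new GZIPInputStream(in);
        }
        int b;
        while ((b = in.read()) != -1) {
          System.out.write(b);
        }
        System.out.flush();
        in.close();
      }
    }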
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java
deleted file mode 100644
index 349d352..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import java.io.IOException;
-import java.io.Serializable;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlAttribute;
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.bind.annotation.XmlValue;
-
-import org.apache.hadoop.hbase.util.ByteStringer;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
-import org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell;
-import org.codehaus.jackson.annotate.JsonProperty;
-
-/**
- * Representation of a cell. A cell is a single value associated with a column
- * and optional qualifier, and either the timestamp when it was stored or the
- * user-provided timestamp if one was explicitly supplied.
- *
- * <pre>
- * <complexType name="Cell">
- * <sequence>
- * <element name="value" maxOccurs="1" minOccurs="1">
- * <simpleType>
- * <restriction base="base64Binary"/>
- * </simpleType>
- * </element>
- * </sequence>
- * <attribute name="column" type="base64Binary" />
- * <attribute name="timestamp" type="int" />
- * </complexType>
- * </pre>
- */
-@XmlRootElement(name="Cell")
-@XmlAccessorType(XmlAccessType.FIELD)
-@InterfaceAudience.Private
-public class CellModel implements ProtobufMessageHandler, Serializable {
- private static final long serialVersionUID = 1L;
-
- @JsonProperty("column")
- @XmlAttribute
- private byte[] column;
-
- @JsonProperty("timestamp")
- @XmlAttribute
- private long timestamp = HConstants.LATEST_TIMESTAMP;
-
- @JsonProperty("$")
- @XmlValue
- private byte[] value;
-
- /**
- * Default constructor
- */
- public CellModel() {}
-
- /**
-  * Constructor
-  * @param column the column name
-  * @param value the cell value
-  */
- public CellModel(byte[] column, byte[] value) {
- this(column, HConstants.LATEST_TIMESTAMP, value);
- }
-
- /**
-  * Constructor
-  * @param column the column family name
-  * @param qualifier the column qualifier
-  * @param value the cell value
-  */
- public CellModel(byte[] column, byte[] qualifier, byte[] value) {
- this(column, qualifier, HConstants.LATEST_TIMESTAMP, value);
- }
-
- /**
-  * Constructor from KeyValue
-  * @param kv the KeyValue to convert
-  */
- public CellModel(KeyValue kv) {
- this(kv.getFamily(), kv.getQualifier(), kv.getTimestamp(), kv.getValue());
- }
-
- /**
-  * Constructor
-  * @param column the column name
-  * @param timestamp the timestamp
-  * @param value the cell value
-  */
- public CellModel(byte[] column, long timestamp, byte[] value) {
- this.column = column;
- this.timestamp = timestamp;
- this.value = value;
- }
-
- /**
-  * Constructor
-  * @param column the column family name
-  * @param qualifier the column qualifier
-  * @param timestamp the timestamp
-  * @param value the cell value
-  */
- public CellModel(byte[] column, byte[] qualifier, long timestamp,
- byte[] value) {
- this.column = KeyValue.makeColumn(column, qualifier);
- this.timestamp = timestamp;
- this.value = value;
- }
-
- /**
- * @return the column
- */
- public byte[] getColumn() {
- return column;
- }
-
- /**
- * @param column the column to set
- */
- public void setColumn(byte[] column) {
- this.column = column;
- }
-
- /**
- * @return true if the timestamp property has been specified by the
- * user
- */
- public boolean hasUserTimestamp() {
- return timestamp != HConstants.LATEST_TIMESTAMP;
- }
-
- /**
- * @return the timestamp
- */
- public long getTimestamp() {
- return timestamp;
- }
-
- /**
- * @param timestamp the timestamp to set
- */
- public void setTimestamp(long timestamp) {
- this.timestamp = timestamp;
- }
-
- /**
- * @return the value
- */
- public byte[] getValue() {
- return value;
- }
-
- /**
- * @param value the value to set
- */
- public void setValue(byte[] value) {
- this.value = value;
- }
-
- @Override
- public byte[] createProtobufOutput() {
- Cell.Builder builder = Cell.newBuilder();
- builder.setColumn(ByteStringer.wrap(getColumn()));
- builder.setData(ByteStringer.wrap(getValue()));
- if (hasUserTimestamp()) {
- builder.setTimestamp(getTimestamp());
- }
- return builder.build().toByteArray();
- }
-
- @Override
- public ProtobufMessageHandler getObjectFromMessage(byte[] message)
- throws IOException {
- Cell.Builder builder = Cell.newBuilder();
- builder.mergeFrom(message);
- setColumn(builder.getColumn().toByteArray());
- setValue(builder.getData().toByteArray());
- if (builder.hasTimestamp()) {
- setTimestamp(builder.getTimestamp());
- }
- return this;
- }
-}
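A quick round-trip sketch for the model above, using the constructors and
ProtobufMessageHandler methods shown in the diff (the family, qualifier,
timestamp, and value bytes are illustrative):

    import org.apache.hadoop.hbase.rest.model.CellModel;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CellModelRoundTrip {
      public static void main(String[] args) throws Exception {
        CellModel cell = new CellModel(Bytes.toBytes("cf"), Bytes.toBytes("q"),
            1245219839331L, Bytes.toBytes("value"));
        byte[] pb = cell.createProtobufOutput();              // protobuf encoding
        CellModel copy = new CellModel();
        copy.getObjectFromMessage(pb);                        // decode it back
        System.out.println(Bytes.toString(copy.getValue()));  // prints "value"
        System.out.println(copy.hasUserTimestamp());          // prints "true"
      }
    }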
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java
deleted file mode 100644
index 094da36..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-import org.apache.hadoop.hbase.util.ByteStringer;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
-import org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell;
-import org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet;
-
-/**
- * Representation of a grouping of cells. May contain cells from more than
- * one row. Encapsulates RowModel and CellModel models.
- *
- * <pre>
- * <complexType name="CellSet">
- * <sequence>
- * <element name="row" type="tns:Row" maxOccurs="unbounded"
- * minOccurs="1"></element>
- * </sequence>
- * </complexType>
- *
- * <complexType name="Row">
- * <sequence>
- * <element name="key" type="base64Binary"></element>
- * <element name="cell" type="tns:Cell"
- * maxOccurs="unbounded" minOccurs="1"></element>
- * </sequence>
- * </complexType>
- *
- * <complexType name="Cell">
- * <sequence>
- * <element name="value" maxOccurs="1" minOccurs="1">
- * <simpleType>
- * <restriction base="base64Binary"/>
- * </simpleType>
- * </element>
- * </sequence>
- * <attribute name="column" type="base64Binary" />
- * <attribute name="timestamp" type="int" />
- * </complexType>
- * </pre>
- */
-@XmlRootElement(name="CellSet")
-@XmlAccessorType(XmlAccessType.FIELD)
-@InterfaceAudience.Private
-public class CellSetModel implements Serializable, ProtobufMessageHandler {
-
- private static final long serialVersionUID = 1L;
-
- @XmlElement(name="Row")
- private List<RowModel> rows;
-
- /**
- * Constructor
- */
- public CellSetModel() {
- this.rows = new ArrayList<RowModel>();
- }
-
- /**
- * @param rows the rows
- */
- public CellSetModel(List<RowModel> rows) {
- super();
- this.rows = rows;
- }
-
- /**
- * Add a row to this cell set
- * @param row the row
- */
- public void addRow(RowModel row) {
- rows.add(row);
- }
-
- /**
- * @return the rows
- */
- public List<RowModel> getRows() {
- return rows;
- }
-
- @Override
- public byte[] createProtobufOutput() {
- CellSet.Builder builder = CellSet.newBuilder();
- for (RowModel row: getRows()) {
- CellSet.Row.Builder rowBuilder = CellSet.Row.newBuilder();
- rowBuilder.setKey(ByteStringer.wrap(row.getKey()));
- for (CellModel cell: row.getCells()) {
- Cell.Builder cellBuilder = Cell.newBuilder();
- cellBuilder.setColumn(ByteStringer.wrap(cell.getColumn()));
- cellBuilder.setData(ByteStringer.wrap(cell.getValue()));
- if (cell.hasUserTimestamp()) {
- cellBuilder.setTimestamp(cell.getTimestamp());
- }
- rowBuilder.addValues(cellBuilder);
- }
- builder.addRows(rowBuilder);
- }
- return builder.build().toByteArray();
- }
-
- @Override
- public ProtobufMessageHandler getObjectFromMessage(byte[] message)
- throws IOException {
- CellSet.Builder builder = CellSet.newBuilder();
- builder.mergeFrom(message);
- for (CellSet.Row row: builder.getRowsList()) {
- RowModel rowModel = new RowModel(row.getKey().toByteArray());
- for (Cell cell: row.getValuesList()) {
- long timestamp = HConstants.LATEST_TIMESTAMP;
- if (cell.hasTimestamp()) {
- timestamp = cell.getTimestamp();
- }
- rowModel.addCell(
- new CellModel(cell.getColumn().toByteArray(), timestamp,
- cell.getData().toByteArray()));
- }
- addRow(rowModel);
- }
- return this;
- }
-}
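
For reference, a minimal round-trip sketch of the CellSetModel API shown
above. The row key, column, and value literals are invented for
illustration; everything else mirrors methods visible in this diff:

    import java.io.IOException;

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.rest.model.CellModel;
    import org.apache.hadoop.hbase.rest.model.CellSetModel;
    import org.apache.hadoop.hbase.rest.model.RowModel;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CellSetModelExample {
      public static void main(String[] args) throws IOException {
        // Assemble a one-row, one-cell set with the three-argument
        // CellModel constructor that getObjectFromMessage() above uses.
        RowModel row = new RowModel(Bytes.toBytes("row1"));
        row.addCell(new CellModel(Bytes.toBytes("cf:qual"),
            HConstants.LATEST_TIMESTAMP, Bytes.toBytes("value1")));
        CellSetModel cellSet = new CellSetModel();
        cellSet.addRow(row);

        // Round-trip through the protobuf wire form.
        byte[] wire = cellSet.createProtobufOutput();
        CellSetModel decoded = new CellSetModel();
        decoded.getObjectFromMessage(wire);
        System.out.println(decoded.getRows().size()); // prints 1
      }
    }
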
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java
deleted file mode 100644
index ba0eed8..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import java.io.Serializable;
-import java.util.LinkedHashMap;
-import java.util.Map;
-
-import javax.xml.bind.annotation.XmlAnyAttribute;
-import javax.xml.bind.annotation.XmlAttribute;
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.namespace.QName;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.codehaus.jackson.annotate.JsonAnyGetter;
-import org.codehaus.jackson.annotate.JsonAnySetter;
-
-/**
- * Representation of a column family schema.
- *
- * <pre>
- * <complexType name="ColumnSchema">
- * <attribute name="name" type="string"></attribute>
- * <anyAttribute></anyAttribute>
- * </complexType>
- * </pre>
- */
-@XmlRootElement(name="ColumnSchema")
-@InterfaceAudience.Private
-public class ColumnSchemaModel implements Serializable {
- private static final long serialVersionUID = 1L;
- private static final QName BLOCKCACHE = new QName(HColumnDescriptor.BLOCKCACHE);
- private static final QName BLOCKSIZE = new QName(HColumnDescriptor.BLOCKSIZE);
- private static final QName BLOOMFILTER = new QName(HColumnDescriptor.BLOOMFILTER);
- private static final QName COMPRESSION = new QName(HColumnDescriptor.COMPRESSION);
- private static final QName IN_MEMORY = new QName(HConstants.IN_MEMORY);
- private static final QName TTL = new QName(HColumnDescriptor.TTL);
- private static final QName VERSIONS = new QName(HConstants.VERSIONS);
-
- private String name;
- private Map<QName,Object> attrs = new LinkedHashMap<QName,Object>();
-
- /**
- * Default constructor
- */
- public ColumnSchemaModel() {}
-
- /**
- * Add an attribute to the column family schema
- * @param name the attribute name
- * @param value the attribute value
- */
- @JsonAnySetter
- public void addAttribute(String name, Object value) {
- attrs.put(new QName(name), value);
- }
-
- /**
- * @param name the attribute name
- * @return the attribute value
- */
- public String getAttribute(String name) {
- Object o = attrs.get(new QName(name));
- return o != null ? o.toString(): null;
- }
-
- /**
- * @return the column name
- */
- @XmlAttribute
- public String getName() {
- return name;
- }
-
- /**
- * @return the map for holding unspecified (user) attributes
- */
- @XmlAnyAttribute
- @JsonAnyGetter
- public Map<QName,Object> getAny() {
- return attrs;
- }
-
- /**
- * @param name the column name
- */
- public void setName(String name) {
- this.name = name;
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
- StringBuilder sb = new StringBuilder();
- sb.append("{ NAME => '");
- sb.append(name);
- sb.append('\'');
- for (Map.Entry<QName,Object> e: attrs.entrySet()) {
- sb.append(", ");
- sb.append(e.getKey().getLocalPart());
- sb.append(" => '");
- sb.append(e.getValue().toString());
- sb.append('\'');
- }
- sb.append(" }");
- return sb.toString();
- }
-
- // getters and setters for common schema attributes
-
- // cannot be standard bean type getters and setters, otherwise this would
- // confuse JAXB
-
- /**
- * @return the value of the BLOCKCACHE attribute or its default if unset
- */
- public boolean __getBlockcache() {
- Object o = attrs.get(BLOCKCACHE);
- return o != null ?
- Boolean.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKCACHE;
- }
-
- /**
- * @return the value of the BLOCKSIZE attribute or its default if it is unset
- */
- public int __getBlocksize() {
- Object o = attrs.get(BLOCKSIZE);
- return o != null ?
- Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKSIZE;
- }
-
- /**
- * @return the value of the BLOOMFILTER attribute or its default if unset
- */
- public String __getBloomfilter() {
- Object o = attrs.get(BLOOMFILTER);
- return o != null ? o.toString() : HColumnDescriptor.DEFAULT_BLOOMFILTER;
- }
-
- /**
- * @return the value of the COMPRESSION attribute or its default if unset
- */
- public String __getCompression() {
- Object o = attrs.get(COMPRESSION);
- return o != null ? o.toString() : HColumnDescriptor.DEFAULT_COMPRESSION;
- }
-
- /**
- * @return the value of the IN_MEMORY attribute or its default if unset
- */
- public boolean __getInMemory() {
- Object o = attrs.get(IN_MEMORY);
- return o != null ?
- Boolean.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_IN_MEMORY;
- }
-
- /**
- * @return the value of the TTL attribute or its default if it is unset
- */
- public int __getTTL() {
- Object o = attrs.get(TTL);
- return o != null ?
- Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_TTL;
- }
-
- /**
- * @return the value of the VERSIONS attribute or its default if it is unset
- */
- public int __getVersions() {
- Object o = attrs.get(VERSIONS);
- return o != null ?
- Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_VERSIONS;
- }
-
- /**
- * @param value the desired value of the BLOCKSIZE attribute
- */
- public void __setBlocksize(int value) {
- attrs.put(BLOCKSIZE, Integer.toString(value));
- }
-
- /**
- * @param value the desired value of the BLOCKCACHE attribute
- */
- public void __setBlockcache(boolean value) {
- attrs.put(BLOCKCACHE, Boolean.toString(value));
- }
-
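- /**
- * @param value the desired value of the BLOOMFILTER attribute
- */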
- public void __setBloomfilter(String value) {
- attrs.put(BLOOMFILTER, value);
- }
-
- /**
- * @param value the desired value of the COMPRESSION attribute
- */
- public void __setCompression(String value) {
- attrs.put(COMPRESSION, value);
- }
-
- /**
- * @param value the desired value of the IN_MEMORY attribute
- */
- public void __setInMemory(boolean value) {
- attrs.put(IN_MEMORY, Boolean.toString(value));
- }
-
- /**
- * @param value the desired value of the TTL attribute
- */
- public void __setTTL(int value) {
- attrs.put(TTL, Integer.toString(value));
- }
-
- /**
- * @param value the desired value of the VERSIONS attribute
- */
- public void __setVersions(int value) {
- attrs.put(VERSIONS, Integer.toString(value));
- }
-}
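
A hedged usage sketch of the ColumnSchemaModel accessors above; the family
name, TTL, and custom attribute are made-up values, not anything from this
patch:

    import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel;

    public class ColumnSchemaModelExample {
      public static void main(String[] args) {
        ColumnSchemaModel family = new ColumnSchemaModel();
        family.setName("cf");
        family.__setVersions(3);
        family.__setTTL(86400);
        family.addAttribute("MY_CUSTOM_KEY", "someValue");

        // toString() prints a shell-style description in insertion order:
        // { NAME => 'cf', VERSIONS => '3', TTL => '86400', MY_CUSTOM_KEY => 'someValue' }
        System.out.println(family);

        // Typed getters fall back to the HColumnDescriptor defaults when
        // the corresponding attribute is unset.
        System.out.println(family.__getVersions());  // 3
        System.out.println(family.__getBlocksize()); // default, never set here
      }
    }
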
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java
deleted file mode 100644
index 596c754..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlAttribute;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
-import org.codehaus.jackson.annotate.JsonProperty;
-
-/**
- * Representation of a row. A row is a related set of cells, grouped by common
- * row key. RowModels do not appear in results by themselves. They are always
- * encapsulated within CellSetModels.
- *
- * <pre>
- * <complexType name="Row">
- * <sequence>
- * <element name="key" type="base64Binary"></element>
- * <element name="cell" type="tns:Cell"
- * maxOccurs="unbounded" minOccurs="1"></element>
- * </sequence>
- * </complexType>
- * </pre>
- */
-@XmlRootElement(name="Row")
-@XmlAccessorType(XmlAccessType.FIELD)
-@InterfaceAudience.Private
-public class RowModel implements ProtobufMessageHandler, Serializable {
- private static final long serialVersionUID = 1L;
-
- @JsonProperty("key")
- @XmlAttribute
- private byte[] key;
-
- @JsonProperty("Cell")
- @XmlElement(name="Cell")
- private List<CellModel> cells = new ArrayList<CellModel>();
-
-
- /**
- * Default constructor
- */
- public RowModel() { }
-
- /**
- * Constructor
- * @param key the row key
- */
- public RowModel(final String key) {
- this(key.getBytes());
- }
-
- /**
- * Constructor
- * @param key the row key
- */
- public RowModel(final byte[] key) {
- this.key = key;
- cells = new ArrayList<CellModel>();
- }
-
- /**
- * Constructor
- * @param key the row key
- * @param cells the cells
- */
- public RowModel(final String key, final List<CellModel> cells) {
- this(key.getBytes(), cells);
- }
-
- /**
- * Constructor
- * @param key the row key
- * @param cells the cells
- */
- public RowModel(final byte[] key, final List<CellModel> cells) {
- this.key = key;
- this.cells = cells;
- }
-
- /**
- * Adds a cell to the list of cells for this row
- * @param cell the cell
- */
- public void addCell(CellModel cell) {
- cells.add(cell);
- }
-
- /**
- * @return the row key
- */
- public byte[] getKey() {
- return key;
- }
-
- /**
- * @param key the row key
- */
- public void setKey(byte[] key) {
- this.key = key;
- }
-
- /**
- * @return the cells
- */
- public List<CellModel> getCells() {
- return cells;
- }
-
- @Override
- public byte[] createProtobufOutput() {
- // there is no standalone row protobuf message
- throw new UnsupportedOperationException(
- "no protobuf equivalent to RowModel");
- }
-
- @Override
- public ProtobufMessageHandler getObjectFromMessage(byte[] message)
- throws IOException {
- // there is no standalone row protobuf message
- throw new UnsupportedOperationException(
- "no protobuf equivalent to RowModel");
- }
-}
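
Because RowModel deliberately has no standalone protobuf form, a short
sketch of what callers can and cannot do with it (the row keys are
invented):

    import org.apache.hadoop.hbase.rest.model.RowModel;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowModelExample {
      public static void main(String[] args) {
        // The String constructor uses String.getBytes(), i.e. the platform
        // default charset; the byte[] form is the safer choice for keys.
        RowModel row = new RowModel(Bytes.toBytes("row1"));
        row.setKey(Bytes.toBytes("row2"));

        // Serialization only works through an enclosing CellSetModel:
        try {
          row.createProtobufOutput();
        } catch (UnsupportedOperationException expected) {
          System.out.println(expected.getMessage());
          // prints: no protobuf equivalent to RowModel
        }
      }
    }
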
[03/38] HBASE-12197 Move rest to its own module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java
deleted file mode 100644
index 0fc97e8..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java
+++ /dev/null
@@ -1,999 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.ByteArrayInputStream;
-import java.io.StringWriter;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Iterator;
-import java.util.List;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.Marshaller;
-import javax.xml.bind.Unmarshaller;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
-import org.apache.hadoop.hbase.filter.InclusiveStopFilter;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.hbase.filter.PrefixFilter;
-import org.apache.hadoop.hbase.filter.QualifierFilter;
-import org.apache.hadoop.hbase.filter.RegexStringComparator;
-import org.apache.hadoop.hbase.filter.RowFilter;
-import org.apache.hadoop.hbase.filter.SkipFilter;
-import org.apache.hadoop.hbase.filter.SubstringComparator;
-import org.apache.hadoop.hbase.filter.ValueFilter;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.FilterList.Operator;
-import org.apache.hadoop.hbase.rest.client.Client;
-import org.apache.hadoop.hbase.rest.client.Cluster;
-import org.apache.hadoop.hbase.rest.client.Response;
-import org.apache.hadoop.hbase.rest.model.CellModel;
-import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
-import org.apache.hadoop.hbase.rest.model.ScannerModel;
-import org.apache.hadoop.hbase.util.Bytes;
-
-import static org.junit.Assert.*;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(MediumTests.class)
-public class TestScannersWithFilters {
-
- private static final Log LOG = LogFactory.getLog(TestScannersWithFilters.class);
-
- private static final String TABLE = "TestScannersWithFilters";
-
- private static final byte [][] ROWS_ONE = {
- Bytes.toBytes("testRowOne-0"), Bytes.toBytes("testRowOne-1"),
- Bytes.toBytes("testRowOne-2"), Bytes.toBytes("testRowOne-3")
- };
-
- private static final byte [][] ROWS_TWO = {
- Bytes.toBytes("testRowTwo-0"), Bytes.toBytes("testRowTwo-1"),
- Bytes.toBytes("testRowTwo-2"), Bytes.toBytes("testRowTwo-3")
- };
-
- private static final byte [][] FAMILIES = {
- Bytes.toBytes("testFamilyOne"), Bytes.toBytes("testFamilyTwo")
- };
-
- private static final byte [][] QUALIFIERS_ONE = {
- Bytes.toBytes("testQualifierOne-0"), Bytes.toBytes("testQualifierOne-1"),
- Bytes.toBytes("testQualifierOne-2"), Bytes.toBytes("testQualifierOne-3")
- };
-
- private static final byte [][] QUALIFIERS_TWO = {
- Bytes.toBytes("testQualifierTwo-0"), Bytes.toBytes("testQualifierTwo-1"),
- Bytes.toBytes("testQualifierTwo-2"), Bytes.toBytes("testQualifierTwo-3")
- };
-
- private static final byte [][] VALUES = {
- Bytes.toBytes("testValueOne"), Bytes.toBytes("testValueTwo")
- };
-
- private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
- private static final HBaseRESTTestingUtility REST_TEST_UTIL =
- new HBaseRESTTestingUtility();
- private static Client client;
- private static JAXBContext context;
- private static Marshaller marshaller;
- private static Unmarshaller unmarshaller;
- private static long numRows = ROWS_ONE.length + ROWS_TWO.length;
- private static long colsPerRow = FAMILIES.length * QUALIFIERS_ONE.length;
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- TEST_UTIL.startMiniCluster(3);
- REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
- context = JAXBContext.newInstance(
- CellModel.class,
- CellSetModel.class,
- RowModel.class,
- ScannerModel.class);
- marshaller = context.createMarshaller();
- unmarshaller = context.createUnmarshaller();
- client = new Client(new Cluster().add("localhost",
- REST_TEST_UTIL.getServletPort()));
- HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
- if (!admin.tableExists(TABLE)) {
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
- htd.addFamily(new HColumnDescriptor(FAMILIES[0]));
- htd.addFamily(new HColumnDescriptor(FAMILIES[1]));
- admin.createTable(htd);
- HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
- // Insert first half
- for(byte [] ROW : ROWS_ONE) {
- Put p = new Put(ROW);
- p.setDurability(Durability.SKIP_WAL);
- for(byte [] QUALIFIER : QUALIFIERS_ONE) {
- p.add(FAMILIES[0], QUALIFIER, VALUES[0]);
- }
- table.put(p);
- }
- for(byte [] ROW : ROWS_TWO) {
- Put p = new Put(ROW);
- p.setDurability(Durability.SKIP_WAL);
- for(byte [] QUALIFIER : QUALIFIERS_TWO) {
- p.add(FAMILIES[1], QUALIFIER, VALUES[1]);
- }
- table.put(p);
- }
-
- // Insert second half (reverse families)
- for(byte [] ROW : ROWS_ONE) {
- Put p = new Put(ROW);
- p.setDurability(Durability.SKIP_WAL);
- for(byte [] QUALIFIER : QUALIFIERS_ONE) {
- p.add(FAMILIES[1], QUALIFIER, VALUES[0]);
- }
- table.put(p);
- }
- for(byte [] ROW : ROWS_TWO) {
- Put p = new Put(ROW);
- p.setDurability(Durability.SKIP_WAL);
- for(byte [] QUALIFIER : QUALIFIERS_TWO) {
- p.add(FAMILIES[0], QUALIFIER, VALUES[1]);
- }
- table.put(p);
- }
-
- // Delete the second qualifier from all rows and families
- for(byte [] ROW : ROWS_ONE) {
- Delete d = new Delete(ROW);
- d.deleteColumns(FAMILIES[0], QUALIFIERS_ONE[1]);
- d.deleteColumns(FAMILIES[1], QUALIFIERS_ONE[1]);
- table.delete(d);
- }
- for(byte [] ROW : ROWS_TWO) {
- Delete d = new Delete(ROW);
- d.deleteColumns(FAMILIES[0], QUALIFIERS_TWO[1]);
- d.deleteColumns(FAMILIES[1], QUALIFIERS_TWO[1]);
- table.delete(d);
- }
- colsPerRow -= 2;
-
- // Delete the second rows from both groups, one column at a time
- for(byte [] QUALIFIER : QUALIFIERS_ONE) {
- Delete d = new Delete(ROWS_ONE[1]);
- d.deleteColumns(FAMILIES[0], QUALIFIER);
- d.deleteColumns(FAMILIES[1], QUALIFIER);
- table.delete(d);
- }
- for(byte [] QUALIFIER : QUALIFIERS_TWO) {
- Delete d = new Delete(ROWS_TWO[1]);
- d.deleteColumns(FAMILIES[0], QUALIFIER);
- d.deleteColumns(FAMILIES[1], QUALIFIER);
- table.delete(d);
- }
- numRows -= 2;
- table.close();
- }
- }
-
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- REST_TEST_UTIL.shutdownServletContainer();
- TEST_UTIL.shutdownMiniCluster();
- }
-
- private static void verifyScan(Scan s, long expectedRows, long expectedKeys)
- throws Exception {
- ScannerModel model = ScannerModel.fromScan(s);
- model.setBatch(Integer.MAX_VALUE); // fetch it all at once
- StringWriter writer = new StringWriter();
- marshaller.marshal(model, writer);
- LOG.debug(writer.toString());
- byte[] body = Bytes.toBytes(writer.toString());
- Response response = client.put("/" + TABLE + "/scanner",
- Constants.MIMETYPE_XML, body);
- assertEquals(response.getCode(), 201);
- String scannerURI = response.getLocation();
- assertNotNull(scannerURI);
-
- // get a cell set
- response = client.get(scannerURI, Constants.MIMETYPE_XML);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
- CellSetModel cells = (CellSetModel)
- unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
-
- int rows = cells.getRows().size();
- assertTrue("Scanned too many rows! Only expected " + expectedRows +
- " total but scanned " + rows, expectedRows == rows);
- for (RowModel row: cells.getRows()) {
- int count = row.getCells().size();
- assertEquals("Expected " + expectedKeys + " keys per row but " +
- "returned " + count, expectedKeys, count);
- }
-
- // delete the scanner
- response = client.delete(scannerURI);
- assertEquals(response.getCode(), 200);
- }
-
- private static void verifyScanFull(Scan s, KeyValue [] kvs)
- throws Exception {
- ScannerModel model = ScannerModel.fromScan(s);
- model.setBatch(Integer.MAX_VALUE); // fetch it all at once
- StringWriter writer = new StringWriter();
- marshaller.marshal(model, writer);
- LOG.debug(writer.toString());
- byte[] body = Bytes.toBytes(writer.toString());
- Response response = client.put("/" + TABLE + "/scanner",
- Constants.MIMETYPE_XML, body);
- assertEquals(response.getCode(), 201);
- String scannerURI = response.getLocation();
- assertNotNull(scannerURI);
-
- // get a cell set
- response = client.get(scannerURI, Constants.MIMETYPE_XML);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
- CellSetModel cellSet = (CellSetModel)
- unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
-
- // delete the scanner
- response = client.delete(scannerURI);
- assertEquals(response.getCode(), 200);
-
- int row = 0;
- int idx = 0;
- Iterator<RowModel> i = cellSet.getRows().iterator();
- for (boolean done = true; done; row++) {
- done = i.hasNext();
- if (!done) break;
- RowModel rowModel = i.next();
- List<CellModel> cells = rowModel.getCells();
- if (cells.isEmpty()) break;
- assertTrue("Scanned too many keys! Only expected " + kvs.length +
- " total but already scanned " + (cells.size() + idx),
- kvs.length >= idx + cells.size());
- for (CellModel cell: cells) {
- assertTrue("Row mismatch",
- Bytes.equals(rowModel.getKey(), kvs[idx].getRow()));
- byte[][] split = KeyValue.parseColumn(cell.getColumn());
- assertTrue("Family mismatch",
- Bytes.equals(split[0], kvs[idx].getFamily()));
- assertTrue("Qualifier mismatch",
- Bytes.equals(split[1], kvs[idx].getQualifier()));
- assertTrue("Value mismatch",
- Bytes.equals(cell.getValue(), kvs[idx].getValue()));
- idx++;
- }
- }
- assertEquals("Expected " + kvs.length + " total keys but scanned " + idx,
- kvs.length, idx);
- }
-
- private static void verifyScanNoEarlyOut(Scan s, long expectedRows,
- long expectedKeys) throws Exception {
- ScannerModel model = ScannerModel.fromScan(s);
- model.setBatch(Integer.MAX_VALUE); // fetch it all at once
- StringWriter writer = new StringWriter();
- marshaller.marshal(model, writer);
- LOG.debug(writer.toString());
- byte[] body = Bytes.toBytes(writer.toString());
- Response response = client.put("/" + TABLE + "/scanner",
- Constants.MIMETYPE_XML, body);
- assertEquals(response.getCode(), 201);
- String scannerURI = response.getLocation();
- assertNotNull(scannerURI);
-
- // get a cell set
- response = client.get(scannerURI, Constants.MIMETYPE_XML);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
- CellSetModel cellSet = (CellSetModel)
- unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
-
- // delete the scanner
- response = client.delete(scannerURI);
- assertEquals(response.getCode(), 200);
-
- Iterator<RowModel> i = cellSet.getRows().iterator();
- int j = 0;
- for (boolean done = true; done; j++) {
- done = i.hasNext();
- if (!done) break;
- RowModel rowModel = i.next();
- List<CellModel> cells = rowModel.getCells();
- if (cells.isEmpty()) break;
- assertTrue("Scanned too many rows! Only expected " + expectedRows +
- " total but already scanned " + (j+1), expectedRows > j);
- assertEquals("Expected " + expectedKeys + " keys per row but " +
- "returned " + cells.size(), expectedKeys, cells.size());
- }
- assertEquals("Expected " + expectedRows + " rows but scanned " + j +
- " rows", expectedRows, j);
- }
-
- @Test
- public void testNoFilter() throws Exception {
- // No filter
- long expectedRows = numRows;
- long expectedKeys = colsPerRow;
-
- // Both families
- Scan s = new Scan();
- verifyScan(s, expectedRows, expectedKeys);
-
- // One family
- s = new Scan();
- s.addFamily(FAMILIES[0]);
- verifyScan(s, expectedRows, expectedKeys/2);
- }
-
- @Test
- public void testPrefixFilter() throws Exception {
- // Grab rows from group one (half of total)
- long expectedRows = numRows / 2;
- long expectedKeys = colsPerRow;
- Scan s = new Scan();
- s.setFilter(new PrefixFilter(Bytes.toBytes("testRowOne")));
- verifyScan(s, expectedRows, expectedKeys);
- }
-
- @Test
- public void testPageFilter() throws Exception {
- // KVs in first 6 rows
- KeyValue [] expectedKVs = {
- // testRowOne-0
- new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]),
- new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
- new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]),
- new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
- // testRowOne-2
- new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]),
- new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
- new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]),
- new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
- // testRowOne-3
- new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]),
- new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
- new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]),
- new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
- // testRowTwo-0
- new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
- // testRowTwo-2
- new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
- // testRowTwo-3
- new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1])
- };
-
- // Grab all 6 rows
- long expectedRows = 6;
- long expectedKeys = colsPerRow;
- Scan s = new Scan();
- s.setFilter(new PageFilter(expectedRows));
- verifyScan(s, expectedRows, expectedKeys);
- s.setFilter(new PageFilter(expectedRows));
- verifyScanFull(s, expectedKVs);
-
- // Grab first 4 rows (6 cols per row)
- expectedRows = 4;
- expectedKeys = colsPerRow;
- s = new Scan();
- s.setFilter(new PageFilter(expectedRows));
- verifyScan(s, expectedRows, expectedKeys);
- s.setFilter(new PageFilter(expectedRows));
- verifyScanFull(s, Arrays.copyOf(expectedKVs, 24));
-
- // Grab first 2 rows
- expectedRows = 2;
- expectedKeys = colsPerRow;
- s = new Scan();
- s.setFilter(new PageFilter(expectedRows));
- verifyScan(s, expectedRows, expectedKeys);
- s.setFilter(new PageFilter(expectedRows));
- verifyScanFull(s, Arrays.copyOf(expectedKVs, 12));
-
- // Grab first row
- expectedRows = 1;
- expectedKeys = colsPerRow;
- s = new Scan();
- s.setFilter(new PageFilter(expectedRows));
- verifyScan(s, expectedRows, expectedKeys);
- s.setFilter(new PageFilter(expectedRows));
- verifyScanFull(s, Arrays.copyOf(expectedKVs, 6));
- }
-
- @Test
- public void testInclusiveStopFilter() throws Exception {
- // Grab rows from group one
-
- // If we just use start/stop row, we get total/2 - 1 rows
- long expectedRows = (numRows / 2) - 1;
- long expectedKeys = colsPerRow;
- Scan s = new Scan(Bytes.toBytes("testRowOne-0"),
- Bytes.toBytes("testRowOne-3"));
- verifyScan(s, expectedRows, expectedKeys);
-
- // Now use start row with inclusive stop filter
- expectedRows = numRows / 2;
- s = new Scan(Bytes.toBytes("testRowOne-0"));
- s.setFilter(new InclusiveStopFilter(Bytes.toBytes("testRowOne-3")));
- verifyScan(s, expectedRows, expectedKeys);
-
- // Grab rows from group two
-
- // If we just use start/stop row, we get total/2 - 1 rows
- expectedRows = (numRows / 2) - 1;
- expectedKeys = colsPerRow;
- s = new Scan(Bytes.toBytes("testRowTwo-0"),
- Bytes.toBytes("testRowTwo-3"));
- verifyScan(s, expectedRows, expectedKeys);
-
- // Now use start row with inclusive stop filter
- expectedRows = numRows / 2;
- s = new Scan(Bytes.toBytes("testRowTwo-0"));
- s.setFilter(new InclusiveStopFilter(Bytes.toBytes("testRowTwo-3")));
- verifyScan(s, expectedRows, expectedKeys);
- }
-
- @Test
- public void testQualifierFilter() throws Exception {
- // Match two keys (one from each family) in half the rows
- long expectedRows = numRows / 2;
- long expectedKeys = 2;
- Filter f = new QualifierFilter(CompareOp.EQUAL,
- new BinaryComparator(Bytes.toBytes("testQualifierOne-2")));
- Scan s = new Scan();
- s.setFilter(f);
- verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
-
- // Match keys less than same qualifier
- // Expect only two keys (one from each family) in half the rows
- expectedRows = numRows / 2;
- expectedKeys = 2;
- f = new QualifierFilter(CompareOp.LESS,
- new BinaryComparator(Bytes.toBytes("testQualifierOne-2")));
- s = new Scan();
- s.setFilter(f);
- verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
-
- // Match keys less than or equal
- // Expect four keys (two from each family) in half the rows
- expectedRows = numRows / 2;
- expectedKeys = 4;
- f = new QualifierFilter(CompareOp.LESS_OR_EQUAL,
- new BinaryComparator(Bytes.toBytes("testQualifierOne-2")));
- s = new Scan();
- s.setFilter(f);
- verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
-
- // Match keys not equal
- // Expect four keys (two from each family)
- // Only look in first group of rows
- expectedRows = numRows / 2;
- expectedKeys = 4;
- f = new QualifierFilter(CompareOp.NOT_EQUAL,
- new BinaryComparator(Bytes.toBytes("testQualifierOne-2")));
- s = new Scan(HConstants.EMPTY_START_ROW, Bytes.toBytes("testRowTwo"));
- s.setFilter(f);
- verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
-
- // Match keys greater or equal
- // Expect four keys (two from each family)
- // Only look in first group of rows
- expectedRows = numRows / 2;
- expectedKeys = 4;
- f = new QualifierFilter(CompareOp.GREATER_OR_EQUAL,
- new BinaryComparator(Bytes.toBytes("testQualifierOne-2")));
- s = new Scan(HConstants.EMPTY_START_ROW, Bytes.toBytes("testRowTwo"));
- s.setFilter(f);
- verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
-
- // Match keys greater
- // Expect two keys (one from each family)
- // Only look in first group of rows
- expectedRows = numRows / 2;
- expectedKeys = 2;
- f = new QualifierFilter(CompareOp.GREATER,
- new BinaryComparator(Bytes.toBytes("testQualifierOne-2")));
- s = new Scan(HConstants.EMPTY_START_ROW, Bytes.toBytes("testRowTwo"));
- s.setFilter(f);
- verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
-
- // Match keys not equal to testQualifierOne-2
- // Look across rows and fully validate the keys and ordering
- // Expect varied numbers of keys, 4 per row in group one, 6 per row in
- // group two
- f = new QualifierFilter(CompareOp.NOT_EQUAL,
- new BinaryComparator(QUALIFIERS_ONE[2]));
- s = new Scan();
- s.setFilter(f);
-
- KeyValue [] kvs = {
- // testRowOne-0
- new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
- new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
- // testRowOne-2
- new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
- new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
- // testRowOne-3
- new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
- new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
- // testRowTwo-0
- new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
- // testRowTwo-2
- new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
- // testRowTwo-3
- new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
- };
- verifyScanFull(s, kvs);
-
- // Test across rows and groups with a regex
- // Filter out "test*-2"
- // Expect 4 keys per row across both groups
- f = new QualifierFilter(CompareOp.NOT_EQUAL,
- new RegexStringComparator("test.+-2"));
- s = new Scan();
- s.setFilter(f);
-
- kvs = new KeyValue [] {
- // testRowOne-0
- new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
- new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
- // testRowOne-2
- new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
- new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
- // testRowOne-3
- new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
- new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
- // testRowTwo-0
- new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
- // testRowTwo-2
- new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
- // testRowTwo-3
- new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
- };
- verifyScanFull(s, kvs);
- }
-
- @Test
- public void testRowFilter() throws Exception {
- // Match a single row, all keys
- long expectedRows = 1;
- long expectedKeys = colsPerRow;
- Filter f = new RowFilter(CompareOp.EQUAL,
- new BinaryComparator(Bytes.toBytes("testRowOne-2")));
- Scan s = new Scan();
- s.setFilter(f);
- verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
-
- // Match two rows, one from each group, using a regex
- expectedRows = 2;
- expectedKeys = colsPerRow;
- f = new RowFilter(CompareOp.EQUAL,
- new RegexStringComparator("testRow.+-2"));
- s = new Scan();
- s.setFilter(f);
- verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
-
- // Match rows less than
- // Expect all keys in one row
- expectedRows = 1;
- expectedKeys = colsPerRow;
- f = new RowFilter(CompareOp.LESS,
- new BinaryComparator(Bytes.toBytes("testRowOne-2")));
- s = new Scan();
- s.setFilter(f);
- verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
-
- // Match rows less than or equal
- // Expect all keys in two rows
- expectedRows = 2;
- expectedKeys = colsPerRow;
- f = new RowFilter(CompareOp.LESS_OR_EQUAL,
- new BinaryComparator(Bytes.toBytes("testRowOne-2")));
- s = new Scan();
- s.setFilter(f);
- verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
-
- // Match rows not equal
- // Expect all keys in all but one row
- expectedRows = numRows - 1;
- expectedKeys = colsPerRow;
- f = new RowFilter(CompareOp.NOT_EQUAL,
- new BinaryComparator(Bytes.toBytes("testRowOne-2")));
- s = new Scan();
- s.setFilter(f);
- verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
-
- // Match keys greater or equal
- // Expect all keys in all but one row
- expectedRows = numRows - 1;
- expectedKeys = colsPerRow;
- f = new RowFilter(CompareOp.GREATER_OR_EQUAL,
- new BinaryComparator(Bytes.toBytes("testRowOne-2")));
- s = new Scan();
- s.setFilter(f);
- verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
-
- // Match keys greater
- // Expect all keys in all but two rows
- expectedRows = numRows - 2;
- expectedKeys = colsPerRow;
- f = new RowFilter(CompareOp.GREATER,
- new BinaryComparator(Bytes.toBytes("testRowOne-2")));
- s = new Scan();
- s.setFilter(f);
- verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
-
- // Match rows not equal to testRowOne-2
- // Look across rows and fully validate the keys and ordering
- // Should see all keys in all rows but testRowOne-2
- f = new RowFilter(CompareOp.NOT_EQUAL,
- new BinaryComparator(Bytes.toBytes("testRowOne-2")));
- s = new Scan();
- s.setFilter(f);
-
- KeyValue [] kvs = {
- // testRowOne-0
- new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]),
- new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
- new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]),
- new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
- // testRowOne-3
- new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]),
- new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
- new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]),
- new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
- // testRowTwo-0
- new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
- // testRowTwo-2
- new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
- // testRowTwo-3
- new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
- };
- verifyScanFull(s, kvs);
-
- // Test across rows and groups with a regex
- // Filter out everything that doesn't match "*-2"
- // Expect all keys in two rows
- f = new RowFilter(CompareOp.EQUAL,
- new RegexStringComparator(".+-2"));
- s = new Scan();
- s.setFilter(f);
-
- kvs = new KeyValue [] {
- // testRowOne-2
- new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]),
- new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
- new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]),
- new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
- // testRowTwo-2
- new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1])
- };
- verifyScanFull(s, kvs);
- }
-
- @Test
- public void testValueFilter() throws Exception {
- // Match group one rows
- long expectedRows = numRows / 2;
- long expectedKeys = colsPerRow;
- Filter f = new ValueFilter(CompareOp.EQUAL,
- new BinaryComparator(Bytes.toBytes("testValueOne")));
- Scan s = new Scan();
- s.setFilter(f);
- verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
-
- // Match group two rows
- expectedRows = numRows / 2;
- expectedKeys = colsPerRow;
- f = new ValueFilter(CompareOp.EQUAL,
- new BinaryComparator(Bytes.toBytes("testValueTwo")));
- s = new Scan();
- s.setFilter(f);
- verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
-
- // Match all values using regex
- expectedRows = numRows;
- expectedKeys = colsPerRow;
- f = new ValueFilter(CompareOp.EQUAL,
- new RegexStringComparator("testValue((One)|(Two))"));
- s = new Scan();
- s.setFilter(f);
- verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
-
- // Match values less than
- // Expect group one rows
- expectedRows = numRows / 2;
- expectedKeys = colsPerRow;
- f = new ValueFilter(CompareOp.LESS,
- new BinaryComparator(Bytes.toBytes("testValueTwo")));
- s = new Scan();
- s.setFilter(f);
- verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
-
- // Match values less than or equal
- // Expect all rows
- expectedRows = numRows;
- expectedKeys = colsPerRow;
- f = new ValueFilter(CompareOp.LESS_OR_EQUAL,
- new BinaryComparator(Bytes.toBytes("testValueTwo")));
- s = new Scan();
- s.setFilter(f);
- verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
-
- // Match values less than or equal
- // Expect group one rows
- expectedRows = numRows / 2;
- expectedKeys = colsPerRow;
- f = new ValueFilter(CompareOp.LESS_OR_EQUAL,
- new BinaryComparator(Bytes.toBytes("testValueOne")));
- s = new Scan();
- s.setFilter(f);
- verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
-
- // Match values not equal
- // Expect half the rows
- expectedRows = numRows / 2;
- expectedKeys = colsPerRow;
- f = new ValueFilter(CompareOp.NOT_EQUAL,
- new BinaryComparator(Bytes.toBytes("testValueOne")));
- s = new Scan();
- s.setFilter(f);
- verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
-
- // Match values greater or equal
- // Expect all rows
- expectedRows = numRows;
- expectedKeys = colsPerRow;
- f = new ValueFilter(CompareOp.GREATER_OR_EQUAL,
- new BinaryComparator(Bytes.toBytes("testValueOne")));
- s = new Scan();
- s.setFilter(f);
- verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
-
- // Match values greater
- // Expect half the rows
- expectedRows = numRows / 2;
- expectedKeys = colsPerRow;
- f = new ValueFilter(CompareOp.GREATER,
- new BinaryComparator(Bytes.toBytes("testValueOne")));
- s = new Scan();
- s.setFilter(f);
- verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
-
- // Match values not equal to testValueOne
- // Look across rows and fully validate the keys and ordering
- // Should see all keys in all group two rows
- f = new ValueFilter(CompareOp.NOT_EQUAL,
- new BinaryComparator(Bytes.toBytes("testValueOne")));
- s = new Scan();
- s.setFilter(f);
-
- KeyValue [] kvs = {
- // testRowTwo-0
- new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
- // testRowTwo-2
- new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
- // testRowTwo-3
- new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
- };
- verifyScanFull(s, kvs);
- }
-
- @Test
- public void testSkipFilter() throws Exception {
- // Skip whole rows containing the qualifier "testQualifierOne-2" (binary match, not a regex)
- // Should only get rows from second group, and all keys
- Filter f = new SkipFilter(new QualifierFilter(CompareOp.NOT_EQUAL,
- new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))));
- Scan s = new Scan();
- s.setFilter(f);
-
- KeyValue [] kvs = {
- // testRowTwo-0
- new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
- // testRowTwo-2
- new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
- // testRowTwo-3
- new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
- };
- verifyScanFull(s, kvs);
- }
-
- @Test
- public void testFilterList() throws Exception {
- // Test getting a single row, single key using Row, Qualifier, and Value
- // regular expression and substring filters
- // Use must pass all
- List<Filter> filters = new ArrayList<Filter>();
- filters.add(new RowFilter(CompareOp.EQUAL,
- new RegexStringComparator(".+-2")));
- filters.add(new QualifierFilter(CompareOp.EQUAL,
- new RegexStringComparator(".+-2")));
- filters.add(new ValueFilter(CompareOp.EQUAL,
- new SubstringComparator("One")));
- Filter f = new FilterList(Operator.MUST_PASS_ALL, filters);
- Scan s = new Scan();
- s.addFamily(FAMILIES[0]);
- s.setFilter(f);
- KeyValue [] kvs = {
- new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0])
- };
- verifyScanFull(s, kvs);
-
- // Test getting everything with a MUST_PASS_ONE filter including row, qf,
- // val, regular expression and substring filters
- filters.clear();
- filters.add(new RowFilter(CompareOp.EQUAL,
- new RegexStringComparator(".+Two.+")));
- filters.add(new QualifierFilter(CompareOp.EQUAL,
- new RegexStringComparator(".+-2")));
- filters.add(new ValueFilter(CompareOp.EQUAL,
- new SubstringComparator("One")));
- f = new FilterList(Operator.MUST_PASS_ONE, filters);
- s = new Scan();
- s.setFilter(f);
- verifyScanNoEarlyOut(s, numRows, colsPerRow);
- }
-
- @Test
- public void testFirstKeyOnlyFilter() throws Exception {
- Scan s = new Scan();
- s.setFilter(new FirstKeyOnlyFilter());
- // Expected KVs, the first KV from each of the remaining 6 rows
- KeyValue [] kvs = {
- new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
- new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
- new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1])
- };
- verifyScanFull(s, kvs);
- }
-
-}
-
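
The verify helpers above all drive the same stateful scanner protocol: PUT
an XML ScannerModel to /<table>/scanner (expect 201 plus a Location
header), GET the returned URI for batches of cells, then DELETE it.
Distilled into a standalone sketch; the host, port, and table name are
placeholders for a running REST server, not values from this patch:

    import java.io.StringWriter;

    import javax.xml.bind.JAXBContext;
    import javax.xml.bind.Marshaller;

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.rest.Constants;
    import org.apache.hadoop.hbase.rest.client.Client;
    import org.apache.hadoop.hbase.rest.client.Cluster;
    import org.apache.hadoop.hbase.rest.client.Response;
    import org.apache.hadoop.hbase.rest.model.ScannerModel;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScannerRestFlow {
      public static void main(String[] args) throws Exception {
        Client client = new Client(new Cluster().add("localhost", 8080));

        // 1. Register the scanner; the server answers 201 Created.
        ScannerModel model = ScannerModel.fromScan(new Scan());
        StringWriter writer = new StringWriter();
        Marshaller m =
            JAXBContext.newInstance(ScannerModel.class).createMarshaller();
        m.marshal(model, writer);
        Response response = client.put("/myTable/scanner",
            Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString()));
        String scannerURI = response.getLocation();

        // 2. Fetch the next batch of cells as XML; the server returns
        //    204 No Content once the scanner is exhausted.
        response = client.get(scannerURI, Constants.MIMETYPE_XML);

        // 3. Release the server-side scanner.
        client.delete(scannerURI);
      }
    }
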
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java
deleted file mode 100644
index e6845f7..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.rest;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.StringWriter;
-import java.security.PrivilegedExceptionAction;
-import java.util.Iterator;
-import java.util.Random;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Marshaller;
-import javax.xml.bind.Unmarshaller;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.MediumTests;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse;
-import org.apache.hadoop.hbase.rest.client.Client;
-import org.apache.hadoop.hbase.rest.client.Cluster;
-import org.apache.hadoop.hbase.rest.client.Response;
-import org.apache.hadoop.hbase.rest.model.CellModel;
-import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
-import org.apache.hadoop.hbase.rest.model.ScannerModel;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.security.visibility.CellVisibility;
-import org.apache.hadoop.hbase.security.visibility.ScanLabelGenerator;
-import org.apache.hadoop.hbase.security.visibility.SimpleScanLabelGenerator;
-import org.apache.hadoop.hbase.security.visibility.VisibilityClient;
-import org.apache.hadoop.hbase.security.visibility.VisibilityConstants;
-import org.apache.hadoop.hbase.security.visibility.VisibilityController;
-import org.apache.hadoop.hbase.security.visibility.VisibilityUtils;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(MediumTests.class)
-public class TestScannersWithLabels {
- private static final String TABLE = "TestScannersWithLabels";
- private static final String CFA = "a";
- private static final String CFB = "b";
- private static final String COLUMN_1 = CFA + ":1";
- private static final String COLUMN_2 = CFB + ":2";
- private final static String TOPSECRET = "topsecret";
- private final static String PUBLIC = "public";
- private final static String PRIVATE = "private";
- private final static String CONFIDENTIAL = "confidential";
- private final static String SECRET = "secret";
- private static User SUPERUSER;
-
- private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
- private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility();
- private static Client client;
- private static JAXBContext context;
- private static Marshaller marshaller;
- private static Unmarshaller unmarshaller;
- private static Configuration conf;
-
- // Note: 'prob' is unused; all nine rows are always inserted.
- private static int insertData(String tableName, String column, double prob) throws IOException {
- int count = 0;
- HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
- byte[] k = new byte[3];
- byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(column));
-
- for (int i = 0; i < 9; i++) {
- Put put = new Put(Bytes.toBytes("row" + i));
- put.setDurability(Durability.SKIP_WAL);
- put.add(famAndQf[0], famAndQf[1], k);
- put.setCellVisibility(new CellVisibility("(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!"
- + TOPSECRET));
- table.put(put);
- count++;
- }
- table.flushCommits();
- table.close();
- return count;
- }
-
- private static int countCellSet(CellSetModel model) {
- int count = 0;
- Iterator<RowModel> rows = model.getRows().iterator();
- while (rows.hasNext()) {
- RowModel row = rows.next();
- Iterator<CellModel> cells = row.getCells().iterator();
- while (cells.hasNext()) {
- cells.next();
- count++;
- }
- }
- return count;
- }
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- conf = TEST_UTIL.getConfiguration();
- SUPERUSER = User.createUserForTesting(conf, "admin",
- new String[] { "supergroup" });
- conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS,
- SimpleScanLabelGenerator.class, ScanLabelGenerator.class);
- conf.setInt("hfile.format.version", 3);
- conf.set("hbase.superuser", SUPERUSER.getShortName());
- conf.set("hbase.coprocessor.master.classes", VisibilityController.class.getName());
- conf.set("hbase.coprocessor.region.classes", VisibilityController.class.getName());
- TEST_UTIL.startMiniCluster(1);
- // Wait for the labels table to become available
- TEST_UTIL.waitTableEnabled(VisibilityConstants.LABELS_TABLE_NAME.getName(), 50000);
- createLabels();
- setAuths();
- REST_TEST_UTIL.startServletContainer(conf);
- client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
- context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class,
- ScannerModel.class);
- marshaller = context.createMarshaller();
- unmarshaller = context.createUnmarshaller();
- HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
- if (admin.tableExists(TABLE)) {
- return;
- }
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
- htd.addFamily(new HColumnDescriptor(CFA));
- htd.addFamily(new HColumnDescriptor(CFB));
- admin.createTable(htd);
- insertData(TABLE, COLUMN_1, 1.0);
- insertData(TABLE, COLUMN_2, 0.5);
- }
-
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- REST_TEST_UTIL.shutdownServletContainer();
- TEST_UTIL.shutdownMiniCluster();
- }
-
- private static void createLabels() throws IOException, InterruptedException {
- PrivilegedExceptionAction<VisibilityLabelsResponse> action = new PrivilegedExceptionAction<VisibilityLabelsResponse>() {
- public VisibilityLabelsResponse run() throws Exception {
- String[] labels = { SECRET, CONFIDENTIAL, PRIVATE, PUBLIC, TOPSECRET };
- try {
- VisibilityClient.addLabels(conf, labels);
- } catch (Throwable t) {
- throw new IOException(t);
- }
- return null;
- }
- };
- SUPERUSER.runAs(action);
- }
- private static void setAuths() throws Exception {
- String[] labels = { SECRET, CONFIDENTIAL, PRIVATE, PUBLIC, TOPSECRET };
- try {
- VisibilityClient.setAuths(conf, labels, User.getCurrent().getShortName());
- } catch (Throwable t) {
- throw new IOException(t);
- }
- }
- @Test
- public void testSimpleScannerXMLWithLabelsThatReceivesNoData() throws IOException, JAXBException {
- final int BATCH_SIZE = 5;
- // new scanner
- ScannerModel model = new ScannerModel();
- model.setBatch(BATCH_SIZE);
- model.addColumn(Bytes.toBytes(COLUMN_1));
- model.addLabel(PUBLIC);
- StringWriter writer = new StringWriter();
- marshaller.marshal(model, writer);
- byte[] body = Bytes.toBytes(writer.toString());
- // ensure the gateway is not in read-only mode so the scanner PUT succeeds
- conf.set("hbase.rest.readonly", "false");
- Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body);
- assertEquals(response.getCode(), 201);
- String scannerURI = response.getLocation();
- assertNotNull(scannerURI);
-
- // get a cell set
- response = client.get(scannerURI, Constants.MIMETYPE_XML);
- // Respond with 204 as there are no cells to be retrieved
- assertEquals(response.getCode(), 204);
- assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
- }
-
- @Test
- public void testSimpleScannerXMLWithLabelsThatReceivesData() throws IOException, JAXBException {
- // new scanner
- ScannerModel model = new ScannerModel();
- model.setBatch(5);
- model.addColumn(Bytes.toBytes(COLUMN_1));
- model.addLabel(SECRET);
- StringWriter writer = new StringWriter();
- marshaller.marshal(model, writer);
- byte[] body = Bytes.toBytes(writer.toString());
-
- // ensure the gateway is not in read-only mode so the scanner PUT succeeds
- conf.set("hbase.rest.readonly", "false");
- Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body);
- assertEquals(response.getCode(), 201);
- String scannerURI = response.getLocation();
- assertNotNull(scannerURI);
-
- // get a cell set
- response = client.get(scannerURI, Constants.MIMETYPE_XML);
- // Respond with 200 as cells carrying the SECRET label are visible
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
- CellSetModel cellSet = (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response
- .getBody()));
- assertEquals(countCellSet(cellSet), 5);
- }
-
-}
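For orientation, the scanner lifecycle exercised by the label tests above is: PUT an XML ScannerModel to /<table>/scanner (201 Created, scanner URI in the Location header), GET that URI per batch (200 with cells, 204 when the requested labels make nothing visible), DELETE to release it. A minimal sketch using only the client and model classes shown in the diff; the gateway port, table name, and column are hypothetical placeholders:

import java.io.StringWriter;

import javax.xml.bind.JAXBContext;

import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;
import org.apache.hadoop.hbase.rest.model.ScannerModel;
import org.apache.hadoop.hbase.util.Bytes;

public class LabeledScannerSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical REST gateway; the tests above start one on a random port.
    Client client = new Client(new Cluster().add("localhost", 8080));

    // Describe a scanner that only returns cells visible under "public".
    ScannerModel model = new ScannerModel();
    model.setBatch(5);
    model.addColumn(Bytes.toBytes("a:1"));   // hypothetical family:qualifier
    model.addLabel("public");

    // Serialize the scanner definition to XML, as the tests do.
    StringWriter writer = new StringWriter();
    JAXBContext.newInstance(ScannerModel.class).createMarshaller()
        .marshal(model, writer);

    // PUT creates the scanner; 201 Created returns its URI in Location.
    Response response = client.put("/t1/scanner", Constants.MIMETYPE_XML,
        Bytes.toBytes(writer.toString()));
    String scannerURI = response.getLocation();

    // GET fetches a batch: 200 with cells, or 204 when the label grants
    // visibility to nothing (the "ReceivesNoData" case above).
    response = client.get(scannerURI, Constants.MIMETYPE_XML);
    System.out.println("batch status: " + response.getCode());

    // DELETE releases the server-side scanner.
    client.delete(scannerURI);
  }
}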
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java
deleted file mode 100644
index ac3dfdb..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.StringWriter;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.MediumTests;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.rest.client.Client;
-import org.apache.hadoop.hbase.rest.client.Cluster;
-import org.apache.hadoop.hbase.rest.client.Response;
-import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel;
-import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
-import org.apache.hadoop.hbase.rest.model.TestTableSchemaModel;
-import org.apache.hadoop.hbase.util.Bytes;
-
-import static org.junit.Assert.*;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(MediumTests.class)
-public class TestSchemaResource {
- private static String TABLE1 = "TestSchemaResource1";
- private static String TABLE2 = "TestSchemaResource2";
-
- private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
- private static final HBaseRESTTestingUtility REST_TEST_UTIL =
- new HBaseRESTTestingUtility();
- private static Client client;
- private static JAXBContext context;
- private static Configuration conf;
- private static TestTableSchemaModel testTableSchemaModel;
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- conf = TEST_UTIL.getConfiguration();
- TEST_UTIL.startMiniCluster();
- REST_TEST_UTIL.startServletContainer(conf);
- client = new Client(new Cluster().add("localhost",
- REST_TEST_UTIL.getServletPort()));
- testTableSchemaModel = new TestTableSchemaModel();
- context = JAXBContext.newInstance(
- ColumnSchemaModel.class,
- TableSchemaModel.class);
- }
-
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- REST_TEST_UTIL.shutdownServletContainer();
- TEST_UTIL.shutdownMiniCluster();
- }
-
- private static byte[] toXML(TableSchemaModel model) throws JAXBException {
- StringWriter writer = new StringWriter();
- context.createMarshaller().marshal(model, writer);
- return Bytes.toBytes(writer.toString());
- }
-
- private static TableSchemaModel fromXML(byte[] content)
- throws JAXBException {
- return (TableSchemaModel) context.createUnmarshaller()
- .unmarshal(new ByteArrayInputStream(content));
- }
-
- @Test
- public void testTableCreateAndDeleteXML() throws IOException, JAXBException {
- String schemaPath = "/" + TABLE1 + "/schema";
- TableSchemaModel model;
- Response response;
-
- HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
- assertFalse(admin.tableExists(TABLE1));
-
- // create the table
- model = testTableSchemaModel.buildTestModel(TABLE1);
- testTableSchemaModel.checkModel(model, TABLE1);
- response = client.put(schemaPath, Constants.MIMETYPE_XML, toXML(model));
- assertEquals(response.getCode(), 201);
-
- // recall the same put operation but in read-only mode
- conf.set("hbase.rest.readonly", "true");
- response = client.put(schemaPath, Constants.MIMETYPE_XML, toXML(model));
- assertEquals(response.getCode(), 403);
-
- // retrieve the schema and validate it
- response = client.get(schemaPath, Constants.MIMETYPE_XML);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
- model = fromXML(response.getBody());
- testTableSchemaModel.checkModel(model, TABLE1);
-
- // with json retrieve the schema and validate it
- response = client.get(schemaPath, Constants.MIMETYPE_JSON);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
- model = testTableSchemaModel.fromJSON(Bytes.toString(response.getBody()));
- testTableSchemaModel.checkModel(model, TABLE1);
-
- // test delete schema operation is forbidden in read-only mode
- response = client.delete(schemaPath);
- assertEquals(response.getCode(), 403);
-
- // return read-only setting back to default
- conf.set("hbase.rest.readonly", "false");
-
- // delete the table and make sure HBase concurs
- response = client.delete(schemaPath);
- assertEquals(response.getCode(), 200);
- assertFalse(admin.tableExists(TABLE1));
- }
-
- @Test
- public void testTableCreateAndDeletePB() throws IOException, JAXBException {
- String schemaPath = "/" + TABLE2 + "/schema";
- TableSchemaModel model;
- Response response;
-
- HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
- assertFalse(admin.tableExists(TABLE2));
-
- // create the table
- model = testTableSchemaModel.buildTestModel(TABLE2);
- testTableSchemaModel.checkModel(model, TABLE2);
- response = client.put(schemaPath, Constants.MIMETYPE_PROTOBUF,
- model.createProtobufOutput());
- assertEquals(response.getCode(), 201);
-
- // recall the same put operation but in read-only mode
- conf.set("hbase.rest.readonly", "true");
- response = client.put(schemaPath, Constants.MIMETYPE_PROTOBUF,
- model.createProtobufOutput());
- assertEquals(response.getCode(), 403);
-
- // retrieve the schema and validate it
- response = client.get(schemaPath, Constants.MIMETYPE_PROTOBUF);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
- model = new TableSchemaModel();
- model.getObjectFromMessage(response.getBody());
- testTableSchemaModel.checkModel(model, TABLE2);
-
- // retrieve the schema and validate it with alternate pbuf type
- response = client.get(schemaPath, Constants.MIMETYPE_PROTOBUF_IETF);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type"));
- model = new TableSchemaModel();
- model.getObjectFromMessage(response.getBody());
- testTableSchemaModel.checkModel(model, TABLE2);
-
- // test delete schema operation is forbidden in read-only mode
- response = client.delete(schemaPath);
- assertEquals(response.getCode(), 403);
-
- // return read-only setting back to default
- conf.set("hbase.rest.readonly", "false");
-
- // delete the table and make sure HBase concurs
- response = client.delete(schemaPath);
- assertEquals(response.getCode(), 200);
- assertFalse(admin.tableExists(TABLE2));
- }
-
-}
-
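The schema resource follows the same REST conventions: PUT an XML TableSchemaModel to /<table>/schema to create (or alter) a table, expect 403 whenever hbase.rest.readonly is true, and DELETE to drop the table. A minimal sketch, building the model by hand instead of via the TestTableSchemaModel helper and assuming the models' setName/addColumnFamily setters; gateway address, table, and family names are hypothetical:

import java.io.StringWriter;

import javax.xml.bind.JAXBContext;

import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;
import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel;
import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
import org.apache.hadoop.hbase.util.Bytes;

public class SchemaRoundTripSketch {
  public static void main(String[] args) throws Exception {
    Client client = new Client(new Cluster().add("localhost", 8080)); // hypothetical gateway

    // Describe a table "t1" with a single column family "cf" (both hypothetical).
    TableSchemaModel schema = new TableSchemaModel();
    schema.setName("t1");
    ColumnSchemaModel family = new ColumnSchemaModel();
    family.setName("cf");
    schema.addColumnFamily(family);

    // PUT the XML to /<table>/schema: 201 creates the table,
    // 403 if the gateway is running with hbase.rest.readonly=true.
    StringWriter writer = new StringWriter();
    JAXBContext.newInstance(TableSchemaModel.class, ColumnSchemaModel.class)
        .createMarshaller().marshal(schema, writer);
    Response response = client.put("/t1/schema", Constants.MIMETYPE_XML,
        Bytes.toBytes(writer.toString()));
    System.out.println("create: " + response.getCode());

    // GET echoes the schema back; DELETE drops the table (200 on success).
    System.out.println("fetch: " + client.get("/t1/schema", Constants.MIMETYPE_XML).getCode());
    System.out.println("drop:  " + client.delete("/t1/schema").getCode());
  }
}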
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java
deleted file mode 100644
index c9aa191..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.MediumTests;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.rest.client.Client;
-import org.apache.hadoop.hbase.rest.client.Cluster;
-import org.apache.hadoop.hbase.rest.client.Response;
-import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;
-import org.apache.hadoop.hbase.util.Bytes;
-
-import static org.junit.Assert.*;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(MediumTests.class)
-public class TestStatusResource {
- private static final byte[] META_REGION_NAME = Bytes.toBytes(TableName.META_TABLE_NAME+",,1");
-
- private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
- private static final HBaseRESTTestingUtility REST_TEST_UTIL =
- new HBaseRESTTestingUtility();
- private static Client client;
- private static JAXBContext context;
-
- private static void validate(StorageClusterStatusModel model) {
- assertNotNull(model);
- assertTrue(model.getRegions() + " >= 1", model.getRegions() >= 1);
- assertTrue(model.getRequests() >= 0);
- assertTrue(model.getAverageLoad() >= 0.0);
- assertNotNull(model.getLiveNodes());
- assertNotNull(model.getDeadNodes());
- assertFalse(model.getLiveNodes().isEmpty());
- boolean foundMeta = false;
- for (StorageClusterStatusModel.Node node: model.getLiveNodes()) {
- assertNotNull(node.getName());
- assertTrue(node.getStartCode() > 0L);
- assertTrue(node.getRequests() >= 0);
- for (StorageClusterStatusModel.Node.Region region: node.getRegions()) {
- if (Bytes.equals(region.getName(), META_REGION_NAME)) {
- foundMeta = true;
- }
- }
- }
- assertTrue(foundMeta);
- }
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- TEST_UTIL.startMiniCluster();
- REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
- client = new Client(new Cluster().add("localhost",
- REST_TEST_UTIL.getServletPort()));
- context = JAXBContext.newInstance(StorageClusterStatusModel.class);
- }
-
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- REST_TEST_UTIL.shutdownServletContainer();
- TEST_UTIL.shutdownMiniCluster();
- }
-
- @Test
- public void testGetClusterStatusXML() throws IOException, JAXBException {
- Response response = client.get("/status/cluster", Constants.MIMETYPE_XML);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
- StorageClusterStatusModel model = (StorageClusterStatusModel)
- context.createUnmarshaller().unmarshal(
- new ByteArrayInputStream(response.getBody()));
- validate(model);
- }
-
- @Test
- public void testGetClusterStatusPB() throws IOException {
- Response response = client.get("/status/cluster", Constants.MIMETYPE_PROTOBUF);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
- StorageClusterStatusModel model = new StorageClusterStatusModel();
- model.getObjectFromMessage(response.getBody());
- validate(model);
- response = client.get("/status/cluster", Constants.MIMETYPE_PROTOBUF_IETF);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type"));
- model = new StorageClusterStatusModel();
- model.getObjectFromMessage(response.getBody());
- validate(model);
- }
-}
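The status resource is read-only: a single GET of /status/cluster yields a StorageClusterStatusModel carrying the aggregates and per-node details that validate() above checks. A minimal sketch against a hypothetical gateway:

import java.io.ByteArrayInputStream;

import javax.xml.bind.JAXBContext;

import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;
import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;

public class ClusterStatusSketch {
  public static void main(String[] args) throws Exception {
    Client client = new Client(new Cluster().add("localhost", 8080)); // hypothetical gateway

    // A single GET returns the whole cluster status document.
    Response response = client.get("/status/cluster", Constants.MIMETYPE_XML);
    StorageClusterStatusModel status = (StorageClusterStatusModel)
        JAXBContext.newInstance(StorageClusterStatusModel.class)
            .createUnmarshaller()
            .unmarshal(new ByteArrayInputStream(response.getBody()));

    // The same fields validate() checks above: cluster totals ...
    System.out.println("regions=" + status.getRegions()
        + " requests=" + status.getRequests()
        + " avgLoad=" + status.getAverageLoad());

    // ... and per-region-server detail, down to individual regions.
    for (StorageClusterStatusModel.Node node : status.getLiveNodes()) {
      System.out.println(node.getName() + " startCode=" + node.getStartCode()
          + " regions=" + node.getRegions().size());
    }
  }
}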
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java
deleted file mode 100644
index aac2ab7..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.Iterator;
-import java.util.Map;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.rest.client.Client;
-import org.apache.hadoop.hbase.rest.client.Cluster;
-import org.apache.hadoop.hbase.rest.client.Response;
-import org.apache.hadoop.hbase.rest.model.TableModel;
-import org.apache.hadoop.hbase.rest.model.TableInfoModel;
-import org.apache.hadoop.hbase.rest.model.TableListModel;
-import org.apache.hadoop.hbase.rest.model.TableRegionModel;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.util.StringUtils;
-
-import static org.junit.Assert.*;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(MediumTests.class)
-public class TestTableResource {
- private static final Log LOG = LogFactory.getLog(TestTableResource.class);
-
- private static String TABLE = "TestTableResource";
- private static String COLUMN_FAMILY = "test";
- private static String COLUMN = COLUMN_FAMILY + ":qualifier";
- private static Map<HRegionInfo, ServerName> regionMap;
-
- private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
- private static final HBaseRESTTestingUtility REST_TEST_UTIL =
- new HBaseRESTTestingUtility();
- private static Client client;
- private static JAXBContext context;
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- TEST_UTIL.startMiniCluster(3);
- REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
- client = new Client(new Cluster().add("localhost",
- REST_TEST_UTIL.getServletPort()));
- context = JAXBContext.newInstance(
- TableModel.class,
- TableInfoModel.class,
- TableListModel.class,
- TableRegionModel.class);
- HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
- if (admin.tableExists(TABLE)) {
- return;
- }
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
- htd.addFamily(new HColumnDescriptor(COLUMN_FAMILY));
- admin.createTable(htd);
- HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
- byte[] k = new byte[3];
- byte [][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(COLUMN));
- for (byte b1 = 'a'; b1 < 'z'; b1++) {
- for (byte b2 = 'a'; b2 < 'z'; b2++) {
- for (byte b3 = 'a'; b3 < 'z'; b3++) {
- k[0] = b1;
- k[1] = b2;
- k[2] = b3;
- Put put = new Put(k);
- put.setDurability(Durability.SKIP_WAL);
- put.add(famAndQf[0], famAndQf[1], k);
- table.put(put);
- }
- }
- }
- table.flushCommits();
- // get the initial layout (should just be one region)
- Map<HRegionInfo, ServerName> m = table.getRegionLocations();
- assertEquals(m.size(), 1);
- // tell the master to split the table
- admin.split(TABLE);
- // give some time for the split to happen
-
- long timeout = System.currentTimeMillis() + (15 * 1000);
- while (System.currentTimeMillis() < timeout && m.size() != 2) {
- try {
- Thread.sleep(250);
- } catch (InterruptedException e) {
- LOG.warn(StringUtils.stringifyException(e));
- }
- // check again
- m = table.getRegionLocations();
- }
-
- // should have two regions now
- assertEquals(m.size(), 2);
- regionMap = m;
- LOG.info("regions: " + regionMap);
- table.close();
- }
-
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- REST_TEST_UTIL.shutdownServletContainer();
- TEST_UTIL.shutdownMiniCluster();
- }
-
- private static void checkTableList(TableListModel model) {
- boolean found = false;
- Iterator<TableModel> tables = model.getTables().iterator();
- assertTrue(tables.hasNext());
- while (tables.hasNext()) {
- TableModel table = tables.next();
- if (table.getName().equals(TABLE)) {
- found = true;
- break;
- }
- }
- assertTrue(found);
- }
-
- void checkTableInfo(TableInfoModel model) {
- assertEquals(model.getName(), TABLE);
- Iterator<TableRegionModel> regions = model.getRegions().iterator();
- assertTrue(regions.hasNext());
- while (regions.hasNext()) {
- TableRegionModel region = regions.next();
- boolean found = false;
- for (Map.Entry<HRegionInfo, ServerName> e: regionMap.entrySet()) {
- HRegionInfo hri = e.getKey();
- String hriRegionName = hri.getRegionNameAsString();
- String regionName = region.getName();
- if (hriRegionName.equals(regionName)) {
- found = true;
- byte[] startKey = hri.getStartKey();
- byte[] endKey = hri.getEndKey();
- InetSocketAddress sa = new InetSocketAddress(e.getValue().getHostname(), e.getValue().getPort());
- String location = sa.getHostName() + ":" +
- Integer.valueOf(sa.getPort());
- assertEquals(hri.getRegionId(), region.getId());
- assertTrue(Bytes.equals(startKey, region.getStartKey()));
- assertTrue(Bytes.equals(endKey, region.getEndKey()));
- assertEquals(location, region.getLocation());
- break;
- }
- }
- assertTrue(found);
- }
- }
-
- @Test
- public void testTableListText() throws IOException {
- Response response = client.get("/", Constants.MIMETYPE_TEXT);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_TEXT, response.getHeader("content-type"));
- }
-
- @Test
- public void testTableListXML() throws IOException, JAXBException {
- Response response = client.get("/", Constants.MIMETYPE_XML);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
- TableListModel model = (TableListModel)
- context.createUnmarshaller()
- .unmarshal(new ByteArrayInputStream(response.getBody()));
- checkTableList(model);
- }
-
- @Test
- public void testTableListJSON() throws IOException {
- Response response = client.get("/", Constants.MIMETYPE_JSON);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
- }
-
- @Test
- public void testTableListPB() throws IOException, JAXBException {
- Response response = client.get("/", Constants.MIMETYPE_PROTOBUF);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
- TableListModel model = new TableListModel();
- model.getObjectFromMessage(response.getBody());
- checkTableList(model);
- response = client.get("/", Constants.MIMETYPE_PROTOBUF_IETF);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type"));
- model = new TableListModel();
- model.getObjectFromMessage(response.getBody());
- checkTableList(model);
- }
-
- @Test
- public void testTableInfoText() throws IOException {
- Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_TEXT);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_TEXT, response.getHeader("content-type"));
- }
-
- @Test
- public void testTableInfoXML() throws IOException, JAXBException {
- Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_XML);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
- TableInfoModel model = (TableInfoModel)
- context.createUnmarshaller()
- .unmarshal(new ByteArrayInputStream(response.getBody()));
- checkTableInfo(model);
- }
-
- @Test
- public void testTableInfoJSON() throws IOException {
- Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_JSON);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
- }
-
- @Test
- public void testTableInfoPB() throws IOException, JAXBException {
- Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_PROTOBUF);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
- TableInfoModel model = new TableInfoModel();
- model.getObjectFromMessage(response.getBody());
- checkTableInfo(model);
- response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_PROTOBUF_IETF);
- assertEquals(response.getCode(), 200);
- assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type"));
- model = new TableInfoModel();
- model.getObjectFromMessage(response.getBody());
- checkTableInfo(model);
- }
-
-}
-
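The table resource exposes two read-only views used above: GET / for the table list and GET /<table>/regions for the region layout. A minimal sketch against a hypothetical gateway and table:

import java.io.ByteArrayInputStream;

import javax.xml.bind.JAXBContext;

import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;
import org.apache.hadoop.hbase.rest.model.TableInfoModel;
import org.apache.hadoop.hbase.rest.model.TableListModel;
import org.apache.hadoop.hbase.rest.model.TableModel;
import org.apache.hadoop.hbase.rest.model.TableRegionModel;

public class TableResourceSketch {
  public static void main(String[] args) throws Exception {
    Client client = new Client(new Cluster().add("localhost", 8080)); // hypothetical gateway
    JAXBContext context = JAXBContext.newInstance(TableModel.class,
        TableInfoModel.class, TableListModel.class, TableRegionModel.class);

    // GET / lists the user tables.
    Response response = client.get("/", Constants.MIMETYPE_XML);
    TableListModel tables = (TableListModel) context.createUnmarshaller()
        .unmarshal(new ByteArrayInputStream(response.getBody()));
    for (TableModel table : tables.getTables()) {
      System.out.println("table: " + table.getName());
    }

    // GET /<table>/regions describes the region layout ("t1" is hypothetical):
    // each region's id, start/end keys, and host:port location.
    response = client.get("/t1/regions", Constants.MIMETYPE_XML);
    TableInfoModel info = (TableInfoModel) context.createUnmarshaller()
        .unmarshal(new ByteArrayInputStream(response.getBody()));
    for (TableRegionModel region : info.getRegions()) {
      System.out.println(region.getName() + " @ " + region.getLocation());
    }
  }
}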
[22/38] HBASE-12197 Move rest to it's on module
Posted by ec...@apache.org.
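The new TestScannersWithFilters below drives each HBase filter through the REST gateway by converting an ordinary client-side Scan into a ScannerModel, so the filter is evaluated server-side during the scan. A minimal sketch of that conversion (the row prefix is a hypothetical example):

import java.io.StringWriter;

import javax.xml.bind.JAXBContext;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.rest.model.ScannerModel;
import org.apache.hadoop.hbase.util.Bytes;

public class FilteredScannerSketch {
  public static void main(String[] args) throws Exception {
    // Build an ordinary client-side Scan with a filter attached...
    Scan scan = new Scan();
    scan.setFilter(new PrefixFilter(Bytes.toBytes("testRowOne"))); // hypothetical prefix

    // ...and convert it to the REST representation. The filter travels
    // inside the scanner document, so the server evaluates it during the scan.
    ScannerModel model = ScannerModel.fromScan(scan);
    model.setBatch(Integer.MAX_VALUE); // fetch everything in one batch, as the helpers do

    // The resulting XML is what the tests PUT to /<table>/scanner.
    StringWriter writer = new StringWriter();
    JAXBContext.newInstance(ScannerModel.class).createMarshaller()
        .marshal(model, writer);
    System.out.println(writer);
  }
}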
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java
new file mode 100644
index 0000000..0fc97e8
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java
@@ -0,0 +1,999 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.ByteArrayInputStream;
+import java.io.StringWriter;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.Marshaller;
+import javax.xml.bind.Unmarshaller;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
+import org.apache.hadoop.hbase.filter.InclusiveStopFilter;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.filter.PrefixFilter;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.hbase.filter.RegexStringComparator;
+import org.apache.hadoop.hbase.filter.RowFilter;
+import org.apache.hadoop.hbase.filter.SkipFilter;
+import org.apache.hadoop.hbase.filter.SubstringComparator;
+import org.apache.hadoop.hbase.filter.ValueFilter;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.rest.client.Client;
+import org.apache.hadoop.hbase.rest.client.Cluster;
+import org.apache.hadoop.hbase.rest.client.Response;
+import org.apache.hadoop.hbase.rest.model.CellModel;
+import org.apache.hadoop.hbase.rest.model.CellSetModel;
+import org.apache.hadoop.hbase.rest.model.RowModel;
+import org.apache.hadoop.hbase.rest.model.ScannerModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import static org.junit.Assert.*;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestScannersWithFilters {
+
+ private static final Log LOG = LogFactory.getLog(TestScannersWithFilters.class);
+
+ private static final String TABLE = "TestScannersWithFilters";
+
+ private static final byte [][] ROWS_ONE = {
+ Bytes.toBytes("testRowOne-0"), Bytes.toBytes("testRowOne-1"),
+ Bytes.toBytes("testRowOne-2"), Bytes.toBytes("testRowOne-3")
+ };
+
+ private static final byte [][] ROWS_TWO = {
+ Bytes.toBytes("testRowTwo-0"), Bytes.toBytes("testRowTwo-1"),
+ Bytes.toBytes("testRowTwo-2"), Bytes.toBytes("testRowTwo-3")
+ };
+
+ private static final byte [][] FAMILIES = {
+ Bytes.toBytes("testFamilyOne"), Bytes.toBytes("testFamilyTwo")
+ };
+
+ private static final byte [][] QUALIFIERS_ONE = {
+ Bytes.toBytes("testQualifierOne-0"), Bytes.toBytes("testQualifierOne-1"),
+ Bytes.toBytes("testQualifierOne-2"), Bytes.toBytes("testQualifierOne-3")
+ };
+
+ private static final byte [][] QUALIFIERS_TWO = {
+ Bytes.toBytes("testQualifierTwo-0"), Bytes.toBytes("testQualifierTwo-1"),
+ Bytes.toBytes("testQualifierTwo-2"), Bytes.toBytes("testQualifierTwo-3")
+ };
+
+ private static final byte [][] VALUES = {
+ Bytes.toBytes("testValueOne"), Bytes.toBytes("testValueTwo")
+ };
+
+ private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private static final HBaseRESTTestingUtility REST_TEST_UTIL =
+ new HBaseRESTTestingUtility();
+ private static Client client;
+ private static JAXBContext context;
+ private static Marshaller marshaller;
+ private static Unmarshaller unmarshaller;
+ private static long numRows = ROWS_ONE.length + ROWS_TWO.length;
+ private static long colsPerRow = FAMILIES.length * QUALIFIERS_ONE.length;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ TEST_UTIL.startMiniCluster(3);
+ REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
+ context = JAXBContext.newInstance(
+ CellModel.class,
+ CellSetModel.class,
+ RowModel.class,
+ ScannerModel.class);
+ marshaller = context.createMarshaller();
+ unmarshaller = context.createUnmarshaller();
+ client = new Client(new Cluster().add("localhost",
+ REST_TEST_UTIL.getServletPort()));
+ HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+ if (!admin.tableExists(TABLE)) {
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
+ htd.addFamily(new HColumnDescriptor(FAMILIES[0]));
+ htd.addFamily(new HColumnDescriptor(FAMILIES[1]));
+ admin.createTable(htd);
+ HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
+ // Insert first half
+ for(byte [] ROW : ROWS_ONE) {
+ Put p = new Put(ROW);
+ p.setDurability(Durability.SKIP_WAL);
+ for(byte [] QUALIFIER : QUALIFIERS_ONE) {
+ p.add(FAMILIES[0], QUALIFIER, VALUES[0]);
+ }
+ table.put(p);
+ }
+ for(byte [] ROW : ROWS_TWO) {
+ Put p = new Put(ROW);
+ p.setDurability(Durability.SKIP_WAL);
+ for(byte [] QUALIFIER : QUALIFIERS_TWO) {
+ p.add(FAMILIES[1], QUALIFIER, VALUES[1]);
+ }
+ table.put(p);
+ }
+
+ // Insert second half (reverse families)
+ for(byte [] ROW : ROWS_ONE) {
+ Put p = new Put(ROW);
+ p.setDurability(Durability.SKIP_WAL);
+ for(byte [] QUALIFIER : QUALIFIERS_ONE) {
+ p.add(FAMILIES[1], QUALIFIER, VALUES[0]);
+ }
+ table.put(p);
+ }
+ for(byte [] ROW : ROWS_TWO) {
+ Put p = new Put(ROW);
+ p.setDurability(Durability.SKIP_WAL);
+ for(byte [] QUALIFIER : QUALIFIERS_TWO) {
+ p.add(FAMILIES[0], QUALIFIER, VALUES[1]);
+ }
+ table.put(p);
+ }
+
+ // Delete the second qualifier from all rows and families
+ for(byte [] ROW : ROWS_ONE) {
+ Delete d = new Delete(ROW);
+ d.deleteColumns(FAMILIES[0], QUALIFIERS_ONE[1]);
+ d.deleteColumns(FAMILIES[1], QUALIFIERS_ONE[1]);
+ table.delete(d);
+ }
+ for(byte [] ROW : ROWS_TWO) {
+ Delete d = new Delete(ROW);
+ d.deleteColumns(FAMILIES[0], QUALIFIERS_TWO[1]);
+ d.deleteColumns(FAMILIES[1], QUALIFIERS_TWO[1]);
+ table.delete(d);
+ }
+ colsPerRow -= 2;
+
+ // Delete the second rows from both groups, one column at a time
+ for(byte [] QUALIFIER : QUALIFIERS_ONE) {
+ Delete d = new Delete(ROWS_ONE[1]);
+ d.deleteColumns(FAMILIES[0], QUALIFIER);
+ d.deleteColumns(FAMILIES[1], QUALIFIER);
+ table.delete(d);
+ }
+ for(byte [] QUALIFIER : QUALIFIERS_TWO) {
+ Delete d = new Delete(ROWS_TWO[1]);
+ d.deleteColumns(FAMILIES[0], QUALIFIER);
+ d.deleteColumns(FAMILIES[1], QUALIFIER);
+ table.delete(d);
+ }
+ numRows -= 2;
+ table.close();
+ }
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ REST_TEST_UTIL.shutdownServletContainer();
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ private static void verifyScan(Scan s, long expectedRows, long expectedKeys)
+ throws Exception {
+ ScannerModel model = ScannerModel.fromScan(s);
+ model.setBatch(Integer.MAX_VALUE); // fetch it all at once
+ StringWriter writer = new StringWriter();
+ marshaller.marshal(model, writer);
+ LOG.debug(writer.toString());
+ byte[] body = Bytes.toBytes(writer.toString());
+ Response response = client.put("/" + TABLE + "/scanner",
+ Constants.MIMETYPE_XML, body);
+ assertEquals(response.getCode(), 201);
+ String scannerURI = response.getLocation();
+ assertNotNull(scannerURI);
+
+ // get a cell set
+ response = client.get(scannerURI, Constants.MIMETYPE_XML);
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
+ CellSetModel cells = (CellSetModel)
+ unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
+
+ int rows = cells.getRows().size();
+ assertTrue("Scanned too many rows! Only expected " + expectedRows +
+ " total but scanned " + rows, expectedRows == rows);
+ for (RowModel row: cells.getRows()) {
+ int count = row.getCells().size();
+ assertEquals("Expected " + expectedKeys + " keys per row but " +
+ "returned " + count, expectedKeys, count);
+ }
+
+ // delete the scanner
+ response = client.delete(scannerURI);
+ assertEquals(response.getCode(), 200);
+ }
+
+ private static void verifyScanFull(Scan s, KeyValue [] kvs)
+ throws Exception {
+ ScannerModel model = ScannerModel.fromScan(s);
+ model.setBatch(Integer.MAX_VALUE); // fetch it all at once
+ StringWriter writer = new StringWriter();
+ marshaller.marshal(model, writer);
+ LOG.debug(writer.toString());
+ byte[] body = Bytes.toBytes(writer.toString());
+ Response response = client.put("/" + TABLE + "/scanner",
+ Constants.MIMETYPE_XML, body);
+ assertEquals(response.getCode(), 201);
+ String scannerURI = response.getLocation();
+ assertNotNull(scannerURI);
+
+ // get a cell set
+ response = client.get(scannerURI, Constants.MIMETYPE_XML);
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
+ CellSetModel cellSet = (CellSetModel)
+ unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
+
+ // delete the scanner
+ response = client.delete(scannerURI);
+ assertEquals(response.getCode(), 200);
+
+ int row = 0;
+ int idx = 0;
+ Iterator<RowModel> i = cellSet.getRows().iterator();
+ for (boolean done = true; done; row++) {
+ done = i.hasNext();
+ if (!done) break;
+ RowModel rowModel = i.next();
+ List<CellModel> cells = rowModel.getCells();
+ if (cells.isEmpty()) break;
+ assertTrue("Scanned too many keys! Only expected " + kvs.length +
+ " total but already scanned " + (cells.size() + idx),
+ kvs.length >= idx + cells.size());
+ for (CellModel cell: cells) {
+ assertTrue("Row mismatch",
+ Bytes.equals(rowModel.getKey(), kvs[idx].getRow()));
+ byte[][] split = KeyValue.parseColumn(cell.getColumn());
+ assertTrue("Family mismatch",
+ Bytes.equals(split[0], kvs[idx].getFamily()));
+ assertTrue("Qualifier mismatch",
+ Bytes.equals(split[1], kvs[idx].getQualifier()));
+ assertTrue("Value mismatch",
+ Bytes.equals(cell.getValue(), kvs[idx].getValue()));
+ idx++;
+ }
+ }
+ assertEquals("Expected " + kvs.length + " total keys but scanned " + idx,
+ kvs.length, idx);
+ }
+
+ private static void verifyScanNoEarlyOut(Scan s, long expectedRows,
+ long expectedKeys) throws Exception {
+ ScannerModel model = ScannerModel.fromScan(s);
+ model.setBatch(Integer.MAX_VALUE); // fetch it all at once
+ StringWriter writer = new StringWriter();
+ marshaller.marshal(model, writer);
+ LOG.debug(writer.toString());
+ byte[] body = Bytes.toBytes(writer.toString());
+ Response response = client.put("/" + TABLE + "/scanner",
+ Constants.MIMETYPE_XML, body);
+ assertEquals(response.getCode(), 201);
+ String scannerURI = response.getLocation();
+ assertNotNull(scannerURI);
+
+ // get a cell set
+ response = client.get(scannerURI, Constants.MIMETYPE_XML);
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
+ CellSetModel cellSet = (CellSetModel)
+ unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
+
+ // delete the scanner
+ response = client.delete(scannerURI);
+ assertEquals(response.getCode(), 200);
+
+ Iterator<RowModel> i = cellSet.getRows().iterator();
+ int j = 0;
+ for (boolean done = true; done; j++) {
+ done = i.hasNext();
+ if (!done) break;
+ RowModel rowModel = i.next();
+ List<CellModel> cells = rowModel.getCells();
+ if (cells.isEmpty()) break;
+ assertTrue("Scanned too many rows! Only expected " + expectedRows +
+ " total but already scanned " + (j+1), expectedRows > j);
+ assertEquals("Expected " + expectedKeys + " keys per row but " +
+ "returned " + cells.size(), expectedKeys, cells.size());
+ }
+ assertEquals("Expected " + expectedRows + " rows but scanned " + j +
+ " rows", expectedRows, j);
+ }
+
+ @Test
+ public void testNoFilter() throws Exception {
+ // No filter
+ long expectedRows = numRows;
+ long expectedKeys = colsPerRow;
+
+ // Both families
+ Scan s = new Scan();
+ verifyScan(s, expectedRows, expectedKeys);
+
+ // One family
+ s = new Scan();
+ s.addFamily(FAMILIES[0]);
+ verifyScan(s, expectedRows, expectedKeys/2);
+ }
+
+ @Test
+ public void testPrefixFilter() throws Exception {
+ // Grab rows from group one (half of total)
+ long expectedRows = numRows / 2;
+ long expectedKeys = colsPerRow;
+ Scan s = new Scan();
+ s.setFilter(new PrefixFilter(Bytes.toBytes("testRowOne")));
+ verifyScan(s, expectedRows, expectedKeys);
+ }
+
+ @Test
+ public void testPageFilter() throws Exception {
+ // KVs in first 6 rows
+ KeyValue [] expectedKVs = {
+ // testRowOne-0
+ new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowOne-2
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowOne-3
+ new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowTwo-0
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-2
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-3
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1])
+ };
+
+ // Grab all 6 rows
+ long expectedRows = 6;
+ long expectedKeys = colsPerRow;
+ Scan s = new Scan();
+ s.setFilter(new PageFilter(expectedRows));
+ verifyScan(s, expectedRows, expectedKeys);
+ s.setFilter(new PageFilter(expectedRows));
+ verifyScanFull(s, expectedKVs);
+
+ // Grab first 4 rows (6 cols per row)
+ expectedRows = 4;
+ expectedKeys = colsPerRow;
+ s = new Scan();
+ s.setFilter(new PageFilter(expectedRows));
+ verifyScan(s, expectedRows, expectedKeys);
+ s.setFilter(new PageFilter(expectedRows));
+ verifyScanFull(s, Arrays.copyOf(expectedKVs, 24));
+
+ // Grab first 2 rows
+ expectedRows = 2;
+ expectedKeys = colsPerRow;
+ s = new Scan();
+ s.setFilter(new PageFilter(expectedRows));
+ verifyScan(s, expectedRows, expectedKeys);
+ s.setFilter(new PageFilter(expectedRows));
+ verifyScanFull(s, Arrays.copyOf(expectedKVs, 12));
+
+ // Grab first row
+ expectedRows = 1;
+ expectedKeys = colsPerRow;
+ s = new Scan();
+ s.setFilter(new PageFilter(expectedRows));
+ verifyScan(s, expectedRows, expectedKeys);
+ s.setFilter(new PageFilter(expectedRows));
+ verifyScanFull(s, Arrays.copyOf(expectedKVs, 6));
+ }
+
+ @Test
+ public void testInclusiveStopFilter() throws Exception {
+ // Grab rows from group one
+
+ // If we just use start/stop row, we get total/2 - 1 rows
+ long expectedRows = (numRows / 2) - 1;
+ long expectedKeys = colsPerRow;
+ Scan s = new Scan(Bytes.toBytes("testRowOne-0"),
+ Bytes.toBytes("testRowOne-3"));
+ verifyScan(s, expectedRows, expectedKeys);
+
+ // Now use start row with inclusive stop filter
+ expectedRows = numRows / 2;
+ s = new Scan(Bytes.toBytes("testRowOne-0"));
+ s.setFilter(new InclusiveStopFilter(Bytes.toBytes("testRowOne-3")));
+ verifyScan(s, expectedRows, expectedKeys);
+
+ // Grab rows from group two
+
+ // If we just use start/stop row, we get total/2 - 1 rows
+ expectedRows = (numRows / 2) - 1;
+ expectedKeys = colsPerRow;
+ s = new Scan(Bytes.toBytes("testRowTwo-0"),
+ Bytes.toBytes("testRowTwo-3"));
+ verifyScan(s, expectedRows, expectedKeys);
+
+ // Now use start row with inclusive stop filter
+ expectedRows = numRows / 2;
+ s = new Scan(Bytes.toBytes("testRowTwo-0"));
+ s.setFilter(new InclusiveStopFilter(Bytes.toBytes("testRowTwo-3")));
+ verifyScan(s, expectedRows, expectedKeys);
+ }
+
+ @Test
+ public void testQualifierFilter() throws Exception {
+ // Match two keys (one from each family) in half the rows
+ long expectedRows = numRows / 2;
+ long expectedKeys = 2;
+ Filter f = new QualifierFilter(CompareOp.EQUAL,
+ new BinaryComparator(Bytes.toBytes("testQualifierOne-2")));
+ Scan s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match keys less than same qualifier
+ // Expect only two keys (one from each family) in half the rows
+ expectedRows = numRows / 2;
+ expectedKeys = 2;
+ f = new QualifierFilter(CompareOp.LESS,
+ new BinaryComparator(Bytes.toBytes("testQualifierOne-2")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match keys less than or equal
+ // Expect four keys (two from each family) in half the rows
+ expectedRows = numRows / 2;
+ expectedKeys = 4;
+ f = new QualifierFilter(CompareOp.LESS_OR_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testQualifierOne-2")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match keys not equal
+ // Expect four keys (two from each family)
+ // Only look in first group of rows
+ expectedRows = numRows / 2;
+ expectedKeys = 4;
+ f = new QualifierFilter(CompareOp.NOT_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testQualifierOne-2")));
+ s = new Scan(HConstants.EMPTY_START_ROW, Bytes.toBytes("testRowTwo"));
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match keys greater or equal
+ // Expect four keys (two from each family)
+ // Only look in first group of rows
+ expectedRows = numRows / 2;
+ expectedKeys = 4;
+ f = new QualifierFilter(CompareOp.GREATER_OR_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testQualifierOne-2")));
+ s = new Scan(HConstants.EMPTY_START_ROW, Bytes.toBytes("testRowTwo"));
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match keys greater
+ // Expect two keys (one from each family)
+ // Only look in first group of rows
+ expectedRows = numRows / 2;
+ expectedKeys = 2;
+ f = new QualifierFilter(CompareOp.GREATER,
+ new BinaryComparator(Bytes.toBytes("testQualifierOne-2")));
+ s = new Scan(HConstants.EMPTY_START_ROW, Bytes.toBytes("testRowTwo"));
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match keys not equal to
+ // Look across rows and fully validate the keys and ordering
+ // Expect varied numbers of keys, 4 per row in group one, 6 per row in
+ // group two
+ f = new QualifierFilter(CompareOp.NOT_EQUAL,
+ new BinaryComparator(QUALIFIERS_ONE[2]));
+ s = new Scan();
+ s.setFilter(f);
+
+ KeyValue [] kvs = {
+ // testRowOne-0
+ new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowOne-2
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowOne-3
+ new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowTwo-0
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-2
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-3
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ };
+ verifyScanFull(s, kvs);
+
+ // Test across rows and groups with a regex
+ // Filter out "test*-2"
+ // Expect 4 keys per row across both groups
+ f = new QualifierFilter(CompareOp.NOT_EQUAL,
+ new RegexStringComparator("test.+-2"));
+ s = new Scan();
+ s.setFilter(f);
+
+ kvs = new KeyValue [] {
+ // testRowOne-0
+ new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowOne-2
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowOne-3
+ new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowTwo-0
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-2
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-3
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ };
+ verifyScanFull(s, kvs);
+ }
+
+ @Test
+ public void testRowFilter() throws Exception {
+ // Match a single row, all keys
+ long expectedRows = 1;
+ long expectedKeys = colsPerRow;
+ Filter f = new RowFilter(CompareOp.EQUAL,
+ new BinaryComparator(Bytes.toBytes("testRowOne-2")));
+ Scan s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match two rows, one from each group, using a regex
+ expectedRows = 2;
+ expectedKeys = colsPerRow;
+ f = new RowFilter(CompareOp.EQUAL,
+ new RegexStringComparator("testRow.+-2"));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match rows less than
+ // Expect all keys in one row
+ expectedRows = 1;
+ expectedKeys = colsPerRow;
+ f = new RowFilter(CompareOp.LESS,
+ new BinaryComparator(Bytes.toBytes("testRowOne-2")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match rows less than or equal
+ // Expect all keys in two rows
+ expectedRows = 2;
+ expectedKeys = colsPerRow;
+ f = new RowFilter(CompareOp.LESS_OR_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testRowOne-2")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match rows not equal
+ // Expect all keys in all but one row
+ expectedRows = numRows - 1;
+ expectedKeys = colsPerRow;
+ f = new RowFilter(CompareOp.NOT_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testRowOne-2")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match keys greater or equal
+ // Expect all keys in all but one row
+ expectedRows = numRows - 1;
+ expectedKeys = colsPerRow;
+ f = new RowFilter(CompareOp.GREATER_OR_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testRowOne-2")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match keys greater
+ // Expect all keys in all but two rows
+ expectedRows = numRows - 2;
+ expectedKeys = colsPerRow;
+ f = new RowFilter(CompareOp.GREATER,
+ new BinaryComparator(Bytes.toBytes("testRowOne-2")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match rows not equal to testRowOne-2
+ // Look across rows and fully validate the keys and ordering
+ // Should see all keys in all rows but testRowOne-2
+ f = new RowFilter(CompareOp.NOT_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testRowOne-2")));
+ s = new Scan();
+ s.setFilter(f);
+
+ KeyValue [] kvs = {
+ // testRowOne-0
+ new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowOne-3
+ new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowTwo-0
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-2
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-3
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ };
+ verifyScanFull(s, kvs);
+
+ // Test across rows and groups with a regex
+ // Keep only the rows whose key matches the regex ".+-2"
+ // Expect all keys in two rows
+ f = new RowFilter(CompareOp.EQUAL,
+ new RegexStringComparator(".+-2"));
+ s = new Scan();
+ s.setFilter(f);
+
+ kvs = new KeyValue [] {
+ // testRowOne-2
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowTwo-2
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1])
+ };
+ verifyScanFull(s, kvs);
+ }
+
+ @Test
+ public void testValueFilter() throws Exception {
+ // Match group one rows
+ long expectedRows = numRows / 2;
+ long expectedKeys = colsPerRow;
+ Filter f = new ValueFilter(CompareOp.EQUAL,
+ new BinaryComparator(Bytes.toBytes("testValueOne")));
+ Scan s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match group two rows
+ expectedRows = numRows / 2;
+ expectedKeys = colsPerRow;
+ f = new ValueFilter(CompareOp.EQUAL,
+ new BinaryComparator(Bytes.toBytes("testValueTwo")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match all values using regex
+ expectedRows = numRows;
+ expectedKeys = colsPerRow;
+ f = new ValueFilter(CompareOp.EQUAL,
+ new RegexStringComparator("testValue((One)|(Two))"));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match values less than
+ // Expect group one rows
+ expectedRows = numRows / 2;
+ expectedKeys = colsPerRow;
+ f = new ValueFilter(CompareOp.LESS,
+ new BinaryComparator(Bytes.toBytes("testValueTwo")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match values less than or equal
+ // Expect all rows
+ expectedRows = numRows;
+ expectedKeys = colsPerRow;
+ f = new ValueFilter(CompareOp.LESS_OR_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testValueTwo")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match values less than or equal
+ // Expect group one rows
+ expectedRows = numRows / 2;
+ expectedKeys = colsPerRow;
+ f = new ValueFilter(CompareOp.LESS_OR_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testValueOne")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match values not equal
+ // Expect half the rows
+ expectedRows = numRows / 2;
+ expectedKeys = colsPerRow;
+ f = new ValueFilter(CompareOp.NOT_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testValueOne")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match values greater or equal
+ // Expect all rows
+ expectedRows = numRows;
+ expectedKeys = colsPerRow;
+ f = new ValueFilter(CompareOp.GREATER_OR_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testValueOne")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match values greater
+ // Expect half the rows
+ expectedRows = numRows / 2;
+ expectedKeys = colsPerRow;
+ f = new ValueFilter(CompareOp.GREATER,
+ new BinaryComparator(Bytes.toBytes("testValueOne")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match values not equal to testValueOne
+ // Look across rows and fully validate the keys and ordering
+ // Should see all keys in all group two rows
+ f = new ValueFilter(CompareOp.NOT_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testValueOne")));
+ s = new Scan();
+ s.setFilter(f);
+
+ KeyValue [] kvs = {
+ // testRowTwo-0
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-2
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-3
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ };
+ verifyScanFull(s, kvs);
+ }
+
+ @Test
+ public void testSkipFilter() throws Exception {
+ // SkipFilter drops an entire row as soon as the wrapped QualifierFilter rejects one of its cells
+ // Group one rows contain qualifier "testQualifierOne-2", so only group two rows, with all their keys, survive
+ Filter f = new SkipFilter(new QualifierFilter(CompareOp.NOT_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))));
+ Scan s = new Scan();
+ s.setFilter(f);
+
+ KeyValue [] kvs = {
+ // testRowTwo-0
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-2
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-3
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ };
+ verifyScanFull(s, kvs);
+ }
+
+ @Test
+ public void testFilterList() throws Exception {
+ // Test getting a single row, single key using Row, Qualifier, and Value
+ // regular expression and substring filters
+ // MUST_PASS_ALL requires every filter in the list to accept a cell (logical AND)
+ List<Filter> filters = new ArrayList<Filter>();
+ filters.add(new RowFilter(CompareOp.EQUAL,
+ new RegexStringComparator(".+-2")));
+ filters.add(new QualifierFilter(CompareOp.EQUAL,
+ new RegexStringComparator(".+-2")));
+ filters.add(new ValueFilter(CompareOp.EQUAL,
+ new SubstringComparator("One")));
+ Filter f = new FilterList(Operator.MUST_PASS_ALL, filters);
+ Scan s = new Scan();
+ s.addFamily(FAMILIES[0]);
+ s.setFilter(f);
+ KeyValue [] kvs = {
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0])
+ };
+ verifyScanFull(s, kvs);
+
+ // Test getting everything with a MUST_PASS_ONE filter list combining row,
+ // qualifier, and value filters (regex and substring comparators)
+ filters.clear();
+ filters.add(new RowFilter(CompareOp.EQUAL,
+ new RegexStringComparator(".+Two.+")));
+ filters.add(new QualifierFilter(CompareOp.EQUAL,
+ new RegexStringComparator(".+-2")));
+ filters.add(new ValueFilter(CompareOp.EQUAL,
+ new SubstringComparator("One")));
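+ // MUST_PASS_ONE keeps a cell if any one filter in the list accepts it (logical OR)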
+ f = new FilterList(Operator.MUST_PASS_ONE, filters);
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, numRows, colsPerRow);
+ }
+
+ @Test
+ public void testFirstKeyOnlyFilter() throws Exception {
+ Scan s = new Scan();
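+ // FirstKeyOnlyFilter stops reading a row after its first cell, which makes it a cheap way to count rows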
+ s.setFilter(new FirstKeyOnlyFilter());
+ // Expected KVs, the first KV from each of the remaining 6 rows
+ KeyValue [] kvs = {
+ new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1])
+ };
+ verifyScanFull(s, kvs);
+ }
+
+}
+
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java
new file mode 100644
index 0000000..e6845f7
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java
@@ -0,0 +1,238 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.StringWriter;
+import java.security.PrivilegedExceptionAction;
+import java.util.Iterator;
+import java.util.Random;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.Marshaller;
+import javax.xml.bind.Unmarshaller;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse;
+import org.apache.hadoop.hbase.rest.client.Client;
+import org.apache.hadoop.hbase.rest.client.Cluster;
+import org.apache.hadoop.hbase.rest.client.Response;
+import org.apache.hadoop.hbase.rest.model.CellModel;
+import org.apache.hadoop.hbase.rest.model.CellSetModel;
+import org.apache.hadoop.hbase.rest.model.RowModel;
+import org.apache.hadoop.hbase.rest.model.ScannerModel;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.security.visibility.CellVisibility;
+import org.apache.hadoop.hbase.security.visibility.ScanLabelGenerator;
+import org.apache.hadoop.hbase.security.visibility.SimpleScanLabelGenerator;
+import org.apache.hadoop.hbase.security.visibility.VisibilityClient;
+import org.apache.hadoop.hbase.security.visibility.VisibilityConstants;
+import org.apache.hadoop.hbase.security.visibility.VisibilityController;
+import org.apache.hadoop.hbase.security.visibility.VisibilityUtils;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestScannersWithLabels {
+ private static final String TABLE = "TestScannersWithLabels";
+ private static final String CFA = "a";
+ private static final String CFB = "b";
+ private static final String COLUMN_1 = CFA + ":1";
+ private static final String COLUMN_2 = CFB + ":2";
+ private final static String TOPSECRET = "topsecret";
+ private final static String PUBLIC = "public";
+ private final static String PRIVATE = "private";
+ private final static String CONFIDENTIAL = "confidential";
+ private final static String SECRET = "secret";
+ private static User SUPERUSER;
+
+ private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility();
+ private static Client client;
+ private static JAXBContext context;
+ private static Marshaller marshaller;
+ private static Unmarshaller unmarshaller;
+ private static Configuration conf;
+
+ private static int insertData(String tableName, String column, double prob) throws IOException {
+ // Note: the prob argument is currently unused; all nine rows are always written
+ int count = 0;
+ HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
+ byte[] k = new byte[3];
+ byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(column));
+
+ for (int i = 0; i < 9; i++) {
+ Put put = new Put(Bytes.toBytes("row" + i));
+ put.setDurability(Durability.SKIP_WAL);
+ put.add(famAndQf[0], famAndQf[1], k);
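+ // Label every cell (secret|confidential)&!topsecret: visible only to scanners
+ // authorized for SECRET or CONFIDENTIAL that do not request TOPSECRET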
+ put.setCellVisibility(new CellVisibility("(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!"
+ + TOPSECRET));
+ table.put(put);
+ count++;
+ }
+ table.flushCommits();
+ return count;
+ }
+
+ private static int countCellSet(CellSetModel model) {
+ int count = 0;
+ Iterator<RowModel> rows = model.getRows().iterator();
+ while (rows.hasNext()) {
+ RowModel row = rows.next();
+ Iterator<CellModel> cells = row.getCells().iterator();
+ while (cells.hasNext()) {
+ cells.next();
+ count++;
+ }
+ }
+ return count;
+ }
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ conf = TEST_UTIL.getConfiguration();
+ // conf must be initialized before it is used to create the superuser below
+ SUPERUSER = User.createUserForTesting(conf, "admin",
+ new String[] { "supergroup" });
+ conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS,
+ SimpleScanLabelGenerator.class, ScanLabelGenerator.class);
+ conf.setInt("hfile.format.version", 3);
+ conf.set("hbase.superuser", SUPERUSER.getShortName());
+ conf.set("hbase.coprocessor.master.classes", VisibilityController.class.getName());
+ conf.set("hbase.coprocessor.region.classes", VisibilityController.class.getName());
+ TEST_UTIL.startMiniCluster(1);
+ // Wait for the labels table to become available
+ TEST_UTIL.waitTableEnabled(VisibilityConstants.LABELS_TABLE_NAME.getName(), 50000);
+ createLabels();
+ setAuths();
+ REST_TEST_UTIL.startServletContainer(conf);
+ client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
+ context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class,
+ ScannerModel.class);
+ marshaller = context.createMarshaller();
+ unmarshaller = context.createUnmarshaller();
+ HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+ if (admin.tableExists(TABLE)) {
+ return;
+ }
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
+ htd.addFamily(new HColumnDescriptor(CFA));
+ htd.addFamily(new HColumnDescriptor(CFB));
+ admin.createTable(htd);
+ insertData(TABLE, COLUMN_1, 1.0);
+ insertData(TABLE, COLUMN_2, 0.5);
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ REST_TEST_UTIL.shutdownServletContainer();
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ private static void createLabels() throws IOException, InterruptedException {
+ PrivilegedExceptionAction<VisibilityLabelsResponse> action = new PrivilegedExceptionAction<VisibilityLabelsResponse>() {
+ public VisibilityLabelsResponse run() throws Exception {
+ String[] labels = { SECRET, CONFIDENTIAL, PRIVATE, PUBLIC, TOPSECRET };
+ try {
+ VisibilityClient.addLabels(conf, labels);
+ } catch (Throwable t) {
+ throw new IOException(t);
+ }
+ return null;
+ }
+ };
+ SUPERUSER.runAs(action);
+ }
+ private static void setAuths() throws Exception {
+ String[] labels = { SECRET, CONFIDENTIAL, PRIVATE, PUBLIC, TOPSECRET };
+ try {
+ VisibilityClient.setAuths(conf, labels, User.getCurrent().getShortName());
+ } catch (Throwable t) {
+ throw new IOException(t);
+ }
+ }
+ @Test
+ public void testSimpleScannerXMLWithLabelsThatReceivesNoData() throws IOException, JAXBException {
+ final int BATCH_SIZE = 5;
+ // new scanner
+ ScannerModel model = new ScannerModel();
+ model.setBatch(BATCH_SIZE);
+ model.addColumn(Bytes.toBytes(COLUMN_1));
+ model.addLabel(PUBLIC);
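+ // PUBLIC does not satisfy the cells' (secret|confidential)&!topsecret expression, so no data should come back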
+ StringWriter writer = new StringWriter();
+ marshaller.marshal(model, writer);
+ byte[] body = Bytes.toBytes(writer.toString());
+ // make sure the REST server is not in read-only mode so the scanner PUT is allowed
+ conf.set("hbase.rest.readonly", "false");
+ Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body);
+ assertEquals(response.getCode(), 201);
+ String scannerURI = response.getLocation();
+ assertNotNull(scannerURI);
+
+ // get a cell set
+ response = client.get(scannerURI, Constants.MIMETYPE_XML);
+ // Respond with 204 as there are no cells to be retrieved
+ assertEquals(response.getCode(), 204);
+ assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
+ }
+
+ @Test
+ public void testSimpleScannerXMLWithLabelsThatReceivesData() throws IOException, JAXBException {
+ // new scanner
+ ScannerModel model = new ScannerModel();
+ model.setBatch(5);
+ model.addColumn(Bytes.toBytes(COLUMN_1));
+ model.addLabel(SECRET);
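+ // SECRET satisfies the cells' visibility expression, so the batch of 5 cells should come back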
+ StringWriter writer = new StringWriter();
+ marshaller.marshal(model, writer);
+ byte[] body = Bytes.toBytes(writer.toString());
+
+ // make sure the REST server is not in read-only mode so the scanner PUT is allowed
+ conf.set("hbase.rest.readonly", "false");
+ Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body);
+ assertEquals(response.getCode(), 201);
+ String scannerURI = response.getLocation();
+ assertNotNull(scannerURI);
+
+ // get a cell set
+ response = client.get(scannerURI, Constants.MIMETYPE_XML);
+ // Expect 200 since matching cells are visible and returned
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
+ CellSetModel cellSet = (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response
+ .getBody()));
+ assertEquals(countCellSet(cellSet), 5);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java
new file mode 100644
index 0000000..ac3dfdb
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java
@@ -0,0 +1,191 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.StringWriter;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.rest.client.Client;
+import org.apache.hadoop.hbase.rest.client.Cluster;
+import org.apache.hadoop.hbase.rest.client.Response;
+import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel;
+import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
+import org.apache.hadoop.hbase.rest.model.TestTableSchemaModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import static org.junit.Assert.*;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestSchemaResource {
+ private static String TABLE1 = "TestSchemaResource1";
+ private static String TABLE2 = "TestSchemaResource2";
+
+ private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private static final HBaseRESTTestingUtility REST_TEST_UTIL =
+ new HBaseRESTTestingUtility();
+ private static Client client;
+ private static JAXBContext context;
+ private static Configuration conf;
+ private static TestTableSchemaModel testTableSchemaModel;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ conf = TEST_UTIL.getConfiguration();
+ TEST_UTIL.startMiniCluster();
+ REST_TEST_UTIL.startServletContainer(conf);
+ client = new Client(new Cluster().add("localhost",
+ REST_TEST_UTIL.getServletPort()));
+ testTableSchemaModel = new TestTableSchemaModel();
+ context = JAXBContext.newInstance(
+ ColumnSchemaModel.class,
+ TableSchemaModel.class);
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ REST_TEST_UTIL.shutdownServletContainer();
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ private static byte[] toXML(TableSchemaModel model) throws JAXBException {
+ StringWriter writer = new StringWriter();
+ context.createMarshaller().marshal(model, writer);
+ return Bytes.toBytes(writer.toString());
+ }
+
+ private static TableSchemaModel fromXML(byte[] content)
+ throws JAXBException {
+ return (TableSchemaModel) context.createUnmarshaller()
+ .unmarshal(new ByteArrayInputStream(content));
+ }
+
+ @Test
+ public void testTableCreateAndDeleteXML() throws IOException, JAXBException {
+ String schemaPath = "/" + TABLE1 + "/schema";
+ TableSchemaModel model;
+ Response response;
+
+ HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+ assertFalse(admin.tableExists(TABLE1));
+
+ // create the table
+ model = testTableSchemaModel.buildTestModel(TABLE1);
+ testTableSchemaModel.checkModel(model, TABLE1);
+ response = client.put(schemaPath, Constants.MIMETYPE_XML, toXML(model));
+ assertEquals(response.getCode(), 201);
+
+ // recall the same put operation but in read-only mode
+ conf.set("hbase.rest.readonly", "true");
+ response = client.put(schemaPath, Constants.MIMETYPE_XML, toXML(model));
+ assertEquals(response.getCode(), 403);
+
+ // retrieve the schema and validate it
+ response = client.get(schemaPath, Constants.MIMETYPE_XML);
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
+ model = fromXML(response.getBody());
+ testTableSchemaModel.checkModel(model, TABLE1);
+
+ // with json retrieve the schema and validate it
+ response = client.get(schemaPath, Constants.MIMETYPE_JSON);
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
+ model = testTableSchemaModel.fromJSON(Bytes.toString(response.getBody()));
+ testTableSchemaModel.checkModel(model, TABLE1);
+
+ // test delete schema operation is forbidden in read-only mode
+ response = client.delete(schemaPath);
+ assertEquals(response.getCode(), 403);
+
+ // return read-only setting back to default
+ conf.set("hbase.rest.readonly", "false");
+
+ // delete the table and make sure HBase concurs
+ response = client.delete(schemaPath);
+ assertEquals(response.getCode(), 200);
+ assertFalse(admin.tableExists(TABLE1));
+ }
+
+ @Test
+ public void testTableCreateAndDeletePB() throws IOException, JAXBException {
+ String schemaPath = "/" + TABLE2 + "/schema";
+ TableSchemaModel model;
+ Response response;
+
+ HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+ assertFalse(admin.tableExists(TABLE2));
+
+ // create the table
+ model = testTableSchemaModel.buildTestModel(TABLE2);
+ testTableSchemaModel.checkModel(model, TABLE2);
+ response = client.put(schemaPath, Constants.MIMETYPE_PROTOBUF,
+ model.createProtobufOutput());
+ assertEquals(response.getCode(), 201);
+
+ // recall the same put operation but in read-only mode
+ conf.set("hbase.rest.readonly", "true");
+ response = client.put(schemaPath, Constants.MIMETYPE_PROTOBUF,
+ model.createProtobufOutput());
+ assertEquals(response.getCode(), 403);
+
+ // retrieve the schema and validate it
+ response = client.get(schemaPath, Constants.MIMETYPE_PROTOBUF);
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
+ model = new TableSchemaModel();
+ model.getObjectFromMessage(response.getBody());
+ testTableSchemaModel.checkModel(model, TABLE2);
+
+ // retrieve the schema and validate it with alternate pbuf type
+ response = client.get(schemaPath, Constants.MIMETYPE_PROTOBUF_IETF);
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type"));
+ model = new TableSchemaModel();
+ model.getObjectFromMessage(response.getBody());
+ testTableSchemaModel.checkModel(model, TABLE2);
+
+ // test delete schema operation is forbidden in read-only mode
+ response = client.delete(schemaPath);
+ assertEquals(response.getCode(), 403);
+
+ // return read-only setting back to default
+ conf.set("hbase.rest.readonly", "false");
+
+ // delete the table and make sure HBase concurs
+ response = client.delete(schemaPath);
+ assertEquals(response.getCode(), 200);
+ assertFalse(admin.tableExists(TABLE2));
+ }
+
+}
+
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java
new file mode 100644
index 0000000..c9aa191
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java
@@ -0,0 +1,117 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.rest.client.Client;
+import org.apache.hadoop.hbase.rest.client.Cluster;
+import org.apache.hadoop.hbase.rest.client.Response;
+import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import static org.junit.Assert.*;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestStatusResource {
+ private static final byte[] META_REGION_NAME = Bytes.toBytes(TableName.META_TABLE_NAME+",,1");
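+ // the name of the hbase:meta region: table name, empty start key, region id 1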
+
+ private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private static final HBaseRESTTestingUtility REST_TEST_UTIL =
+ new HBaseRESTTestingUtility();
+ private static Client client;
+ private static JAXBContext context;
+
+ private static void validate(StorageClusterStatusModel model) {
+ assertNotNull(model);
+ assertTrue(model.getRegions() + ">= 1", model.getRegions() >= 1);
+ assertTrue(model.getRequests() >= 0);
+ assertTrue(model.getAverageLoad() >= 0.0);
+ assertNotNull(model.getLiveNodes());
+ assertNotNull(model.getDeadNodes());
+ assertFalse(model.getLiveNodes().isEmpty());
+ boolean foundMeta = false;
+ for (StorageClusterStatusModel.Node node: model.getLiveNodes()) {
+ assertNotNull(node.getName());
+ assertTrue(node.getStartCode() > 0L);
+ assertTrue(node.getRequests() >= 0);
+ for (StorageClusterStatusModel.Node.Region region: node.getRegions()) {
+ if (Bytes.equals(region.getName(), META_REGION_NAME)) {
+ foundMeta = true;
+ }
+ }
+ }
+ assertTrue(foundMeta);
+ }
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ TEST_UTIL.startMiniCluster();
+ REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
+ client = new Client(new Cluster().add("localhost",
+ REST_TEST_UTIL.getServletPort()));
+ context = JAXBContext.newInstance(StorageClusterStatusModel.class);
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ REST_TEST_UTIL.shutdownServletContainer();
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ @Test
+ public void testGetClusterStatusXML() throws IOException, JAXBException {
+ Response response = client.get("/status/cluster", Constants.MIMETYPE_XML);
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
+ StorageClusterStatusModel model = (StorageClusterStatusModel)
+ context.createUnmarshaller().unmarshal(
+ new ByteArrayInputStream(response.getBody()));
+ validate(model);
+ }
+
+ @Test
+ public void testGetClusterStatusPB() throws IOException {
+ Response response = client.get("/status/cluster", Constants.MIMETYPE_PROTOBUF);
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
+ StorageClusterStatusModel model = new StorageClusterStatusModel();
+ model.getObjectFromMessage(response.getBody());
+ validate(model);
+ response = client.get("/status/cluster", Constants.MIMETYPE_PROTOBUF_IETF);
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type"));
+ model = new StorageClusterStatusModel();
+ model.getObjectFromMessage(response.getBody());
+ validate(model);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java
new file mode 100644
index 0000000..aac2ab7
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java
@@ -0,0 +1,262 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.Iterator;
+import java.util.Map;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.rest.client.Client;
+import org.apache.hadoop.hbase.rest.client.Cluster;
+import org.apache.hadoop.hbase.rest.client.Response;
+import org.apache.hadoop.hbase.rest.model.TableModel;
+import org.apache.hadoop.hbase.rest.model.TableInfoModel;
+import org.apache.hadoop.hbase.rest.model.TableListModel;
+import org.apache.hadoop.hbase.rest.model.TableRegionModel;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.util.StringUtils;
+
+import static org.junit.Assert.*;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestTableResource {
+ private static final Log LOG = LogFactory.getLog(TestTableResource.class);
+
+ private static String TABLE = "TestTableResource";
+ private static String COLUMN_FAMILY = "test";
+ private static String COLUMN = COLUMN_FAMILY + ":qualifier";
+ private static Map<HRegionInfo, ServerName> regionMap;
+
+ private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private static final HBaseRESTTestingUtility REST_TEST_UTIL =
+ new HBaseRESTTestingUtility();
+ private static Client client;
+ private static JAXBContext context;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ TEST_UTIL.startMiniCluster(3);
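+ // three region servers give the post-split regions more than one possible host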
+ REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
+ client = new Client(new Cluster().add("localhost",
+ REST_TEST_UTIL.getServletPort()));
+ context = JAXBContext.newInstance(
+ TableModel.class,
+ TableInfoModel.class,
+ TableListModel.class,
+ TableRegionModel.class);
+ HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+ if (admin.tableExists(TABLE)) {
+ return;
+ }
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
+ htd.addFamily(new HColumnDescriptor(COLUMN_FAMILY));
+ admin.createTable(htd);
+ HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
+ byte[] k = new byte[3];
+ byte [][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(COLUMN));
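+ // write 25 * 25 * 25 = 15,625 three-letter row keys ("aaa" through "yyy") so there is enough data to split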
+ for (byte b1 = 'a'; b1 < 'z'; b1++) {
+ for (byte b2 = 'a'; b2 < 'z'; b2++) {
+ for (byte b3 = 'a'; b3 < 'z'; b3++) {
+ k[0] = b1;
+ k[1] = b2;
+ k[2] = b3;
+ Put put = new Put(k);
+ put.setDurability(Durability.SKIP_WAL);
+ put.add(famAndQf[0], famAndQf[1], k);
+ table.put(put);
+ }
+ }
+ }
+ table.flushCommits();
+ // get the initial layout (should just be one region)
+ Map<HRegionInfo, ServerName> m = table.getRegionLocations();
+ assertEquals(m.size(), 1);
+ // tell the master to split the table
+ admin.split(TABLE);
+ // poll for up to 15 seconds while the split completes
+ long timeout = System.currentTimeMillis() + (15 * 1000);
+ while (System.currentTimeMillis() < timeout && m.size()!=2){
+ try {
+ Thread.sleep(250);
+ } catch (InterruptedException e) {
+ LOG.warn(StringUtils.stringifyException(e));
+ }
+ // check again
+ m = table.getRegionLocations();
+ }
+
+ // should have two regions now
+ assertEquals(m.size(), 2);
+ regionMap = m;
+ LOG.info("regions: " + regionMap);
+ table.close();
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ REST_TEST_UTIL.shutdownServletContainer();
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ private static void checkTableList(TableListModel model) {
+ boolean found = false;
+ Iterator<TableModel> tables = model.getTables().iterator();
+ assertTrue(tables.hasNext());
+ while (tables.hasNext()) {
+ TableModel table = tables.next();
+ if (table.getName().equals(TABLE)) {
+ found = true;
+ break;
+ }
+ }
+ assertTrue(found);
+ }
+
+ void checkTableInfo(TableInfoModel model) {
+ assertEquals(model.getName(), TABLE);
+ Iterator<TableRegionModel> regions = model.getRegions().iterator();
+ assertTrue(regions.hasNext());
+ while (regions.hasNext()) {
+ TableRegionModel region = regions.next();
+ boolean found = false;
+ for (Map.Entry<HRegionInfo, ServerName> e: regionMap.entrySet()) {
+ HRegionInfo hri = e.getKey();
+ String hriRegionName = hri.getRegionNameAsString();
+ String regionName = region.getName();
+ if (hriRegionName.equals(regionName)) {
+ found = true;
+ byte[] startKey = hri.getStartKey();
+ byte[] endKey = hri.getEndKey();
+ InetSocketAddress sa = new InetSocketAddress(e.getValue().getHostname(), e.getValue().getPort());
+ String location = sa.getHostName() + ":" + sa.getPort();
+ assertEquals(hri.getRegionId(), region.getId());
+ assertTrue(Bytes.equals(startKey, region.getStartKey()));
+ assertTrue(Bytes.equals(endKey, region.getEndKey()));
+ assertEquals(location, region.getLocation());
+ break;
+ }
+ }
+ assertTrue(found);
+ }
+ }
+
+ @Test
+ public void testTableListText() throws IOException {
+ Response response = client.get("/", Constants.MIMETYPE_TEXT);
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_TEXT, response.getHeader("content-type"));
+ }
+
+ @Test
+ public void testTableListXML() throws IOException, JAXBException {
+ Response response = client.get("/", Constants.MIMETYPE_XML);
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
+ TableListModel model = (TableListModel)
+ context.createUnmarshaller()
+ .unmarshal(new ByteArrayInputStream(response.getBody()));
+ checkTableList(model);
+ }
+
+ @Test
+ public void testTableListJSON() throws IOException {
+ Response response = client.get("/", Constants.MIMETYPE_JSON);
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
+ }
+
+ @Test
+ public void testTableListPB() throws IOException, JAXBException {
+ Response response = client.get("/", Constants.MIMETYPE_PROTOBUF);
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
+ TableListModel model = new TableListModel();
+ model.getObjectFromMessage(response.getBody());
+ checkTableList(model);
+ response = client.get("/", Constants.MIMETYPE_PROTOBUF_IETF);
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type"));
+ model = new TableListModel();
+ model.getObjectFromMessage(response.getBody());
+ checkTableList(model);
+ }
+
+ @Test
+ public void testTableInfoText() throws IOException {
+ Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_TEXT);
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_TEXT, response.getHeader("content-type"));
+ }
+
+ @Test
+ public void testTableInfoXML() throws IOException, JAXBException {
+ Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_XML);
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
+ TableInfoModel model = (TableInfoModel)
+ context.createUnmarshaller()
+ .unmarshal(new ByteArrayInputStream(response.getBody()));
+ checkTableInfo(model);
+ }
+
+ @Test
+ public void testTableInfoJSON() throws IOException {
+ Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_JSON);
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
+ }
+
+ @Test
+ public void testTableInfoPB() throws IOException, JAXBException {
+ Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_PROTOBUF);
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
+ TableInfoModel model = new TableInfoModel();
+ model.getObjectFromMessage(response.getBody());
+ checkTableInfo(model);
+ response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_PROTOBUF_IETF);
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type"));
+ model = new TableInfoModel();
+ model.getObjectFromMessage(response.getBody());
+ checkTableInfo(model);
+ }
+
+}
+
[25/38] HBASE-12197 Move rest to its own module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/VersionMessage.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/VersionMessage.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/VersionMessage.java
new file mode 100644
index 0000000..590b0d3
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/VersionMessage.java
@@ -0,0 +1,1147 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: VersionMessage.proto
+
+package org.apache.hadoop.hbase.rest.protobuf.generated;
+
+public final class VersionMessage {
+ private VersionMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface VersionOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional string restVersion = 1;
+ /**
+ * <code>optional string restVersion = 1;</code>
+ */
+ boolean hasRestVersion();
+ /**
+ * <code>optional string restVersion = 1;</code>
+ */
+ java.lang.String getRestVersion();
+ /**
+ * <code>optional string restVersion = 1;</code>
+ */
+ com.google.protobuf.ByteString
+ getRestVersionBytes();
+
+ // optional string jvmVersion = 2;
+ /**
+ * <code>optional string jvmVersion = 2;</code>
+ */
+ boolean hasJvmVersion();
+ /**
+ * <code>optional string jvmVersion = 2;</code>
+ */
+ java.lang.String getJvmVersion();
+ /**
+ * <code>optional string jvmVersion = 2;</code>
+ */
+ com.google.protobuf.ByteString
+ getJvmVersionBytes();
+
+ // optional string osVersion = 3;
+ /**
+ * <code>optional string osVersion = 3;</code>
+ */
+ boolean hasOsVersion();
+ /**
+ * <code>optional string osVersion = 3;</code>
+ */
+ java.lang.String getOsVersion();
+ /**
+ * <code>optional string osVersion = 3;</code>
+ */
+ com.google.protobuf.ByteString
+ getOsVersionBytes();
+
+ // optional string serverVersion = 4;
+ /**
+ * <code>optional string serverVersion = 4;</code>
+ */
+ boolean hasServerVersion();
+ /**
+ * <code>optional string serverVersion = 4;</code>
+ */
+ java.lang.String getServerVersion();
+ /**
+ * <code>optional string serverVersion = 4;</code>
+ */
+ com.google.protobuf.ByteString
+ getServerVersionBytes();
+
+ // optional string jerseyVersion = 5;
+ /**
+ * <code>optional string jerseyVersion = 5;</code>
+ */
+ boolean hasJerseyVersion();
+ /**
+ * <code>optional string jerseyVersion = 5;</code>
+ */
+ java.lang.String getJerseyVersion();
+ /**
+ * <code>optional string jerseyVersion = 5;</code>
+ */
+ com.google.protobuf.ByteString
+ getJerseyVersionBytes();
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.Version}
+ */
+ public static final class Version extends
+ com.google.protobuf.GeneratedMessage
+ implements VersionOrBuilder {
+ // Use Version.newBuilder() to construct.
+ private Version(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Version(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Version defaultInstance;
+ public static Version getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Version getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Version(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ restVersion_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ jvmVersion_ = input.readBytes();
+ break;
+ }
+ case 26: {
+ bitField0_ |= 0x00000004;
+ osVersion_ = input.readBytes();
+ break;
+ }
+ case 34: {
+ bitField0_ |= 0x00000008;
+ serverVersion_ = input.readBytes();
+ break;
+ }
+ case 42: {
+ bitField0_ |= 0x00000010;
+ jerseyVersion_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.class, org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Version> PARSER =
+ new com.google.protobuf.AbstractParser<Version>() {
+ public Version parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Version(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Version> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // optional string restVersion = 1;
+ public static final int RESTVERSION_FIELD_NUMBER = 1;
+ private java.lang.Object restVersion_;
+ /**
+ * <code>optional string restVersion = 1;</code>
+ */
+ public boolean hasRestVersion() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional string restVersion = 1;</code>
+ */
+ public java.lang.String getRestVersion() {
+ java.lang.Object ref = restVersion_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ restVersion_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>optional string restVersion = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getRestVersionBytes() {
+ java.lang.Object ref = restVersion_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ restVersion_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional string jvmVersion = 2;
+ public static final int JVMVERSION_FIELD_NUMBER = 2;
+ private java.lang.Object jvmVersion_;
+ /**
+ * <code>optional string jvmVersion = 2;</code>
+ */
+ public boolean hasJvmVersion() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional string jvmVersion = 2;</code>
+ */
+ public java.lang.String getJvmVersion() {
+ java.lang.Object ref = jvmVersion_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ jvmVersion_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>optional string jvmVersion = 2;</code>
+ */
+ public com.google.protobuf.ByteString
+ getJvmVersionBytes() {
+ java.lang.Object ref = jvmVersion_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ jvmVersion_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional string osVersion = 3;
+ public static final int OSVERSION_FIELD_NUMBER = 3;
+ private java.lang.Object osVersion_;
+ /**
+ * <code>optional string osVersion = 3;</code>
+ */
+ public boolean hasOsVersion() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional string osVersion = 3;</code>
+ */
+ public java.lang.String getOsVersion() {
+ java.lang.Object ref = osVersion_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ osVersion_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>optional string osVersion = 3;</code>
+ */
+ public com.google.protobuf.ByteString
+ getOsVersionBytes() {
+ java.lang.Object ref = osVersion_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ osVersion_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional string serverVersion = 4;
+ public static final int SERVERVERSION_FIELD_NUMBER = 4;
+ private java.lang.Object serverVersion_;
+ /**
+ * <code>optional string serverVersion = 4;</code>
+ */
+ public boolean hasServerVersion() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional string serverVersion = 4;</code>
+ */
+ public java.lang.String getServerVersion() {
+ java.lang.Object ref = serverVersion_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ serverVersion_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>optional string serverVersion = 4;</code>
+ */
+ public com.google.protobuf.ByteString
+ getServerVersionBytes() {
+ java.lang.Object ref = serverVersion_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ serverVersion_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional string jerseyVersion = 5;
+ public static final int JERSEYVERSION_FIELD_NUMBER = 5;
+ private java.lang.Object jerseyVersion_;
+ /**
+ * <code>optional string jerseyVersion = 5;</code>
+ */
+ public boolean hasJerseyVersion() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * <code>optional string jerseyVersion = 5;</code>
+ */
+ public java.lang.String getJerseyVersion() {
+ java.lang.Object ref = jerseyVersion_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ jerseyVersion_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>optional string jerseyVersion = 5;</code>
+ */
+ public com.google.protobuf.ByteString
+ getJerseyVersionBytes() {
+ java.lang.Object ref = jerseyVersion_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ jerseyVersion_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private void initFields() {
+ restVersion_ = "";
+ jvmVersion_ = "";
+ osVersion_ = "";
+ serverVersion_ = "";
+ jerseyVersion_ = "";
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getRestVersionBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getJvmVersionBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeBytes(3, getOsVersionBytes());
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeBytes(4, getServerVersionBytes());
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ output.writeBytes(5, getJerseyVersionBytes());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getRestVersionBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getJvmVersionBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(3, getOsVersionBytes());
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(4, getServerVersionBytes());
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(5, getJerseyVersionBytes());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.Version}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.VersionOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.class, org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ restVersion_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ jvmVersion_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ osVersion_ = "";
+ bitField0_ = (bitField0_ & ~0x00000004);
+ serverVersion_ = "";
+ bitField0_ = (bitField0_ & ~0x00000008);
+ jerseyVersion_ = "";
+ bitField0_ = (bitField0_ & ~0x00000010);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version build() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version buildPartial() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version result = new org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.restVersion_ = restVersion_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.jvmVersion_ = jvmVersion_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.osVersion_ = osVersion_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.serverVersion_ = serverVersion_;
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+ to_bitField0_ |= 0x00000010;
+ }
+ result.jerseyVersion_ = jerseyVersion_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version) {
+ return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version other) {
+ if (other == org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.getDefaultInstance()) return this;
+ if (other.hasRestVersion()) {
+ bitField0_ |= 0x00000001;
+ restVersion_ = other.restVersion_;
+ onChanged();
+ }
+ if (other.hasJvmVersion()) {
+ bitField0_ |= 0x00000002;
+ jvmVersion_ = other.jvmVersion_;
+ onChanged();
+ }
+ if (other.hasOsVersion()) {
+ bitField0_ |= 0x00000004;
+ osVersion_ = other.osVersion_;
+ onChanged();
+ }
+ if (other.hasServerVersion()) {
+ bitField0_ |= 0x00000008;
+ serverVersion_ = other.serverVersion_;
+ onChanged();
+ }
+ if (other.hasJerseyVersion()) {
+ bitField0_ |= 0x00000010;
+ jerseyVersion_ = other.jerseyVersion_;
+ onChanged();
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // optional string restVersion = 1;
+ private java.lang.Object restVersion_ = "";
+ /**
+ * <code>optional string restVersion = 1;</code>
+ */
+ public boolean hasRestVersion() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional string restVersion = 1;</code>
+ */
+ public java.lang.String getRestVersion() {
+ java.lang.Object ref = restVersion_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ restVersion_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>optional string restVersion = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getRestVersionBytes() {
+ java.lang.Object ref = restVersion_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ restVersion_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>optional string restVersion = 1;</code>
+ */
+ public Builder setRestVersion(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ restVersion_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string restVersion = 1;</code>
+ */
+ public Builder clearRestVersion() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ restVersion_ = getDefaultInstance().getRestVersion();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string restVersion = 1;</code>
+ */
+ public Builder setRestVersionBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ restVersion_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional string jvmVersion = 2;
+ private java.lang.Object jvmVersion_ = "";
+ /**
+ * <code>optional string jvmVersion = 2;</code>
+ */
+ public boolean hasJvmVersion() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional string jvmVersion = 2;</code>
+ */
+ public java.lang.String getJvmVersion() {
+ java.lang.Object ref = jvmVersion_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ jvmVersion_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>optional string jvmVersion = 2;</code>
+ */
+ public com.google.protobuf.ByteString
+ getJvmVersionBytes() {
+ java.lang.Object ref = jvmVersion_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ jvmVersion_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>optional string jvmVersion = 2;</code>
+ */
+ public Builder setJvmVersion(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ jvmVersion_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string jvmVersion = 2;</code>
+ */
+ public Builder clearJvmVersion() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ jvmVersion_ = getDefaultInstance().getJvmVersion();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string jvmVersion = 2;</code>
+ */
+ public Builder setJvmVersionBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ jvmVersion_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional string osVersion = 3;
+ private java.lang.Object osVersion_ = "";
+ /**
+ * <code>optional string osVersion = 3;</code>
+ */
+ public boolean hasOsVersion() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional string osVersion = 3;</code>
+ */
+ public java.lang.String getOsVersion() {
+ java.lang.Object ref = osVersion_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ osVersion_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>optional string osVersion = 3;</code>
+ */
+ public com.google.protobuf.ByteString
+ getOsVersionBytes() {
+ java.lang.Object ref = osVersion_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ osVersion_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>optional string osVersion = 3;</code>
+ */
+ public Builder setOsVersion(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ osVersion_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string osVersion = 3;</code>
+ */
+ public Builder clearOsVersion() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ osVersion_ = getDefaultInstance().getOsVersion();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string osVersion = 3;</code>
+ */
+ public Builder setOsVersionBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ osVersion_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional string serverVersion = 4;
+ private java.lang.Object serverVersion_ = "";
+ /**
+ * <code>optional string serverVersion = 4;</code>
+ */
+ public boolean hasServerVersion() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional string serverVersion = 4;</code>
+ */
+ public java.lang.String getServerVersion() {
+ java.lang.Object ref = serverVersion_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ serverVersion_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>optional string serverVersion = 4;</code>
+ */
+ public com.google.protobuf.ByteString
+ getServerVersionBytes() {
+ java.lang.Object ref = serverVersion_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ serverVersion_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>optional string serverVersion = 4;</code>
+ */
+ public Builder setServerVersion(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000008;
+ serverVersion_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string serverVersion = 4;</code>
+ */
+ public Builder clearServerVersion() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ serverVersion_ = getDefaultInstance().getServerVersion();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string serverVersion = 4;</code>
+ */
+ public Builder setServerVersionBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000008;
+ serverVersion_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional string jerseyVersion = 5;
+ private java.lang.Object jerseyVersion_ = "";
+ /**
+ * <code>optional string jerseyVersion = 5;</code>
+ */
+ public boolean hasJerseyVersion() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * <code>optional string jerseyVersion = 5;</code>
+ */
+ public java.lang.String getJerseyVersion() {
+ java.lang.Object ref = jerseyVersion_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ jerseyVersion_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>optional string jerseyVersion = 5;</code>
+ */
+ public com.google.protobuf.ByteString
+ getJerseyVersionBytes() {
+ java.lang.Object ref = jerseyVersion_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ jerseyVersion_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>optional string jerseyVersion = 5;</code>
+ */
+ public Builder setJerseyVersion(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000010;
+ jerseyVersion_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string jerseyVersion = 5;</code>
+ */
+ public Builder clearJerseyVersion() {
+ bitField0_ = (bitField0_ & ~0x00000010);
+ jerseyVersion_ = getDefaultInstance().getJerseyVersion();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string jerseyVersion = 5;</code>
+ */
+ public Builder setJerseyVersionBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000010;
+ jerseyVersion_ = value;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.Version)
+ }
+
+ static {
+ defaultInstance = new Version(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.Version)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\024VersionMessage.proto\022/org.apache.hadoo" +
+ "p.hbase.rest.protobuf.generated\"s\n\007Versi" +
+ "on\022\023\n\013restVersion\030\001 \001(\t\022\022\n\njvmVersion\030\002 " +
+ "\001(\t\022\021\n\tosVersion\030\003 \001(\t\022\025\n\rserverVersion\030" +
+ "\004 \001(\t\022\025\n\rjerseyVersion\030\005 \001(\t"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_descriptor,
+ new java.lang.String[] { "RestVersion", "JvmVersion", "OsVersion", "ServerVersion", "JerseyVersion", });
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
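
A minimal sketch (not part of this patch; the version strings are made up for
illustration) showing a round trip through the generated Version message above.
Every builder setter and parseFrom overload used here appears verbatim in the
generated code in this file:

import org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version;

public class VersionRoundTrip {
  public static void main(String[] args) throws Exception {
    Version v = Version.newBuilder()
        .setRestVersion("0.0.3")            // field 1
        .setJvmVersion("1.7.0_55-b13")      // field 2
        .setOsVersion("Linux 3.13 amd64")   // field 3
        .setServerVersion("jetty/6.1.26")   // field 4
        .setJerseyVersion("1.9")            // field 5
        .build();
    // writeTo()/getSerializedSize() drive the serialization under the hood.
    byte[] wire = v.toByteArray();
    Version parsed = Version.parseFrom(wire);
    System.out.println(parsed.getRestVersion()); // 0.0.3
  }
}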
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java
new file mode 100644
index 0000000..3b8b8ca
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java
@@ -0,0 +1,89 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.provider;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+
+import javax.ws.rs.ext.ContextResolver;
+import javax.ws.rs.ext.Provider;
+import javax.xml.bind.JAXBContext;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.rest.model.CellModel;
+import org.apache.hadoop.hbase.rest.model.CellSetModel;
+import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel;
+import org.apache.hadoop.hbase.rest.model.RowModel;
+import org.apache.hadoop.hbase.rest.model.ScannerModel;
+import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;
+import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel;
+import org.apache.hadoop.hbase.rest.model.TableInfoModel;
+import org.apache.hadoop.hbase.rest.model.TableListModel;
+import org.apache.hadoop.hbase.rest.model.TableModel;
+import org.apache.hadoop.hbase.rest.model.TableRegionModel;
+import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
+import org.apache.hadoop.hbase.rest.model.VersionModel;
+
+import com.sun.jersey.api.json.JSONConfiguration;
+import com.sun.jersey.api.json.JSONJAXBContext;
+
+/**
+ * Plumbing for hooking Jersey's JSON entity body encoding and decoding
+ * support up to JAXB. To control how JSON is processed and produced, modify
+ * how the context is created (for example, by using a different
+ * configuration builder).
+ */
+@Provider
+@InterfaceAudience.Private
+public class JAXBContextResolver implements ContextResolver<JAXBContext> {
+
+ private final JAXBContext context;
+
+ private final Set<Class<?>> types;
+
+ private final Class<?>[] cTypes = {
+ CellModel.class,
+ CellSetModel.class,
+ ColumnSchemaModel.class,
+ RowModel.class,
+ ScannerModel.class,
+ StorageClusterStatusModel.class,
+ StorageClusterVersionModel.class,
+ TableInfoModel.class,
+ TableListModel.class,
+ TableModel.class,
+ TableRegionModel.class,
+ TableSchemaModel.class,
+ VersionModel.class
+ };
+
+ @SuppressWarnings("unchecked")
+ public JAXBContextResolver() throws Exception {
+ this.types = new HashSet<Class<?>>(Arrays.asList(cTypes));
+ this.context = new JSONJAXBContext(JSONConfiguration.natural().build(),
+ cTypes);
+ }
+
+ @Override
+ public JAXBContext getContext(Class<?> objectType) {
+ return (types.contains(objectType)) ? context : null;
+ }
+}
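
The Javadoc above notes that JSON handling is controlled entirely by how the
context is created. A hedged sketch of that knob, assuming the Jersey 1.x
JSONConfiguration API used in the constructor: switching from the "natural" to
the "mapped" JSON notation only means swapping the configuration builder
(VersionModel stands in for the full cTypes array here):

import javax.xml.bind.JAXBContext;
import com.sun.jersey.api.json.JSONConfiguration;
import com.sun.jersey.api.json.JSONJAXBContext;
import org.apache.hadoop.hbase.rest.model.VersionModel;

public class MappedNotationSketch {
  public static void main(String[] args) throws Exception {
    // Same model classes, different JSON notation: mapped instead of natural.
    JAXBContext context = new JSONJAXBContext(
        JSONConfiguration.mapped().rootUnwrapping(true).build(),
        VersionModel.class);
  }
}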
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JacksonProvider.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JacksonProvider.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JacksonProvider.java
new file mode 100644
index 0000000..7791d02
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JacksonProvider.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.provider;
+
+import org.codehaus.jackson.jaxrs.JacksonJaxbJsonProvider;
+
+import javax.ws.rs.ext.Provider;
+
+// Create a class in the configured resource package name
+// so that Jersey activates it automatically.
+// Use Jackson to take care of JSON,
+// since it has better support for object
+// deserialization and is less clunky to deal with.
+@Provider
+public class JacksonProvider extends JacksonJaxbJsonProvider {
+}
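
The comment above relies on Jersey's package scanning: any @Provider class in
a configured resource package is registered automatically. A small sketch of
that mechanism (the wiring shown is an assumption for illustration, not code
from this patch):

import com.sun.jersey.api.core.PackagesResourceConfig;
import com.sun.jersey.api.core.ResourceConfig;

public class ProviderDiscoverySketch {
  public static void main(String[] args) {
    // Jersey 1.x scans these packages for @Provider-annotated classes,
    // which is why JacksonProvider only needs to live in this package.
    ResourceConfig rc =
        new PackagesResourceConfig("org.apache.hadoop.hbase.rest.provider");
    System.out.println(rc.getClasses()); // should include JacksonProvider
  }
}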
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java
new file mode 100644
index 0000000..ec39db0
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java
@@ -0,0 +1,88 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.provider.consumer;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyReader;
+import javax.ws.rs.ext.Provider;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.rest.Constants;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+
+/**
+ * Adapter that hooks Jersey's content processing dispatch up to handlers
+ * implementing the ProtobufMessageHandler interface, for decoding protobuf input.
+ */
+@Provider
+@Consumes({Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF})
+@InterfaceAudience.Private
+public class ProtobufMessageBodyConsumer
+ implements MessageBodyReader<ProtobufMessageHandler> {
+ private static final Log LOG =
+ LogFactory.getLog(ProtobufMessageBodyConsumer.class);
+
+ @Override
+ public boolean isReadable(Class<?> type, Type genericType,
+ Annotation[] annotations, MediaType mediaType) {
+ return ProtobufMessageHandler.class.isAssignableFrom(type);
+ }
+
+ @Override
+ public ProtobufMessageHandler readFrom(Class<ProtobufMessageHandler> type, Type genericType,
+ Annotation[] annotations, MediaType mediaType,
+ MultivaluedMap<String, String> httpHeaders, InputStream inputStream)
+ throws IOException, WebApplicationException {
+ ProtobufMessageHandler obj = null;
+ try {
+ obj = type.newInstance();
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ byte[] buffer = new byte[4096];
+ int read;
+ do {
+ read = inputStream.read(buffer, 0, buffer.length);
+ if (read > 0) {
+ baos.write(buffer, 0, read);
+ }
+ } while (read > 0);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(getClass() + ": read " + baos.size() + " bytes from " +
+ inputStream);
+ }
+ obj = obj.getObjectFromMessage(baos.toByteArray());
+ } catch (InstantiationException e) {
+ throw new WebApplicationException(e);
+ } catch (IllegalAccessException e) {
+ throw new WebApplicationException(e);
+ }
+ return obj;
+ }
+}
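
For context, a client-side sketch of the kind of request this consumer decodes
on the server. The host, port, table/row path, and the application/x-protobuf
media type (the value behind Constants.MIMETYPE_PROTOBUF) are assumptions here:

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.hadoop.hbase.rest.model.CellSetModel;

public class ProtobufPutSketch {
  public static void main(String[] args) throws Exception {
    byte[] body = new CellSetModel().createProtobufOutput(); // empty payload, illustration only
    URL url = new URL("http://localhost:8080/mytable/myrow"); // hypothetical endpoint
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("PUT");
    conn.setDoOutput(true);
    conn.setRequestProperty("Content-Type", "application/x-protobuf");
    OutputStream out = conn.getOutputStream();
    out.write(body); // these are the bytes readFrom() buffers server-side
    out.close();
    System.out.println(conn.getResponseCode());
  }
}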
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java
new file mode 100644
index 0000000..523692a
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java
@@ -0,0 +1,74 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.provider.producer;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyWriter;
+import javax.ws.rs.ext.Provider;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.rest.Constants;
+
+/**
+ * An adapter between Jersey and Object.toString(). Hooks up plain text output
+ * to the Jersey content handling framework.
+ * Jersey will first call getSize() to learn the number of bytes that will be
+ * sent, then writeTo to perform the actual I/O.
+ */
+@Provider
+@Produces(Constants.MIMETYPE_TEXT)
+@InterfaceAudience.Private
+public class PlainTextMessageBodyProducer
+ implements MessageBodyWriter<Object> {
+
+ private ThreadLocal<byte[]> buffer = new ThreadLocal<byte[]>();
+
+ @Override
+ public boolean isWriteable(Class<?> arg0, Type arg1, Annotation[] arg2,
+ MediaType arg3) {
+ return true;
+ }
+
+ @Override
+ public long getSize(Object object, Class<?> type, Type genericType,
+ Annotation[] annotations, MediaType mediaType) {
+ byte[] bytes = object.toString().getBytes();
+ buffer.set(bytes);
+ return bytes.length;
+ }
+
+ @Override
+ public void writeTo(Object object, Class<?> type, Type genericType,
+ Annotation[] annotations, MediaType mediaType,
+ MultivaluedMap<String, Object> httpHeaders, OutputStream outStream)
+ throws IOException, WebApplicationException {
+ byte[] bytes = buffer.get();
+ outStream.write(bytes);
+ buffer.remove();
+ }
+}
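
The getSize()-then-writeTo() handshake described in the Javadoc can be
exercised directly, outside Jersey. A minimal sketch (null is passed for the
parameters this producer ignores):

import java.io.ByteArrayOutputStream;
import org.apache.hadoop.hbase.rest.provider.producer.PlainTextMessageBodyProducer;

public class PlainTextHandshakeDemo {
  public static void main(String[] args) throws Exception {
    PlainTextMessageBodyProducer producer = new PlainTextMessageBodyProducer();
    Object entity = "hello";
    // Jersey calls getSize() first; the producer stashes the bytes...
    long size = producer.getSize(entity, String.class, String.class, null, null);
    // ...then writeTo() replays them and clears the thread-local buffer.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    producer.writeTo(entity, String.class, String.class, null, null, null, out);
    System.out.println(size + " bytes: " + out); // 5 bytes: hello
  }
}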
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java
new file mode 100644
index 0000000..6d737b5
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java
@@ -0,0 +1,81 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.provider.producer;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyWriter;
+import javax.ws.rs.ext.Provider;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.rest.Constants;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+
+/**
+ * An adapter between Jersey and ProtobufMessageHandler implementors. Hooks up
+ * protobuf output producing methods to the Jersey content handling framework.
+ * Jersey will first call getSize() to learn the number of bytes that will be
+ * sent, then writeTo to perform the actual I/O.
+ */
+@Provider
+@Produces({Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF})
+@InterfaceAudience.Private
+public class ProtobufMessageBodyProducer
+ implements MessageBodyWriter<ProtobufMessageHandler> {
+
+ private ThreadLocal<byte[]> buffer = new ThreadLocal<byte[]>();
+
+ @Override
+ public boolean isWriteable(Class<?> type, Type genericType,
+ Annotation[] annotations, MediaType mediaType) {
+ return ProtobufMessageHandler.class.isAssignableFrom(type);
+ }
+
+ @Override
+ public long getSize(ProtobufMessageHandler m, Class<?> type, Type genericType,
+ Annotation[] annotations, MediaType mediaType) {
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ try {
+ baos.write(m.createProtobufOutput());
+ } catch (IOException e) {
+ return -1;
+ }
+ byte[] bytes = baos.toByteArray();
+ buffer.set(bytes);
+ return bytes.length;
+ }
+
+ @Override
+ public void writeTo(ProtobufMessageHandler m, Class<?> type, Type genericType,
+ Annotation[] annotations, MediaType mediaType,
+ MultivaluedMap<String, Object> httpHeaders, OutputStream entityStream)
+ throws IOException, WebApplicationException {
+ byte[] bytes = buffer.get();
+ entityStream.write(bytes);
+ buffer.remove();
+ }
+}
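
On the wire, this producer pairs with the parseFrom() methods in the generated
classes earlier in this patch. A hedged client sketch (the host, port, /version
path, and application/x-protobuf media type are assumptions for illustration):

import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version;

public class ProtobufGetSketch {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:8080/version"); // hypothetical endpoint
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/x-protobuf"); // assumed media type
    InputStream in = conn.getInputStream();
    Version v = Version.parseFrom(in); // generated parser consumes the stream
    System.out.println(v.getRestVersion());
  }
}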
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/resources/hbase-webapps/rest/index.html
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/resources/hbase-webapps/rest/index.html b/hbase-rest/src/main/resources/hbase-webapps/rest/index.html
new file mode 100644
index 0000000..e4084b7
--- /dev/null
+++ b/hbase-rest/src/main/resources/hbase-webapps/rest/index.html
@@ -0,0 +1,20 @@
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<meta HTTP-EQUIV="REFRESH" content="0;url=/rest.jsp"/>
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/resources/hbase-webapps/rest/rest.jsp
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/resources/hbase-webapps/rest/rest.jsp b/hbase-rest/src/main/resources/hbase-webapps/rest/rest.jsp
new file mode 100644
index 0000000..810569c
--- /dev/null
+++ b/hbase-rest/src/main/resources/hbase-webapps/rest/rest.jsp
@@ -0,0 +1,117 @@
+<%--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+--%>
+<%@ page contentType="text/html;charset=UTF-8"
+ import="org.apache.hadoop.conf.Configuration"
+ import="org.apache.hadoop.hbase.HBaseConfiguration"
+ import="org.apache.hadoop.hbase.util.VersionInfo"
+ import="java.util.Date"%>
+<%
+Configuration conf = (Configuration)getServletContext().getAttribute("hbase.conf");
+long startcode = conf.getLong("startcode", System.currentTimeMillis());
+String listenPort = conf.get("hbase.rest.port", "8080");
+%>
+<!--[if IE]>
+<!DOCTYPE html>
+<![endif]-->
+<?xml version="1.0" encoding="UTF-8" ?>
+<html lang="en">
+ <head>
+ <meta charset="utf-8">
+ <title>HBase REST Server: <%= listenPort %></title>
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
+ <meta name="description" content="">
+
+ <link href="/static/css/bootstrap.min.css" rel="stylesheet">
+ <link href="/static/css/bootstrap-theme.min.css" rel="stylesheet">
+ <link href="/static/css/hbase.css" rel="stylesheet">
+ </head>
+
+ <body>
+ <div class="navbar navbar-fixed-top navbar-default">
+ <div class="container">
+ <div class="navbar-header">
+ <button type="button" class="navbar-toggle" data-toggle="collapse" data-target=".navbar-collapse">
+ <span class="icon-bar"></span>
+ <span class="icon-bar"></span>
+ <span class="icon-bar"></span>
+ </button>
+ <a class="navbar-brand" href="/rest.jsp"><img src="/static/hbase_logo_small.png" alt="HBase Logo"/></a>
+ </div>
+ <div class="collapse navbar-collapse">
+ <ul class="nav navbar-nav">
+ <li class="active"><a href="/rest.jsp">Home</a></li>
+ <li><a href="/logs/">Local logs</a></li>
+ <li><a href="/logLevel">Log Level</a></li>
+ <li><a href="/jmx">Metrics Dump</a></li>
+ <% if (HBaseConfiguration.isShowConfInServlet()) { %>
+ <li><a href="/conf">HBase Configuration</a></li>
+ <% } %>
+ </ul>
+ </div><!--/.nav-collapse -->
+ </div>
+ </div>
+
+<div class="container">
+ <div class="row inner_header">
+ <div class="page-header">
+ <h1>RESTServer <small><%= listenPort %></small></h1>
+ </div>
+ </div>
+ <div class="row">
+
+ <section>
+ <h2>Software Attributes</h2>
+ <table id="attributes_table" class="table table-striped">
+ <tr>
+ <th>Attribute Name</th>
+ <th>Value</th>
+ <th>Description</th>
+ </tr>
+ <tr>
+ <td>HBase Version</td>
+ <td><%= VersionInfo.getVersion() %>, r<%= VersionInfo.getRevision() %></td>
+ <td>HBase version and revision</td>
+ </tr>
+ <tr>
+ <td>HBase Compiled</td>
+ <td><%= VersionInfo.getDate() %>, <%= VersionInfo.getUser() %></td>
+ <td>When HBase version was compiled and by whom</td>
+ </tr>
+ <tr>
+ <td>REST Server Start Time</td>
+ <td><%= new Date(startcode) %></td>
+ <td>Date stamp of when this REST server was started</td>
+ </tr>
+ </table>
+ </section>
+ </div>
+ <div class="row">
+
+ <section>
+<a href="http://wiki.apache.org/hadoop/Hbase/Stargate">Apache HBase Wiki on REST</a>
+ </section>
+ </div>
+</div>
+<script src="/static/js/jquery.min.js" type="text/javascript"></script>
+<script src="/static/js/bootstrap.min.js" type="text/javascript"></script>
+<script src="/static/js/tab.js" type="text/javascript"></script>
+</body>
+</html>
+
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/XMLSchema.xsd
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/XMLSchema.xsd b/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/XMLSchema.xsd
new file mode 100644
index 0000000..ab1b722
--- /dev/null
+++ b/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/XMLSchema.xsd
@@ -0,0 +1,181 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<schema targetNamespace="ModelSchema" elementFormDefault="qualified" xmlns="http://www.w3.org/2001/XMLSchema" xmlns:tns="ModelSchema">
+
+ <element name="Version" type="tns:Version"></element>
+
+ <complexType name="Version">
+ <attribute name="REST" type="string"></attribute>
+ <attribute name="JVM" type="string"></attribute>
+ <attribute name="OS" type="string"></attribute>
+ <attribute name="Server" type="string"></attribute>
+ <attribute name="Jersey" type="string"></attribute>
+ </complexType>
+
+ <element name="TableList" type="tns:TableList"></element>
+
+ <complexType name="TableList">
+ <sequence>
+ <element name="table" type="tns:Table" maxOccurs="unbounded" minOccurs="1"></element>
+ </sequence>
+ </complexType>
+
+ <complexType name="Table">
+ <sequence>
+ <element name="name" type="string"></element>
+ </sequence>
+ </complexType>
+
+ <element name="TableInfo" type="tns:TableInfo"></element>
+
+ <complexType name="TableInfo">
+ <sequence>
+ <element name="region" type="tns:TableRegion" maxOccurs="unbounded" minOccurs="1"></element>
+ </sequence>
+ <attribute name="name" type="string"></attribute>
+ </complexType>
+
+ <complexType name="TableRegion">
+ <attribute name="name" type="string"></attribute>
+ <attribute name="id" type="int"></attribute>
+ <attribute name="startKey" type="base64Binary"></attribute>
+ <attribute name="endKey" type="base64Binary"></attribute>
+ <attribute name="location" type="string"></attribute>
+ </complexType>
+
+ <element name="TableSchema" type="tns:TableSchema"></element>
+
+ <complexType name="TableSchema">
+ <sequence>
+ <element name="column" type="tns:ColumnSchema" maxOccurs="unbounded" minOccurs="1"></element>
+ </sequence>
+ <attribute name="name" type="string"></attribute>
+ <anyAttribute></anyAttribute>
+ </complexType>
+
+ <complexType name="ColumnSchema">
+ <attribute name="name" type="string"></attribute>
+ <anyAttribute></anyAttribute>
+ </complexType>
+
+ <element name="CellSet" type="tns:CellSet"></element>
+
+ <complexType name="CellSet">
+ <sequence>
+ <element name="row" type="tns:Row" maxOccurs="unbounded" minOccurs="1"></element>
+ </sequence>
+ </complexType>
+
+ <element name="Row" type="tns:Row"></element>
+
+ <complexType name="Row">
+ <sequence>
+ <element name="key" type="base64Binary"></element>
+ <element name="cell" type="tns:Cell" maxOccurs="unbounded" minOccurs="1"></element>
+ </sequence>
+ </complexType>
+
+ <element name="Cell" type="tns:Cell"></element>
+
+ <complexType name="Cell">
+ <sequence>
+ <element name="value" maxOccurs="1" minOccurs="1">
+ <simpleType><restriction base="base64Binary" />
+ </simpleType>
+ </element>
+ </sequence>
+ <attribute name="column" type="base64Binary" />
+ <attribute name="timestamp" type="int" />
+ </complexType>
+
+ <element name="Scanner" type="tns:Scanner"></element>
+
+ <complexType name="Scanner">
+ <sequence>
+ <element name="column" type="base64Binary" minOccurs="0" maxOccurs="unbounded"></element>
+ <element name="labels" type="string" minOccurs="0" maxOccurs="unbounded"></element>
+ </sequence>
+ <attribute name="startRow" type="base64Binary"></attribute>
+ <attribute name="endRow" type="base64Binary"></attribute>
+ <attribute name="batch" type="int"></attribute>
+ <attribute name="startTime" type="int"></attribute>
+ <attribute name="endTime" type="int"></attribute>
+ <attribute name="filter" type="string"></attribute>
+ <attribute name="caching" type="int"></attribute>
+ <attribute name="cacheBlocks" type="boolean"></attribute>
+ </complexType>
+
+ <element name="StorageClusterVersion" type="tns:StorageClusterVersion" />
+
+ <complexType name="StorageClusterVersion">
+ <attribute name="version" type="string"></attribute>
+ </complexType>
+
+ <element name="StorageClusterStatus"
+ type="tns:StorageClusterStatus">
+ </element>
+
+ <complexType name="StorageClusterStatus">
+ <sequence>
+ <element name="liveNode" type="tns:Node"
+ maxOccurs="unbounded" minOccurs="0">
+ </element>
+ <element name="deadNode" type="string" maxOccurs="unbounded"
+ minOccurs="0">
+ </element>
+ </sequence>
+ <attribute name="regions" type="int"></attribute>
+ <attribute name="requests" type="int"></attribute>
+ <attribute name="averageLoad" type="float"></attribute>
+ </complexType>
+
+ <complexType name="Node">
+ <sequence>
+ <element name="region" type="tns:Region"
+ maxOccurs="unbounded" minOccurs="0">
+ </element>
+ </sequence>
+ <attribute name="name" type="string"></attribute>
+ <attribute name="startCode" type="int"></attribute>
+ <attribute name="requests" type="int"></attribute>
+ <attribute name="heapSizeMB" type="int"></attribute>
+ <attribute name="maxHeapSizeMB" type="int"></attribute>
+ </complexType>
+
+ <complexType name="Region">
+ <attribute name="name" type="base64Binary"></attribute>
+ <attribute name="stores" type="int"></attribute>
+ <attribute name="storefiles" type="int"></attribute>
+ <attribute name="storefileSizeMB" type="int"></attribute>
+ <attribute name="memstoreSizeMB" type="int"></attribute>
+ <attribute name="storefileIndexSizeMB" type="int"></attribute>
+ <attribute name="readRequestsCount" type="int"></attribute>
+ <attribute name="writeRequestsCount" type="int"></attribute>
+ <attribute name="rootIndexSizeKB" type="int"></attribute>
+ <attribute name="totalStaticIndexSizeKB" type="int"></attribute>
+ <attribute name="totalStaticBloomSizeKB" type="int"></attribute>
+ <attribute name="totalCompactingKVs" type="int"></attribute>
+ <attribute name="currentCompactedKVs" type="int"></attribute>
+ </complexType>
+
+</schema>
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/CellMessage.proto
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/CellMessage.proto b/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/CellMessage.proto
new file mode 100644
index 0000000..8d45154
--- /dev/null
+++ b/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/CellMessage.proto
@@ -0,0 +1,25 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.rest.protobuf.generated;
+
+message Cell {
+ optional bytes row = 1; // unused if Cell is in a CellSet
+ optional bytes column = 2;
+ optional int64 timestamp = 3;
+ optional bytes data = 4;
+}
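
Once protoc generates the Java class for this message (as the generated sources
elsewhere in this patch show), building and serializing a Cell looks like the
following sketch; the row/column/value strings are illustrative only:

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell;

public class CellSketch {
  public static void main(String[] args) throws Exception {
    Cell cell = Cell.newBuilder()
        .setRow(ByteString.copyFromUtf8("row1")) // unused inside a CellSet
        .setColumn(ByteString.copyFromUtf8("cf:qual"))
        .setTimestamp(1234567890L)
        .setData(ByteString.copyFromUtf8("value"))
        .build();
    Cell decoded = Cell.parseFrom(cell.toByteArray());
    System.out.println(decoded.getData().toStringUtf8()); // value
  }
}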
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/CellSetMessage.proto
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/CellSetMessage.proto b/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/CellSetMessage.proto
new file mode 100644
index 0000000..4774a8d
--- /dev/null
+++ b/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/CellSetMessage.proto
@@ -0,0 +1,28 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import "CellMessage.proto";
+
+package org.apache.hadoop.hbase.rest.protobuf.generated;
+
+message CellSet {
+ message Row {
+ required bytes key = 1;
+ repeated Cell values = 2;
+ }
+ repeated Row rows = 1;
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/ColumnSchemaMessage.proto
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/ColumnSchemaMessage.proto b/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/ColumnSchemaMessage.proto
new file mode 100644
index 0000000..05e33b6
--- /dev/null
+++ b/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/ColumnSchemaMessage.proto
@@ -0,0 +1,31 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.rest.protobuf.generated;
+
+message ColumnSchema {
+ optional string name = 1;
+ message Attribute {
+ required string name = 1;
+ required string value = 2;
+ }
+ repeated Attribute attrs = 2;
+ // optional helpful encodings of commonly used attributes
+ optional int32 ttl = 3;
+ optional int32 maxVersions = 4;
+ optional string compression = 5;
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/ScannerMessage.proto
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/ScannerMessage.proto b/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/ScannerMessage.proto
new file mode 100644
index 0000000..185eac6
--- /dev/null
+++ b/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/ScannerMessage.proto
@@ -0,0 +1,32 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.rest.protobuf.generated;
+
+message Scanner {
+ optional bytes startRow = 1;
+ optional bytes endRow = 2;
+ repeated bytes columns = 3;
+ optional int32 batch = 4;
+ optional int64 startTime = 5;
+ optional int64 endTime = 6;
+ optional int32 maxVersions = 7;
+ optional string filter = 8;
+ optional int32 caching = 9; // specifies REST scanner caching
+ repeated string labels = 10;
+ optional bool cacheBlocks = 11; // server side block caching hint
+}
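
The caching field bounds how many rows the REST server fetches from the
region server per round trip, while cacheBlocks hints whether scanned blocks
should populate the server-side block cache. A sketch of serializing a
scanner request, under the same generated-class naming assumption as above;
the row keys and tuning values are illustrative, and the resulting bytes
would be POSTed to the table's scanner resource:

  import com.google.protobuf.ByteString;
  import org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner;

  public class ScannerRequestSketch {
    // Serialize a Scanner message describing a bounded, tuned scan.
    public static byte[] scanRange() {
      return Scanner.newBuilder()
          .setStartRow(ByteString.copyFromUtf8("aaa"))
          .setEndRow(ByteString.copyFromUtf8("aay"))
          .setBatch(100)         // cells returned per Result
          .setCaching(1000)      // rows per server round trip
          .setCacheBlocks(false) // avoid polluting the block cache
          .build()
          .toByteArray();
    }
  }
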
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/StorageClusterStatusMessage.proto
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/StorageClusterStatusMessage.proto b/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/StorageClusterStatusMessage.proto
new file mode 100644
index 0000000..a0291b4
--- /dev/null
+++ b/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/StorageClusterStatusMessage.proto
@@ -0,0 +1,51 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.rest.protobuf.generated;
+
+message StorageClusterStatus {
+ message Region {
+ required bytes name = 1;
+ optional int32 stores = 2;
+ optional int32 storefiles = 3;
+ optional int32 storefileSizeMB = 4;
+ optional int32 memstoreSizeMB = 5;
+ optional int32 storefileIndexSizeMB = 6;
+ optional int64 readRequestsCount = 7;
+ optional int64 writeRequestsCount = 8;
+ optional int32 rootIndexSizeKB = 9;
+ optional int32 totalStaticIndexSizeKB = 10;
+ optional int32 totalStaticBloomSizeKB = 11;
+ optional int64 totalCompactingKVs = 12;
+ optional int64 currentCompactedKVs = 13;
+ }
+ message Node {
+ required string name = 1; // name:port
+ optional int64 startCode = 2;
+ optional int32 requests = 3;
+ optional int32 heapSizeMB = 4;
+ optional int32 maxHeapSizeMB = 5;
+ repeated Region regions = 6;
+ }
+ // node status
+ repeated Node liveNodes = 1;
+ repeated string deadNodes = 2;
+ // summary statistics
+ optional int32 regions = 3;
+ optional int32 requests = 4;
+ optional double averageLoad = 5;
+}
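
A sketch of reading both the summary statistics and the per-node detail back
out of a serialized status body, using the stock proto2 parseFrom and list
accessors (the class name and the choice of printed fields are illustrative):

  import java.io.IOException;
  import org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus;

  public class ClusterStatusSketch {
    // Print the cluster-wide summary plus one line per live region server.
    public static void dump(byte[] body) throws IOException {
      StorageClusterStatus status = StorageClusterStatus.parseFrom(body);
      System.out.println("regions=" + status.getRegions()
          + " requests=" + status.getRequests()
          + " averageLoad=" + status.getAverageLoad());
      for (StorageClusterStatus.Node node : status.getLiveNodesList()) {
        System.out.println(node.getName() + " start=" + node.getStartCode()
            + " regions=" + node.getRegionsCount());
      }
    }
  }
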
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/TableInfoMessage.proto
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/TableInfoMessage.proto b/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/TableInfoMessage.proto
new file mode 100644
index 0000000..674499c
--- /dev/null
+++ b/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/TableInfoMessage.proto
@@ -0,0 +1,30 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.rest.protobuf.generated;
+
+message TableInfo {
+ required string name = 1;
+ message Region {
+ required string name = 1;
+ optional bytes startKey = 2;
+ optional bytes endKey = 3;
+ optional int64 id = 4;
+ optional string location = 5;
+ }
+ repeated Region regions = 2;
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/TableListMessage.proto
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/TableListMessage.proto b/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/TableListMessage.proto
new file mode 100644
index 0000000..fbd76ea
--- /dev/null
+++ b/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/TableListMessage.proto
@@ -0,0 +1,22 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.rest.protobuf.generated;
+
+message TableList {
+ repeated string name = 1;
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/TableSchemaMessage.proto
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/TableSchemaMessage.proto b/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/TableSchemaMessage.proto
new file mode 100644
index 0000000..47a4da5
--- /dev/null
+++ b/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/TableSchemaMessage.proto
@@ -0,0 +1,33 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import "ColumnSchemaMessage.proto";
+
+package org.apache.hadoop.hbase.rest.protobuf.generated;
+
+message TableSchema {
+ optional string name = 1;
+ message Attribute {
+ required string name = 1;
+ required string value = 2;
+ }
+ repeated Attribute attrs = 2;
+ repeated ColumnSchema columns = 3;
+ // optional helpful encodings of commonly used attributes
+ optional bool inMemory = 4;
+ optional bool readOnly = 5;
+}
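
Because this file imports ColumnSchemaMessage.proto, a TableSchema embeds the
ColumnSchema messages defined earlier. A sketch composing the two, under the
same generated-class naming assumptions; the table and family names are
placeholders:

  import org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema;
  import org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema;

  public class TableSchemaSketch {
    // Build a TableSchema with one embedded column family schema.
    public static TableSchema exampleTable() {
      return TableSchema.newBuilder()
          .setName("users")
          .setInMemory(false) // mirrors the IN_MEMORY table attribute
          .addColumns(ColumnSchema.newBuilder()
              .setName("cf")
              .setMaxVersions(3))
          .build();
    }
  }
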
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/VersionMessage.proto
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/VersionMessage.proto b/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/VersionMessage.proto
new file mode 100644
index 0000000..cc107b3
--- /dev/null
+++ b/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/VersionMessage.proto
@@ -0,0 +1,26 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.rest.protobuf.generated;
+
+message Version {
+ optional string restVersion = 1;
+ optional string jvmVersion = 2;
+ optional string osVersion = 3;
+ optional string serverVersion = 4;
+ optional string jerseyVersion = 5;
+}
[02/38] HBASE-12197 Move rest to its own module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java
deleted file mode 100644
index 2e55181..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java
+++ /dev/null
@@ -1,508 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.rest;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.DataInputStream;
-import java.io.EOFException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
-
-import javax.ws.rs.core.MediaType;
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Unmarshaller;
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.parsers.SAXParserFactory;
-import javax.xml.stream.XMLStreamException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MediumTests;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.rest.client.Client;
-import org.apache.hadoop.hbase.rest.client.Cluster;
-import org.apache.hadoop.hbase.rest.client.Response;
-import org.apache.hadoop.hbase.rest.model.CellModel;
-import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
-import org.apache.hadoop.hbase.rest.provider.JacksonProvider;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.codehaus.jackson.JsonFactory;
-import org.codehaus.jackson.JsonParser;
-import org.codehaus.jackson.JsonToken;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.xml.sax.InputSource;
-import org.xml.sax.XMLReader;
-
-@Category(MediumTests.class)
-public class TestTableScan {
-
- private static final String TABLE = "TestScanResource";
- private static final String CFA = "a";
- private static final String CFB = "b";
- private static final String COLUMN_1 = CFA + ":1";
- private static final String COLUMN_2 = CFB + ":2";
- private static Client client;
- private static int expectedRows1;
- private static int expectedRows2;
- private static Configuration conf;
-
- private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
- private static final HBaseRESTTestingUtility REST_TEST_UTIL =
- new HBaseRESTTestingUtility();
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- conf = TEST_UTIL.getConfiguration();
- TEST_UTIL.startMiniCluster();
- REST_TEST_UTIL.startServletContainer(conf);
- client = new Client(new Cluster().add("localhost",
- REST_TEST_UTIL.getServletPort()));
- HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
- if (!admin.tableExists(TABLE)) {
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
- htd.addFamily(new HColumnDescriptor(CFA));
- htd.addFamily(new HColumnDescriptor(CFB));
- admin.createTable(htd);
- expectedRows1 = TestScannerResource.insertData(conf, TABLE, COLUMN_1, 1.0);
- expectedRows2 = TestScannerResource.insertData(conf, TABLE, COLUMN_2, 0.5);
- }
- }
-
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- TEST_UTIL.getHBaseAdmin().disableTable(TABLE);
- TEST_UTIL.getHBaseAdmin().deleteTable(TABLE);
- REST_TEST_UTIL.shutdownServletContainer();
- TEST_UTIL.shutdownMiniCluster();
- }
-
- @Test
- public void testSimpleScannerXML() throws IOException, JAXBException, XMLStreamException {
- // Test scanning particular columns
- StringBuilder builder = new StringBuilder();
- builder.append("/*");
- builder.append("?");
- builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
- builder.append("&");
- builder.append(Constants.SCAN_LIMIT + "=10");
- Response response = client.get("/" + TABLE + builder.toString(),
- Constants.MIMETYPE_XML);
- assertEquals(200, response.getCode());
- assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
- JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class);
- Unmarshaller ush = ctx.createUnmarshaller();
- CellSetModel model = (CellSetModel) ush.unmarshal(response.getStream());
- int count = TestScannerResource.countCellSet(model);
- assertEquals(10, count);
- checkRowsNotNull(model);
-
- //Test with no limit.
- builder = new StringBuilder();
- builder.append("/*");
- builder.append("?");
- builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
- response = client.get("/" + TABLE + builder.toString(),
- Constants.MIMETYPE_XML);
- assertEquals(200, response.getCode());
- assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
- model = (CellSetModel) ush.unmarshal(response.getStream());
- count = TestScannerResource.countCellSet(model);
- assertEquals(expectedRows1, count);
- checkRowsNotNull(model);
-
- //Test with start and end row.
- builder = new StringBuilder();
- builder.append("/*");
- builder.append("?");
- builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
- builder.append("&");
- builder.append(Constants.SCAN_START_ROW + "=aaa");
- builder.append("&");
- builder.append(Constants.SCAN_END_ROW + "=aay");
- response = client.get("/" + TABLE + builder.toString(),
- Constants.MIMETYPE_XML);
- assertEquals(200, response.getCode());
- model = (CellSetModel) ush.unmarshal(response.getStream());
- count = TestScannerResource.countCellSet(model);
- RowModel startRow = model.getRows().get(0);
- assertEquals("aaa", Bytes.toString(startRow.getKey()));
- RowModel endRow = model.getRows().get(model.getRows().size() - 1);
- assertEquals("aax", Bytes.toString(endRow.getKey()));
- assertEquals(24, count);
- checkRowsNotNull(model);
-
- //Test with start row and limit.
- builder = new StringBuilder();
- builder.append("/*");
- builder.append("?");
- builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
- builder.append("&");
- builder.append(Constants.SCAN_START_ROW + "=aaa");
- builder.append("&");
- builder.append(Constants.SCAN_LIMIT + "=15");
- response = client.get("/" + TABLE + builder.toString(),
- Constants.MIMETYPE_XML);
- assertEquals(200, response.getCode());
- assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
- model = (CellSetModel) ush.unmarshal(response.getStream());
- startRow = model.getRows().get(0);
- assertEquals("aaa", Bytes.toString(startRow.getKey()));
- count = TestScannerResource.countCellSet(model);
- assertEquals(15, count);
- checkRowsNotNull(model);
-
- }
-
- @Test
- public void testSimpleScannerJson() throws IOException, JAXBException {
- // Test scanning particular columns with limit.
- StringBuilder builder = new StringBuilder();
- builder.append("/*");
- builder.append("?");
- builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
- builder.append("&");
- builder.append(Constants.SCAN_LIMIT + "=20");
- Response response = client.get("/" + TABLE + builder.toString(),
- Constants.MIMETYPE_JSON);
- assertEquals(200, response.getCode());
- assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
- ObjectMapper mapper = new JacksonProvider()
- .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE);
- CellSetModel model = mapper.readValue(response.getStream(), CellSetModel.class);
- int count = TestScannerResource.countCellSet(model);
- assertEquals(20, count);
- checkRowsNotNull(model);
-
- //Test scanning with no limit.
- builder = new StringBuilder();
- builder.append("/*");
- builder.append("?");
- builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_2);
- response = client.get("/" + TABLE + builder.toString(),
- Constants.MIMETYPE_JSON);
- assertEquals(200, response.getCode());
- assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
- model = mapper.readValue(response.getStream(), CellSetModel.class);
- count = TestScannerResource.countCellSet(model);
- assertEquals(expectedRows2, count);
- checkRowsNotNull(model);
-
- //Test with start row and end row.
- builder = new StringBuilder();
- builder.append("/*");
- builder.append("?");
- builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
- builder.append("&");
- builder.append(Constants.SCAN_START_ROW + "=aaa");
- builder.append("&");
- builder.append(Constants.SCAN_END_ROW + "=aay");
- response = client.get("/" + TABLE + builder.toString(),
- Constants.MIMETYPE_JSON);
- assertEquals(200, response.getCode());
- model = mapper.readValue(response.getStream(), CellSetModel.class);
- RowModel startRow = model.getRows().get(0);
- assertEquals("aaa", Bytes.toString(startRow.getKey()));
- RowModel endRow = model.getRows().get(model.getRows().size() - 1);
- assertEquals("aax", Bytes.toString(endRow.getKey()));
- count = TestScannerResource.countCellSet(model);
- assertEquals(24, count);
- checkRowsNotNull(model);
- }
-
- /**
- * An example to scan using listener in unmarshaller for XML.
- * @throws Exception the exception
- */
- @Test
- public void testScanUsingListenerUnmarshallerXML() throws Exception {
- StringBuilder builder = new StringBuilder();
- builder.append("/*");
- builder.append("?");
- builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
- builder.append("&");
- builder.append(Constants.SCAN_LIMIT + "=10");
- Response response = client.get("/" + TABLE + builder.toString(),
- Constants.MIMETYPE_XML);
- assertEquals(200, response.getCode());
- assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
- JAXBContext context = JAXBContext.newInstance(ClientSideCellSetModel.class, RowModel.class,
- CellModel.class);
- Unmarshaller unmarshaller = context.createUnmarshaller();
-
- final ClientSideCellSetModel.Listener listener = new ClientSideCellSetModel.Listener() {
- @Override
- public void handleRowModel(ClientSideCellSetModel helper, RowModel row) {
- assertTrue(row.getKey() != null);
- assertTrue(row.getCells().size() > 0);
- }
- };
-
- // install the callback on all ClientSideCellSetModel instances
- unmarshaller.setListener(new Unmarshaller.Listener() {
- public void beforeUnmarshal(Object target, Object parent) {
- if (target instanceof ClientSideCellSetModel) {
- ((ClientSideCellSetModel) target).setCellSetModelListener(listener);
- }
- }
-
- public void afterUnmarshal(Object target, Object parent) {
- if (target instanceof ClientSideCellSetModel) {
- ((ClientSideCellSetModel) target).setCellSetModelListener(null);
- }
- }
- });
-
- // create a new XML parser
- SAXParserFactory factory = SAXParserFactory.newInstance();
- factory.setNamespaceAware(true);
- XMLReader reader = factory.newSAXParser().getXMLReader();
- reader.setContentHandler(unmarshaller.getUnmarshallerHandler());
- assertFalse(ClientSideCellSetModel.listenerInvoked);
- reader.parse(new InputSource(response.getStream()));
- assertTrue(ClientSideCellSetModel.listenerInvoked);
-
- }
-
- @Test
- public void testStreamingJSON() throws Exception {
- // Test scanning particular columns with limit.
- StringBuilder builder = new StringBuilder();
- builder.append("/*");
- builder.append("?");
- builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
- builder.append("&");
- builder.append(Constants.SCAN_LIMIT + "=20");
- Response response = client.get("/" + TABLE + builder.toString(),
- Constants.MIMETYPE_JSON);
- assertEquals(200, response.getCode());
- assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
- ObjectMapper mapper = new JacksonProvider()
- .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE);
- CellSetModel model = mapper.readValue(response.getStream(), CellSetModel.class);
- int count = TestScannerResource.countCellSet(model);
- assertEquals(20, count);
- checkRowsNotNull(model);
-
- //Test scanning with no limit.
- builder = new StringBuilder();
- builder.append("/*");
- builder.append("?");
- builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_2);
- response = client.get("/" + TABLE + builder.toString(),
- Constants.MIMETYPE_JSON);
- assertEquals(200, response.getCode());
- assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
- model = mapper.readValue(response.getStream(), CellSetModel.class);
- count = TestScannerResource.countCellSet(model);
- assertEquals(expectedRows2, count);
- checkRowsNotNull(model);
-
- //Test with start row and end row.
- builder = new StringBuilder();
- builder.append("/*");
- builder.append("?");
- builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
- builder.append("&");
- builder.append(Constants.SCAN_START_ROW + "=aaa");
- builder.append("&");
- builder.append(Constants.SCAN_END_ROW + "=aay");
- response = client.get("/" + TABLE + builder.toString(),
- Constants.MIMETYPE_JSON);
- assertEquals(200, response.getCode());
-
- count = 0;
- JsonFactory jfactory = new JsonFactory(mapper);
- JsonParser jParser = jfactory.createJsonParser(response.getStream());
- boolean found = false;
- while (jParser.nextToken() != JsonToken.END_OBJECT) {
- if(jParser.getCurrentToken() == JsonToken.START_OBJECT && found) {
- RowModel row = jParser.readValueAs(RowModel.class);
- assertNotNull(row.getKey());
- for (int i = 0; i < row.getCells().size(); i++) {
- if (count == 0) {
- assertEquals("aaa", Bytes.toString(row.getKey()));
- }
- if (count == 23) {
- assertEquals("aax", Bytes.toString(row.getKey()));
- }
- count++;
- }
- jParser.skipChildren();
- } else {
- found = jParser.getCurrentToken() == JsonToken.START_ARRAY;
- }
- }
- assertEquals(24, count);
- }
-
- @Test
- public void testSimpleScannerProtobuf() throws Exception {
- StringBuilder builder = new StringBuilder();
- builder.append("/*");
- builder.append("?");
- builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
- builder.append("&");
- builder.append(Constants.SCAN_LIMIT + "=15");
- Response response = client.get("/" + TABLE + builder.toString(),
- Constants.MIMETYPE_PROTOBUF);
- assertEquals(200, response.getCode());
- assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
- int rowCount = readProtobufStream(response.getStream());
- assertEquals(15, rowCount);
-
- //Test with start row and end row.
- builder = new StringBuilder();
- builder.append("/*");
- builder.append("?");
- builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
- builder.append("&");
- builder.append(Constants.SCAN_START_ROW + "=aaa");
- builder.append("&");
- builder.append(Constants.SCAN_END_ROW + "=aay");
- response = client.get("/" + TABLE + builder.toString(),
- Constants.MIMETYPE_PROTOBUF);
- assertEquals(200, response.getCode());
- assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
- rowCount = readProtobufStream(response.getStream());
- assertEquals(24, rowCount);
- }
-
- private void checkRowsNotNull(CellSetModel model) {
- for (RowModel row: model.getRows()) {
- assertTrue(row.getKey() != null);
- assertTrue(row.getCells().size() > 0);
- }
- }
-
- /**
- * Read protobuf stream.
- * @param inputStream the input stream
- * @return The number of rows in the cell set model.
- * @throws IOException Signals that an I/O exception has occurred.
- */
- public int readProtobufStream(InputStream inputStream) throws IOException{
- DataInputStream stream = new DataInputStream(inputStream);
- CellSetModel model = null;
- int rowCount = 0;
- try {
- while (true) {
- byte[] lengthBytes = new byte[2];
- int readBytes = stream.read(lengthBytes);
- if (readBytes == -1) {
- break;
- }
- assertEquals(2, readBytes);
- int length = Bytes.toShort(lengthBytes);
- byte[] cellset = new byte[length];
- stream.read(cellset);
- model = new CellSetModel();
- model.getObjectFromMessage(cellset);
- checkRowsNotNull(model);
- rowCount = rowCount + TestScannerResource.countCellSet(model);
- }
- } catch (EOFException exp) {
- exp.printStackTrace();
- } finally {
- stream.close();
- }
- return rowCount;
- }
-
- @Test
- public void testScanningUnknownColumnJson() throws IOException, JAXBException {
- // Test scanning particular columns with limit.
- StringBuilder builder = new StringBuilder();
- builder.append("/*");
- builder.append("?");
- builder.append(Constants.SCAN_COLUMN + "=a:test");
- Response response = client.get("/" + TABLE + builder.toString(),
- Constants.MIMETYPE_JSON);
- assertEquals(200, response.getCode());
- assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
- ObjectMapper mapper = new JacksonProvider().locateMapper(CellSetModel.class,
- MediaType.APPLICATION_JSON_TYPE);
- CellSetModel model = mapper.readValue(response.getStream(), CellSetModel.class);
- int count = TestScannerResource.countCellSet(model);
- assertEquals(0, count);
- }
-
- /**
- * The Class ClientSideCellSetModel which mimics cell set model, and contains listener to perform
- * user defined operations on the row model.
- */
- @XmlRootElement(name = "CellSet")
- @XmlAccessorType(XmlAccessType.FIELD)
- public static class ClientSideCellSetModel implements Serializable {
-
- private static final long serialVersionUID = 1L;
-
- /**
- * This list is not a real list; instead it will notify a listener whenever JAXB has
- * unmarshalled the next row.
- */
- @XmlElement(name="Row")
- private List<RowModel> row;
-
- static boolean listenerInvoked = false;
-
- /**
- * Install a listener for row model on this object. If l is null, the listener
- * is removed again.
- */
- public void setCellSetModelListener(final Listener l) {
- row = (l == null) ? null : new ArrayList<RowModel>() {
- private static final long serialVersionUID = 1L;
-
- public boolean add(RowModel o) {
- l.handleRowModel(ClientSideCellSetModel.this, o);
- listenerInvoked = true;
- return false;
- }
- };
- }
-
- /**
- * This listener is invoked every time a new row model is unmarshalled.
- */
- public static interface Listener {
- void handleRowModel(ClientSideCellSetModel helper, RowModel rowModel);
- }
- }
-}
-
-
-
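
The readProtobufStream helper above also documents the framing used when the
server streams protobuf cell sets: each frame is a two-byte big-endian length
(decoded via Bytes.toShort) followed by that many bytes of one serialized
CellSet message, repeated until end of stream. A sketch of the same loop that
swaps the raw stream.read(cellset) for readFully, since read() may legally
return fewer bytes than requested (the class name is illustrative):

  import java.io.DataInputStream;
  import java.io.EOFException;
  import java.io.IOException;
  import java.io.InputStream;
  import org.apache.hadoop.hbase.rest.model.CellSetModel;

  public class ProtobufStreamSketch {
    // Count the length-prefixed CellSet frames in a streamed protobuf scan.
    public static int countFrames(InputStream in) throws IOException {
      DataInputStream stream = new DataInputStream(in);
      int frames = 0;
      try {
        while (true) {
          int length = stream.readUnsignedShort(); // 2-byte big-endian prefix
          byte[] frame = new byte[length];
          stream.readFully(frame);                 // never short-reads
          new CellSetModel().getObjectFromMessage(frame);
          frames++;
        }
      } catch (EOFException expected) {
        // clean end of stream between frames
      } finally {
        stream.close();
      }
      return frames;
    }
  }
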
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java
deleted file mode 100644
index ef68084..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.MediumTests;
-import org.apache.hadoop.hbase.rest.client.Client;
-import org.apache.hadoop.hbase.rest.client.Cluster;
-import org.apache.hadoop.hbase.rest.client.Response;
-import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel;
-import org.apache.hadoop.hbase.rest.model.VersionModel;
-import org.apache.hadoop.hbase.util.Bytes;
-
-import static org.junit.Assert.*;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import com.sun.jersey.spi.container.servlet.ServletContainer;
-import org.junit.experimental.categories.Category;
-
-@Category(MediumTests.class)
-public class TestVersionResource {
- private static final Log LOG = LogFactory.getLog(TestVersionResource.class);
-
- private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
- private static final HBaseRESTTestingUtility REST_TEST_UTIL =
- new HBaseRESTTestingUtility();
- private static Client client;
- private static JAXBContext context;
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- TEST_UTIL.startMiniCluster();
- REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
- client = new Client(new Cluster().add("localhost",
- REST_TEST_UTIL.getServletPort()));
- context = JAXBContext.newInstance(
- VersionModel.class,
- StorageClusterVersionModel.class);
- }
-
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- REST_TEST_UTIL.shutdownServletContainer();
- TEST_UTIL.shutdownMiniCluster();
- }
-
- private static void validate(VersionModel model) {
- assertNotNull(model);
- assertNotNull(model.getRESTVersion());
- assertEquals(model.getRESTVersion(), RESTServlet.VERSION_STRING);
- String osVersion = model.getOSVersion();
- assertNotNull(osVersion);
- assertTrue(osVersion.contains(System.getProperty("os.name")));
- assertTrue(osVersion.contains(System.getProperty("os.version")));
- assertTrue(osVersion.contains(System.getProperty("os.arch")));
- String jvmVersion = model.getJVMVersion();
- assertNotNull(jvmVersion);
- assertTrue(jvmVersion.contains(System.getProperty("java.vm.vendor")));
- assertTrue(jvmVersion.contains(System.getProperty("java.version")));
- assertTrue(jvmVersion.contains(System.getProperty("java.vm.version")));
- assertNotNull(model.getServerVersion());
- String jerseyVersion = model.getJerseyVersion();
- assertNotNull(jerseyVersion);
- assertEquals(jerseyVersion, ServletContainer.class.getPackage()
- .getImplementationVersion());
- }
-
- @Test
- public void testGetStargateVersionText() throws IOException {
- Response response = client.get("/version", Constants.MIMETYPE_TEXT);
- assertTrue(response.getCode() == 200);
- assertEquals(Constants.MIMETYPE_TEXT, response.getHeader("content-type"));
- String body = Bytes.toString(response.getBody());
- assertTrue(body.length() > 0);
- assertTrue(body.contains(RESTServlet.VERSION_STRING));
- assertTrue(body.contains(System.getProperty("java.vm.vendor")));
- assertTrue(body.contains(System.getProperty("java.version")));
- assertTrue(body.contains(System.getProperty("java.vm.version")));
- assertTrue(body.contains(System.getProperty("os.name")));
- assertTrue(body.contains(System.getProperty("os.version")));
- assertTrue(body.contains(System.getProperty("os.arch")));
- assertTrue(body.contains(ServletContainer.class.getPackage()
- .getImplementationVersion()));
- }
-
- @Test
- public void testGetStargateVersionXML() throws IOException, JAXBException {
- Response response = client.get("/version", Constants.MIMETYPE_XML);
- assertTrue(response.getCode() == 200);
- assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
- VersionModel model = (VersionModel)
- context.createUnmarshaller().unmarshal(
- new ByteArrayInputStream(response.getBody()));
- validate(model);
- LOG.info("success retrieving Stargate version as XML");
- }
-
- @Test
- public void testGetStargateVersionJSON() throws IOException {
- Response response = client.get("/version", Constants.MIMETYPE_JSON);
- assertTrue(response.getCode() == 200);
- assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
- }
-
- @Test
- public void testGetStargateVersionPB() throws IOException {
- Response response = client.get("/version", Constants.MIMETYPE_PROTOBUF);
- assertTrue(response.getCode() == 200);
- assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
- VersionModel model = new VersionModel();
- model.getObjectFromMessage(response.getBody());
- validate(model);
- response = client.get("/version", Constants.MIMETYPE_PROTOBUF_IETF);
- assertTrue(response.getCode() == 200);
- assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type"));
- model = new VersionModel();
- model.getObjectFromMessage(response.getBody());
- validate(model);
- }
-
- @Test
- public void testGetStorageClusterVersionText() throws IOException {
- Response response = client.get("/version/cluster", Constants.MIMETYPE_TEXT);
- assertTrue(response.getCode() == 200);
- assertEquals(Constants.MIMETYPE_TEXT, response.getHeader("content-type"));
- }
-
- @Test
- public void testGetStorageClusterVersionXML() throws IOException,
- JAXBException {
- Response response = client.get("/version/cluster",Constants.MIMETYPE_XML);
- assertTrue(response.getCode() == 200);
- assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
- StorageClusterVersionModel clusterVersionModel =
- (StorageClusterVersionModel)
- context.createUnmarshaller().unmarshal(
- new ByteArrayInputStream(response.getBody()));
- assertNotNull(clusterVersionModel);
- assertNotNull(clusterVersionModel.getVersion());
- LOG.info("success retrieving storage cluster version as XML");
- }
-
- @Test
- public void doTestGetStorageClusterVersionJSON() throws IOException {
- Response response = client.get("/version/cluster", Constants.MIMETYPE_JSON);
- assertTrue(response.getCode() == 200);
- assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
- }
-
-}
-
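
The /version and /version/cluster resources exercised above negotiate among
plain text, XML, JSON and the protobuf MIME types. A sketch of a programmatic
version check over protobuf, built from the same client calls the test uses;
the host and port are placeholders:

  import java.io.IOException;
  import org.apache.hadoop.hbase.rest.Constants;
  import org.apache.hadoop.hbase.rest.client.Client;
  import org.apache.hadoop.hbase.rest.client.Cluster;
  import org.apache.hadoop.hbase.rest.client.Response;
  import org.apache.hadoop.hbase.rest.model.VersionModel;

  public class VersionCheckSketch {
    // Fetch /version as protobuf and return the REST server version string.
    public static String restVersion() throws IOException {
      Client client = new Client(new Cluster().add("resthost", 8080));
      Response response = client.get("/version", Constants.MIMETYPE_PROTOBUF);
      if (response.getCode() != 200) {
        throw new IOException("unexpected HTTP " + response.getCode());
      }
      VersionModel model = new VersionModel();
      model.getObjectFromMessage(response.getBody());
      return model.getRESTVersion();
    }
  }
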
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java
deleted file mode 100644
index a5e5b93..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.rest.client;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-import java.io.IOException;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.SmallTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-/**
- * Tests {@link RemoteAdmin} retries.
- */
-@Category(SmallTests.class)
-public class TestRemoteAdminRetries {
-
- private static final int SLEEP_TIME = 50;
- private static final int RETRIES = 3;
- private static final long MAX_TIME = SLEEP_TIME * (RETRIES - 1);
-
- private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-
- private RemoteAdmin remoteAdmin;
- private Client client;
-
- @Before
- public void setup() throws Exception {
- client = mock(Client.class);
- Response response = new Response(509);
- when(client.get(anyString(), anyString())).thenReturn(response);
- when(client.delete(anyString())).thenReturn(response);
- when(client.put(anyString(), anyString(), any(byte[].class))).thenReturn(response);
- when(client.post(anyString(), anyString(), any(byte[].class))).thenReturn(response);
- Configuration configuration = TEST_UTIL.getConfiguration();
-
- configuration.setInt("hbase.rest.client.max.retries", RETRIES);
- configuration.setInt("hbase.rest.client.sleep", SLEEP_TIME);
-
- remoteAdmin = new RemoteAdmin(client, TEST_UTIL.getConfiguration(), "MyTable");
- }
-
- @Test
- public void testFailingGetRestVersion() throws Exception {
- testTimedOutGetCall(new CallExecutor() {
- @Override
- public void run() throws Exception {
- remoteAdmin.getRestVersion();
- }
- });
- }
-
- @Test
- public void testFailingGetClusterStatus() throws Exception {
- testTimedOutGetCall(new CallExecutor() {
- @Override
- public void run() throws Exception {
- remoteAdmin.getClusterStatus();
- }
- });
- }
-
- @Test
- public void testFailingGetClusterVersion() throws Exception {
- testTimedOutGetCall(new CallExecutor() {
- @Override
- public void run() throws Exception {
- remoteAdmin.getClusterVersion();
- }
- });
- }
-
- @Test
- public void testFailingGetTableAvailable() throws Exception {
- testTimedOutCall(new CallExecutor() {
- @Override
- public void run() throws Exception {
- remoteAdmin.isTableAvailable(Bytes.toBytes("TestTable"));
- }
- });
- }
-
- @Test
- @SuppressWarnings("deprecation")
- public void testFailingCreateTable() throws Exception {
- testTimedOutCall(new CallExecutor() {
- @Override
- public void run() throws Exception {
- remoteAdmin.createTable(new HTableDescriptor(Bytes.toBytes("TestTable")));
- }
- });
- verify(client, times(RETRIES)).put(anyString(), anyString(), any(byte[].class));
- }
-
- @Test
- public void testFailingDeleteTable() throws Exception {
- testTimedOutCall(new CallExecutor() {
- @Override
- public void run() throws Exception {
- remoteAdmin.deleteTable("TestTable");
- }
- });
- verify(client, times(RETRIES)).delete(anyString());
- }
-
- @Test
- public void testFailingGetTableList() throws Exception {
- testTimedOutGetCall(new CallExecutor() {
- @Override
- public void run() throws Exception {
- remoteAdmin.getTableList();
- }
- });
- }
-
- private void testTimedOutGetCall(CallExecutor callExecutor) throws Exception {
- testTimedOutCall(callExecutor);
- verify(client, times(RETRIES)).get(anyString(), anyString());
- }
-
- private void testTimedOutCall(CallExecutor callExecutor) throws Exception {
- long start = System.currentTimeMillis();
- try {
- callExecutor.run();
- fail("should be timeout exception!");
- } catch (IOException e) {
- assertTrue(Pattern.matches(".*MyTable.*timed out", e.toString()));
- }
- assertTrue((System.currentTimeMillis() - start) > MAX_TIME);
- }
-
- private static interface CallExecutor {
- void run() throws Exception;
- }
-
-}
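
Both retry suites drive the same two knobs: hbase.rest.client.max.retries
caps the attempt count and hbase.rest.client.sleep sets the pause between
attempts in milliseconds, with the verify(..., times(RETRIES)) assertions
confirming each failed call is retried exactly that often. A sketch of
setting the knobs on a real, non-mocked client; the host, port and retry
values are placeholders, and the third RemoteAdmin argument is the
access-token slot that the test above fills with "MyTable":

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.rest.client.Client;
  import org.apache.hadoop.hbase.rest.client.Cluster;
  import org.apache.hadoop.hbase.rest.client.RemoteAdmin;

  public class RetryConfigSketch {
    // Build a RemoteAdmin whose failed calls retry 5 times, 1s apart.
    public static RemoteAdmin admin() {
      Configuration conf = HBaseConfiguration.create();
      conf.setInt("hbase.rest.client.max.retries", 5); // attempts before failing
      conf.setInt("hbase.rest.client.sleep", 1000);    // ms between attempts
      Client client = new Client(new Cluster().add("resthost", 8080));
      return new RemoteAdmin(client, conf, "MyTable");
    }
  }
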
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java
deleted file mode 100644
index 547dfab..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.rest.client;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.SmallTests;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-/**
- * Test RemoteHTable retries.
- */
-@Category(SmallTests.class)
-public class TestRemoteHTableRetries {
-
- private static final int SLEEP_TIME = 50;
- private static final int RETRIES = 3;
- private static final long MAX_TIME = SLEEP_TIME * (RETRIES - 1);
-
- private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-
- private static final byte[] ROW_1 = Bytes.toBytes("testrow1");
- private static final byte[] COLUMN_1 = Bytes.toBytes("a");
- private static final byte[] QUALIFIER_1 = Bytes.toBytes("1");
- private static final byte[] VALUE_1 = Bytes.toBytes("testvalue1");
-
- private Client client;
- private RemoteHTable remoteTable;
-
- @Before
- public void setup() throws Exception {
- client = mock(Client.class);
- Response response = new Response(509);
- when(client.get(anyString(), anyString())).thenReturn(response);
- when(client.delete(anyString())).thenReturn(response);
- when(client.put(anyString(), anyString(), any(byte[].class))).thenReturn(
- response);
- when(client.post(anyString(), anyString(), any(byte[].class))).thenReturn(
- response);
-
- Configuration configuration = TEST_UTIL.getConfiguration();
- configuration.setInt("hbase.rest.client.max.retries", RETRIES);
- configuration.setInt("hbase.rest.client.sleep", SLEEP_TIME);
-
- remoteTable = new RemoteHTable(client, TEST_UTIL.getConfiguration(),
- "MyTable");
- }
-
- @After
- public void tearDownAfterClass() throws Exception {
- remoteTable.close();
- }
-
- @Test
- public void testDelete() throws Exception {
- testTimedOutCall(new CallExecutor() {
- @Override
- public void run() throws Exception {
- Delete delete = new Delete(Bytes.toBytes("delete"));
- remoteTable.delete(delete);
- }
- });
- verify(client, times(RETRIES)).delete(anyString());
- }
-
- @Test
- public void testGet() throws Exception {
- testTimedOutGetCall(new CallExecutor() {
- @Override
- public void run() throws Exception {
- remoteTable.get(new Get(Bytes.toBytes("Get")));
- }
- });
- }
-
- @Test
- public void testSingleRowPut() throws Exception {
- testTimedOutCall(new CallExecutor() {
- @Override
- public void run() throws Exception {
- remoteTable.put(new Put(Bytes.toBytes("Row")));
- }
- });
- verify(client, times(RETRIES)).put(anyString(), anyString(), any(byte[].class));
- }
-
- @Test
- public void testMultiRowPut() throws Exception {
- testTimedOutCall(new CallExecutor() {
- @Override
- public void run() throws Exception {
- Put[] puts = { new Put(Bytes.toBytes("Row1")),
- new Put(Bytes.toBytes("Row2")) };
- remoteTable.put(Arrays.asList(puts));
- }
- });
- verify(client, times(RETRIES)).put(anyString(), anyString(), any(byte[].class));
- }
-
- @Test
- public void testGetScanner() throws Exception {
- testTimedOutCall(new CallExecutor() {
- @Override
- public void run() throws Exception {
- remoteTable.getScanner(new Scan());
- }
- });
- verify(client, times(RETRIES)).post(anyString(), anyString(), any(byte[].class));
- }
-
- @Test
- public void testCheckAndPut() throws Exception {
- testTimedOutCall(new CallExecutor() {
- @Override
- public void run() throws Exception {
- Put put = new Put(ROW_1);
- put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
- remoteTable.checkAndPut(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_1, put );
- }
- });
- verify(client, times(RETRIES)).put(anyString(), anyString(), any(byte[].class));
- }
-
- @Test
- public void testCheckAndDelete() throws Exception {
- testTimedOutCall(new CallExecutor() {
- @Override
- public void run() throws Exception {
- Put put = new Put(ROW_1);
- put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
- Delete delete= new Delete(ROW_1);
- remoteTable.checkAndDelete(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_1, delete );
- }
- });
- }
-
- private void testTimedOutGetCall(CallExecutor callExecutor) throws Exception {
- testTimedOutCall(callExecutor);
- verify(client, times(RETRIES)).get(anyString(), anyString());
- }
-
- private void testTimedOutCall(CallExecutor callExecutor) throws Exception {
- long start = System.currentTimeMillis();
- try {
- callExecutor.run();
- fail("should be timeout exception!");
- } catch (IOException e) {
- assertTrue(Pattern.matches(".*request timed out", e.toString()));
- }
- assertTrue((System.currentTimeMillis() - start) > MAX_TIME);
- }
-
- private static interface CallExecutor {
- void run() throws Exception;
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
deleted file mode 100644
index 76fb800..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
+++ /dev/null
@@ -1,538 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.client;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-
-import org.apache.commons.httpclient.Header;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MediumTests;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.rest.HBaseRESTTestingUtility;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(MediumTests.class)
-public class TestRemoteTable {
- private static final String TABLE = "TestRemoteTable";
- private static final byte[] ROW_1 = Bytes.toBytes("testrow1");
- private static final byte[] ROW_2 = Bytes.toBytes("testrow2");
- private static final byte[] ROW_3 = Bytes.toBytes("testrow3");
- private static final byte[] ROW_4 = Bytes.toBytes("testrow4");
- private static final byte[] COLUMN_1 = Bytes.toBytes("a");
- private static final byte[] COLUMN_2 = Bytes.toBytes("b");
- private static final byte[] COLUMN_3 = Bytes.toBytes("c");
- private static final byte[] QUALIFIER_1 = Bytes.toBytes("1");
- private static final byte[] QUALIFIER_2 = Bytes.toBytes("2");
- private static final byte[] VALUE_1 = Bytes.toBytes("testvalue1");
- private static final byte[] VALUE_2 = Bytes.toBytes("testvalue2");
-
- private static final long ONE_HOUR = 60 * 60 * 1000;
- private static final long TS_2 = System.currentTimeMillis();
- private static final long TS_1 = TS_2 - ONE_HOUR;
-
- private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
- private static final HBaseRESTTestingUtility REST_TEST_UTIL =
- new HBaseRESTTestingUtility();
- private RemoteHTable remoteTable;
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- TEST_UTIL.startMiniCluster();
- REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
- }
-
- @Before
- public void before() throws Exception {
- HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
- if (admin.tableExists(TABLE)) {
- if (admin.isTableEnabled(TABLE)) admin.disableTable(TABLE);
- admin.deleteTable(TABLE);
- }
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
- htd.addFamily(new HColumnDescriptor(COLUMN_1).setMaxVersions(3));
- htd.addFamily(new HColumnDescriptor(COLUMN_2).setMaxVersions(3));
- htd.addFamily(new HColumnDescriptor(COLUMN_3).setMaxVersions(3));
- admin.createTable(htd);
- HTable table = null;
- try {
- table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
- Put put = new Put(ROW_1);
- put.add(COLUMN_1, QUALIFIER_1, TS_2, VALUE_1);
- table.put(put);
- put = new Put(ROW_2);
- put.add(COLUMN_1, QUALIFIER_1, TS_1, VALUE_1);
- put.add(COLUMN_1, QUALIFIER_1, TS_2, VALUE_2);
- put.add(COLUMN_2, QUALIFIER_2, TS_2, VALUE_2);
- table.put(put);
- table.flushCommits();
- } finally {
- if (null != table) table.close();
- }
- remoteTable = new RemoteHTable(
- new Client(new Cluster().add("localhost",
- REST_TEST_UTIL.getServletPort())),
- TEST_UTIL.getConfiguration(), TABLE);
- }
-
- @After
- public void after() throws Exception {
- remoteTable.close();
- }
-
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- REST_TEST_UTIL.shutdownServletContainer();
- TEST_UTIL.shutdownMiniCluster();
- }
-
- @Test
- public void testGetTableDescriptor() throws IOException {
- HTable table = null;
- try {
- table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
- HTableDescriptor local = table.getTableDescriptor();
- assertEquals(remoteTable.getTableDescriptor(), local);
- } finally {
- if (null != table) table.close();
- }
- }
-
- @Test
- public void testGet() throws IOException {
- Get get = new Get(ROW_1);
- Result result = remoteTable.get(get);
- byte[] value1 = result.getValue(COLUMN_1, QUALIFIER_1);
- byte[] value2 = result.getValue(COLUMN_2, QUALIFIER_2);
- assertNotNull(value1);
- assertTrue(Bytes.equals(VALUE_1, value1));
- assertNull(value2);
-
- get = new Get(ROW_1);
- get.addFamily(COLUMN_3);
- result = remoteTable.get(get);
- value1 = result.getValue(COLUMN_1, QUALIFIER_1);
- value2 = result.getValue(COLUMN_2, QUALIFIER_2);
- assertNull(value1);
- assertNull(value2);
-
- get = new Get(ROW_1);
- get.addColumn(COLUMN_1, QUALIFIER_1);
- get.addColumn(COLUMN_2, QUALIFIER_2);
- result = remoteTable.get(get);
- value1 = result.getValue(COLUMN_1, QUALIFIER_1);
- value2 = result.getValue(COLUMN_2, QUALIFIER_2);
- assertNotNull(value1);
- assertTrue(Bytes.equals(VALUE_1, value1));
- assertNull(value2);
-
- get = new Get(ROW_2);
- result = remoteTable.get(get);
- value1 = result.getValue(COLUMN_1, QUALIFIER_1);
- value2 = result.getValue(COLUMN_2, QUALIFIER_2);
- assertNotNull(value1);
- assertTrue(Bytes.equals(VALUE_2, value1)); // @TS_2
- assertNotNull(value2);
- assertTrue(Bytes.equals(VALUE_2, value2));
-
- get = new Get(ROW_2);
- get.addFamily(COLUMN_1);
- result = remoteTable.get(get);
- value1 = result.getValue(COLUMN_1, QUALIFIER_1);
- value2 = result.getValue(COLUMN_2, QUALIFIER_2);
- assertNotNull(value1);
- assertTrue(Bytes.equals(VALUE_2, value1)); // @TS_2
- assertNull(value2);
-
- get = new Get(ROW_2);
- get.addColumn(COLUMN_1, QUALIFIER_1);
- get.addColumn(COLUMN_2, QUALIFIER_2);
- result = remoteTable.get(get);
- value1 = result.getValue(COLUMN_1, QUALIFIER_1);
- value2 = result.getValue(COLUMN_2, QUALIFIER_2);
- assertNotNull(value1);
- assertTrue(Bytes.equals(VALUE_2, value1)); // @TS_2
- assertNotNull(value2);
- assertTrue(Bytes.equals(VALUE_2, value2));
-
- // test timestamp
-
- get = new Get(ROW_2);
- get.addFamily(COLUMN_1);
- get.addFamily(COLUMN_2);
- get.setTimeStamp(TS_1);
- result = remoteTable.get(get);
- value1 = result.getValue(COLUMN_1, QUALIFIER_1);
- value2 = result.getValue(COLUMN_2, QUALIFIER_2);
- assertNotNull(value1);
- assertTrue(Bytes.equals(VALUE_1, value1)); // @TS_1
- assertNull(value2);
-
- // test timerange
-
- get = new Get(ROW_2);
- get.addFamily(COLUMN_1);
- get.addFamily(COLUMN_2);
- get.setTimeRange(0, TS_1 + 1);
- result = remoteTable.get(get);
- value1 = result.getValue(COLUMN_1, QUALIFIER_1);
- value2 = result.getValue(COLUMN_2, QUALIFIER_2);
- assertNotNull(value1);
- assertTrue(Bytes.equals(VALUE_1, value1)); // @TS_1
- assertNull(value2);
-
- // test maxVersions
-
- get = new Get(ROW_2);
- get.addFamily(COLUMN_1);
- get.setMaxVersions(2);
- result = remoteTable.get(get);
- int count = 0;
- for (Cell kv : result.listCells()) {
- if (CellUtil.matchingFamily(kv, COLUMN_1) && TS_1 == kv.getTimestamp()) {
- assertTrue(CellUtil.matchingValue(kv, VALUE_1)); // @TS_1
- count++;
- }
- if (CellUtil.matchingFamily(kv, COLUMN_1) && TS_2 == kv.getTimestamp()) {
- assertTrue(CellUtil.matchingValue(kv, VALUE_2)); // @TS_2
- count++;
- }
- }
- assertEquals(2, count);
- }
-
- @Test
- public void testMultiGet() throws Exception {
- ArrayList<Get> gets = new ArrayList<Get>();
- gets.add(new Get(ROW_1));
- gets.add(new Get(ROW_2));
- Result[] results = remoteTable.get(gets);
- assertNotNull(results);
- assertEquals(2, results.length);
- assertEquals(1, results[0].size());
- assertEquals(2, results[1].size());
-
- // Test versions
- gets = new ArrayList<Get>();
- Get g = new Get(ROW_1);
- g.setMaxVersions(3);
- gets.add(g);
- gets.add(new Get(ROW_2));
- results = remoteTable.get(gets);
- assertNotNull(results);
- assertEquals(2, results.length);
- assertEquals(1, results[0].size());
- assertEquals(3, results[1].size());
-
- // 404: rows that do not exist
- gets = new ArrayList<Get>();
- gets.add(new Get(Bytes.toBytes("RESALLYREALLYNOTTHERE")));
- results = remoteTable.get(gets);
- assertNotNull(results);
- assertEquals(0, results.length);
-
- gets = new ArrayList<Get>();
- gets.add(new Get(Bytes.toBytes("RESALLYREALLYNOTTHERE")));
- gets.add(new Get(ROW_1));
- gets.add(new Get(ROW_2));
- results = remoteTable.get(gets);
- assertNotNull(results);
- assertEquals(2, results.length);
- }
-
- @Test
- public void testPut() throws IOException {
- Put put = new Put(ROW_3);
- put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
- remoteTable.put(put);
-
- Get get = new Get(ROW_3);
- get.addFamily(COLUMN_1);
- Result result = remoteTable.get(get);
- byte[] value = result.getValue(COLUMN_1, QUALIFIER_1);
- assertNotNull(value);
- assertTrue(Bytes.equals(VALUE_1, value));
-
- // multiput
-
- List<Put> puts = new ArrayList<Put>();
- put = new Put(ROW_3);
- put.add(COLUMN_2, QUALIFIER_2, VALUE_2);
- puts.add(put);
- put = new Put(ROW_4);
- put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
- puts.add(put);
- put = new Put(ROW_4);
- put.add(COLUMN_2, QUALIFIER_2, VALUE_2);
- puts.add(put);
- remoteTable.put(puts);
-
- get = new Get(ROW_3);
- get.addFamily(COLUMN_2);
- result = remoteTable.get(get);
- value = result.getValue(COLUMN_2, QUALIFIER_2);
- assertNotNull(value);
- assertTrue(Bytes.equals(VALUE_2, value));
- get = new Get(ROW_4);
- result = remoteTable.get(get);
- value = result.getValue(COLUMN_1, QUALIFIER_1);
- assertNotNull(value);
- assertTrue(Bytes.equals(VALUE_1, value));
- value = result.getValue(COLUMN_2, QUALIFIER_2);
- assertNotNull(value);
- assertTrue(Bytes.equals(VALUE_2, value));
-
- assertTrue(Bytes.equals(Bytes.toBytes("TestRemoteTable"), remoteTable.getTableName()));
- }
-
- @Test
- public void testDelete() throws IOException {
- Put put = new Put(ROW_3);
- put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
- put.add(COLUMN_2, QUALIFIER_2, VALUE_2);
- remoteTable.put(put);
-
- Get get = new Get(ROW_3);
- get.addFamily(COLUMN_1);
- get.addFamily(COLUMN_2);
- Result result = remoteTable.get(get);
- byte[] value1 = result.getValue(COLUMN_1, QUALIFIER_1);
- byte[] value2 = result.getValue(COLUMN_2, QUALIFIER_2);
- assertNotNull(value1);
- assertTrue(Bytes.equals(VALUE_1, value1));
- assertNotNull(value2);
- assertTrue(Bytes.equals(VALUE_2, value2));
-
- Delete delete = new Delete(ROW_3);
- delete.deleteColumn(COLUMN_2, QUALIFIER_2);
- remoteTable.delete(delete);
-
- get = new Get(ROW_3);
- get.addFamily(COLUMN_1);
- get.addFamily(COLUMN_2);
- result = remoteTable.get(get);
- value1 = result.getValue(COLUMN_1, QUALIFIER_1);
- value2 = result.getValue(COLUMN_2, QUALIFIER_2);
- assertNotNull(value1);
- assertTrue(Bytes.equals(VALUE_1, value1));
- assertNull(value2);
-
- delete = new Delete(ROW_3);
- delete.setTimestamp(1L);
- remoteTable.delete(delete);
-
- get = new Get(ROW_3);
- get.addFamily(COLUMN_1);
- get.addFamily(COLUMN_2);
- result = remoteTable.get(get);
- value1 = result.getValue(COLUMN_1, QUALIFIER_1);
- value2 = result.getValue(COLUMN_2, QUALIFIER_2);
- assertNotNull(value1);
- assertTrue(Bytes.equals(VALUE_1, value1));
- assertNull(value2);
-
- delete = new Delete(ROW_3);
- remoteTable.delete(delete);
-
- get = new Get(ROW_3);
- get.addFamily(COLUMN_1);
- get.addFamily(COLUMN_2);
- result = remoteTable.get(get);
- value1 = result.getValue(COLUMN_1, QUALIFIER_1);
- value2 = result.getValue(COLUMN_2, QUALIFIER_2);
- assertNull(value1);
- assertNull(value2);
- }
-
- /**
- * Test RemoteHTable.Scanner
- */
- @Test
- public void testScanner() throws IOException {
- List<Put> puts = new ArrayList<Put>();
- Put put = new Put(ROW_1);
- put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
- puts.add(put);
- put = new Put(ROW_2);
- put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
- puts.add(put);
- put = new Put(ROW_3);
- put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
- puts.add(put);
- put = new Put(ROW_4);
- put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
- puts.add(put);
- remoteTable.put(puts);
-
- ResultScanner scanner = remoteTable.getScanner(new Scan());
-
- Result[] results = scanner.next(1);
- assertNotNull(results);
- assertEquals(1, results.length);
- assertTrue(Bytes.equals(ROW_1, results[0].getRow()));
-
- Result result = scanner.next();
- assertNotNull(result);
- assertTrue(Bytes.equals(ROW_2, result.getRow()));
-
- results = scanner.next(2);
- assertNotNull(results);
- assertEquals(2, results.length);
- assertTrue(Bytes.equals(ROW_3, results[0].getRow()));
- assertTrue(Bytes.equals(ROW_4, results[1].getRow()));
-
- results = scanner.next(1);
- assertNull(results);
- scanner.close();
-
- scanner = remoteTable.getScanner(COLUMN_1);
- results = scanner.next(4);
- assertNotNull(results);
- assertEquals(4, results.length);
- assertTrue(Bytes.equals(ROW_1, results[0].getRow()));
- assertTrue(Bytes.equals(ROW_2, results[1].getRow()));
- assertTrue(Bytes.equals(ROW_3, results[2].getRow()));
- assertTrue(Bytes.equals(ROW_4, results[3].getRow()));
-
- scanner.close();
-
- scanner = remoteTable.getScanner(COLUMN_1, QUALIFIER_1);
- results = scanner.next(4);
- assertNotNull(results);
- assertEquals(4, results.length);
- assertTrue(Bytes.equals(ROW_1, results[0].getRow()));
- assertTrue(Bytes.equals(ROW_2, results[1].getRow()));
- assertTrue(Bytes.equals(ROW_3, results[2].getRow()));
- assertTrue(Bytes.equals(ROW_4, results[3].getRow()));
- scanner.close();
- assertTrue(remoteTable.isAutoFlush());
-
- }
-
- @Test
- public void testCheckAndDelete() throws IOException {
- Get get = new Get(ROW_1);
- Result result = remoteTable.get(get);
- byte[] value1 = result.getValue(COLUMN_1, QUALIFIER_1);
- byte[] value2 = result.getValue(COLUMN_2, QUALIFIER_2);
- assertNotNull(value1);
- assertTrue(Bytes.equals(VALUE_1, value1));
- assertNull(value2);
- assertTrue(remoteTable.exists(get));
- assertEquals(1, remoteTable.exists(Collections.singletonList(get)).length);
- Delete delete = new Delete(ROW_1);
-
- remoteTable.checkAndDelete(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_1, delete);
- assertFalse(remoteTable.exists(get));
-
- Put put = new Put(ROW_1);
- put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
- remoteTable.put(put);
-
- assertTrue(remoteTable.checkAndPut(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_1,
- put));
- assertFalse(remoteTable.checkAndPut(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_2,
- put));
- }
-
- /**
- * Test RemoteHTable.Scanner.iterator method
- */
- @Test
- public void testIteratorScanner() throws IOException {
- List<Put> puts = new ArrayList<Put>();
- Put put = new Put(ROW_1);
- put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
- puts.add(put);
- put = new Put(ROW_2);
- put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
- puts.add(put);
- put = new Put(ROW_3);
- put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
- puts.add(put);
- put = new Put(ROW_4);
- put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
- puts.add(put);
- remoteTable.put(puts);
-
- ResultScanner scanner = remoteTable.getScanner(new Scan());
- Iterator<Result> iterator = scanner.iterator();
- assertTrue(iterator.hasNext());
- int counter = 0;
- while (iterator.hasNext()) {
- iterator.next();
- counter++;
- }
- assertEquals(4, counter);
- }
-
- /**
- * Test some methods of the Response class.
- */
- @Test
- public void testResponse() {
- Response response = new Response(200);
- assertEquals(200, response.getCode());
- Header[] headers = new Header[2];
- headers[0] = new Header("header1", "value1");
- headers[1] = new Header("header2", "value2");
- response = new Response(200, headers);
- assertEquals("value1", response.getHeader("header1"));
- assertFalse(response.hasBody());
- response.setCode(404);
- assertEquals(404, response.getCode());
- headers = new Header[2];
- headers[0] = new Header("header1", "value1.1");
- headers[1] = new Header("header2", "value2");
- response.setHeaders(headers);
- assertEquals("value1.1", response.getHeader("header1"));
- response.setBody(Bytes.toBytes("body"));
- assertTrue(response.hasBody());
- }
-
-}
-
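For readers skimming the deleted test above: the pattern it exercises is a RemoteHTable layered over the REST gateway's Client/Cluster pair. A minimal standalone sketch follows; the host, port 8080, row key, and column family here are illustrative assumptions, not values taken from this patch, and the table is assumed to already exist:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.RemoteHTable;
import org.apache.hadoop.hbase.util.Bytes;

public class RemoteHTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Point the client at a running REST gateway (port 8080 is an assumption).
    Client client = new Client(new Cluster().add("localhost", 8080));
    RemoteHTable table =
        new RemoteHTable(client, conf, Bytes.toBytes("TestRemoteTable"));
    try {
      // Write one cell, then read it back over HTTP.
      Put put = new Put(Bytes.toBytes("testrow"));
      put.add(Bytes.toBytes("a"), Bytes.toBytes("1"), Bytes.toBytes("v1"));
      table.put(put);
      Result result = table.get(new Get(Bytes.toBytes("testrow")));
      System.out.println(Bytes.toString(
          result.getValue(Bytes.toBytes("a"), Bytes.toBytes("1"))));
    } finally {
      table.close();
    }
  }
}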
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java
deleted file mode 100644
index 170dfab..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import java.io.IOException;
-import java.io.StringReader;
-import java.io.StringWriter;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-
-import com.sun.jersey.api.json.JSONJAXBContext;
-import org.apache.hadoop.hbase.SmallTests;
-import org.apache.hadoop.hbase.util.Base64;
-import org.apache.hadoop.hbase.util.Bytes;
-
-import junit.framework.TestCase;
-import org.junit.experimental.categories.Category;
-
-@Category(SmallTests.class)
-public class TestCellModel extends TestModelBase<CellModel> {
-
- private static final long TIMESTAMP = 1245219839331L;
- private static final byte[] COLUMN = Bytes.toBytes("testcolumn");
- private static final byte[] VALUE = Bytes.toBytes("testvalue");
-
- public TestCellModel() throws Exception {
- super(CellModel.class);
- AS_XML =
- "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><Cell " +
- "column=\"dGVzdGNvbHVtbg==\" timestamp=\"1245219839331\">dGVzdHZhbHVl</Cell>";
- AS_PB =
- "Egp0ZXN0Y29sdW1uGOO6i+eeJCIJdGVzdHZhbHVl";
-
- AS_JSON =
- "{\"column\":\"dGVzdGNvbHVtbg==\",\"timestamp\":1245219839331,\"$\":\"dGVzdHZhbHVl\"}";
- }
-
- protected CellModel buildTestModel() {
- CellModel model = new CellModel();
- model.setColumn(COLUMN);
- model.setTimestamp(TIMESTAMP);
- model.setValue(VALUE);
- return model;
- }
-
- protected void checkModel(CellModel model) {
- assertTrue(Bytes.equals(model.getColumn(), COLUMN));
- assertTrue(Bytes.equals(model.getValue(), VALUE));
- assertTrue(model.hasUserTimestamp());
- assertEquals(model.getTimestamp(), TIMESTAMP);
- }
-
- public void testBuildModel() throws Exception {
- checkModel(buildTestModel());
- }
-
- public void testFromXML() throws Exception {
- checkModel(fromXML(AS_XML));
- }
-
- public void testFromPB() throws Exception {
- checkModel(fromPB(AS_PB));
- }
-
-}
-
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java
deleted file mode 100644
index 716da14..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import java.io.IOException;
-import java.io.StringReader;
-import java.io.StringWriter;
-import java.util.Iterator;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-
-import org.apache.hadoop.hbase.SmallTests;
-import org.apache.hadoop.hbase.util.Base64;
-import org.apache.hadoop.hbase.util.Bytes;
-
-import junit.framework.TestCase;
-import org.junit.experimental.categories.Category;
-
-@Category(SmallTests.class)
-public class TestCellSetModel extends TestModelBase<CellSetModel> {
-
- private static final byte[] ROW1 = Bytes.toBytes("testrow1");
- private static final byte[] COLUMN1 = Bytes.toBytes("testcolumn1");
- private static final byte[] VALUE1 = Bytes.toBytes("testvalue1");
- private static final long TIMESTAMP1 = 1245219839331L;
- private static final byte[] ROW2 = Bytes.toBytes("testrow1");
- private static final byte[] COLUMN2 = Bytes.toBytes("testcolumn2");
- private static final byte[] VALUE2 = Bytes.toBytes("testvalue2");
- private static final long TIMESTAMP2 = 1245239813319L;
- private static final byte[] COLUMN3 = Bytes.toBytes("testcolumn3");
- private static final byte[] VALUE3 = Bytes.toBytes("testvalue3");
- private static final long TIMESTAMP3 = 1245393318192L;
-
- public TestCellSetModel() throws Exception {
- super(CellSetModel.class);
- AS_XML =
- "<CellSet>" +
- "<Row key=\"dGVzdHJvdzE=\">" +
- "<Cell timestamp=\"1245219839331\" column=\"dGVzdGNvbHVtbjE=\">" +
- "dGVzdHZhbHVlMQ==</Cell>" +
- "</Row>" +
- "<Row key=\"dGVzdHJvdzE=\">" +
- "<Cell timestamp=\"1245239813319\" column=\"dGVzdGNvbHVtbjI=\">" +
- "dGVzdHZhbHVlMg==</Cell>" +
- "<Cell timestamp=\"1245393318192\" column=\"dGVzdGNvbHVtbjM=\">" +
- "dGVzdHZhbHVlMw==</Cell>" +
- "</Row>" +
- "</CellSet>";
-
- AS_PB =
- "CiwKCHRlc3Ryb3cxEiASC3Rlc3Rjb2x1bW4xGOO6i+eeJCIKdGVzdHZhbHVlMQpOCgh0ZXN0cm93" +
- "MRIgEgt0ZXN0Y29sdW1uMhjHyc7wniQiCnRlc3R2YWx1ZTISIBILdGVzdGNvbHVtbjMYsOLnuZ8k" +
- "Igp0ZXN0dmFsdWUz";
-
- AS_XML =
- "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><CellSet>" +
- "<Row key=\"dGVzdHJvdzE=\"><Cell column=\"dGVzdGNvbHVtbjE=\" timestamp=\"1245219839331\">" +
- "dGVzdHZhbHVlMQ==</Cell></Row><Row key=\"dGVzdHJvdzE=\">" +
- "<Cell column=\"dGVzdGNvbHVtbjI=\" timestamp=\"1245239813319\">" +
- "dGVzdHZhbHVlMg==</Cell>" +
- "<Cell column=\"dGVzdGNvbHVtbjM=\" timestamp=\"1245393318192\">dGVzdHZhbHVlMw==</Cell>" +
- "</Row></CellSet>";
-
- AS_JSON =
- "{\"Row\":[{\"key\":\"dGVzdHJvdzE=\"," +
- "\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjE=\",\"timestamp\":1245219839331," +
- "\"$\":\"dGVzdHZhbHVlMQ==\"}]},{\"key\":\"dGVzdHJvdzE=\"," +
- "\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjI=\",\"timestamp\":1245239813319," +
- "\"$\":\"dGVzdHZhbHVlMg==\"},{\"column\":\"dGVzdGNvbHVtbjM=\"," +
- "\"timestamp\":1245393318192,\"$\":\"dGVzdHZhbHVlMw==\"}]}]}";
- }
-
- protected CellSetModel buildTestModel() {
- CellSetModel model = new CellSetModel();
- RowModel row;
- row = new RowModel();
- row.setKey(ROW1);
- row.addCell(new CellModel(COLUMN1, TIMESTAMP1, VALUE1));
- model.addRow(row);
- row = new RowModel();
- row.setKey(ROW2);
- row.addCell(new CellModel(COLUMN2, TIMESTAMP2, VALUE2));
- row.addCell(new CellModel(COLUMN3, TIMESTAMP3, VALUE3));
- model.addRow(row);
- return model;
- }
-
- protected void checkModel(CellSetModel model) {
- Iterator<RowModel> rows = model.getRows().iterator();
- RowModel row = rows.next();
- assertTrue(Bytes.equals(ROW1, row.getKey()));
- Iterator<CellModel> cells = row.getCells().iterator();
- CellModel cell = cells.next();
- assertTrue(Bytes.equals(COLUMN1, cell.getColumn()));
- assertTrue(Bytes.equals(VALUE1, cell.getValue()));
- assertTrue(cell.hasUserTimestamp());
- assertEquals(cell.getTimestamp(), TIMESTAMP1);
- assertFalse(cells.hasNext());
- row = rows.next();
- assertTrue(Bytes.equals(ROW2, row.getKey()));
- cells = row.getCells().iterator();
- cell = cells.next();
- assertTrue(Bytes.equals(COLUMN2, cell.getColumn()));
- assertTrue(Bytes.equals(VALUE2, cell.getValue()));
- assertTrue(cell.hasUserTimestamp());
- assertEquals(cell.getTimestamp(), TIMESTAMP2);
- cell = cells.next();
- assertTrue(Bytes.equals(COLUMN3, cell.getColumn()));
- assertTrue(Bytes.equals(VALUE3, cell.getValue()));
- assertTrue(cell.hasUserTimestamp());
- assertEquals(cell.getTimestamp(), TIMESTAMP3);
- assertFalse(cells.hasNext());
- }
-
- public void testBuildModel() throws Exception {
- checkModel(buildTestModel());
- }
-
- public void testFromXML() throws Exception {
- checkModel(fromXML(AS_XML));
- }
-
- public void testFromPB() throws Exception {
- checkModel(fromPB(AS_PB));
- }
-
-}
-
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java
deleted file mode 100644
index 15e1652..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import java.io.StringReader;
-import java.io.StringWriter;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-
-import junit.framework.TestCase;
-import org.apache.hadoop.hbase.SmallTests;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(SmallTests.class)
-public class TestColumnSchemaModel extends TestModelBase<ColumnSchemaModel> {
-
- protected static final String COLUMN_NAME = "testcolumn";
- protected static final boolean BLOCKCACHE = true;
- protected static final int BLOCKSIZE = 16384;
- protected static final String BLOOMFILTER = "NONE";
- protected static final String COMPRESSION = "GZ";
- protected static final boolean IN_MEMORY = false;
- protected static final int TTL = 86400;
- protected static final int VERSIONS = 1;
-
- public TestColumnSchemaModel() throws Exception {
- super(ColumnSchemaModel.class);
- AS_XML =
- "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><ColumnSchema " +
- "name=\"testcolumn\" BLOCKSIZE=\"16384\" BLOOMFILTER=\"NONE\" BLOCKCACHE=\"true\" " +
- "COMPRESSION=\"GZ\" VERSIONS=\"1\" TTL=\"86400\" IN_MEMORY=\"false\"/>";
-
- AS_JSON =
- "{\"name\":\"testcolumn\",\"BLOCKSIZE\":\"16384\",\"BLOOMFILTER\":\"NONE\"," +
- "\"BLOCKCACHE\":\"true\",\"COMPRESSION\":\"GZ\",\"VERSIONS\":\"1\"," +
- "\"TTL\":\"86400\",\"IN_MEMORY\":\"false\"}";
- }
-
- protected ColumnSchemaModel buildTestModel() {
- ColumnSchemaModel model = new ColumnSchemaModel();
- model.setName(COLUMN_NAME);
- model.__setBlocksize(BLOCKSIZE);
- model.__setBloomfilter(BLOOMFILTER);
- model.__setBlockcache(BLOCKCACHE);
- model.__setCompression(COMPRESSION);
- model.__setVersions(VERSIONS);
- model.__setTTL(TTL);
- model.__setInMemory(IN_MEMORY);
- return model;
- }
-
- protected void checkModel(ColumnSchemaModel model) {
- assertEquals(model.getName(), COLUMN_NAME);
- assertEquals(model.__getBlockcache(), BLOCKCACHE);
- assertEquals(model.__getBlocksize(), BLOCKSIZE);
- assertEquals(model.__getBloomfilter(), BLOOMFILTER);
- assertTrue(model.__getCompression().equalsIgnoreCase(COMPRESSION));
- assertEquals(model.__getInMemory(), IN_MEMORY);
- assertEquals(model.__getTTL(), TTL);
- assertEquals(model.__getVersions(), VERSIONS);
- }
-
- public void testFromPB() throws Exception {
- }
-}
-
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestModelBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestModelBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestModelBase.java
deleted file mode 100644
index 500d924..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestModelBase.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import junit.framework.TestCase;
-import org.apache.hadoop.hbase.SmallTests;
-import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
-import org.apache.hadoop.hbase.rest.provider.JAXBContextResolver;
-import org.apache.hadoop.hbase.util.Base64;
-import org.codehaus.jackson.jaxrs.JacksonJaxbJsonProvider;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.node.ObjectNode;
-import org.junit.experimental.categories.Category;
-
-import javax.ws.rs.core.MediaType;
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import java.io.IOException;
-import java.io.StringReader;
-import java.io.StringWriter;
-
-@Category(SmallTests.class)
-public abstract class TestModelBase<T> extends TestCase {
-
- protected String AS_XML;
-
- protected String AS_PB;
-
- protected String AS_JSON;
-
- protected JAXBContext context;
-
- protected Class<?> clazz;
-
- protected ObjectMapper mapper;
-
- protected TestModelBase(Class<?> clazz) throws Exception {
- super();
- this.clazz = clazz;
- context = new JAXBContextResolver().getContext(clazz);
- mapper = new JacksonJaxbJsonProvider().locateMapper(clazz,
- MediaType.APPLICATION_JSON_TYPE);
- }
-
- protected abstract T buildTestModel();
-
- @SuppressWarnings("unused")
- protected String toXML(T model) throws JAXBException {
- StringWriter writer = new StringWriter();
- context.createMarshaller().marshal(model, writer);
- return writer.toString();
- }
-
- protected String toJSON(T model) throws JAXBException, IOException {
- StringWriter writer = new StringWriter();
- mapper.writeValue(writer, model);
-// Original marshaller: uncomment the next line (and comment out the mapper call above) to verify backward compatibility.
-// ((JSONJAXBContext)context).createJSONMarshaller().marshallToJSON(model, writer);
- return writer.toString();
- }
-
- public T fromJSON(String json) throws JAXBException, IOException {
- return (T)
- mapper.readValue(json, clazz);
- }
-
- public T fromXML(String xml) throws JAXBException {
- return (T)
- context.createUnmarshaller().unmarshal(new StringReader(xml));
- }
-
- @SuppressWarnings("unused")
- protected byte[] toPB(ProtobufMessageHandler model) {
- return model.createProtobufOutput();
- }
-
- protected T fromPB(String pb) throws
- Exception {
- return (T)clazz.getMethod("getObjectFromMessage", byte[].class).invoke(
- clazz.newInstance(),
- Base64.decode(AS_PB));
- }
-
- protected abstract void checkModel(T model);
-
- public void testBuildModel() throws Exception {
- checkModel(buildTestModel());
- }
-
- public void testFromPB() throws Exception {
- checkModel(fromPB(AS_PB));
- }
-
- public void testFromXML() throws Exception {
- checkModel(fromXML(AS_XML));
- }
-
- public void testToXML() throws Exception {
- assertEquals(AS_XML, toXML(buildTestModel()));
- }
-
- public void testToJSON() throws Exception {
- try {
- ObjectNode expObj = mapper.readValue(AS_JSON, ObjectNode.class);
- ObjectNode actObj = mapper.readValue(toJSON(buildTestModel()), ObjectNode.class);
- assertEquals(expObj, actObj);
- } catch(Exception e) {
- assertEquals(AS_JSON, toJSON(buildTestModel()));
- }
- }
-
- public void testFromJSON() throws Exception {
- checkModel(fromJSON(AS_JSON));
- }
-}
-
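TestModelBase above drives every model through XML, JSON, and protobuf round trips via the JAXBContextResolver and a Jackson mapper. As a hedged standalone sketch of the same XML round trip for a single CellModel, using a plain JAXBContext rather than the resolver and the fixture values from TestCellModel:

import java.io.StringReader;
import java.io.StringWriter;
import javax.xml.bind.JAXBContext;
import org.apache.hadoop.hbase.rest.model.CellModel;
import org.apache.hadoop.hbase.util.Bytes;

public class CellModelRoundTrip {
  public static void main(String[] args) throws Exception {
    // Build the model exactly as TestCellModel.buildTestModel() does.
    CellModel model = new CellModel();
    model.setColumn(Bytes.toBytes("testcolumn"));
    model.setTimestamp(1245219839331L);
    model.setValue(Bytes.toBytes("testvalue"));

    // Marshal to XML, then unmarshal and read the value back.
    JAXBContext context = JAXBContext.newInstance(CellModel.class);
    StringWriter writer = new StringWriter();
    context.createMarshaller().marshal(model, writer);
    String xml = writer.toString();  // same shape as the AS_XML fixture

    CellModel parsed = (CellModel)
        context.createUnmarshaller().unmarshal(new StringReader(xml));
    System.out.println(Bytes.toString(parsed.getValue()));  // testvalue
  }
}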
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java
deleted file mode 100644
index e0068c8..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import java.io.StringReader;
-import java.io.StringWriter;
-import java.util.Iterator;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-
-import org.apache.hadoop.hbase.SmallTests;
-import org.apache.hadoop.hbase.util.Bytes;
-
-import junit.framework.TestCase;
-import org.junit.experimental.categories.Category;
-
-@Category(SmallTests.class)
-public class TestRowModel extends TestModelBase<RowModel> {
-
- private static final byte[] ROW1 = Bytes.toBytes("testrow1");
- private static final byte[] COLUMN1 = Bytes.toBytes("testcolumn1");
- private static final byte[] VALUE1 = Bytes.toBytes("testvalue1");
- private static final long TIMESTAMP1 = 1245219839331L;
-
- private JAXBContext context;
-
- public TestRowModel() throws Exception {
- super(RowModel.class);
- AS_XML =
- "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><Row key=\"dGVzdHJvdzE=\">" +
- "<Cell column=\"dGVzdGNvbHVtbjE=\" timestamp=\"1245219839331\">dGVzdHZhbHVlMQ==</Cell></Row>";
-
- AS_JSON =
- "{\"key\":\"dGVzdHJvdzE=\",\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjE=\"," +
- "\"timestamp\":1245219839331,\"$\":\"dGVzdHZhbHVlMQ==\"}]}";
- }
-
- protected RowModel buildTestModel() {
- RowModel model = new RowModel();
- model.setKey(ROW1);
- model.addCell(new CellModel(COLUMN1, TIMESTAMP1, VALUE1));
- return model;
- }
-
- protected void checkModel(RowModel model) {
- assertTrue(Bytes.equals(ROW1, model.getKey()));
- Iterator<CellModel> cells = model.getCells().iterator();
- CellModel cell = cells.next();
- assertTrue(Bytes.equals(COLUMN1, cell.getColumn()));
- assertTrue(Bytes.equals(VALUE1, cell.getValue()));
- assertTrue(cell.hasUserTimestamp());
- assertEquals(cell.getTimestamp(), TIMESTAMP1);
- assertFalse(cells.hasNext());
- }
-
- @Override
- public void testFromPB() throws Exception {
- // Do nothing: RowModel has no protobuf representation.
- }
-}
-
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java
deleted file mode 100644
index 988872e..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import org.apache.hadoop.hbase.SmallTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.experimental.categories.Category;
-
-@Category(SmallTests.class)
-public class TestScannerModel extends TestModelBase<ScannerModel> {
- private static final String PRIVATE = "private";
- private static final String PUBLIC = "public";
- private static final byte[] START_ROW = Bytes.toBytes("abracadabra");
- private static final byte[] END_ROW = Bytes.toBytes("zzyzx");
- private static final byte[] COLUMN1 = Bytes.toBytes("column1");
- private static final byte[] COLUMN2 = Bytes.toBytes("column2:foo");
- private static final long START_TIME = 1245219839331L;
- private static final long END_TIME = 1245393318192L;
- private static final int CACHING = 1000;
- private static final int BATCH = 100;
- private static final boolean CACHE_BLOCKS = false;
-
- public TestScannerModel() throws Exception {
- super(ScannerModel.class);
- AS_XML = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>"
- + "<Scanner batch=\"100\" cacheBlocks=\"false\" caching=\"1000\" endRow=\"enp5eng=\" "
- + "endTime=\"1245393318192\" maxVersions=\"2147483647\" startRow=\"YWJyYWNhZGFicmE=\" "
- + "startTime=\"1245219839331\">"
- + "<column>Y29sdW1uMQ==</column><column>Y29sdW1uMjpmb28=</column>"
- + "<labels>private</labels><labels>public</labels>"
- + "</Scanner>";
-
- AS_JSON = "{\"batch\":100,\"caching\":1000,\"cacheBlocks\":false,\"endRow\":\"enp5eng=\","
- + "\"endTime\":1245393318192,\"maxVersions\":2147483647,\"startRow\":\"YWJyYWNhZGFicmE=\","
- + "\"startTime\":1245219839331,\"column\":[\"Y29sdW1uMQ==\",\"Y29sdW1uMjpmb28=\"],"
- +"\"labels\":[\"private\",\"public\"]"
- +"}";
-
- AS_PB = "CgthYnJhY2FkYWJyYRIFenp5engaB2NvbHVtbjEaC2NvbHVtbjI6Zm9vIGQo47qL554kMLDi57mf"
- + "JDj/////B0joB1IHcHJpdmF0ZVIGcHVibGljWAA=";
- }
-
- protected ScannerModel buildTestModel() {
- ScannerModel model = new ScannerModel();
- model.setStartRow(START_ROW);
- model.setEndRow(END_ROW);
- model.addColumn(COLUMN1);
- model.addColumn(COLUMN2);
- model.setStartTime(START_TIME);
- model.setEndTime(END_TIME);
- model.setBatch(BATCH);
- model.setCaching(CACHING);
- model.addLabel(PRIVATE);
- model.addLabel(PUBLIC);
- model.setCacheBlocks(CACHE_BLOCKS);
- return model;
- }
-
- protected void checkModel(ScannerModel model) {
- assertTrue(Bytes.equals(model.getStartRow(), START_ROW));
- assertTrue(Bytes.equals(model.getEndRow(), END_ROW));
- boolean foundCol1 = false, foundCol2 = false;
- for (byte[] column : model.getColumns()) {
- if (Bytes.equals(column, COLUMN1)) {
- foundCol1 = true;
- } else if (Bytes.equals(column, COLUMN2)) {
- foundCol2 = true;
- }
- }
- assertTrue(foundCol1);
- assertTrue(foundCol2);
- assertEquals(model.getStartTime(), START_TIME);
- assertEquals(model.getEndTime(), END_TIME);
- assertEquals(model.getBatch(), BATCH);
- assertEquals(model.getCaching(), CACHING);
- assertEquals(model.getCacheBlocks(), CACHE_BLOCKS);
- boolean foundLabel1 = false;
- boolean foundLabel2 = false;
- if (model.getLabels() != null && model.getLabels().size() > 0) {
- for (String label : model.getLabels()) {
- if (label.equals(PRIVATE)) {
- foundLabel1 = true;
- } else if (label.equals(PUBLIC)) {
- foundLabel2 = true;
- }
- }
- assertTrue(foundLabel1);
- assertTrue(foundLabel2);
- }
- }
-
-}
[27/38] HBASE-12197 Move rest to its own module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableListMessage.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableListMessage.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableListMessage.java
new file mode 100644
index 0000000..20891a2
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableListMessage.java
@@ -0,0 +1,547 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: TableListMessage.proto
+
+package org.apache.hadoop.hbase.rest.protobuf.generated;
+
+public final class TableListMessage {
+ private TableListMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface TableListOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // repeated string name = 1;
+ /**
+ * <code>repeated string name = 1;</code>
+ */
+ java.util.List<java.lang.String>
+ getNameList();
+ /**
+ * <code>repeated string name = 1;</code>
+ */
+ int getNameCount();
+ /**
+ * <code>repeated string name = 1;</code>
+ */
+ java.lang.String getName(int index);
+ /**
+ * <code>repeated string name = 1;</code>
+ */
+ com.google.protobuf.ByteString
+ getNameBytes(int index);
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.TableList}
+ */
+ public static final class TableList extends
+ com.google.protobuf.GeneratedMessage
+ implements TableListOrBuilder {
+ // Use TableList.newBuilder() to construct.
+ private TableList(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private TableList(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final TableList defaultInstance;
+ public static TableList getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TableList getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private TableList(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+ name_ = new com.google.protobuf.LazyStringArrayList();
+ mutable_bitField0_ |= 0x00000001;
+ }
+ name_.add(input.readBytes());
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+ name_ = new com.google.protobuf.UnmodifiableLazyStringList(name_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList.class, org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<TableList> PARSER =
+ new com.google.protobuf.AbstractParser<TableList>() {
+ public TableList parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new TableList(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<TableList> getParserForType() {
+ return PARSER;
+ }
+
+ // repeated string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private com.google.protobuf.LazyStringList name_;
+ /**
+ * <code>repeated string name = 1;</code>
+ */
+ public java.util.List<java.lang.String>
+ getNameList() {
+ return name_;
+ }
+ /**
+ * <code>repeated string name = 1;</code>
+ */
+ public int getNameCount() {
+ return name_.size();
+ }
+ /**
+ * <code>repeated string name = 1;</code>
+ */
+ public java.lang.String getName(int index) {
+ return name_.get(index);
+ }
+ /**
+ * <code>repeated string name = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes(int index) {
+ return name_.getByteString(index);
+ }
+
+ private void initFields() {
+ name_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ for (int i = 0; i < name_.size(); i++) {
+ output.writeBytes(1, name_.getByteString(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ {
+ int dataSize = 0;
+ for (int i = 0; i < name_.size(); i++) {
+ dataSize += com.google.protobuf.CodedOutputStream
+ .computeBytesSizeNoTag(name_.getByteString(i));
+ }
+ size += dataSize;
+ size += 1 * getNameList().size();
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.TableList}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableListOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList.class, org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ name_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList build() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList buildPartial() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList result = new org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList(this);
+ int from_bitField0_ = bitField0_;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ name_ = new com.google.protobuf.UnmodifiableLazyStringList(
+ name_);
+ bitField0_ = (bitField0_ & ~0x00000001);
+ }
+ result.name_ = name_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList) {
+ return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList other) {
+ if (other == org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList.getDefaultInstance()) return this;
+ if (!other.name_.isEmpty()) {
+ if (name_.isEmpty()) {
+ name_ = other.name_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ ensureNameIsMutable();
+ name_.addAll(other.name_);
+ }
+ onChanged();
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // repeated string name = 1;
+ private com.google.protobuf.LazyStringList name_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ private void ensureNameIsMutable() {
+ if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+ name_ = new com.google.protobuf.LazyStringArrayList(name_);
+ bitField0_ |= 0x00000001;
+ }
+ }
+ /**
+ * <code>repeated string name = 1;</code>
+ */
+ public java.util.List<java.lang.String>
+ getNameList() {
+ return java.util.Collections.unmodifiableList(name_);
+ }
+ /**
+ * <code>repeated string name = 1;</code>
+ */
+ public int getNameCount() {
+ return name_.size();
+ }
+ /**
+ * <code>repeated string name = 1;</code>
+ */
+ public java.lang.String getName(int index) {
+ return name_.get(index);
+ }
+ /**
+ * <code>repeated string name = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes(int index) {
+ return name_.getByteString(index);
+ }
+ /**
+ * <code>repeated string name = 1;</code>
+ */
+ public Builder setName(
+ int index, java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureNameIsMutable();
+ name_.set(index, value);
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>repeated string name = 1;</code>
+ */
+ public Builder addName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureNameIsMutable();
+ name_.add(value);
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>repeated string name = 1;</code>
+ */
+ public Builder addAllName(
+ java.lang.Iterable<java.lang.String> values) {
+ ensureNameIsMutable();
+ super.addAll(values, name_);
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>repeated string name = 1;</code>
+ */
+ public Builder clearName() {
+ name_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>repeated string name = 1;</code>
+ */
+ public Builder addNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureNameIsMutable();
+ name_.add(value);
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.TableList)
+ }
+
+ static {
+ defaultInstance = new TableList(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.TableList)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\026TableListMessage.proto\022/org.apache.had" +
+ "oop.hbase.rest.protobuf.generated\"\031\n\tTab" +
+ "leList\022\014\n\004name\030\001 \003(\t"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_descriptor,
+ new java.lang.String[] { "Name", });
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
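The generated TableList above is consumed through the standard protobuf builder API. A minimal sketch (not part of the patch) of building, serializing, and re-parsing a table list, using only methods visible in the generated class plus protobuf's standard toByteArray():

import org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList;

public class TableListSketch {
  public static void main(String[] args) throws Exception {
    TableList list = TableList.newBuilder()
        .addName("table1")   // addName() appends to the repeated "name" field
        .addName("table2")
        .build();
    byte[] wire = list.toByteArray();             // protobuf wire format
    TableList parsed = TableList.parseFrom(wire); // uses the PARSER defined above
    System.out.println(parsed.getNameList());     // [table1, table2]
  }
}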
[28/38] HBASE-12197 Move rest to its own module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableInfoMessage.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableInfoMessage.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableInfoMessage.java
new file mode 100644
index 0000000..421c0ec
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableInfoMessage.java
@@ -0,0 +1,1802 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: TableInfoMessage.proto
+
+package org.apache.hadoop.hbase.rest.protobuf.generated;
+
+public final class TableInfoMessage {
+ private TableInfoMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface TableInfoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string name = 1;
+ /**
+ * <code>required string name = 1;</code>
+ */
+ boolean hasName();
+ /**
+ * <code>required string name = 1;</code>
+ */
+ java.lang.String getName();
+ /**
+ * <code>required string name = 1;</code>
+ */
+ com.google.protobuf.ByteString
+ getNameBytes();
+
+ // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region>
+ getRegionsList();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region getRegions(int index);
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ int getRegionsCount();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.RegionOrBuilder>
+ getRegionsOrBuilderList();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.RegionOrBuilder getRegionsOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo}
+ */
+ public static final class TableInfo extends
+ com.google.protobuf.GeneratedMessage
+ implements TableInfoOrBuilder {
+ // Use TableInfo.newBuilder() to construct.
+ private TableInfo(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private TableInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final TableInfo defaultInstance;
+ public static TableInfo getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TableInfo getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private TableInfo(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ name_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region>();
+ mutable_bitField0_ |= 0x00000002;
+ }
+ regions_.add(input.readMessage(org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ regions_ = java.util.Collections.unmodifiableList(regions_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.class, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<TableInfo> PARSER =
+ new com.google.protobuf.AbstractParser<TableInfo>() {
+ public TableInfo parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new TableInfo(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<TableInfo> getParserForType() {
+ return PARSER;
+ }
+
+ public interface RegionOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string name = 1;
+ /**
+ * <code>required string name = 1;</code>
+ */
+ boolean hasName();
+ /**
+ * <code>required string name = 1;</code>
+ */
+ java.lang.String getName();
+ /**
+ * <code>required string name = 1;</code>
+ */
+ com.google.protobuf.ByteString
+ getNameBytes();
+
+ // optional bytes startKey = 2;
+ /**
+ * <code>optional bytes startKey = 2;</code>
+ */
+ boolean hasStartKey();
+ /**
+ * <code>optional bytes startKey = 2;</code>
+ */
+ com.google.protobuf.ByteString getStartKey();
+
+ // optional bytes endKey = 3;
+ /**
+ * <code>optional bytes endKey = 3;</code>
+ */
+ boolean hasEndKey();
+ /**
+ * <code>optional bytes endKey = 3;</code>
+ */
+ com.google.protobuf.ByteString getEndKey();
+
+ // optional int64 id = 4;
+ /**
+ * <code>optional int64 id = 4;</code>
+ */
+ boolean hasId();
+ /**
+ * <code>optional int64 id = 4;</code>
+ */
+ long getId();
+
+ // optional string location = 5;
+ /**
+ * <code>optional string location = 5;</code>
+ */
+ boolean hasLocation();
+ /**
+ * <code>optional string location = 5;</code>
+ */
+ java.lang.String getLocation();
+ /**
+ * <code>optional string location = 5;</code>
+ */
+ com.google.protobuf.ByteString
+ getLocationBytes();
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region}
+ */
+ public static final class Region extends
+ com.google.protobuf.GeneratedMessage
+ implements RegionOrBuilder {
+ // Use Region.newBuilder() to construct.
+ private Region(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Region(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Region defaultInstance;
+ public static Region getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Region getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Region(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ name_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ startKey_ = input.readBytes();
+ break;
+ }
+ case 26: {
+ bitField0_ |= 0x00000004;
+ endKey_ = input.readBytes();
+ break;
+ }
+ case 32: {
+ bitField0_ |= 0x00000008;
+ id_ = input.readInt64();
+ break;
+ }
+ case 42: {
+ bitField0_ |= 0x00000010;
+ location_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_Region_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_Region_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.class, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Region> PARSER =
+ new com.google.protobuf.AbstractParser<Region>() {
+ public Region parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Region(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Region> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private java.lang.Object name_;
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public java.lang.String getName() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ name_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional bytes startKey = 2;
+ public static final int STARTKEY_FIELD_NUMBER = 2;
+ private com.google.protobuf.ByteString startKey_;
+ /**
+ * <code>optional bytes startKey = 2;</code>
+ */
+ public boolean hasStartKey() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional bytes startKey = 2;</code>
+ */
+ public com.google.protobuf.ByteString getStartKey() {
+ return startKey_;
+ }
+
+ // optional bytes endKey = 3;
+ public static final int ENDKEY_FIELD_NUMBER = 3;
+ private com.google.protobuf.ByteString endKey_;
+ /**
+ * <code>optional bytes endKey = 3;</code>
+ */
+ public boolean hasEndKey() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional bytes endKey = 3;</code>
+ */
+ public com.google.protobuf.ByteString getEndKey() {
+ return endKey_;
+ }
+
+ // optional int64 id = 4;
+ public static final int ID_FIELD_NUMBER = 4;
+ private long id_;
+ /**
+ * <code>optional int64 id = 4;</code>
+ */
+ public boolean hasId() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional int64 id = 4;</code>
+ */
+ public long getId() {
+ return id_;
+ }
+
+ // optional string location = 5;
+ public static final int LOCATION_FIELD_NUMBER = 5;
+ private java.lang.Object location_;
+ /**
+ * <code>optional string location = 5;</code>
+ */
+ public boolean hasLocation() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * <code>optional string location = 5;</code>
+ */
+ public java.lang.String getLocation() {
+ java.lang.Object ref = location_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ location_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>optional string location = 5;</code>
+ */
+ public com.google.protobuf.ByteString
+ getLocationBytes() {
+ java.lang.Object ref = location_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ location_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private void initFields() {
+ name_ = "";
+ startKey_ = com.google.protobuf.ByteString.EMPTY;
+ endKey_ = com.google.protobuf.ByteString.EMPTY;
+ id_ = 0L;
+ location_ = "";
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, startKey_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeBytes(3, endKey_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeInt64(4, id_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ output.writeBytes(5, getLocationBytes());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, startKey_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(3, endKey_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(4, id_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(5, getLocationBytes());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.RegionOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_Region_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_Region_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.class, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ name_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ startKey_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ endKey_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ id_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ location_ = "";
+ bitField0_ = (bitField0_ & ~0x00000010);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_Region_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region build() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region buildPartial() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region result = new org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.name_ = name_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.startKey_ = startKey_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.endKey_ = endKey_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.id_ = id_;
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+ to_bitField0_ |= 0x00000010;
+ }
+ result.location_ = location_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region) {
+ return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region other) {
+ if (other == org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ bitField0_ |= 0x00000001;
+ name_ = other.name_;
+ onChanged();
+ }
+ if (other.hasStartKey()) {
+ setStartKey(other.getStartKey());
+ }
+ if (other.hasEndKey()) {
+ setEndKey(other.getEndKey());
+ }
+ if (other.hasId()) {
+ setId(other.getId());
+ }
+ if (other.hasLocation()) {
+ bitField0_ |= 0x00000010;
+ location_ = other.location_;
+ onChanged();
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasName()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required string name = 1;
+ private java.lang.Object name_ = "";
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public java.lang.String getName() {
+ java.lang.Object ref = name_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ name_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public Builder setName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public Builder clearName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ name_ = getDefaultInstance().getName();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public Builder setNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional bytes startKey = 2;
+ private com.google.protobuf.ByteString startKey_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * <code>optional bytes startKey = 2;</code>
+ */
+ public boolean hasStartKey() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional bytes startKey = 2;</code>
+ */
+ public com.google.protobuf.ByteString getStartKey() {
+ return startKey_;
+ }
+ /**
+ * <code>optional bytes startKey = 2;</code>
+ */
+ public Builder setStartKey(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ startKey_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional bytes startKey = 2;</code>
+ */
+ public Builder clearStartKey() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ startKey_ = getDefaultInstance().getStartKey();
+ onChanged();
+ return this;
+ }
+
+ // optional bytes endKey = 3;
+ private com.google.protobuf.ByteString endKey_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * <code>optional bytes endKey = 3;</code>
+ */
+ public boolean hasEndKey() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional bytes endKey = 3;</code>
+ */
+ public com.google.protobuf.ByteString getEndKey() {
+ return endKey_;
+ }
+ /**
+ * <code>optional bytes endKey = 3;</code>
+ */
+ public Builder setEndKey(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ endKey_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional bytes endKey = 3;</code>
+ */
+ public Builder clearEndKey() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ endKey_ = getDefaultInstance().getEndKey();
+ onChanged();
+ return this;
+ }
+
+ // optional int64 id = 4;
+ private long id_ ;
+ /**
+ * <code>optional int64 id = 4;</code>
+ */
+ public boolean hasId() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional int64 id = 4;</code>
+ */
+ public long getId() {
+ return id_;
+ }
+ /**
+ * <code>optional int64 id = 4;</code>
+ */
+ public Builder setId(long value) {
+ bitField0_ |= 0x00000008;
+ id_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int64 id = 4;</code>
+ */
+ public Builder clearId() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ id_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional string location = 5;
+ private java.lang.Object location_ = "";
+ /**
+ * <code>optional string location = 5;</code>
+ */
+ public boolean hasLocation() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * <code>optional string location = 5;</code>
+ */
+ public java.lang.String getLocation() {
+ java.lang.Object ref = location_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ location_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>optional string location = 5;</code>
+ */
+ public com.google.protobuf.ByteString
+ getLocationBytes() {
+ java.lang.Object ref = location_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ location_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>optional string location = 5;</code>
+ */
+ public Builder setLocation(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000010;
+ location_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string location = 5;</code>
+ */
+ public Builder clearLocation() {
+ bitField0_ = (bitField0_ & ~0x00000010);
+ location_ = getDefaultInstance().getLocation();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string location = 5;</code>
+ */
+ public Builder setLocationBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000010;
+ location_ = value;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region)
+ }
+
+ static {
+ defaultInstance = new Region(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region)
+ }
+
+ private int bitField0_;
+ // required string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private java.lang.Object name_;
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public java.lang.String getName() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ name_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
+ public static final int REGIONS_FIELD_NUMBER = 2;
+ private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region> regions_;
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region> getRegionsList() {
+ return regions_;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.RegionOrBuilder>
+ getRegionsOrBuilderList() {
+ return regions_;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ public int getRegionsCount() {
+ return regions_.size();
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region getRegions(int index) {
+ return regions_.get(index);
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.RegionOrBuilder getRegionsOrBuilder(
+ int index) {
+ return regions_.get(index);
+ }
+
+ private void initFields() {
+ name_ = "";
+ regions_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getRegionsCount(); i++) {
+ if (!getRegions(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getNameBytes());
+ }
+ for (int i = 0; i < regions_.size(); i++) {
+ output.writeMessage(2, regions_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getNameBytes());
+ }
+ for (int i = 0; i < regions_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, regions_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.class, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getRegionsFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ name_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (regionsBuilder_ == null) {
+ regions_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ regionsBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo build() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo buildPartial() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo result = new org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.name_ = name_;
+ if (regionsBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ regions_ = java.util.Collections.unmodifiableList(regions_);
+ bitField0_ = (bitField0_ & ~0x00000002);
+ }
+ result.regions_ = regions_;
+ } else {
+ result.regions_ = regionsBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo) {
+ return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo other) {
+ if (other == org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ bitField0_ |= 0x00000001;
+ name_ = other.name_;
+ onChanged();
+ }
+ if (regionsBuilder_ == null) {
+ if (!other.regions_.isEmpty()) {
+ if (regions_.isEmpty()) {
+ regions_ = other.regions_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ ensureRegionsIsMutable();
+ regions_.addAll(other.regions_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.regions_.isEmpty()) {
+ if (regionsBuilder_.isEmpty()) {
+ regionsBuilder_.dispose();
+ regionsBuilder_ = null;
+ regions_ = other.regions_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ regionsBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getRegionsFieldBuilder() : null;
+ } else {
+ regionsBuilder_.addAllMessages(other.regions_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasName()) {
+
+ return false;
+ }
+ for (int i = 0; i < getRegionsCount(); i++) {
+ if (!getRegions(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required string name = 1;
+ private java.lang.Object name_ = "";
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public java.lang.String getName() {
+ java.lang.Object ref = name_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ name_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public Builder setName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public Builder clearName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ name_ = getDefaultInstance().getName();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public Builder setNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+
+ // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
+ private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region> regions_ =
+ java.util.Collections.emptyList();
+ private void ensureRegionsIsMutable() {
+ if (!((bitField0_ & 0x00000002) == 0x00000002)) {
+ regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region>(regions_);
+ bitField0_ |= 0x00000002;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.RegionOrBuilder> regionsBuilder_;
+
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region> getRegionsList() {
+ if (regionsBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(regions_);
+ } else {
+ return regionsBuilder_.getMessageList();
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ public int getRegionsCount() {
+ if (regionsBuilder_ == null) {
+ return regions_.size();
+ } else {
+ return regionsBuilder_.getCount();
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region getRegions(int index) {
+ if (regionsBuilder_ == null) {
+ return regions_.get(index);
+ } else {
+ return regionsBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ public Builder setRegions(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region value) {
+ if (regionsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionsIsMutable();
+ regions_.set(index, value);
+ onChanged();
+ } else {
+ regionsBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ public Builder setRegions(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder builderForValue) {
+ if (regionsBuilder_ == null) {
+ ensureRegionsIsMutable();
+ regions_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ regionsBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ public Builder addRegions(org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region value) {
+ if (regionsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionsIsMutable();
+ regions_.add(value);
+ onChanged();
+ } else {
+ regionsBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ public Builder addRegions(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region value) {
+ if (regionsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionsIsMutable();
+ regions_.add(index, value);
+ onChanged();
+ } else {
+ regionsBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ public Builder addRegions(
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder builderForValue) {
+ if (regionsBuilder_ == null) {
+ ensureRegionsIsMutable();
+ regions_.add(builderForValue.build());
+ onChanged();
+ } else {
+ regionsBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ public Builder addRegions(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder builderForValue) {
+ if (regionsBuilder_ == null) {
+ ensureRegionsIsMutable();
+ regions_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ regionsBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ public Builder addAllRegions(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region> values) {
+ if (regionsBuilder_ == null) {
+ ensureRegionsIsMutable();
+ super.addAll(values, regions_);
+ onChanged();
+ } else {
+ regionsBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ public Builder clearRegions() {
+ if (regionsBuilder_ == null) {
+ regions_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ onChanged();
+ } else {
+ regionsBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ public Builder removeRegions(int index) {
+ if (regionsBuilder_ == null) {
+ ensureRegionsIsMutable();
+ regions_.remove(index);
+ onChanged();
+ } else {
+ regionsBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder getRegionsBuilder(
+ int index) {
+ return getRegionsFieldBuilder().getBuilder(index);
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.RegionOrBuilder getRegionsOrBuilder(
+ int index) {
+ if (regionsBuilder_ == null) {
+ return regions_.get(index); } else {
+ return regionsBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.RegionOrBuilder>
+ getRegionsOrBuilderList() {
+ if (regionsBuilder_ != null) {
+ return regionsBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(regions_);
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder addRegionsBuilder() {
+ return getRegionsFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder addRegionsBuilder(
+ int index) {
+ return getRegionsFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder>
+ getRegionsBuilderList() {
+ return getRegionsFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.RegionOrBuilder>
+ getRegionsFieldBuilder() {
+ if (regionsBuilder_ == null) {
+ regionsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.RegionOrBuilder>(
+ regions_,
+ ((bitField0_ & 0x00000002) == 0x00000002),
+ getParentForChildren(),
+ isClean());
+ regions_ = null;
+ }
+ return regionsBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo)
+ }
+
+ static {
+ defaultInstance = new TableInfo(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_Region_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_Region_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\026TableInfoMessage.proto\022/org.apache.had" +
+ "oop.hbase.rest.protobuf.generated\"\305\001\n\tTa" +
+ "bleInfo\022\014\n\004name\030\001 \002(\t\022R\n\007regions\030\002 \003(\0132A" +
+ ".org.apache.hadoop.hbase.rest.protobuf.g" +
+ "enerated.TableInfo.Region\032V\n\006Region\022\014\n\004n" +
+ "ame\030\001 \002(\t\022\020\n\010startKey\030\002 \001(\014\022\016\n\006endKey\030\003 " +
+ "\001(\014\022\n\n\002id\030\004 \001(\003\022\020\n\010location\030\005 \001(\t"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_descriptor,
+ new java.lang.String[] { "Name", "Regions", });
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_Region_descriptor =
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_descriptor.getNestedTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_Region_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_Region_descriptor,
+ new java.lang.String[] { "Name", "StartKey", "EndKey", "Id", "Location", });
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
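
For reference, a minimal round-trip sketch of the generated TableInfo API whose descriptor is assembled above (field names follow the .proto; the table and region values here are made up):

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo;

public class TableInfoExample {
  public static void main(String[] args) throws Exception {
    // Build a TableInfo carrying one Region, using the generated builders.
    TableInfo info = TableInfo.newBuilder()
        .setName("example")                                // required string name = 1
        .addRegions(TableInfo.Region.newBuilder()
            .setName("example,,1234567890")                // required string name = 1
            .setStartKey(ByteString.EMPTY)                 // optional bytes startKey = 2
            .setEndKey(ByteString.copyFromUtf8("zzz"))     // optional bytes endKey = 3
            .setId(1234567890L)                            // optional int64 id = 4
            .setLocation("host.example.com:60020"))        // optional string location = 5
        .build();
    // Serialize and parse back through the wire format.
    TableInfo parsed = TableInfo.parseFrom(info.toByteArray());
    System.out.println(parsed.getRegions(0).getLocation());
  }
}
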
[08/38] HBASE-12197 Move rest to its own module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableListMessage.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableListMessage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableListMessage.java
deleted file mode 100644
index 20891a2..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableListMessage.java
+++ /dev/null
@@ -1,547 +0,0 @@
-// Generated by the protocol buffer compiler. DO NOT EDIT!
-// source: TableListMessage.proto
-
-package org.apache.hadoop.hbase.rest.protobuf.generated;
-
-public final class TableListMessage {
- private TableListMessage() {}
- public static void registerAllExtensions(
- com.google.protobuf.ExtensionRegistry registry) {
- }
- public interface TableListOrBuilder
- extends com.google.protobuf.MessageOrBuilder {
-
- // repeated string name = 1;
- /**
- * <code>repeated string name = 1;</code>
- */
- java.util.List<java.lang.String>
- getNameList();
- /**
- * <code>repeated string name = 1;</code>
- */
- int getNameCount();
- /**
- * <code>repeated string name = 1;</code>
- */
- java.lang.String getName(int index);
- /**
- * <code>repeated string name = 1;</code>
- */
- com.google.protobuf.ByteString
- getNameBytes(int index);
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.TableList}
- */
- public static final class TableList extends
- com.google.protobuf.GeneratedMessage
- implements TableListOrBuilder {
- // Use TableList.newBuilder() to construct.
- private TableList(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private TableList(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final TableList defaultInstance;
- public static TableList getDefaultInstance() {
- return defaultInstance;
- }
-
- public TableList getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private TableList(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 10: {
- if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
- name_ = new com.google.protobuf.LazyStringArrayList();
- mutable_bitField0_ |= 0x00000001;
- }
- name_.add(input.readBytes());
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
- name_ = new com.google.protobuf.UnmodifiableLazyStringList(name_);
- }
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList.class, org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList.Builder.class);
- }
-
- public static com.google.protobuf.Parser<TableList> PARSER =
- new com.google.protobuf.AbstractParser<TableList>() {
- public TableList parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new TableList(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<TableList> getParserForType() {
- return PARSER;
- }
-
- // repeated string name = 1;
- public static final int NAME_FIELD_NUMBER = 1;
- private com.google.protobuf.LazyStringList name_;
- /**
- * <code>repeated string name = 1;</code>
- */
- public java.util.List<java.lang.String>
- getNameList() {
- return name_;
- }
- /**
- * <code>repeated string name = 1;</code>
- */
- public int getNameCount() {
- return name_.size();
- }
- /**
- * <code>repeated string name = 1;</code>
- */
- public java.lang.String getName(int index) {
- return name_.get(index);
- }
- /**
- * <code>repeated string name = 1;</code>
- */
- public com.google.protobuf.ByteString
- getNameBytes(int index) {
- return name_.getByteString(index);
- }
-
- private void initFields() {
- name_ = com.google.protobuf.LazyStringArrayList.EMPTY;
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized != -1) return isInitialized == 1;
-
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- for (int i = 0; i < name_.size(); i++) {
- output.writeBytes(1, name_.getByteString(i));
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- {
- int dataSize = 0;
- for (int i = 0; i < name_.size(); i++) {
- dataSize += com.google.protobuf.CodedOutputStream
- .computeBytesSizeNoTag(name_.getByteString(i));
- }
- size += dataSize;
- size += 1 * getNameList().size();
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.TableList}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableListOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList.class, org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList.Builder.class);
- }
-
- // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- name_ = com.google.protobuf.LazyStringArrayList.EMPTY;
- bitField0_ = (bitField0_ & ~0x00000001);
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_descriptor;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList getDefaultInstanceForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList.getDefaultInstance();
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList build() {
- org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList buildPartial() {
- org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList result = new org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList(this);
- int from_bitField0_ = bitField0_;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- name_ = new com.google.protobuf.UnmodifiableLazyStringList(
- name_);
- bitField0_ = (bitField0_ & ~0x00000001);
- }
- result.name_ = name_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList) {
- return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList other) {
- if (other == org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList.getDefaultInstance()) return this;
- if (!other.name_.isEmpty()) {
- if (name_.isEmpty()) {
- name_ = other.name_;
- bitField0_ = (bitField0_ & ~0x00000001);
- } else {
- ensureNameIsMutable();
- name_.addAll(other.name_);
- }
- onChanged();
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- // repeated string name = 1;
- private com.google.protobuf.LazyStringList name_ = com.google.protobuf.LazyStringArrayList.EMPTY;
- private void ensureNameIsMutable() {
- if (!((bitField0_ & 0x00000001) == 0x00000001)) {
- name_ = new com.google.protobuf.LazyStringArrayList(name_);
- bitField0_ |= 0x00000001;
- }
- }
- /**
- * <code>repeated string name = 1;</code>
- */
- public java.util.List<java.lang.String>
- getNameList() {
- return java.util.Collections.unmodifiableList(name_);
- }
- /**
- * <code>repeated string name = 1;</code>
- */
- public int getNameCount() {
- return name_.size();
- }
- /**
- * <code>repeated string name = 1;</code>
- */
- public java.lang.String getName(int index) {
- return name_.get(index);
- }
- /**
- * <code>repeated string name = 1;</code>
- */
- public com.google.protobuf.ByteString
- getNameBytes(int index) {
- return name_.getByteString(index);
- }
- /**
- * <code>repeated string name = 1;</code>
- */
- public Builder setName(
- int index, java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureNameIsMutable();
- name_.set(index, value);
- onChanged();
- return this;
- }
- /**
- * <code>repeated string name = 1;</code>
- */
- public Builder addName(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureNameIsMutable();
- name_.add(value);
- onChanged();
- return this;
- }
- /**
- * <code>repeated string name = 1;</code>
- */
- public Builder addAllName(
- java.lang.Iterable<java.lang.String> values) {
- ensureNameIsMutable();
- super.addAll(values, name_);
- onChanged();
- return this;
- }
- /**
- * <code>repeated string name = 1;</code>
- */
- public Builder clearName() {
- name_ = com.google.protobuf.LazyStringArrayList.EMPTY;
- bitField0_ = (bitField0_ & ~0x00000001);
- onChanged();
- return this;
- }
- /**
- * <code>repeated string name = 1;</code>
- */
- public Builder addNameBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureNameIsMutable();
- name_.add(value);
- onChanged();
- return this;
- }
-
- // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.TableList)
- }
-
- static {
- defaultInstance = new TableList(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.TableList)
- }
-
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_fieldAccessorTable;
-
- public static com.google.protobuf.Descriptors.FileDescriptor
- getDescriptor() {
- return descriptor;
- }
- private static com.google.protobuf.Descriptors.FileDescriptor
- descriptor;
- static {
- java.lang.String[] descriptorData = {
- "\n\026TableListMessage.proto\022/org.apache.had" +
- "oop.hbase.rest.protobuf.generated\"\031\n\tTab" +
- "leList\022\014\n\004name\030\001 \003(\t"
- };
- com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
- new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
- public com.google.protobuf.ExtensionRegistry assignDescriptors(
- com.google.protobuf.Descriptors.FileDescriptor root) {
- descriptor = root;
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_descriptor =
- getDescriptor().getMessageTypes().get(0);
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_descriptor,
- new java.lang.String[] { "Name", });
- return null;
- }
- };
- com.google.protobuf.Descriptors.FileDescriptor
- .internalBuildGeneratedFileFrom(descriptorData,
- new com.google.protobuf.Descriptors.FileDescriptor[] {
- }, assigner);
- }
-
- // @@protoc_insertion_point(outer_class_scope)
-}
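
Likewise, a minimal sketch for the generated TableList message above, which is just a single repeated string field (the table names are illustrative):

import org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList;

public class TableListExample {
  public static void main(String[] args) throws Exception {
    // repeated string name = 1 maps onto addName()/getNameList().
    TableList tables = TableList.newBuilder()
        .addName("users")
        .addName("events")
        .build();
    TableList parsed = TableList.parseFrom(tables.toByteArray());
    System.out.println(parsed.getNameList());   // [users, events]
  }
}
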
[19/38] HBASE-12197 Move rest to its own module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java
deleted file mode 100644
index 2ce8ede..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-import com.sun.jersey.spi.container.servlet.ServletContainer;
-
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.hadoop.conf.Configuration;
-
-/**
- * REST servlet container. It is used to get the remote request user
- * without going through @HttpContext, so that we can minimize code changes.
- */
-@InterfaceAudience.Private
-public class RESTServletContainer extends ServletContainer {
- private static final long serialVersionUID = -2474255003443394314L;
-
- /**
- * This container is used only if authentication and
- * impersonation is enabled. The remote request user is used
- * as a proxy user for impersonation in invoking any REST service.
- */
- @Override
- public void service(final HttpServletRequest request,
- final HttpServletResponse response) throws ServletException, IOException {
- final String doAsUserFromQuery = request.getParameter("doAs");
- RESTServlet servlet = RESTServlet.getInstance();
- if (doAsUserFromQuery != null) {
- Configuration conf = servlet.getConfiguration();
- if (!servlet.supportsProxyuser()) {
- throw new ServletException("Support for proxyuser is not configured");
- }
- UserGroupInformation ugi = servlet.getRealUser();
- // create and attempt to authorize a proxy user (the client is attempting
- // to do proxy user)
- ugi = UserGroupInformation.createProxyUser(doAsUserFromQuery, ugi);
- // validate the proxy user authorization
- try {
- ProxyUsers.authorize(ugi, request.getRemoteAddr(), conf);
- } catch(AuthorizationException e) {
- throw new ServletException(e.getMessage());
- }
- servlet.setEffectiveUser(doAsUserFromQuery);
- } else {
- String effectiveUser = request.getRemoteUser();
- servlet.setEffectiveUser(effectiveUser);
- }
- super.service(request, response);
- }
-}
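
The doAs path above only takes effect when impersonation is wired up on the cluster side. A sketch of that configuration, assuming the standard Hadoop proxyuser keys and a REST gateway principal named "rest" (the principal name and wildcards are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.security.authorize.ProxyUsers;

public class ProxyUserSetup {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Allow the 'rest' principal to impersonate members of any group from any host.
    conf.set("hadoop.proxyuser.rest.hosts", "*");
    conf.set("hadoop.proxyuser.rest.groups", "*");
    // ProxyUsers.authorize() in service() consults this refreshed snapshot.
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  }
}
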
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
deleted file mode 100644
index ddc2f56..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-import java.util.Map;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.CacheControl;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import javax.ws.rs.core.Response.ResponseBuilder;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.MetaScanner;
-import org.apache.hadoop.hbase.rest.model.TableInfoModel;
-import org.apache.hadoop.hbase.rest.model.TableRegionModel;
-
-@InterfaceAudience.Private
-public class RegionsResource extends ResourceBase {
- private static final Log LOG = LogFactory.getLog(RegionsResource.class);
-
- static CacheControl cacheControl;
- static {
- cacheControl = new CacheControl();
- cacheControl.setNoCache(true);
- cacheControl.setNoTransform(false);
- }
-
- TableResource tableResource;
-
- /**
- * Constructor
- * @param tableResource
- * @throws IOException
- */
- public RegionsResource(TableResource tableResource) throws IOException {
- super();
- this.tableResource = tableResource;
- }
-
- @GET
- @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
- MIMETYPE_PROTOBUF_IETF})
- public Response get(final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("GET " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- try {
- TableName tableName = TableName.valueOf(tableResource.getName());
- TableInfoModel model = new TableInfoModel(tableName.getNameAsString());
- Map<HRegionInfo,ServerName> regions = MetaScanner.allTableRegions(
- servlet.getConfiguration(), null, tableName, false);
- for (Map.Entry<HRegionInfo,ServerName> e: regions.entrySet()) {
- HRegionInfo hri = e.getKey();
- ServerName addr = e.getValue();
- model.add(
- new TableRegionModel(tableName.getNameAsString(), hri.getRegionId(),
- hri.getStartKey(), hri.getEndKey(), addr.getHostAndPort()));
- }
- ResponseBuilder response = Response.ok(model);
- response.cacheControl(cacheControl);
- servlet.getMetrics().incrementSucessfulGetRequests(1);
- return response.build();
- } catch (TableNotFoundException e) {
- servlet.getMetrics().incrementFailedGetRequests(1);
- return Response.status(Response.Status.NOT_FOUND)
- .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
- .build();
- } catch (IOException e) {
- servlet.getMetrics().incrementFailedGetRequests(1);
- return Response.status(Response.Status.SERVICE_UNAVAILABLE)
- .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
- .build();
- }
- }
-}
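
A client-side sketch of hitting the regions endpoint above; the gateway host, port, and table name are made up (8080 is the usual REST gateway default):

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class RegionsClient {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:8080/mytable/regions");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");   // any of the produced types
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);   // TableInfoModel rendered as JSON
      }
    }
  }
}
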
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java
deleted file mode 100644
index f71d848..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.Response;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
-import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
-import org.apache.hadoop.util.StringUtils;
-
-@InterfaceAudience.Private
-public class ResourceBase implements Constants {
-
- RESTServlet servlet;
- Class<?> accessDeniedClazz;
-
- public ResourceBase() throws IOException {
- servlet = RESTServlet.getInstance();
- try {
- accessDeniedClazz = Class.forName("org.apache.hadoop.hbase.security.AccessDeniedException");
- } catch (ClassNotFoundException e) {
- }
- }
-
- protected Response processException(Throwable exp) {
- Throwable curr = exp;
- if(accessDeniedClazz != null) {
- //some access denied exceptions are buried
- while (curr != null) {
- if(accessDeniedClazz.isAssignableFrom(curr.getClass())) {
- throw new WebApplicationException(
- Response.status(Response.Status.FORBIDDEN)
- .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF +
- StringUtils.stringifyException(exp) + CRLF)
- .build());
- }
- curr = curr.getCause();
- }
- }
- //TableNotFound may also be buried one level deep
- if (exp instanceof TableNotFoundException ||
- exp.getCause() instanceof TableNotFoundException) {
- throw new WebApplicationException(
- Response.status(Response.Status.NOT_FOUND)
- .type(MIMETYPE_TEXT).entity("Not found" + CRLF +
- StringUtils.stringifyException(exp) + CRLF)
- .build());
- }
- if (exp instanceof NoSuchColumnFamilyException){
- throw new WebApplicationException(
- Response.status(Response.Status.NOT_FOUND)
- .type(MIMETYPE_TEXT).entity("Not found" + CRLF +
- StringUtils.stringifyException(exp) + CRLF)
- .build());
- }
- if (exp instanceof RuntimeException) {
- throw new WebApplicationException(
- Response.status(Response.Status.BAD_REQUEST)
- .type(MIMETYPE_TEXT).entity("Bad request" + CRLF +
- StringUtils.stringifyException(exp) + CRLF)
- .build());
- }
- if (exp instanceof RetriesExhaustedWithDetailsException) {
- RetriesExhaustedWithDetailsException retryException =
- (RetriesExhaustedWithDetailsException) exp;
- processException(retryException.getCause(0));
- }
- throw new WebApplicationException(
- Response.status(Response.Status.SERVICE_UNAVAILABLE)
- .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF +
- StringUtils.stringifyException(exp) + CRLF)
- .build());
- }
-}
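
processException gives every resource a single funnel from HBase exceptions to HTTP statuses: buried access-denied exceptions become 403, missing tables or column families 404, other runtime failures 400, and everything else 503. A sketch of the calling pattern (doWork() is a placeholder):

public class ExampleResource extends ResourceBase {
  public ExampleResource() throws java.io.IOException {
    super();
  }

  public javax.ws.rs.core.Response get() {
    try {
      return javax.ws.rs.core.Response.ok(doWork()).build();
    } catch (Exception e) {
      // Always throws WebApplicationException carrying the mapped status.
      return processException(e);
    }
  }

  private Object doWork() { return "ok"; }   // placeholder
}
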
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ResourceConfig.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ResourceConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ResourceConfig.java
deleted file mode 100644
index d397399..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ResourceConfig.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-import com.sun.jersey.api.core.PackagesResourceConfig;
-
-@InterfaceAudience.Private
-public class ResourceConfig extends PackagesResourceConfig {
- public ResourceConfig() {
- super("org.apache.hadoop.hbase.rest");
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java
deleted file mode 100644
index 989c59e..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-import java.util.Iterator;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.rest.model.ScannerModel;
-
-@InterfaceAudience.Private
-public abstract class ResultGenerator implements Iterator<Cell> {
-
- public static ResultGenerator fromRowSpec(final String table,
- final RowSpec rowspec, final Filter filter, final boolean cacheBlocks)
- throws IOException {
- if (rowspec.isSingleRow()) {
- return new RowResultGenerator(table, rowspec, filter, cacheBlocks);
- } else {
- return new ScannerResultGenerator(table, rowspec, filter, cacheBlocks);
- }
- }
-
- public static Filter buildFilter(final String filter) throws Exception {
- return ScannerModel.buildFilter(filter);
- }
-
- public abstract void putBack(Cell kv);
-
- public abstract void close();
-}
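
A usage sketch of the factory above, as it would run inside the REST webapp once RESTServlet is initialized; the row path handed to RowSpec is hypothetical and follows the URL-fragment format RowResource passes through:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.rest.ResultGenerator;
import org.apache.hadoop.hbase.rest.RowSpec;

public class GeneratorExample {
  public static void main(String[] args) throws Exception {
    RowSpec spec = new RowSpec("myrow/cf:qual");   // single row -> RowResultGenerator
    ResultGenerator gen = ResultGenerator.fromRowSpec("mytable", spec, null, true);
    try {
      while (gen.hasNext()) {
        Cell cell = gen.next();
        // inspect cell...
      }
    } finally {
      gen.close();
    }
  }
}
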
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java
deleted file mode 100644
index c425e84..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.CacheControl;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import javax.ws.rs.core.Response.ResponseBuilder;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.rest.model.TableListModel;
-import org.apache.hadoop.hbase.rest.model.TableModel;
-
-@Path("/")
-@InterfaceAudience.Private
-public class RootResource extends ResourceBase {
- private static final Log LOG = LogFactory.getLog(RootResource.class);
-
- static CacheControl cacheControl;
- static {
- cacheControl = new CacheControl();
- cacheControl.setNoCache(true);
- cacheControl.setNoTransform(false);
- }
-
- /**
- * Constructor
- * @throws IOException
- */
- public RootResource() throws IOException {
- super();
- }
-
- private final TableListModel getTableList() throws IOException {
- TableListModel tableList = new TableListModel();
- TableName[] tableNames = servlet.getAdmin().listTableNames();
- for (TableName name: tableNames) {
- tableList.add(new TableModel(name.getNameAsString()));
- }
- return tableList;
- }
-
- @GET
- @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
- MIMETYPE_PROTOBUF_IETF})
- public Response get(final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("GET " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- try {
- ResponseBuilder response = Response.ok(getTableList());
- response.cacheControl(cacheControl);
- servlet.getMetrics().incrementSucessfulGetRequests(1);
- return response.build();
- } catch (Exception e) {
- servlet.getMetrics().incrementFailedGetRequests(1);
- return processException(e);
- }
- }
-
- @Path("status/cluster")
- public StorageClusterStatusResource getClusterStatusResource()
- throws IOException {
- return new StorageClusterStatusResource();
- }
-
- @Path("version")
- public VersionResource getVersionResource() throws IOException {
- return new VersionResource();
- }
-
- @Path("{table}")
- public TableResource getTableResource(
- final @PathParam("table") String table) throws IOException {
- return new TableResource(table);
- }
-}
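
Taken together, the locators above define the gateway's top-level URL layout:

  GET /                  -> list of user tables (TableListModel)
  GET /version           -> version information (VersionResource)
  GET /status/cluster    -> cluster status (StorageClusterStatusResource)
  /{table}/...           -> per-table resources (TableResource)
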
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
deleted file mode 100644
index 7db5328..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
+++ /dev/null
@@ -1,598 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import javax.ws.rs.Consumes;
-import javax.ws.rs.DELETE;
-import javax.ws.rs.GET;
-import javax.ws.rs.POST;
-import javax.ws.rs.PUT;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.MultivaluedMap;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.ResponseBuilder;
-import javax.ws.rs.core.UriInfo;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.rest.model.CellModel;
-import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
-import org.apache.hadoop.hbase.util.Bytes;
-
-@InterfaceAudience.Private
-public class RowResource extends ResourceBase {
- private static final Log LOG = LogFactory.getLog(RowResource.class);
-
- static final String CHECK_PUT = "put";
- static final String CHECK_DELETE = "delete";
-
- TableResource tableResource;
- RowSpec rowspec;
- private String check = null;
-
- /**
- * Constructor
- * @param tableResource
- * @param rowspec
- * @param versions
- * @throws IOException
- */
- public RowResource(TableResource tableResource, String rowspec,
- String versions, String check) throws IOException {
- super();
- this.tableResource = tableResource;
- this.rowspec = new RowSpec(rowspec);
- if (versions != null) {
- this.rowspec.setMaxVersions(Integer.valueOf(versions));
- }
- this.check = check;
- }
-
- @GET
- @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
- MIMETYPE_PROTOBUF_IETF})
- public Response get(final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("GET " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- MultivaluedMap<String, String> params = uriInfo.getQueryParameters();
- try {
- ResultGenerator generator =
- ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, null,
- !params.containsKey(NOCACHE_PARAM_NAME));
- if (!generator.hasNext()) {
- servlet.getMetrics().incrementFailedGetRequests(1);
- return Response.status(Response.Status.NOT_FOUND)
- .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
- .build();
- }
- int count = 0;
- CellSetModel model = new CellSetModel();
- Cell value = generator.next();
- byte[] rowKey = CellUtil.cloneRow(value);
- RowModel rowModel = new RowModel(rowKey);
- do {
- if (!Bytes.equals(CellUtil.cloneRow(value), rowKey)) {
- model.addRow(rowModel);
- rowKey = CellUtil.cloneRow(value);
- rowModel = new RowModel(rowKey);
- }
- rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value),
- value.getTimestamp(), CellUtil.cloneValue(value)));
- if (++count > rowspec.getMaxValues()) {
- break;
- }
- value = generator.next();
- } while (value != null);
- model.addRow(rowModel);
- servlet.getMetrics().incrementSucessfulGetRequests(1);
- return Response.ok(model).build();
- } catch (Exception e) {
- servlet.getMetrics().incrementFailedPutRequests(1);
- return processException(e);
- }
- }
-
- @GET
- @Produces(MIMETYPE_BINARY)
- public Response getBinary(final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("GET " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY);
- }
- servlet.getMetrics().incrementRequests(1);
- // doesn't make sense to use a non specific coordinate as this can only
- // return a single cell
- if (!rowspec.hasColumns() || rowspec.getColumns().length > 1) {
- servlet.getMetrics().incrementFailedGetRequests(1);
- return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
- .entity("Bad request: Either 0 or more than 1 columns specified." + CRLF).build();
- }
- MultivaluedMap<String, String> params = uriInfo.getQueryParameters();
- try {
- ResultGenerator generator =
- ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, null,
- !params.containsKey(NOCACHE_PARAM_NAME));
- if (!generator.hasNext()) {
- servlet.getMetrics().incrementFailedGetRequests(1);
- return Response.status(Response.Status.NOT_FOUND)
- .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
- .build();
- }
- Cell value = generator.next();
- ResponseBuilder response = Response.ok(CellUtil.cloneValue(value));
- response.header("X-Timestamp", value.getTimestamp());
- servlet.getMetrics().incrementSucessfulGetRequests(1);
- return response.build();
- } catch (Exception e) {
- servlet.getMetrics().incrementFailedGetRequests(1);
- return processException(e);
- }
- }
-
- Response update(final CellSetModel model, final boolean replace) {
- servlet.getMetrics().incrementRequests(1);
- if (servlet.isReadOnly()) {
- servlet.getMetrics().incrementFailedPutRequests(1);
- return Response.status(Response.Status.FORBIDDEN)
- .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
- .build();
- }
-
- if (CHECK_PUT.equalsIgnoreCase(check)) {
- return checkAndPut(model);
- } else if (CHECK_DELETE.equalsIgnoreCase(check)) {
- return checkAndDelete(model);
- } else if (check != null && check.length() > 0) {
- return Response.status(Response.Status.BAD_REQUEST)
- .type(MIMETYPE_TEXT).entity("Invalid check value '" + check + "'" + CRLF)
- .build();
- }
-
- HTableInterface table = null;
- try {
- List<RowModel> rows = model.getRows();
- List<Put> puts = new ArrayList<Put>();
- for (RowModel row: rows) {
- byte[] key = row.getKey();
- if (key == null) {
- key = rowspec.getRow();
- }
- if (key == null) {
- servlet.getMetrics().incrementFailedPutRequests(1);
- return Response.status(Response.Status.BAD_REQUEST)
- .type(MIMETYPE_TEXT).entity("Bad request: Row key not specified." + CRLF)
- .build();
- }
- Put put = new Put(key);
- int i = 0;
- for (CellModel cell: row.getCells()) {
- byte[] col = cell.getColumn();
- if (col == null) try {
- col = rowspec.getColumns()[i++];
- } catch (ArrayIndexOutOfBoundsException e) {
- col = null;
- }
- if (col == null) {
- servlet.getMetrics().incrementFailedPutRequests(1);
- return Response.status(Response.Status.BAD_REQUEST)
- .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF)
- .build();
- }
- byte [][] parts = KeyValue.parseColumn(col);
- if (parts.length != 2) {
- return Response.status(Response.Status.BAD_REQUEST)
- .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
- .build();
- }
- put.addImmutable(parts[0], parts[1], cell.getTimestamp(), cell.getValue());
- }
- puts.add(put);
- if (LOG.isDebugEnabled()) {
- LOG.debug("PUT " + put.toString());
- }
- }
- table = servlet.getTable(tableResource.getName());
- table.put(puts);
- table.flushCommits();
- ResponseBuilder response = Response.ok();
- servlet.getMetrics().incrementSucessfulPutRequests(1);
- return response.build();
- } catch (Exception e) {
- servlet.getMetrics().incrementFailedPutRequests(1);
- return processException(e);
- } finally {
- if (table != null) try {
- table.close();
- } catch (IOException ioe) {
- LOG.debug("Exception received while closing the table", ioe);
- }
- }
- }
-
- // This currently supports only update of one row at a time.
- Response updateBinary(final byte[] message, final HttpHeaders headers,
- final boolean replace) {
- servlet.getMetrics().incrementRequests(1);
- if (servlet.isReadOnly()) {
- servlet.getMetrics().incrementFailedPutRequests(1);
- return Response.status(Response.Status.FORBIDDEN)
- .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
- .build();
- }
- HTableInterface table = null;
- try {
- byte[] row = rowspec.getRow();
- byte[][] columns = rowspec.getColumns();
- byte[] column = null;
- if (columns != null) {
- column = columns[0];
- }
- long timestamp = HConstants.LATEST_TIMESTAMP;
- List<String> vals = headers.getRequestHeader("X-Row");
- if (vals != null && !vals.isEmpty()) {
- row = Bytes.toBytes(vals.get(0));
- }
- vals = headers.getRequestHeader("X-Column");
- if (vals != null && !vals.isEmpty()) {
- column = Bytes.toBytes(vals.get(0));
- }
- vals = headers.getRequestHeader("X-Timestamp");
- if (vals != null && !vals.isEmpty()) {
- timestamp = Long.valueOf(vals.get(0));
- }
- if (column == null) {
- servlet.getMetrics().incrementFailedPutRequests(1);
- return Response.status(Response.Status.BAD_REQUEST)
- .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF)
- .build();
- }
- Put put = new Put(row);
- byte parts[][] = KeyValue.parseColumn(column);
- if (parts.length != 2) {
- return Response.status(Response.Status.BAD_REQUEST)
- .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
- .build();
- }
- put.addImmutable(parts[0], parts[1], timestamp, message);
- table = servlet.getTable(tableResource.getName());
- table.put(put);
- if (LOG.isDebugEnabled()) {
- LOG.debug("PUT " + put.toString());
- }
- servlet.getMetrics().incrementSucessfulPutRequests(1);
- return Response.ok().build();
- } catch (Exception e) {
- servlet.getMetrics().incrementFailedPutRequests(1);
- return processException(e);
- } finally {
- if (table != null) try {
- table.close();
- } catch (IOException ioe) {
- LOG.debug(ioe);
- }
- }
- }
-
- @PUT
- @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
- MIMETYPE_PROTOBUF_IETF})
- public Response put(final CellSetModel model,
- final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("PUT " + uriInfo.getAbsolutePath()
- + " " + uriInfo.getQueryParameters());
- }
- return update(model, true);
- }
-
- @PUT
- @Consumes(MIMETYPE_BINARY)
- public Response putBinary(final byte[] message,
- final @Context UriInfo uriInfo, final @Context HttpHeaders headers) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("PUT " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY);
- }
- return updateBinary(message, headers, true);
- }
-
- @POST
- @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
- MIMETYPE_PROTOBUF_IETF})
- public Response post(final CellSetModel model,
- final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("POST " + uriInfo.getAbsolutePath()
- + " " + uriInfo.getQueryParameters());
- }
- return update(model, false);
- }
-
- @POST
- @Consumes(MIMETYPE_BINARY)
- public Response postBinary(final byte[] message,
- final @Context UriInfo uriInfo, final @Context HttpHeaders headers) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("POST " + uriInfo.getAbsolutePath() + " as "+MIMETYPE_BINARY);
- }
- return updateBinary(message, headers, false);
- }
-
- @DELETE
- public Response delete(final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("DELETE " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- if (servlet.isReadOnly()) {
- servlet.getMetrics().incrementFailedDeleteRequests(1);
- return Response.status(Response.Status.FORBIDDEN)
- .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
- .build();
- }
- Delete delete = null;
- if (rowspec.hasTimestamp())
- delete = new Delete(rowspec.getRow(), rowspec.getTimestamp());
- else
- delete = new Delete(rowspec.getRow());
-
- for (byte[] column: rowspec.getColumns()) {
- byte[][] split = KeyValue.parseColumn(column);
- if (rowspec.hasTimestamp()) {
- if (split.length == 1) {
- delete.deleteFamily(split[0], rowspec.getTimestamp());
- } else if (split.length == 2) {
- delete.deleteColumns(split[0], split[1], rowspec.getTimestamp());
- } else {
- return Response.status(Response.Status.BAD_REQUEST)
- .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
- .build();
- }
- } else {
- if (split.length == 1) {
- delete.deleteFamily(split[0]);
- } else if (split.length == 2) {
- delete.deleteColumns(split[0], split[1]);
- } else {
- return Response.status(Response.Status.BAD_REQUEST)
- .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
- .build();
- }
- }
- }
- HTableInterface table = null;
- try {
- table = servlet.getTable(tableResource.getName());
- table.delete(delete);
- servlet.getMetrics().incrementSucessfulDeleteRequests(1);
- if (LOG.isDebugEnabled()) {
- LOG.debug("DELETE " + delete.toString());
- }
- } catch (Exception e) {
- servlet.getMetrics().incrementFailedDeleteRequests(1);
- return processException(e);
- } finally {
- if (table != null) try {
- table.close();
- } catch (IOException ioe) {
- LOG.debug(ioe);
- }
- }
- return Response.ok().build();
- }
-
- /**
- * Validates the input request parameters, parses columns from CellSetModel,
- * and invokes checkAndPut on HTable.
- *
- * @param model instance of CellSetModel
- * @return Response 200 OK, 304 Not modified, 400 Bad request
- */
- Response checkAndPut(final CellSetModel model) {
- HTableInterface table = null;
- try {
- table = servlet.getTable(tableResource.getName());
- if (model.getRows().size() != 1) {
- servlet.getMetrics().incrementFailedPutRequests(1);
- return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
- .entity("Bad request: Number of rows specified is not 1." + CRLF).build();
- }
-
- RowModel rowModel = model.getRows().get(0);
- byte[] key = rowModel.getKey();
- if (key == null) {
- key = rowspec.getRow();
- }
-
- List<CellModel> cellModels = rowModel.getCells();
- int cellModelCount = cellModels.size();
- if (key == null || cellModelCount <= 1) {
- servlet.getMetrics().incrementFailedPutRequests(1);
- return Response
- .status(Response.Status.BAD_REQUEST)
- .type(MIMETYPE_TEXT)
- .entity(
- "Bad request: Either row key is null or no data found for columns specified." + CRLF)
- .build();
- }
-
- Put put = new Put(key);
- boolean retValue;
- CellModel valueToCheckCell = cellModels.get(cellModelCount - 1);
- byte[] valueToCheckColumn = valueToCheckCell.getColumn();
- byte[][] valueToPutParts = KeyValue.parseColumn(valueToCheckColumn);
- if (valueToPutParts.length == 2 && valueToPutParts[1].length > 0) {
- CellModel valueToPutCell = null;
- for (int i = 0, n = cellModelCount - 1; i < n ; i++) {
- if(Bytes.equals(cellModels.get(i).getColumn(),
- valueToCheckCell.getColumn())) {
- valueToPutCell = cellModels.get(i);
- break;
- }
- }
- if (valueToPutCell == null) {
- servlet.getMetrics().incrementFailedPutRequests(1);
- return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
- .entity("Bad request: The column to put and check do not match." + CRLF).build();
- } else {
- put.addImmutable(valueToPutParts[0], valueToPutParts[1], valueToPutCell.getTimestamp(),
- valueToPutCell.getValue());
- retValue = table.checkAndPut(key, valueToPutParts[0], valueToPutParts[1],
- valueToCheckCell.getValue(), put);
- }
- } else {
- servlet.getMetrics().incrementFailedPutRequests(1);
- return Response.status(Response.Status.BAD_REQUEST)
- .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF)
- .build();
- }
-
- if (LOG.isDebugEnabled()) {
- LOG.debug("CHECK-AND-PUT " + put.toString() + ", returns " + retValue);
- }
- if (!retValue) {
- servlet.getMetrics().incrementFailedPutRequests(1);
- return Response.status(Response.Status.NOT_MODIFIED)
- .type(MIMETYPE_TEXT).entity("Value not Modified" + CRLF)
- .build();
- }
- table.flushCommits();
- ResponseBuilder response = Response.ok();
- servlet.getMetrics().incrementSucessfulPutRequests(1);
- return response.build();
- } catch (Exception e) {
- servlet.getMetrics().incrementFailedPutRequests(1);
- return processException(e);
- } finally {
- if (table != null) try {
- table.close();
- } catch (IOException ioe) {
- LOG.debug("Exception received while closing the table", ioe);
- }
- }
- }
-
- /**
- * Validates the input request parameters, parses columns from CellSetModel,
- * and invokes checkAndDelete on HTable.
- *
- * @param model instance of CellSetModel
- * @return Response 200 OK, 304 Not modified, 400 Bad request
- */
- Response checkAndDelete(final CellSetModel model) {
- HTableInterface table = null;
- Delete delete = null;
- try {
- table = servlet.getTable(tableResource.getName());
- if (model.getRows().size() != 1) {
- servlet.getMetrics().incrementFailedDeleteRequests(1);
- return Response.status(Response.Status.BAD_REQUEST)
- .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
- .build();
- }
- RowModel rowModel = model.getRows().get(0);
- byte[] key = rowModel.getKey();
- if (key == null) {
- key = rowspec.getRow();
- }
- if (key == null) {
- servlet.getMetrics().incrementFailedDeleteRequests(1);
- return Response.status(Response.Status.BAD_REQUEST)
- .type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." + CRLF)
- .build();
- }
-
- delete = new Delete(key);
- boolean retValue;
- CellModel valueToDeleteCell = rowModel.getCells().get(0);
- byte[] valueToDeleteColumn = valueToDeleteCell.getColumn();
- if (valueToDeleteColumn == null) {
- try {
- valueToDeleteColumn = rowspec.getColumns()[0];
- } catch (final ArrayIndexOutOfBoundsException e) {
- servlet.getMetrics().incrementFailedDeleteRequests(1);
- return Response.status(Response.Status.BAD_REQUEST)
- .type(MIMETYPE_TEXT).entity("Bad request: Column not specified for check." + CRLF)
- .build();
- }
- }
- byte[][] parts = KeyValue.parseColumn(valueToDeleteColumn);
- if (parts.length == 2) {
- if (parts[1].length != 0) {
- delete.deleteColumns(parts[0], parts[1]);
- retValue = table.checkAndDelete(key, parts[0], parts[1],
- valueToDeleteCell.getValue(), delete);
- } else {
- // The case of empty qualifier.
- delete.deleteColumns(parts[0], Bytes.toBytes(StringUtils.EMPTY));
- retValue = table.checkAndDelete(key, parts[0], Bytes.toBytes(StringUtils.EMPTY),
- valueToDeleteCell.getValue(), delete);
- }
- } else {
- servlet.getMetrics().incrementFailedDeleteRequests(1);
- return Response.status(Response.Status.BAD_REQUEST)
- .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF)
- .build();
- }
-
- if (LOG.isDebugEnabled()) {
- LOG.debug("CHECK-AND-DELETE " + delete.toString() + ", returns "
- + retValue);
- }
-
- if (!retValue) {
- servlet.getMetrics().incrementFailedDeleteRequests(1);
- return Response.status(Response.Status.NOT_MODIFIED)
- .type(MIMETYPE_TEXT).entity(" Delete check failed." + CRLF)
- .build();
- }
- table.flushCommits();
- ResponseBuilder response = Response.ok();
- servlet.getMetrics().incrementSucessfulDeleteRequests(1);
- return response.build();
- } catch (Exception e) {
- servlet.getMetrics().incrementFailedDeleteRequests(1);
- return processException(e);
- } finally {
- if (table != null) try {
- table.close();
- } catch (IOException ioe) {
- LOG.debug("Exception received while closing the table", ioe);
- }
- }
- }
-}
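For context, a minimal sketch of how a client might build the CellSetModel body that checkAndPut() above expects, using the same model classes the resource itself uses. The table, row, and values are hypothetical; the one fixed convention, per the code above, is that the last cell in the row carries the expected current value, and an earlier cell with the same column carries the value to put.

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.rest.model.CellModel;
import org.apache.hadoop.hbase.rest.model.CellSetModel;
import org.apache.hadoop.hbase.rest.model.RowModel;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndPutPayloadSketch {
  public static CellSetModel buildCheckAndPutBody() {
    RowModel row = new RowModel(Bytes.toBytes("testrow"));  // hypothetical row key
    // The value to put; it must name the same column as the check cell below,
    // or checkAndPut() answers 400 ("The column to put and check do not match").
    row.addCell(new CellModel(Bytes.toBytes("cf"), Bytes.toBytes("a"),
        HConstants.LATEST_TIMESTAMP, Bytes.toBytes("new-value")));
    // The check cell: checkAndPut() reads the last cell as the expected value.
    row.addCell(new CellModel(Bytes.toBytes("cf"), Bytes.toBytes("a"),
        HConstants.LATEST_TIMESTAMP, Bytes.toBytes("old-value")));
    CellSetModel model = new CellSetModel();
    model.addRow(row);
    return model;
  }
}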
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java
deleted file mode 100644
index b9492dd..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-import java.util.Iterator;
-import java.util.NoSuchElementException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.util.StringUtils;
-
-@InterfaceAudience.Private
-public class RowResultGenerator extends ResultGenerator {
- private static final Log LOG = LogFactory.getLog(RowResultGenerator.class);
-
- private Iterator<Cell> valuesI;
- private Cell cache;
-
- public RowResultGenerator(final String tableName, final RowSpec rowspec,
- final Filter filter, final boolean cacheBlocks)
- throws IllegalArgumentException, IOException {
- HTableInterface table = RESTServlet.getInstance().getTable(tableName);
- try {
- Get get = new Get(rowspec.getRow());
- if (rowspec.hasColumns()) {
- for (byte[] col: rowspec.getColumns()) {
- byte[][] split = KeyValue.parseColumn(col);
- if (split.length == 1) {
- get.addFamily(split[0]);
- } else if (split.length == 2) {
- get.addColumn(split[0], split[1]);
- } else {
- throw new IllegalArgumentException("Invalid column specifier.");
- }
- }
- }
- get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
- get.setMaxVersions(rowspec.getMaxVersions());
- if (filter != null) {
- get.setFilter(filter);
- }
- get.setCacheBlocks(cacheBlocks);
- Result result = table.get(get);
- if (result != null && !result.isEmpty()) {
- valuesI = result.listCells().iterator();
- }
- } catch (DoNotRetryIOException e) {
- // Warn here because Stargate will return 404 if multiple column
- // families were specified but one did not exist -- currently HBase
- // fails the whole Get.
- // Specifying multiple columns in a URI should be uncommon usage, but
- // leaving a record of what happened here in the log helps avoid
- // confusion.
- LOG.warn(StringUtils.stringifyException(e));
- } finally {
- table.close();
- }
- }
-
- public void close() {
- }
-
- public boolean hasNext() {
- if (cache != null) {
- return true;
- }
- if (valuesI == null) {
- return false;
- }
- return valuesI.hasNext();
- }
-
- public Cell next() {
- if (cache != null) {
- Cell kv = cache;
- cache = null;
- return kv;
- }
- if (valuesI == null) {
- return null;
- }
- try {
- return valuesI.next();
- } catch (NoSuchElementException e) {
- return null;
- }
- }
-
- public void putBack(Cell kv) {
- this.cache = kv;
- }
-
- public void remove() {
- throw new UnsupportedOperationException("remove not supported");
- }
-}
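A rough usage sketch of the generator above, the way RowResource drains it. The table name and path are hypothetical, and a configured RESTServlet instance is assumed, since the constructor obtains its HTableInterface through RESTServlet.getInstance(). Note that putBack() lets a caller push one cell back into the stream, which the scanner resource uses at row boundaries.

import java.io.IOException;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.rest.RowResultGenerator;
import org.apache.hadoop.hbase.rest.RowSpec;
import org.apache.hadoop.hbase.util.Bytes;

public class RowFetchSketch {
  public static void main(String[] args) throws IOException {
    // "/myrow/cf:qual" selects one row and one column (see RowSpec below).
    RowSpec spec = new RowSpec("/myrow/cf:qual");
    RowResultGenerator gen =
        new RowResultGenerator("mytable", spec, null, true); // no filter, cache blocks
    while (gen.hasNext()) {
      Cell cell = gen.next();
      System.out.println(Bytes.toStringBinary(CellUtil.cloneValue(cell)));
    }
    gen.close();
  }
}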
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java
deleted file mode 100644
index b6c1ca8..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java
+++ /dev/null
@@ -1,407 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.UnsupportedEncodingException;
-import java.net.URLDecoder;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.TreeSet;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * Parses a path-based row/column/timestamp specification into its
- * component elements.
- */
-@InterfaceAudience.Private
-public class RowSpec {
- public static final long DEFAULT_START_TIMESTAMP = 0;
- public static final long DEFAULT_END_TIMESTAMP = Long.MAX_VALUE;
-
- private byte[] row = HConstants.EMPTY_START_ROW;
- private byte[] endRow = null;
- private TreeSet<byte[]> columns =
- new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
- private List<String> labels = new ArrayList<String>();
- private long startTime = DEFAULT_START_TIMESTAMP;
- private long endTime = DEFAULT_END_TIMESTAMP;
- private int maxVersions = 1;
- private int maxValues = Integer.MAX_VALUE;
-
- public RowSpec(String path) throws IllegalArgumentException {
- int i = 0;
- while (path.charAt(i) == '/') {
- i++;
- }
- i = parseRowKeys(path, i);
- i = parseColumns(path, i);
- i = parseTimestamp(path, i);
- i = parseQueryParams(path, i);
- }
-
- private int parseRowKeys(final String path, int i)
- throws IllegalArgumentException {
- String startRow = null, endRow = null;
- try {
- StringBuilder sb = new StringBuilder();
- char c;
- while (i < path.length() && (c = path.charAt(i)) != '/') {
- sb.append(c);
- i++;
- }
- i++;
- String row = startRow = sb.toString();
- int idx = startRow.indexOf(',');
- if (idx != -1) {
- startRow = URLDecoder.decode(row.substring(0, idx),
- HConstants.UTF8_ENCODING);
- endRow = URLDecoder.decode(row.substring(idx + 1),
- HConstants.UTF8_ENCODING);
- } else {
- startRow = URLDecoder.decode(row, HConstants.UTF8_ENCODING);
- }
- } catch (IndexOutOfBoundsException e) {
- throw new IllegalArgumentException(e);
- } catch (UnsupportedEncodingException e) {
- throw new RuntimeException(e);
- }
- // HBase does not support wildcards on row keys so we will emulate a
- // suffix glob by synthesizing appropriate start and end row keys for
- // table scanning
- if (startRow.length() > 0 && startRow.charAt(startRow.length() - 1) == '*') {
- if (endRow != null)
- throw new IllegalArgumentException("invalid path: end row may not "+
- "be specified with a wildcard start row");
- this.row = Bytes.toBytes(startRow.substring(0,
- startRow.lastIndexOf("*")));
- this.endRow = new byte[this.row.length + 1];
- System.arraycopy(this.row, 0, this.endRow, 0, this.row.length);
- this.endRow[this.row.length] = (byte)255;
- } else {
- this.row = Bytes.toBytes(startRow);
- if (endRow != null) {
- this.endRow = Bytes.toBytes(endRow);
- }
- }
- return i;
- }
-
- private int parseColumns(final String path, int i) throws IllegalArgumentException {
- if (i >= path.length()) {
- return i;
- }
- try {
- char c;
- StringBuilder column = new StringBuilder();
- while (i < path.length() && (c = path.charAt(i)) != '/') {
- if (c == ',') {
- if (column.length() < 1) {
- throw new IllegalArgumentException("invalid path");
- }
- String s = URLDecoder.decode(column.toString(), HConstants.UTF8_ENCODING);
- this.columns.add(Bytes.toBytes(s));
- column.setLength(0);
- i++;
- continue;
- }
- column.append(c);
- i++;
- }
- i++;
- // trailing list entry
- if (column.length() > 0) {
- String s = URLDecoder.decode(column.toString(), HConstants.UTF8_ENCODING);
- this.columns.add(Bytes.toBytes(s));
- }
- } catch (IndexOutOfBoundsException e) {
- throw new IllegalArgumentException(e);
- } catch (UnsupportedEncodingException e) {
- // shouldn't happen
- throw new RuntimeException(e);
- }
- return i;
- }
-
- private int parseTimestamp(final String path, int i)
- throws IllegalArgumentException {
- if (i >= path.length()) {
- return i;
- }
- long time0 = 0, time1 = 0;
- try {
- char c = 0;
- StringBuilder stamp = new StringBuilder();
- while (i < path.length()) {
- c = path.charAt(i);
- if (c == '/' || c == ',') {
- break;
- }
- stamp.append(c);
- i++;
- }
- try {
- time0 = Long.valueOf(URLDecoder.decode(stamp.toString(),
- HConstants.UTF8_ENCODING));
- } catch (NumberFormatException e) {
- throw new IllegalArgumentException(e);
- }
- if (c == ',') {
- stamp = new StringBuilder();
- i++;
- while (i < path.length() && ((c = path.charAt(i)) != '/')) {
- stamp.append(c);
- i++;
- }
- try {
- time1 = Long.valueOf(URLDecoder.decode(stamp.toString(),
- HConstants.UTF8_ENCODING));
- } catch (NumberFormatException e) {
- throw new IllegalArgumentException(e);
- }
- }
- if (c == '/') {
- i++;
- }
- } catch (IndexOutOfBoundsException e) {
- throw new IllegalArgumentException(e);
- } catch (UnsupportedEncodingException e) {
- // shouldn't happen
- throw new RuntimeException(e);
- }
- if (time1 != 0) {
- startTime = time0;
- endTime = time1;
- } else {
- endTime = time0;
- }
- return i;
- }
-
- private int parseQueryParams(final String path, int i) {
- if (i >= path.length()) {
- return i;
- }
- StringBuilder query = new StringBuilder();
- try {
- query.append(URLDecoder.decode(path.substring(i),
- HConstants.UTF8_ENCODING));
- } catch (UnsupportedEncodingException e) {
- // should not happen
- throw new RuntimeException(e);
- }
- i += query.length();
- int j = 0;
- while (j < query.length()) {
- char c = query.charAt(j);
- if (c != '?' && c != '&') {
- break;
- }
- if (++j >= query.length()) {
- throw new IllegalArgumentException("malformed query parameter");
- }
- char what = query.charAt(j);
- if (++j >= query.length()) {
- break;
- }
- c = query.charAt(j);
- if (c != '=') {
- throw new IllegalArgumentException("malformed query parameter");
- }
- if (++j >= query.length()) {
- break;
- }
- switch (what) {
- case 'm': {
- StringBuilder sb = new StringBuilder();
- while (j < query.length()) {
- c = query.charAt(j);
- if (c < '0' || c > '9') {
- break;
- }
- sb.append(c);
- j++;
- }
- maxVersions = Integer.valueOf(sb.toString());
- } break;
- case 'n': {
- StringBuilder sb = new StringBuilder();
- while (j < query.length()) {
- c = query.charAt(j);
- if (c < '0' || c > '9') {
- break;
- }
- sb.append(c);
- j++;
- }
- maxValues = Integer.valueOf(sb.toString());
- } break;
- default:
- throw new IllegalArgumentException("unknown parameter '" + c + "'");
- }
- }
- return i;
- }
-
- public RowSpec(byte[] startRow, byte[] endRow, byte[][] columns,
- long startTime, long endTime, int maxVersions) {
- this.row = startRow;
- this.endRow = endRow;
- if (columns != null) {
- Collections.addAll(this.columns, columns);
- }
- this.startTime = startTime;
- this.endTime = endTime;
- this.maxVersions = maxVersions;
- }
-
- public RowSpec(byte[] startRow, byte[] endRow, Collection<byte[]> columns,
- long startTime, long endTime, int maxVersions, Collection<String> labels) {
- this(startRow, endRow, columns, startTime, endTime, maxVersions);
- if(labels != null) {
- this.labels.addAll(labels);
- }
- }
-
- public RowSpec(byte[] startRow, byte[] endRow, Collection<byte[]> columns,
- long startTime, long endTime, int maxVersions) {
- this.row = startRow;
- this.endRow = endRow;
- if (columns != null) {
- this.columns.addAll(columns);
- }
- this.startTime = startTime;
- this.endTime = endTime;
- this.maxVersions = maxVersions;
- }
-
- public boolean isSingleRow() {
- return endRow == null;
- }
-
- public int getMaxVersions() {
- return maxVersions;
- }
-
- public void setMaxVersions(final int maxVersions) {
- this.maxVersions = maxVersions;
- }
-
- public int getMaxValues() {
- return maxValues;
- }
-
- public void setMaxValues(final int maxValues) {
- this.maxValues = maxValues;
- }
-
- public boolean hasColumns() {
- return !columns.isEmpty();
- }
-
- public boolean hasLabels() {
- return !labels.isEmpty();
- }
-
- public byte[] getRow() {
- return row;
- }
-
- public byte[] getStartRow() {
- return row;
- }
-
- public boolean hasEndRow() {
- return endRow != null;
- }
-
- public byte[] getEndRow() {
- return endRow;
- }
-
- public void addColumn(final byte[] column) {
- columns.add(column);
- }
-
- public byte[][] getColumns() {
- return columns.toArray(new byte[columns.size()][]);
- }
-
- public List<String> getLabels() {
- return labels;
- }
-
- public boolean hasTimestamp() {
- return (startTime == 0) && (endTime != Long.MAX_VALUE);
- }
-
- public long getTimestamp() {
- return endTime;
- }
-
- public long getStartTime() {
- return startTime;
- }
-
- public void setStartTime(final long startTime) {
- this.startTime = startTime;
- }
-
- public long getEndTime() {
- return endTime;
- }
-
- public void setEndTime(long endTime) {
- this.endTime = endTime;
- }
-
- @Override
- public String toString() {
- StringBuilder result = new StringBuilder();
- result.append("{startRow => '");
- if (row != null) {
- result.append(Bytes.toString(row));
- }
- result.append("', endRow => '");
- if (endRow != null) {
- result.append(Bytes.toString(endRow));
- }
- result.append("', columns => [");
- for (byte[] col: columns) {
- result.append(" '");
- result.append(Bytes.toString(col));
- result.append("'");
- }
- result.append(" ], startTime => ");
- result.append(Long.toString(startTime));
- result.append(", endTime => ");
- result.append(Long.toString(endTime));
- result.append(", maxVersions => ");
- result.append(Integer.toString(maxVersions));
- result.append(", maxValues => ");
- result.append(Integer.toString(maxValues));
- result.append("}");
- return result.toString();
- }
-}
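A short sketch of the path grammar this class parses, with hypothetical rows and columns. Row keys come first (a comma separates start and end row, a trailing '*' is a suffix glob), then comma-separated columns, then an optional timestamp or start,end time range, then ?m= (max versions) and ?n= (max values) query parameters.

import org.apache.hadoop.hbase.rest.RowSpec;
import org.apache.hadoop.hbase.util.Bytes;

public class RowSpecSketch {
  public static void main(String[] args) {
    // One row, two columns, time range [100,200), up to 3 versions.
    RowSpec spec = new RowSpec("/row1/cf:a,cf:b/100,200/?m=3");
    System.out.println(Bytes.toString(spec.getRow()));                  // row1
    System.out.println(spec.getStartTime() + ".." + spec.getEndTime()); // 100..200
    System.out.println(spec.getMaxVersions());                          // 3

    // A trailing '*' becomes a start/end row pair for table scanning.
    RowSpec glob = new RowSpec("/row*");
    System.out.println(glob.isSingleRow());                     // false
    System.out.println(Bytes.toStringBinary(glob.getEndRow())); // row\xFF
  }
}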
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java
deleted file mode 100644
index ffb2fae..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-
-import javax.ws.rs.DELETE;
-import javax.ws.rs.GET;
-import javax.ws.rs.Produces;
-import javax.ws.rs.QueryParam;
-import javax.ws.rs.core.CacheControl;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.ResponseBuilder;
-import javax.ws.rs.core.UriInfo;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.rest.model.CellModel;
-import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
-import org.apache.hadoop.hbase.util.Base64;
-import org.apache.hadoop.hbase.util.Bytes;
-
-@InterfaceAudience.Private
-public class ScannerInstanceResource extends ResourceBase {
- private static final Log LOG =
- LogFactory.getLog(ScannerInstanceResource.class);
-
- static CacheControl cacheControl;
- static {
- cacheControl = new CacheControl();
- cacheControl.setNoCache(true);
- cacheControl.setNoTransform(false);
- }
-
- ResultGenerator generator = null;
- String id = null;
- int batch = 1;
-
- public ScannerInstanceResource() throws IOException { }
-
- public ScannerInstanceResource(String table, String id,
- ResultGenerator generator, int batch) throws IOException {
- this.id = id;
- this.generator = generator;
- this.batch = batch;
- }
-
- @GET
- @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
- MIMETYPE_PROTOBUF_IETF})
- public Response get(final @Context UriInfo uriInfo,
- @QueryParam("n") int maxRows, final @QueryParam("c") int maxValues) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("GET " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- if (generator == null) {
- servlet.getMetrics().incrementFailedGetRequests(1);
- return Response.status(Response.Status.NOT_FOUND)
- .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
- .build();
- }
- CellSetModel model = new CellSetModel();
- RowModel rowModel = null;
- byte[] rowKey = null;
- int limit = batch;
- if (maxValues > 0) {
- limit = maxValues;
- }
- int count = limit;
- do {
- Cell value = null;
- try {
- value = generator.next();
- } catch (IllegalStateException e) {
- if (ScannerResource.delete(id)) {
- servlet.getMetrics().incrementSucessfulDeleteRequests(1);
- } else {
- servlet.getMetrics().incrementFailedDeleteRequests(1);
- }
- servlet.getMetrics().incrementFailedGetRequests(1);
- return Response.status(Response.Status.GONE)
- .type(MIMETYPE_TEXT).entity("Gone" + CRLF)
- .build();
- }
- if (value == null) {
- LOG.info("generator exhausted");
- // respond with 204 (No Content) if an empty cell set would be
- // returned
- if (count == limit) {
- return Response.noContent().build();
- }
- break;
- }
- if (rowKey == null) {
- rowKey = CellUtil.cloneRow(value);
- rowModel = new RowModel(rowKey);
- }
- if (!Bytes.equals(CellUtil.cloneRow(value), rowKey)) {
- // if maxRows was given as a query param, stop if we would exceed the
- // specified number of rows
- if (maxRows > 0) {
- if (--maxRows == 0) {
- generator.putBack(value);
- break;
- }
- }
- model.addRow(rowModel);
- rowKey = CellUtil.cloneRow(value);
- rowModel = new RowModel(rowKey);
- }
- rowModel.addCell(
- new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value),
- value.getTimestamp(), CellUtil.cloneValue(value)));
- } while (--count > 0);
- model.addRow(rowModel);
- ResponseBuilder response = Response.ok(model);
- response.cacheControl(cacheControl);
- servlet.getMetrics().incrementSucessfulGetRequests(1);
- return response.build();
- }
-
- @GET
- @Produces(MIMETYPE_BINARY)
- public Response getBinary(final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("GET " + uriInfo.getAbsolutePath() + " as " +
- MIMETYPE_BINARY);
- }
- servlet.getMetrics().incrementRequests(1);
- try {
- Cell value = generator.next();
- if (value == null) {
- LOG.info("generator exhausted");
- return Response.noContent().build();
- }
- ResponseBuilder response = Response.ok(CellUtil.cloneValue(value));
- response.cacheControl(cacheControl);
- response.header("X-Row", Base64.encodeBytes(CellUtil.cloneRow(value)));
- response.header("X-Column",
- Base64.encodeBytes(
- KeyValue.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value))));
- response.header("X-Timestamp", value.getTimestamp());
- servlet.getMetrics().incrementSucessfulGetRequests(1);
- return response.build();
- } catch (IllegalStateException e) {
- if (ScannerResource.delete(id)) {
- servlet.getMetrics().incrementSucessfulDeleteRequests(1);
- } else {
- servlet.getMetrics().incrementFailedDeleteRequests(1);
- }
- servlet.getMetrics().incrementFailedGetRequests(1);
- return Response.status(Response.Status.GONE)
- .type(MIMETYPE_TEXT).entity("Gone" + CRLF)
- .build();
- }
- }
-
- @DELETE
- public Response delete(final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("DELETE " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- if (servlet.isReadOnly()) {
- return Response.status(Response.Status.FORBIDDEN)
- .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
- .build();
- }
- if (ScannerResource.delete(id)) {
- servlet.getMetrics().incrementSucessfulDeleteRequests(1);
- } else {
- servlet.getMetrics().incrementFailedDeleteRequests(1);
- }
- return Response.ok().build();
- }
-}
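To illustrate the two query parameters get() reads (n caps rows per response, c caps cells), a rough client-side sketch against a hypothetical local REST server. Per the code above, 200 carries a batch of cells, 204 means the scanner is exhausted, and 410 means the underlying scanner lease expired.

import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class ScannerFetchSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical host, table, and scanner id (ids are millis + scanner hashcode).
    URL url = new URL("http://localhost:8080/mytable/scanner/14128899520077f9d8b?n=2&c=10");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "text/xml");
    int code = conn.getResponseCode(); // 200 batch, 204 exhausted, 410 expired
    if (code == 200) {
      try (InputStream in = conn.getInputStream()) {
        int b;
        while ((b = in.read()) != -1) {
          System.out.write(b);
        }
        System.out.flush();
      }
    }
  }
}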
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
deleted file mode 100644
index 6c424ce..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-import java.net.URI;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-
-import javax.ws.rs.Consumes;
-import javax.ws.rs.POST;
-import javax.ws.rs.PUT;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.MultivaluedMap;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriBuilder;
-import javax.ws.rs.core.UriInfo;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.rest.model.ScannerModel;
-
-@InterfaceAudience.Private
-public class ScannerResource extends ResourceBase {
-
- private static final Log LOG = LogFactory.getLog(ScannerResource.class);
-
- static final Map<String,ScannerInstanceResource> scanners =
- Collections.synchronizedMap(new HashMap<String,ScannerInstanceResource>());
-
- TableResource tableResource;
-
- /**
- * Constructor
- * @param tableResource the table this scanner collection belongs to
- * @throws IOException if the resource cannot be initialized
- */
- public ScannerResource(TableResource tableResource) throws IOException {
- super();
- this.tableResource = tableResource;
- }
-
- static boolean delete(final String id) {
- ScannerInstanceResource instance = scanners.remove(id);
- if (instance != null) {
- instance.generator.close();
- return true;
- } else {
- return false;
- }
- }
-
- Response update(final ScannerModel model, final boolean replace,
- final UriInfo uriInfo) {
- servlet.getMetrics().incrementRequests(1);
- if (servlet.isReadOnly()) {
- return Response.status(Response.Status.FORBIDDEN)
- .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
- .build();
- }
- byte[] endRow = model.hasEndRow() ? model.getEndRow() : null;
- RowSpec spec = null;
- if (model.getLabels() != null) {
- spec = new RowSpec(model.getStartRow(), endRow, model.getColumns(), model.getStartTime(),
- model.getEndTime(), model.getMaxVersions(), model.getLabels());
- } else {
- spec = new RowSpec(model.getStartRow(), endRow, model.getColumns(), model.getStartTime(),
- model.getEndTime(), model.getMaxVersions());
- }
- MultivaluedMap<String, String> params = uriInfo.getQueryParameters();
-
- try {
- Filter filter = ScannerResultGenerator.buildFilterFromModel(model);
- String tableName = tableResource.getName();
- ScannerResultGenerator gen =
- new ScannerResultGenerator(tableName, spec, filter, model.getCaching(),
- model.getCacheBlocks());
- String id = gen.getID();
- ScannerInstanceResource instance =
- new ScannerInstanceResource(tableName, id, gen, model.getBatch());
- scanners.put(id, instance);
- if (LOG.isDebugEnabled()) {
- LOG.debug("new scanner: " + id);
- }
- UriBuilder builder = uriInfo.getAbsolutePathBuilder();
- URI uri = builder.path(id).build();
- servlet.getMetrics().incrementSucessfulPutRequests(1);
- return Response.created(uri).build();
- } catch (Exception e) {
- servlet.getMetrics().incrementFailedPutRequests(1);
- if (e instanceof TableNotFoundException) {
- return Response.status(Response.Status.NOT_FOUND)
- .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
- .build();
- } else if (e instanceof RuntimeException) {
- return Response.status(Response.Status.BAD_REQUEST)
- .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
- .build();
- }
- return Response.status(Response.Status.SERVICE_UNAVAILABLE)
- .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
- .build();
- }
- }
-
- @PUT
- @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
- MIMETYPE_PROTOBUF_IETF})
- public Response put(final ScannerModel model,
- final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("PUT " + uriInfo.getAbsolutePath());
- }
- return update(model, true, uriInfo);
- }
-
- @POST
- @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
- MIMETYPE_PROTOBUF_IETF})
- public Response post(final ScannerModel model,
- final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("POST " + uriInfo.getAbsolutePath());
- }
- return update(model, false, uriInfo);
- }
-
- @Path("{scanner: .+}")
- public ScannerInstanceResource getScannerInstanceResource(
- final @PathParam("scanner") String id) throws IOException {
- ScannerInstanceResource instance = scanners.get(id);
- if (instance == null) {
- servlet.getMetrics().incrementFailedGetRequests(1);
- return new ScannerInstanceResource();
- } else {
- servlet.getMetrics().incrementSucessfulGetRequests(1);
- }
- return instance;
- }
-}
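A rough sketch of the create path handled by update() above, with hypothetical host, table, and batch size, and assuming ScannerModel's usual XML form (a <Scanner> element). On success the resource answers 201 Created, and the Location header names the scanner instance URI that getScannerInstanceResource() serves.

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class ScannerCreateSketch {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:8080/mytable/scanner");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("POST");
    conn.setDoOutput(true);
    conn.setRequestProperty("Content-Type", "text/xml");
    byte[] body = "<Scanner batch=\"10\"/>".getBytes(StandardCharsets.UTF_8);
    try (OutputStream out = conn.getOutputStream()) {
      out.write(body);
    }
    System.out.println(conn.getResponseCode());          // expect 201
    System.out.println(conn.getHeaderField("Location")); // scanner instance URI
  }
}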
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java
deleted file mode 100644
index 055c971..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-import java.util.Iterator;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.UnknownScannerException;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.rest.model.ScannerModel;
-import org.apache.hadoop.hbase.security.visibility.Authorizations;
-import org.apache.hadoop.util.StringUtils;
-
-@InterfaceAudience.Private
-public class ScannerResultGenerator extends ResultGenerator {
-
- private static final Log LOG =
- LogFactory.getLog(ScannerResultGenerator.class);
-
- public static Filter buildFilterFromModel(final ScannerModel model)
- throws Exception {
- String filter = model.getFilter();
- if (filter == null || filter.length() == 0) {
- return null;
- }
- return buildFilter(filter);
- }
-
- private String id;
- private Iterator<Cell> rowI;
- private Cell cache;
- private ResultScanner scanner;
- private Result cached;
-
- public ScannerResultGenerator(final String tableName, final RowSpec rowspec,
- final Filter filter, final boolean cacheBlocks)
- throws IllegalArgumentException, IOException {
- this(tableName, rowspec, filter, -1, cacheBlocks);
- }
-
- public ScannerResultGenerator(final String tableName, final RowSpec rowspec,
- final Filter filter, final int caching, final boolean cacheBlocks)
- throws IllegalArgumentException, IOException {
- HTableInterface table = RESTServlet.getInstance().getTable(tableName);
- try {
- Scan scan;
- if (rowspec.hasEndRow()) {
- scan = new Scan(rowspec.getStartRow(), rowspec.getEndRow());
- } else {
- scan = new Scan(rowspec.getStartRow());
- }
- if (rowspec.hasColumns()) {
- byte[][] columns = rowspec.getColumns();
- for (byte[] column: columns) {
- byte[][] split = KeyValue.parseColumn(column);
- if (split.length == 1) {
- scan.addFamily(split[0]);
- } else if (split.length == 2) {
- scan.addColumn(split[0], split[1]);
- } else {
- throw new IllegalArgumentException("Invalid familyAndQualifier provided.");
- }
- }
- }
- scan.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
- scan.setMaxVersions(rowspec.getMaxVersions());
- if (filter != null) {
- scan.setFilter(filter);
- }
- if (caching > 0) {
- scan.setCaching(caching);
- }
- scan.setCacheBlocks(cacheBlocks);
- if (rowspec.hasLabels()) {
- scan.setAuthorizations(new Authorizations(rowspec.getLabels()));
- }
- scanner = table.getScanner(scan);
- cached = null;
- id = Long.toString(System.currentTimeMillis()) +
- Integer.toHexString(scanner.hashCode());
- } finally {
- table.close();
- }
- }
-
- public String getID() {
- return id;
- }
-
- public void close() {
- if (scanner != null) {
- scanner.close();
- scanner = null;
- }
- }
-
- public boolean hasNext() {
- if (cache != null) {
- return true;
- }
- if (rowI != null && rowI.hasNext()) {
- return true;
- }
- if (cached != null) {
- return true;
- }
- try {
- Result result = scanner.next();
- if (result != null && !result.isEmpty()) {
- cached = result;
- }
- } catch (UnknownScannerException e) {
- throw new IllegalArgumentException(e);
- } catch (IOException e) {
- LOG.error(StringUtils.stringifyException(e));
- }
- return cached != null;
- }
-
- public Cell next() {
- if (cache != null) {
- Cell kv = cache;
- cache = null;
- return kv;
- }
- boolean loop;
- do {
- loop = false;
- if (rowI != null) {
- if (rowI.hasNext()) {
- return rowI.next();
- } else {
- rowI = null;
- }
- }
- if (cached != null) {
- rowI = cached.listCells().iterator();
- loop = true;
- cached = null;
- } else {
- Result result = null;
- try {
- result = scanner.next();
- } catch (UnknownScannerException e) {
- throw new IllegalArgumentException(e);
- } catch (IOException e) {
- LOG.error(StringUtils.stringifyException(e));
- }
- if (result != null && !result.isEmpty()) {
- rowI = result.listCells().iterator();
- loop = true;
- }
- }
- } while (loop);
- return null;
- }
-
- public void putBack(Cell kv) {
- this.cache = kv;
- }
-
- public void remove() {
- throw new UnsupportedOperationException("remove not supported");
- }
-}
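Finally, a rough sketch of driving this generator directly, the way ScannerResource.update() does. The key range and table are hypothetical, and an initialized RESTServlet is again assumed, since the constructor fetches its table through RESTServlet.getInstance().

import java.io.IOException;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.rest.RowSpec;
import org.apache.hadoop.hbase.rest.ScannerResultGenerator;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanSketch {
  public static void main(String[] args) throws IOException {
    // Scan rows in ["a", "z"); null columns and filter mean scan everything.
    RowSpec spec = new RowSpec(Bytes.toBytes("a"), Bytes.toBytes("z"),
        (byte[][]) null, 0L, Long.MAX_VALUE, 1);
    ScannerResultGenerator gen =
        new ScannerResultGenerator("mytable", spec, null, 100, true); // caching=100
    System.out.println("scanner id: " + gen.getID());
    while (gen.hasNext()) {
      Cell cell = gen.next();
      System.out.println(Bytes.toStringBinary(CellUtil.cloneRow(cell)));
    }
    gen.close();
  }
}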
[13/38] HBASE-12197 Move rest to its own module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellSetMessage.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellSetMessage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellSetMessage.java
deleted file mode 100644
index f35a25f..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellSetMessage.java
+++ /dev/null
@@ -1,1521 +0,0 @@
-// Generated by the protocol buffer compiler. DO NOT EDIT!
-// source: CellSetMessage.proto
-
-package org.apache.hadoop.hbase.rest.protobuf.generated;
-
-public final class CellSetMessage {
- private CellSetMessage() {}
- public static void registerAllExtensions(
- com.google.protobuf.ExtensionRegistry registry) {
- }
- public interface CellSetOrBuilder
- extends com.google.protobuf.MessageOrBuilder {
-
- // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row>
- getRowsList();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row getRows(int index);
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- int getRowsCount();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.RowOrBuilder>
- getRowsOrBuilderList();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.RowOrBuilder getRowsOrBuilder(
- int index);
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.CellSet}
- */
- public static final class CellSet extends
- com.google.protobuf.GeneratedMessage
- implements CellSetOrBuilder {
- // Use CellSet.newBuilder() to construct.
- private CellSet(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private CellSet(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final CellSet defaultInstance;
- public static CellSet getDefaultInstance() {
- return defaultInstance;
- }
-
- public CellSet getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private CellSet(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 10: {
- if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
- rows_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row>();
- mutable_bitField0_ |= 0x00000001;
- }
- rows_.add(input.readMessage(org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.PARSER, extensionRegistry));
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
- rows_ = java.util.Collections.unmodifiableList(rows_);
- }
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.class, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Builder.class);
- }
-
- public static com.google.protobuf.Parser<CellSet> PARSER =
- new com.google.protobuf.AbstractParser<CellSet>() {
- public CellSet parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new CellSet(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<CellSet> getParserForType() {
- return PARSER;
- }
-
- public interface RowOrBuilder
- extends com.google.protobuf.MessageOrBuilder {
-
- // required bytes key = 1;
- /**
- * <code>required bytes key = 1;</code>
- */
- boolean hasKey();
- /**
- * <code>required bytes key = 1;</code>
- */
- com.google.protobuf.ByteString getKey();
-
- // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell>
- getValuesList();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell getValues(int index);
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- int getValuesCount();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.CellOrBuilder>
- getValuesOrBuilderList();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.CellOrBuilder getValuesOrBuilder(
- int index);
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row}
- */
- public static final class Row extends
- com.google.protobuf.GeneratedMessage
- implements RowOrBuilder {
- // Use Row.newBuilder() to construct.
- private Row(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private Row(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final Row defaultInstance;
- public static Row getDefaultInstance() {
- return defaultInstance;
- }
-
- public Row getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private Row(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 10: {
- bitField0_ |= 0x00000001;
- key_ = input.readBytes();
- break;
- }
- case 18: {
- if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
- values_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell>();
- mutable_bitField0_ |= 0x00000002;
- }
- values_.add(input.readMessage(org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.PARSER, extensionRegistry));
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
- values_ = java.util.Collections.unmodifiableList(values_);
- }
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_Row_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_Row_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.class, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder.class);
- }
-
- public static com.google.protobuf.Parser<Row> PARSER =
- new com.google.protobuf.AbstractParser<Row>() {
- public Row parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new Row(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<Row> getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- // required bytes key = 1;
- public static final int KEY_FIELD_NUMBER = 1;
- private com.google.protobuf.ByteString key_;
- /**
- * <code>required bytes key = 1;</code>
- */
- public boolean hasKey() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>required bytes key = 1;</code>
- */
- public com.google.protobuf.ByteString getKey() {
- return key_;
- }
-
- // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
- public static final int VALUES_FIELD_NUMBER = 2;
- private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell> values_;
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell> getValuesList() {
- return values_;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.CellOrBuilder>
- getValuesOrBuilderList() {
- return values_;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- public int getValuesCount() {
- return values_.size();
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell getValues(int index) {
- return values_.get(index);
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.CellOrBuilder getValuesOrBuilder(
- int index) {
- return values_.get(index);
- }
-
- private void initFields() {
- key_ = com.google.protobuf.ByteString.EMPTY;
- values_ = java.util.Collections.emptyList();
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized != -1) return isInitialized == 1;
-
- if (!hasKey()) {
- memoizedIsInitialized = 0;
- return false;
- }
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBytes(1, key_);
- }
- for (int i = 0; i < values_.size(); i++) {
- output.writeMessage(2, values_.get(i));
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(1, key_);
- }
- for (int i = 0; i < values_.size(); i++) {
- size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(2, values_.get(i));
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.RowOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_Row_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_Row_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.class, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder.class);
- }
-
- // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- getValuesFieldBuilder();
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- key_ = com.google.protobuf.ByteString.EMPTY;
- bitField0_ = (bitField0_ & ~0x00000001);
- if (valuesBuilder_ == null) {
- values_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000002);
- } else {
- valuesBuilder_.clear();
- }
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_Row_descriptor;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row getDefaultInstanceForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.getDefaultInstance();
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row build() {
- org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row buildPartial() {
- org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row result = new org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.key_ = key_;
- if (valuesBuilder_ == null) {
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- values_ = java.util.Collections.unmodifiableList(values_);
- bitField0_ = (bitField0_ & ~0x00000002);
- }
- result.values_ = values_;
- } else {
- result.values_ = valuesBuilder_.build();
- }
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row) {
- return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row other) {
- if (other == org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.getDefaultInstance()) return this;
- if (other.hasKey()) {
- setKey(other.getKey());
- }
- if (valuesBuilder_ == null) {
- if (!other.values_.isEmpty()) {
- if (values_.isEmpty()) {
- values_ = other.values_;
- bitField0_ = (bitField0_ & ~0x00000002);
- } else {
- ensureValuesIsMutable();
- values_.addAll(other.values_);
- }
- onChanged();
- }
- } else {
- if (!other.values_.isEmpty()) {
- if (valuesBuilder_.isEmpty()) {
- valuesBuilder_.dispose();
- valuesBuilder_ = null;
- values_ = other.values_;
- bitField0_ = (bitField0_ & ~0x00000002);
- valuesBuilder_ =
- com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
- getValuesFieldBuilder() : null;
- } else {
- valuesBuilder_.addAllMessages(other.values_);
- }
- }
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- if (!hasKey()) {
-
- return false;
- }
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- // required bytes key = 1;
- private com.google.protobuf.ByteString key_ = com.google.protobuf.ByteString.EMPTY;
- /**
- * <code>required bytes key = 1;</code>
- */
- public boolean hasKey() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>required bytes key = 1;</code>
- */
- public com.google.protobuf.ByteString getKey() {
- return key_;
- }
- /**
- * <code>required bytes key = 1;</code>
- */
- public Builder setKey(com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- key_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>required bytes key = 1;</code>
- */
- public Builder clearKey() {
- bitField0_ = (bitField0_ & ~0x00000001);
- key_ = getDefaultInstance().getKey();
- onChanged();
- return this;
- }
-
- // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
- private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell> values_ =
- java.util.Collections.emptyList();
- private void ensureValuesIsMutable() {
- if (!((bitField0_ & 0x00000002) == 0x00000002)) {
- values_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell>(values_);
- bitField0_ |= 0x00000002;
- }
- }
-
- private com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.CellOrBuilder> valuesBuilder_;
-
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell> getValuesList() {
- if (valuesBuilder_ == null) {
- return java.util.Collections.unmodifiableList(values_);
- } else {
- return valuesBuilder_.getMessageList();
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- public int getValuesCount() {
- if (valuesBuilder_ == null) {
- return values_.size();
- } else {
- return valuesBuilder_.getCount();
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell getValues(int index) {
- if (valuesBuilder_ == null) {
- return values_.get(index);
- } else {
- return valuesBuilder_.getMessage(index);
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- public Builder setValues(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell value) {
- if (valuesBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureValuesIsMutable();
- values_.set(index, value);
- onChanged();
- } else {
- valuesBuilder_.setMessage(index, value);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- public Builder setValues(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder builderForValue) {
- if (valuesBuilder_ == null) {
- ensureValuesIsMutable();
- values_.set(index, builderForValue.build());
- onChanged();
- } else {
- valuesBuilder_.setMessage(index, builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- public Builder addValues(org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell value) {
- if (valuesBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureValuesIsMutable();
- values_.add(value);
- onChanged();
- } else {
- valuesBuilder_.addMessage(value);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- public Builder addValues(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell value) {
- if (valuesBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureValuesIsMutable();
- values_.add(index, value);
- onChanged();
- } else {
- valuesBuilder_.addMessage(index, value);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- public Builder addValues(
- org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder builderForValue) {
- if (valuesBuilder_ == null) {
- ensureValuesIsMutable();
- values_.add(builderForValue.build());
- onChanged();
- } else {
- valuesBuilder_.addMessage(builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- public Builder addValues(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder builderForValue) {
- if (valuesBuilder_ == null) {
- ensureValuesIsMutable();
- values_.add(index, builderForValue.build());
- onChanged();
- } else {
- valuesBuilder_.addMessage(index, builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- public Builder addAllValues(
- java.lang.Iterable<? extends org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell> values) {
- if (valuesBuilder_ == null) {
- ensureValuesIsMutable();
- super.addAll(values, values_);
- onChanged();
- } else {
- valuesBuilder_.addAllMessages(values);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- public Builder clearValues() {
- if (valuesBuilder_ == null) {
- values_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000002);
- onChanged();
- } else {
- valuesBuilder_.clear();
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- public Builder removeValues(int index) {
- if (valuesBuilder_ == null) {
- ensureValuesIsMutable();
- values_.remove(index);
- onChanged();
- } else {
- valuesBuilder_.remove(index);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder getValuesBuilder(
- int index) {
- return getValuesFieldBuilder().getBuilder(index);
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.CellOrBuilder getValuesOrBuilder(
- int index) {
- if (valuesBuilder_ == null) {
- return values_.get(index); } else {
- return valuesBuilder_.getMessageOrBuilder(index);
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.CellOrBuilder>
- getValuesOrBuilderList() {
- if (valuesBuilder_ != null) {
- return valuesBuilder_.getMessageOrBuilderList();
- } else {
- return java.util.Collections.unmodifiableList(values_);
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder addValuesBuilder() {
- return getValuesFieldBuilder().addBuilder(
- org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.getDefaultInstance());
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder addValuesBuilder(
- int index) {
- return getValuesFieldBuilder().addBuilder(
- index, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.getDefaultInstance());
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;</code>
- */
- public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder>
- getValuesBuilderList() {
- return getValuesFieldBuilder().getBuilderList();
- }
- private com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.CellOrBuilder>
- getValuesFieldBuilder() {
- if (valuesBuilder_ == null) {
- valuesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.CellOrBuilder>(
- values_,
- ((bitField0_ & 0x00000002) == 0x00000002),
- getParentForChildren(),
- isClean());
- values_ = null;
- }
- return valuesBuilder_;
- }
-
- // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row)
- }
-
- static {
- defaultInstance = new Row(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row)
- }
-
- // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
- public static final int ROWS_FIELD_NUMBER = 1;
- private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row> rows_;
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row> getRowsList() {
- return rows_;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.RowOrBuilder>
- getRowsOrBuilderList() {
- return rows_;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- public int getRowsCount() {
- return rows_.size();
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row getRows(int index) {
- return rows_.get(index);
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.RowOrBuilder getRowsOrBuilder(
- int index) {
- return rows_.get(index);
- }
-
- private void initFields() {
- rows_ = java.util.Collections.emptyList();
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized != -1) return isInitialized == 1;
-
- for (int i = 0; i < getRowsCount(); i++) {
- if (!getRows(i).isInitialized()) {
- memoizedIsInitialized = 0;
- return false;
- }
- }
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- for (int i = 0; i < rows_.size(); i++) {
- output.writeMessage(1, rows_.get(i));
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- for (int i = 0; i < rows_.size(); i++) {
- size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(1, rows_.get(i));
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.CellSet}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSetOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.class, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Builder.class);
- }
-
- // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- getRowsFieldBuilder();
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- if (rowsBuilder_ == null) {
- rows_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000001);
- } else {
- rowsBuilder_.clear();
- }
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_descriptor;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet getDefaultInstanceForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.getDefaultInstance();
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet build() {
- org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet buildPartial() {
- org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet result = new org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet(this);
- int from_bitField0_ = bitField0_;
- if (rowsBuilder_ == null) {
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- rows_ = java.util.Collections.unmodifiableList(rows_);
- bitField0_ = (bitField0_ & ~0x00000001);
- }
- result.rows_ = rows_;
- } else {
- result.rows_ = rowsBuilder_.build();
- }
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet) {
- return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet other) {
- if (other == org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.getDefaultInstance()) return this;
- if (rowsBuilder_ == null) {
- if (!other.rows_.isEmpty()) {
- if (rows_.isEmpty()) {
- rows_ = other.rows_;
- bitField0_ = (bitField0_ & ~0x00000001);
- } else {
- ensureRowsIsMutable();
- rows_.addAll(other.rows_);
- }
- onChanged();
- }
- } else {
- if (!other.rows_.isEmpty()) {
- if (rowsBuilder_.isEmpty()) {
- rowsBuilder_.dispose();
- rowsBuilder_ = null;
- rows_ = other.rows_;
- bitField0_ = (bitField0_ & ~0x00000001);
- rowsBuilder_ =
- com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
- getRowsFieldBuilder() : null;
- } else {
- rowsBuilder_.addAllMessages(other.rows_);
- }
- }
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- for (int i = 0; i < getRowsCount(); i++) {
- if (!getRows(i).isInitialized()) {
-
- return false;
- }
- }
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
- private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row> rows_ =
- java.util.Collections.emptyList();
- private void ensureRowsIsMutable() {
- if (!((bitField0_ & 0x00000001) == 0x00000001)) {
- rows_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row>(rows_);
- bitField0_ |= 0x00000001;
- }
- }
-
- private com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.RowOrBuilder> rowsBuilder_;
-
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row> getRowsList() {
- if (rowsBuilder_ == null) {
- return java.util.Collections.unmodifiableList(rows_);
- } else {
- return rowsBuilder_.getMessageList();
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- public int getRowsCount() {
- if (rowsBuilder_ == null) {
- return rows_.size();
- } else {
- return rowsBuilder_.getCount();
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row getRows(int index) {
- if (rowsBuilder_ == null) {
- return rows_.get(index);
- } else {
- return rowsBuilder_.getMessage(index);
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- public Builder setRows(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row value) {
- if (rowsBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureRowsIsMutable();
- rows_.set(index, value);
- onChanged();
- } else {
- rowsBuilder_.setMessage(index, value);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- public Builder setRows(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder builderForValue) {
- if (rowsBuilder_ == null) {
- ensureRowsIsMutable();
- rows_.set(index, builderForValue.build());
- onChanged();
- } else {
- rowsBuilder_.setMessage(index, builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- public Builder addRows(org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row value) {
- if (rowsBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureRowsIsMutable();
- rows_.add(value);
- onChanged();
- } else {
- rowsBuilder_.addMessage(value);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- public Builder addRows(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row value) {
- if (rowsBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureRowsIsMutable();
- rows_.add(index, value);
- onChanged();
- } else {
- rowsBuilder_.addMessage(index, value);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- public Builder addRows(
- org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder builderForValue) {
- if (rowsBuilder_ == null) {
- ensureRowsIsMutable();
- rows_.add(builderForValue.build());
- onChanged();
- } else {
- rowsBuilder_.addMessage(builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- public Builder addRows(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder builderForValue) {
- if (rowsBuilder_ == null) {
- ensureRowsIsMutable();
- rows_.add(index, builderForValue.build());
- onChanged();
- } else {
- rowsBuilder_.addMessage(index, builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- public Builder addAllRows(
- java.lang.Iterable<? extends org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row> values) {
- if (rowsBuilder_ == null) {
- ensureRowsIsMutable();
- super.addAll(values, rows_);
- onChanged();
- } else {
- rowsBuilder_.addAllMessages(values);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- public Builder clearRows() {
- if (rowsBuilder_ == null) {
- rows_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000001);
- onChanged();
- } else {
- rowsBuilder_.clear();
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- public Builder removeRows(int index) {
- if (rowsBuilder_ == null) {
- ensureRowsIsMutable();
- rows_.remove(index);
- onChanged();
- } else {
- rowsBuilder_.remove(index);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder getRowsBuilder(
- int index) {
- return getRowsFieldBuilder().getBuilder(index);
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.RowOrBuilder getRowsOrBuilder(
- int index) {
- if (rowsBuilder_ == null) {
- return rows_.get(index); } else {
- return rowsBuilder_.getMessageOrBuilder(index);
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.RowOrBuilder>
- getRowsOrBuilderList() {
- if (rowsBuilder_ != null) {
- return rowsBuilder_.getMessageOrBuilderList();
- } else {
- return java.util.Collections.unmodifiableList(rows_);
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder addRowsBuilder() {
- return getRowsFieldBuilder().addBuilder(
- org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.getDefaultInstance());
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder addRowsBuilder(
- int index) {
- return getRowsFieldBuilder().addBuilder(
- index, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.getDefaultInstance());
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;</code>
- */
- public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder>
- getRowsBuilderList() {
- return getRowsFieldBuilder().getBuilderList();
- }
- private com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.RowOrBuilder>
- getRowsFieldBuilder() {
- if (rowsBuilder_ == null) {
- rowsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.RowOrBuilder>(
- rows_,
- ((bitField0_ & 0x00000001) == 0x00000001),
- getParentForChildren(),
- isClean());
- rows_ = null;
- }
- return rowsBuilder_;
- }
-
- // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.CellSet)
- }
-
- static {
- defaultInstance = new CellSet(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.CellSet)
- }
-
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_Row_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_Row_fieldAccessorTable;
-
- public static com.google.protobuf.Descriptors.FileDescriptor
- getDescriptor() {
- return descriptor;
- }
- private static com.google.protobuf.Descriptors.FileDescriptor
- descriptor;
- static {
- java.lang.String[] descriptorData = {
- "\n\024CellSetMessage.proto\022/org.apache.hadoo" +
- "p.hbase.rest.protobuf.generated\032\021CellMes" +
- "sage.proto\"\260\001\n\007CellSet\022J\n\004rows\030\001 \003(\0132<.o" +
- "rg.apache.hadoop.hbase.rest.protobuf.gen" +
- "erated.CellSet.Row\032Y\n\003Row\022\013\n\003key\030\001 \002(\014\022E" +
- "\n\006values\030\002 \003(\01325.org.apache.hadoop.hbase" +
- ".rest.protobuf.generated.Cell"
- };
- com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
- new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
- public com.google.protobuf.ExtensionRegistry assignDescriptors(
- com.google.protobuf.Descriptors.FileDescriptor root) {
- descriptor = root;
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_descriptor =
- getDescriptor().getMessageTypes().get(0);
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_descriptor,
- new java.lang.String[] { "Rows", });
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_Row_descriptor =
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_descriptor.getNestedTypes().get(0);
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_Row_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_Row_descriptor,
- new java.lang.String[] { "Key", "Values", });
- return null;
- }
- };
- com.google.protobuf.Descriptors.FileDescriptor
- .internalBuildGeneratedFileFrom(descriptorData,
- new com.google.protobuf.Descriptors.FileDescriptor[] {
- org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.getDescriptor(),
- }, assigner);
- }
-
- // @@protoc_insertion_point(outer_class_scope)
-}
[31/38] HBASE-12197 Move rest to it's on module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ColumnSchemaMessage.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ColumnSchemaMessage.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ColumnSchemaMessage.java
new file mode 100644
index 0000000..f5f6a95
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ColumnSchemaMessage.java
@@ -0,0 +1,1904 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: ColumnSchemaMessage.proto
+
+package org.apache.hadoop.hbase.rest.protobuf.generated;
+
+public final class ColumnSchemaMessage {
+ private ColumnSchemaMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface ColumnSchemaOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional string name = 1;
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ boolean hasName();
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ java.lang.String getName();
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ com.google.protobuf.ByteString
+ getNameBytes();
+
+ // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute>
+ getAttrsList();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute getAttrs(int index);
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ int getAttrsCount();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.AttributeOrBuilder>
+ getAttrsOrBuilderList();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.AttributeOrBuilder getAttrsOrBuilder(
+ int index);
+
+ // optional int32 ttl = 3;
+ /**
+ * <code>optional int32 ttl = 3;</code>
+ *
+ * <pre>
+ * optional helpful encodings of commonly used attributes
+ * </pre>
+ */
+ boolean hasTtl();
+ /**
+ * <code>optional int32 ttl = 3;</code>
+ *
+ * <pre>
+ * optional helpful encodings of commonly used attributes
+ * </pre>
+ */
+ int getTtl();
+
+ // optional int32 maxVersions = 4;
+ /**
+ * <code>optional int32 maxVersions = 4;</code>
+ */
+ boolean hasMaxVersions();
+ /**
+ * <code>optional int32 maxVersions = 4;</code>
+ */
+ int getMaxVersions();
+
+ // optional string compression = 5;
+ /**
+ * <code>optional string compression = 5;</code>
+ */
+ boolean hasCompression();
+ /**
+ * <code>optional string compression = 5;</code>
+ */
+ java.lang.String getCompression();
+ /**
+ * <code>optional string compression = 5;</code>
+ */
+ com.google.protobuf.ByteString
+ getCompressionBytes();
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema}
+ */
+ public static final class ColumnSchema extends
+ com.google.protobuf.GeneratedMessage
+ implements ColumnSchemaOrBuilder {
+ // Use ColumnSchema.newBuilder() to construct.
+ private ColumnSchema(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private ColumnSchema(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final ColumnSchema defaultInstance;
+ public static ColumnSchema getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public ColumnSchema getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private ColumnSchema(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ name_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute>();
+ mutable_bitField0_ |= 0x00000002;
+ }
+ attrs_.add(input.readMessage(org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.PARSER, extensionRegistry));
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000002;
+ ttl_ = input.readInt32();
+ break;
+ }
+ case 32: {
+ bitField0_ |= 0x00000004;
+ maxVersions_ = input.readInt32();
+ break;
+ }
+ case 42: {
+ bitField0_ |= 0x00000008;
+ compression_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ attrs_ = java.util.Collections.unmodifiableList(attrs_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.class, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<ColumnSchema> PARSER =
+ new com.google.protobuf.AbstractParser<ColumnSchema>() {
+ public ColumnSchema parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new ColumnSchema(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<ColumnSchema> getParserForType() {
+ return PARSER;
+ }
+
+ public interface AttributeOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string name = 1;
+ /**
+ * <code>required string name = 1;</code>
+ */
+ boolean hasName();
+ /**
+ * <code>required string name = 1;</code>
+ */
+ java.lang.String getName();
+ /**
+ * <code>required string name = 1;</code>
+ */
+ com.google.protobuf.ByteString
+ getNameBytes();
+
+ // required string value = 2;
+ /**
+ * <code>required string value = 2;</code>
+ */
+ boolean hasValue();
+ /**
+ * <code>required string value = 2;</code>
+ */
+ java.lang.String getValue();
+ /**
+ * <code>required string value = 2;</code>
+ */
+ com.google.protobuf.ByteString
+ getValueBytes();
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute}
+ */
+ public static final class Attribute extends
+ com.google.protobuf.GeneratedMessage
+ implements AttributeOrBuilder {
+ // Use Attribute.newBuilder() to construct.
+ private Attribute(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Attribute(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Attribute defaultInstance;
+ public static Attribute getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Attribute getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Attribute(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ name_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ value_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_Attribute_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_Attribute_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.class, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Attribute> PARSER =
+ new com.google.protobuf.AbstractParser<Attribute>() {
+ public Attribute parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Attribute(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Attribute> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private java.lang.Object name_;
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public java.lang.String getName() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ name_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required string value = 2;
+ public static final int VALUE_FIELD_NUMBER = 2;
+ private java.lang.Object value_;
+ /**
+ * <code>required string value = 2;</code>
+ */
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required string value = 2;</code>
+ */
+ public java.lang.String getValue() {
+ java.lang.Object ref = value_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ value_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>required string value = 2;</code>
+ */
+ public com.google.protobuf.ByteString
+ getValueBytes() {
+ java.lang.Object ref = value_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ value_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private void initFields() {
+ name_ = "";
+ value_ = "";
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasValue()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getValueBytes());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getValueBytes());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.AttributeOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_Attribute_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_Attribute_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.class, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ name_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ value_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_Attribute_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute build() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute buildPartial() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute result = new org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.name_ = name_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.value_ = value_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute) {
+ return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute other) {
+ if (other == org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ bitField0_ |= 0x00000001;
+ name_ = other.name_;
+ onChanged();
+ }
+ if (other.hasValue()) {
+ bitField0_ |= 0x00000002;
+ value_ = other.value_;
+ onChanged();
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasName()) {
+
+ return false;
+ }
+ if (!hasValue()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required string name = 1;
+ private java.lang.Object name_ = "";
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public java.lang.String getName() {
+ java.lang.Object ref = name_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ name_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public Builder setName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public Builder clearName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ name_ = getDefaultInstance().getName();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public Builder setNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required string value = 2;
+ private java.lang.Object value_ = "";
+ /**
+ * <code>required string value = 2;</code>
+ */
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required string value = 2;</code>
+ */
+ public java.lang.String getValue() {
+ java.lang.Object ref = value_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ value_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>required string value = 2;</code>
+ */
+ public com.google.protobuf.ByteString
+ getValueBytes() {
+ java.lang.Object ref = value_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ value_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>required string value = 2;</code>
+ */
+ public Builder setValue(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ value_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string value = 2;</code>
+ */
+ public Builder clearValue() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ value_ = getDefaultInstance().getValue();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string value = 2;</code>
+ */
+ public Builder setValueBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ value_ = value;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute)
+ }
+
+ static {
+ defaultInstance = new Attribute(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute)
+ }
+
+ private int bitField0_;
+ // optional string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private java.lang.Object name_;
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public java.lang.String getName() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ name_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
+ public static final int ATTRS_FIELD_NUMBER = 2;
+ private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute> attrs_;
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute> getAttrsList() {
+ return attrs_;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.AttributeOrBuilder>
+ getAttrsOrBuilderList() {
+ return attrs_;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ public int getAttrsCount() {
+ return attrs_.size();
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute getAttrs(int index) {
+ return attrs_.get(index);
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.AttributeOrBuilder getAttrsOrBuilder(
+ int index) {
+ return attrs_.get(index);
+ }
+
+ // optional int32 ttl = 3;
+ public static final int TTL_FIELD_NUMBER = 3;
+ private int ttl_;
+ /**
+ * <code>optional int32 ttl = 3;</code>
+ *
+ * <pre>
+ * optional helpful encodings of commonly used attributes
+ * </pre>
+ */
+ public boolean hasTtl() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional int32 ttl = 3;</code>
+ *
+ * <pre>
+ * optional helpful encodings of commonly used attributes
+ * </pre>
+ */
+ public int getTtl() {
+ return ttl_;
+ }
+
+ // optional int32 maxVersions = 4;
+ public static final int MAXVERSIONS_FIELD_NUMBER = 4;
+ private int maxVersions_;
+ /**
+ * <code>optional int32 maxVersions = 4;</code>
+ */
+ public boolean hasMaxVersions() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional int32 maxVersions = 4;</code>
+ */
+ public int getMaxVersions() {
+ return maxVersions_;
+ }
+
+ // optional string compression = 5;
+ public static final int COMPRESSION_FIELD_NUMBER = 5;
+ private java.lang.Object compression_;
+ /**
+ * <code>optional string compression = 5;</code>
+ */
+ public boolean hasCompression() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional string compression = 5;</code>
+ */
+ public java.lang.String getCompression() {
+ java.lang.Object ref = compression_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ compression_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>optional string compression = 5;</code>
+ */
+ public com.google.protobuf.ByteString
+ getCompressionBytes() {
+ java.lang.Object ref = compression_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ compression_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private void initFields() {
+ name_ = "";
+ attrs_ = java.util.Collections.emptyList();
+ ttl_ = 0;
+ maxVersions_ = 0;
+ compression_ = "";
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ for (int i = 0; i < getAttrsCount(); i++) {
+ if (!getAttrs(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getNameBytes());
+ }
+ for (int i = 0; i < attrs_.size(); i++) {
+ output.writeMessage(2, attrs_.get(i));
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeInt32(3, ttl_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeInt32(4, maxVersions_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeBytes(5, getCompressionBytes());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getNameBytes());
+ }
+ for (int i = 0; i < attrs_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, attrs_.get(i));
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(3, ttl_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(4, maxVersions_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(5, getCompressionBytes());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchemaOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.class, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getAttrsFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ name_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (attrsBuilder_ == null) {
+ attrs_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ attrsBuilder_.clear();
+ }
+ ttl_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ maxVersions_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ compression_ = "";
+ bitField0_ = (bitField0_ & ~0x00000010);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema build() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema buildPartial() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema result = new org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.name_ = name_;
+ if (attrsBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ attrs_ = java.util.Collections.unmodifiableList(attrs_);
+ bitField0_ = (bitField0_ & ~0x00000002);
+ }
+ result.attrs_ = attrs_;
+ } else {
+ result.attrs_ = attrsBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.ttl_ = ttl_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.maxVersions_ = maxVersions_;
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.compression_ = compression_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema) {
+ return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema other) {
+ if (other == org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ bitField0_ |= 0x00000001;
+ name_ = other.name_;
+ onChanged();
+ }
+ if (attrsBuilder_ == null) {
+ if (!other.attrs_.isEmpty()) {
+ if (attrs_.isEmpty()) {
+ attrs_ = other.attrs_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ ensureAttrsIsMutable();
+ attrs_.addAll(other.attrs_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.attrs_.isEmpty()) {
+ if (attrsBuilder_.isEmpty()) {
+ attrsBuilder_.dispose();
+ attrsBuilder_ = null;
+ attrs_ = other.attrs_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ attrsBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getAttrsFieldBuilder() : null;
+ } else {
+ attrsBuilder_.addAllMessages(other.attrs_);
+ }
+ }
+ }
+ if (other.hasTtl()) {
+ setTtl(other.getTtl());
+ }
+ if (other.hasMaxVersions()) {
+ setMaxVersions(other.getMaxVersions());
+ }
+ if (other.hasCompression()) {
+ bitField0_ |= 0x00000010;
+ compression_ = other.compression_;
+ onChanged();
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ for (int i = 0; i < getAttrsCount(); i++) {
+ if (!getAttrs(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // optional string name = 1;
+ private java.lang.Object name_ = "";
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public java.lang.String getName() {
+ java.lang.Object ref = name_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ name_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public Builder setName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public Builder clearName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ name_ = getDefaultInstance().getName();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public Builder setNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+
+ // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
+ private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute> attrs_ =
+ java.util.Collections.emptyList();
+ private void ensureAttrsIsMutable() {
+ if (!((bitField0_ & 0x00000002) == 0x00000002)) {
+ attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute>(attrs_);
+ bitField0_ |= 0x00000002;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.AttributeOrBuilder> attrsBuilder_;
+
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute> getAttrsList() {
+ if (attrsBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(attrs_);
+ } else {
+ return attrsBuilder_.getMessageList();
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ public int getAttrsCount() {
+ if (attrsBuilder_ == null) {
+ return attrs_.size();
+ } else {
+ return attrsBuilder_.getCount();
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute getAttrs(int index) {
+ if (attrsBuilder_ == null) {
+ return attrs_.get(index);
+ } else {
+ return attrsBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ public Builder setAttrs(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute value) {
+ if (attrsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAttrsIsMutable();
+ attrs_.set(index, value);
+ onChanged();
+ } else {
+ attrsBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ public Builder setAttrs(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder builderForValue) {
+ if (attrsBuilder_ == null) {
+ ensureAttrsIsMutable();
+ attrs_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ attrsBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ public Builder addAttrs(org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute value) {
+ if (attrsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAttrsIsMutable();
+ attrs_.add(value);
+ onChanged();
+ } else {
+ attrsBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ public Builder addAttrs(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute value) {
+ if (attrsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAttrsIsMutable();
+ attrs_.add(index, value);
+ onChanged();
+ } else {
+ attrsBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ public Builder addAttrs(
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder builderForValue) {
+ if (attrsBuilder_ == null) {
+ ensureAttrsIsMutable();
+ attrs_.add(builderForValue.build());
+ onChanged();
+ } else {
+ attrsBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ public Builder addAttrs(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder builderForValue) {
+ if (attrsBuilder_ == null) {
+ ensureAttrsIsMutable();
+ attrs_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ attrsBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ public Builder addAllAttrs(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute> values) {
+ if (attrsBuilder_ == null) {
+ ensureAttrsIsMutable();
+ super.addAll(values, attrs_);
+ onChanged();
+ } else {
+ attrsBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ public Builder clearAttrs() {
+ if (attrsBuilder_ == null) {
+ attrs_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ onChanged();
+ } else {
+ attrsBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ public Builder removeAttrs(int index) {
+ if (attrsBuilder_ == null) {
+ ensureAttrsIsMutable();
+ attrs_.remove(index);
+ onChanged();
+ } else {
+ attrsBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder getAttrsBuilder(
+ int index) {
+ return getAttrsFieldBuilder().getBuilder(index);
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.AttributeOrBuilder getAttrsOrBuilder(
+ int index) {
+ if (attrsBuilder_ == null) {
+ return attrs_.get(index); } else {
+ return attrsBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.AttributeOrBuilder>
+ getAttrsOrBuilderList() {
+ if (attrsBuilder_ != null) {
+ return attrsBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(attrs_);
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder addAttrsBuilder() {
+ return getAttrsFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder addAttrsBuilder(
+ int index) {
+ return getAttrsFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder>
+ getAttrsBuilderList() {
+ return getAttrsFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.AttributeOrBuilder>
+ getAttrsFieldBuilder() {
+ if (attrsBuilder_ == null) {
+ attrsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.AttributeOrBuilder>(
+ attrs_,
+ ((bitField0_ & 0x00000002) == 0x00000002),
+ getParentForChildren(),
+ isClean());
+ attrs_ = null;
+ }
+ return attrsBuilder_;
+ }
+
+ // optional int32 ttl = 3;
+ private int ttl_ ;
+ /**
+ * <code>optional int32 ttl = 3;</code>
+ *
+ * <pre>
+ * optional helpful encodings of commonly used attributes
+ * </pre>
+ */
+ public boolean hasTtl() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional int32 ttl = 3;</code>
+ *
+ * <pre>
+ * optional helpful encodings of commonly used attributes
+ * </pre>
+ */
+ public int getTtl() {
+ return ttl_;
+ }
+ /**
+ * <code>optional int32 ttl = 3;</code>
+ *
+ * <pre>
+ * optional helpful encodings of commonly used attributes
+ * </pre>
+ */
+ public Builder setTtl(int value) {
+ bitField0_ |= 0x00000004;
+ ttl_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int32 ttl = 3;</code>
+ *
+ * <pre>
+ * optional helpful encodings of commonly used attributes
+ * </pre>
+ */
+ public Builder clearTtl() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ ttl_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // optional int32 maxVersions = 4;
+ private int maxVersions_ ;
+ /**
+ * <code>optional int32 maxVersions = 4;</code>
+ */
+ public boolean hasMaxVersions() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional int32 maxVersions = 4;</code>
+ */
+ public int getMaxVersions() {
+ return maxVersions_;
+ }
+ /**
+ * <code>optional int32 maxVersions = 4;</code>
+ */
+ public Builder setMaxVersions(int value) {
+ bitField0_ |= 0x00000008;
+ maxVersions_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int32 maxVersions = 4;</code>
+ */
+ public Builder clearMaxVersions() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ maxVersions_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // optional string compression = 5;
+ private java.lang.Object compression_ = "";
+ /**
+ * <code>optional string compression = 5;</code>
+ */
+ public boolean hasCompression() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * <code>optional string compression = 5;</code>
+ */
+ public java.lang.String getCompression() {
+ java.lang.Object ref = compression_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ compression_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>optional string compression = 5;</code>
+ */
+ public com.google.protobuf.ByteString
+ getCompressionBytes() {
+ java.lang.Object ref = compression_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ compression_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>optional string compression = 5;</code>
+ */
+ public Builder setCompression(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000010;
+ compression_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string compression = 5;</code>
+ */
+ public Builder clearCompression() {
+ bitField0_ = (bitField0_ & ~0x00000010);
+ compression_ = getDefaultInstance().getCompression();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string compression = 5;</code>
+ */
+ public Builder setCompressionBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000010;
+ compression_ = value;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema)
+ }
+
+ static {
+ defaultInstance = new ColumnSchema(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_Attribute_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_Attribute_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\031ColumnSchemaMessage.proto\022/org.apache." +
+ "hadoop.hbase.rest.protobuf.generated\"\325\001\n" +
+ "\014ColumnSchema\022\014\n\004name\030\001 \001(\t\022V\n\005attrs\030\002 \003" +
+ "(\0132G.org.apache.hadoop.hbase.rest.protob" +
+ "uf.generated.ColumnSchema.Attribute\022\013\n\003t" +
+ "tl\030\003 \001(\005\022\023\n\013maxVersions\030\004 \001(\005\022\023\n\013compres" +
+ "sion\030\005 \001(\t\032(\n\tAttribute\022\014\n\004name\030\001 \002(\t\022\r\n" +
+ "\005value\030\002 \002(\t"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_descriptor,
+ new java.lang.String[] { "Name", "Attrs", "Ttl", "MaxVersions", "Compression", });
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_Attribute_descriptor =
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_descriptor.getNestedTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_Attribute_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_Attribute_descriptor,
+ new java.lang.String[] { "Name", "Value", });
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
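For reference, the generated class above encodes the following schema. Reconstructed from the embedded descriptor string and the per-field javadoc (field labels and numbers are taken directly from the generated accessors, so this is a sketch of ColumnSchemaMessage.proto, not the file itself):

    package org.apache.hadoop.hbase.rest.protobuf.generated;

    message ColumnSchema {
      optional string name = 1;
      repeated Attribute attrs = 2;
      // optional helpful encodings of commonly used attributes
      optional int32 ttl = 3;
      optional int32 maxVersions = 4;
      optional string compression = 5;

      message Attribute {
        required string name = 1;
        required string value = 2;
      }
    }

A caller populates the message through the nested builders shown above; the concrete values here ("cf", "GZ", "BLOCKCACHE") are illustrative only:

    ColumnSchemaMessage.ColumnSchema schema =
        ColumnSchemaMessage.ColumnSchema.newBuilder()
            .setName("cf")                        // column family name
            .setTtl(86400)
            .setMaxVersions(3)
            .setCompression("GZ")
            .addAttrs(ColumnSchemaMessage.ColumnSchema.Attribute.newBuilder()
                .setName("BLOCKCACHE")            // Attribute.name is required
                .setValue("true"))                // Attribute.value is required
            .build();                             // throws if a required field is unset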
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java
new file mode 100644
index 0000000..5cc2c7b
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java
@@ -0,0 +1,168 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import javax.ws.rs.DefaultValue;
+import javax.ws.rs.GET;
+import javax.ws.rs.HeaderParam;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.rest.model.CellModel;
+import org.apache.hadoop.hbase.rest.model.RowModel;
+import org.codehaus.jackson.annotate.JsonIgnore;
+import org.codehaus.jackson.annotate.JsonProperty;
+
+@InterfaceAudience.Private
+public class TableScanResource extends ResourceBase {
+
+ private static final Log LOG = LogFactory.getLog(TableScanResource.class);
+ TableResource tableResource;
+ ResultScanner results;
+ int userRequestedLimit;
+
+ public TableScanResource(ResultScanner scanner, int userRequestedLimit) throws IOException {
+ super();
+ this.results = scanner;
+ this.userRequestedLimit = userRequestedLimit;
+ }
+
+ @GET
+ @Produces({ Constants.MIMETYPE_XML, Constants.MIMETYPE_JSON })
+ public CellSetModelStream get(final @Context UriInfo uriInfo) {
+ servlet.getMetrics().incrementRequests(1);
+ final int rowsToSend = userRequestedLimit;
+ servlet.getMetrics().incrementSucessfulScanRequests(1);
+ final Iterator<Result> itr = results.iterator();
+ return new CellSetModelStream(new ArrayList<RowModel>() {
+ public Iterator<RowModel> iterator() {
+ return new Iterator<RowModel>() {
+ int count = rowsToSend;
+
+ @Override
+ public boolean hasNext() {
+ if (count > 0) {
+ return itr.hasNext();
+ } else {
+ return false;
+ }
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException(
+ "Remove method cannot be used in CellSetModelStream");
+ }
+
+ @Override
+ public RowModel next() {
+ Result rs = itr.next();
+ if ((rs == null) || (count <= 0)) {
+ return null;
+ }
+ byte[] rowKey = rs.getRow();
+ RowModel rModel = new RowModel(rowKey);
+ List<Cell> kvs = rs.listCells();
+ for (Cell kv : kvs) {
+ rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv),
+ kv.getTimestamp(), CellUtil.cloneValue(kv)));
+ }
+ count--;
+ return rModel;
+ }
+ };
+ }
+ });
+ }
+
+ @GET
+ @Produces({ Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF })
+ public Response getProtobuf(
+ final @Context UriInfo uriInfo,
+ final @PathParam("scanspec") String scanSpec,
+ final @HeaderParam("Accept") String contentType,
+ @DefaultValue(Integer.MAX_VALUE + "") @QueryParam(Constants.SCAN_LIMIT) int userRequestedLimit,
+ @DefaultValue("") @QueryParam(Constants.SCAN_START_ROW) String startRow,
+ @DefaultValue("") @QueryParam(Constants.SCAN_END_ROW) String endRow,
+ @DefaultValue("column") @QueryParam(Constants.SCAN_COLUMN) List<String> column,
+ @DefaultValue("1") @QueryParam(Constants.SCAN_MAX_VERSIONS) int maxVersions,
+ @DefaultValue("-1") @QueryParam(Constants.SCAN_BATCH_SIZE) int batchSize,
+ @DefaultValue("0") @QueryParam(Constants.SCAN_START_TIME) long startTime,
+ @DefaultValue(Long.MAX_VALUE + "") @QueryParam(Constants.SCAN_END_TIME) long endTime,
+ @DefaultValue("true") @QueryParam(Constants.SCAN_BATCH_SIZE) boolean cacheBlocks) {
+ servlet.getMetrics().incrementRequests(1);
+ try {
+ int fetchSize = this.servlet.getConfiguration().getInt(Constants.SCAN_FETCH_SIZE, 10);
+ ProtobufStreamingUtil stream = new ProtobufStreamingUtil(this.results, contentType,
+ userRequestedLimit, fetchSize);
+ servlet.getMetrics().incrementSucessfulScanRequests(1);
+ ResponseBuilder response = Response.ok(stream);
+ response.header("content-type", contentType);
+ return response.build();
+ } catch (Exception exp) {
+ servlet.getMetrics().incrementFailedScanRequests(1);
+ processException(exp);
+ LOG.warn(exp);
+ return null;
+ }
+ }
+
+ @XmlRootElement(name = "CellSet")
+ @XmlAccessorType(XmlAccessType.FIELD)
+ public static class CellSetModelStream {
+ // JAXB needs an arraylist for streaming
+ @XmlElement(name = "Row")
+ @JsonIgnore
+ private ArrayList<RowModel> Row;
+
+ public CellSetModelStream() {
+ }
+
+ public CellSetModelStream(final ArrayList<RowModel> rowList) {
+ this.Row = rowList;
+ }
+
+ // jackson needs an iterator for streaming
+ @JsonProperty("Row")
+ public Iterator<RowModel> getIterator() {
+ return Row.iterator();
+ }
+ }
+}
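The anonymous Iterator in get() above caps the stream at the user-requested limit by counting down alongside the scanner's own iterator. The same pattern in isolation (a minimal sketch; LimitedIterator is a hypothetical name, not part of this patch):

    import java.util.Iterator;
    import java.util.NoSuchElementException;

    /** Wraps a source iterator and stops after 'limit' elements. */
    final class LimitedIterator<T> implements Iterator<T> {
      private final Iterator<T> source;
      private int remaining;

      LimitedIterator(Iterator<T> source, int limit) {
        this.source = source;
        this.remaining = limit;
      }

      @Override
      public boolean hasNext() {
        // Stop when either the limit is exhausted or the source runs dry,
        // as TableScanResource's iterator does with its 'count' field.
        return remaining > 0 && source.hasNext();
      }

      @Override
      public T next() {
        if (remaining <= 0) {
          throw new NoSuchElementException();
        }
        remaining--;
        return source.next();
      }

      @Override
      public void remove() {
        throw new UnsupportedOperationException();
      }
    }

Wrapping a three-element iterator with limit 2 yields the first two elements and then reports exhaustion, which is how the resource honors Constants.SCAN_LIMIT while streaming.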
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
new file mode 100644
index 0000000..ae93825
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
@@ -0,0 +1,104 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+
+import javax.servlet.ServletContext;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import javax.ws.rs.core.Response.ResponseBuilder;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.rest.model.VersionModel;
+
+/**
+ * Implements REST software version reporting
+ * <p>
+ * <tt>/version/rest</tt>
+ * <p>
+ * <tt>/version</tt> (alias for <tt>/version/rest</tt>)
+ */
+@InterfaceAudience.Private
+public class VersionResource extends ResourceBase {
+
+ private static final Log LOG = LogFactory.getLog(VersionResource.class);
+
+ static CacheControl cacheControl;
+ static {
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ /**
+ * Constructor
+ * @throws IOException
+ */
+ public VersionResource() throws IOException {
+ super();
+ }
+
+ /**
+ * Build a response for a version request.
+ * @param context servlet context
+ * @param uriInfo (JAX-RS context variable) request URL
+ * @return a response for a version request
+ */
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response get(final @Context ServletContext context,
+ final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ ResponseBuilder response = Response.ok(new VersionModel(context));
+ response.cacheControl(cacheControl);
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return response.build();
+ }
+
+ /**
+ * Dispatch to StorageClusterVersionResource
+ */
+ @Path("cluster")
+ public StorageClusterVersionResource getClusterVersionResource()
+ throws IOException {
+ return new StorageClusterVersionResource();
+ }
+
+ /**
+ * Dispatch <tt>/version/rest</tt> to self.
+ */
+ @Path("rest")
+ public VersionResource getVersionResource() {
+ return this;
+ }
+}
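With the Client moved in the same patch (next file), the version endpoint above can be exercised end to end. A minimal sketch, assuming a REST gateway at localhost:8080 (the address and the class name VersionCheck are assumptions; Cluster.add(host, port) and Response's getCode()/getBody() come from the accompanying client classes, which are not shown in this excerpt):

    import org.apache.hadoop.hbase.rest.Constants;
    import org.apache.hadoop.hbase.rest.client.Client;
    import org.apache.hadoop.hbase.rest.client.Cluster;
    import org.apache.hadoop.hbase.rest.client.Response;

    public class VersionCheck {
      public static void main(String[] args) throws Exception {
        Cluster cluster = new Cluster();
        cluster.add("localhost", 8080);   // assumed gateway host:port
        Client client = new Client(cluster);
        try {
          // GET /version/rest, asking for the JSON representation
          Response response = client.get("/version/rest", Constants.MIMETYPE_JSON);
          System.out.println(response.getCode());
          System.out.println(new String(response.getBody(), "UTF-8"));
        } finally {
          client.shutdown();              // release pooled connections
        }
      }
    }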
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
new file mode 100644
index 0000000..ebedf57
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
@@ -0,0 +1,525 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.client;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Collections;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.commons.httpclient.Header;
+import org.apache.commons.httpclient.HttpClient;
+import org.apache.commons.httpclient.HttpMethod;
+import org.apache.commons.httpclient.HttpVersion;
+import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager;
+import org.apache.commons.httpclient.URI;
+import org.apache.commons.httpclient.methods.ByteArrayRequestEntity;
+import org.apache.commons.httpclient.methods.DeleteMethod;
+import org.apache.commons.httpclient.methods.GetMethod;
+import org.apache.commons.httpclient.methods.HeadMethod;
+import org.apache.commons.httpclient.methods.PostMethod;
+import org.apache.commons.httpclient.methods.PutMethod;
+import org.apache.commons.httpclient.params.HttpClientParams;
+import org.apache.commons.httpclient.params.HttpConnectionManagerParams;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * A wrapper around HttpClient which provides some useful function and
+ * semantics for interacting with the REST gateway.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class Client {
+ public static final Header[] EMPTY_HEADER_ARRAY = new Header[0];
+
+ private static final Log LOG = LogFactory.getLog(Client.class);
+
+ private HttpClient httpClient;
+ private Cluster cluster;
+ private boolean sslEnabled;
+
+ private Map<String, String> extraHeaders;
+
+ /**
+ * Default Constructor
+ */
+ public Client() {
+ this(null);
+ }
+
+ private void initialize(Cluster cluster, boolean sslEnabled) {
+ this.cluster = cluster;
+ this.sslEnabled = sslEnabled;
+ MultiThreadedHttpConnectionManager manager =
+ new MultiThreadedHttpConnectionManager();
+ HttpConnectionManagerParams managerParams = manager.getParams();
+ managerParams.setConnectionTimeout(2000); // 2 s
+ managerParams.setDefaultMaxConnectionsPerHost(10);
+ managerParams.setMaxTotalConnections(100);
+ extraHeaders = new ConcurrentHashMap<String, String>();
+ this.httpClient = new HttpClient(manager);
+ HttpClientParams clientParams = httpClient.getParams();
+ clientParams.setVersion(HttpVersion.HTTP_1_1);
+
+ }
+ /**
+ * Constructor
+ * @param cluster the cluster definition
+ */
+ public Client(Cluster cluster) {
+ initialize(cluster, false);
+ }
+
+ /**
+ * Constructor
+ * @param cluster the cluster definition
+ * @param sslEnabled enable SSL or not
+ */
+ public Client(Cluster cluster, boolean sslEnabled) {
+ initialize(cluster, sslEnabled);
+ }
+
+ /**
+ * Shut down the client. Close any open persistent connections.
+ */
+ public void shutdown() {
+ MultiThreadedHttpConnectionManager manager =
+ (MultiThreadedHttpConnectionManager) httpClient.getHttpConnectionManager();
+ manager.shutdown();
+ }
+
+ /**
+ * @return the wrapped HttpClient
+ */
+ public HttpClient getHttpClient() {
+ return httpClient;
+ }
+
+ /**
+   * Add extra headers. These extra headers are applied to every HTTP
+   * method until they are removed; a header that is no longer needed
+   * must be removed by the client explicitly.
+ */
+ public void addExtraHeader(final String name, final String value) {
+ extraHeaders.put(name, value);
+ }
+
+ /**
+ * Get an extra header value.
+ */
+ public String getExtraHeader(final String name) {
+ return extraHeaders.get(name);
+ }
+
+ /**
+ * Get all extra headers (read-only).
+ */
+ public Map<String, String> getExtraHeaders() {
+ return Collections.unmodifiableMap(extraHeaders);
+ }
+
+ /**
+ * Remove an extra header.
+ */
+ public void removeExtraHeader(final String name) {
+ extraHeaders.remove(name);
+ }
+
+ /**
+ * Execute a transaction method given only the path. Will select at random
+ * one of the members of the supplied cluster definition and iterate through
+ * the list until a transaction can be successfully completed. The
+ * definition of success here is a complete HTTP transaction, irrespective
+ * of result code.
+ * @param cluster the cluster definition
+ * @param method the transaction method
+ * @param headers HTTP header values to send
+ * @param path the properly urlencoded path
+ * @return the HTTP response code
+ * @throws IOException
+ */
+ public int executePathOnly(Cluster cluster, HttpMethod method,
+ Header[] headers, String path) throws IOException {
+ IOException lastException;
+ if (cluster.nodes.size() < 1) {
+ throw new IOException("Cluster is empty");
+ }
+ int start = (int)Math.round((cluster.nodes.size() - 1) * Math.random());
+ int i = start;
+ do {
+ cluster.lastHost = cluster.nodes.get(i);
+ try {
+ StringBuilder sb = new StringBuilder();
+ if (sslEnabled) {
+ sb.append("https://");
+ } else {
+ sb.append("http://");
+ }
+ sb.append(cluster.lastHost);
+ sb.append(path);
+ URI uri = new URI(sb.toString(), true);
+ return executeURI(method, headers, uri.toString());
+ } catch (IOException e) {
+ lastException = e;
+ }
+ } while (++i != start && i < cluster.nodes.size());
+ throw lastException;
+ }
+
+ /**
+ * Execute a transaction method given a complete URI.
+ * @param method the transaction method
+ * @param headers HTTP header values to send
+ * @param uri a properly urlencoded URI
+ * @return the HTTP response code
+ * @throws IOException
+ */
+ public int executeURI(HttpMethod method, Header[] headers, String uri)
+ throws IOException {
+ method.setURI(new URI(uri, true));
+ for (Map.Entry<String, String> e: extraHeaders.entrySet()) {
+ method.addRequestHeader(e.getKey(), e.getValue());
+ }
+ if (headers != null) {
+ for (Header header: headers) {
+ method.addRequestHeader(header);
+ }
+ }
+ long startTime = System.currentTimeMillis();
+ int code = httpClient.executeMethod(method);
+ long endTime = System.currentTimeMillis();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(method.getName() + " " + uri + " " + code + " " +
+ method.getStatusText() + " in " + (endTime - startTime) + " ms");
+ }
+ return code;
+ }
+
+ /**
+ * Execute a transaction method. Will call either <tt>executePathOnly</tt>
+ * or <tt>executeURI</tt> depending on whether a path only is supplied in
+ * 'path', or if a complete URI is passed instead, respectively.
+ * @param cluster the cluster definition
+ * @param method the HTTP method
+ * @param headers HTTP header values to send
+ * @param path the properly urlencoded path or URI
+ * @return the HTTP response code
+ * @throws IOException
+ */
+ public int execute(Cluster cluster, HttpMethod method, Header[] headers,
+ String path) throws IOException {
+ if (path.startsWith("/")) {
+ return executePathOnly(cluster, method, headers, path);
+ }
+ return executeURI(method, headers, path);
+ }
+
+ /**
+ * @return the cluster definition
+ */
+ public Cluster getCluster() {
+ return cluster;
+ }
+
+ /**
+ * @param cluster the cluster definition
+ */
+ public void setCluster(Cluster cluster) {
+ this.cluster = cluster;
+ }
+
+ /**
+ * Send a HEAD request
+ * @param path the path or URI
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response head(String path) throws IOException {
+ return head(cluster, path, null);
+ }
+
+ /**
+ * Send a HEAD request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @param headers the HTTP headers to include in the request
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response head(Cluster cluster, String path, Header[] headers)
+ throws IOException {
+ HeadMethod method = new HeadMethod();
+ try {
+ int code = execute(cluster, method, headers, path);
+ headers = method.getResponseHeaders();
+ return new Response(code, headers, null);
+ } finally {
+ method.releaseConnection();
+ }
+ }
+
+ /**
+ * Send a GET request
+ * @param path the path or URI
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response get(String path) throws IOException {
+ return get(cluster, path);
+ }
+
+ /**
+ * Send a GET request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response get(Cluster cluster, String path) throws IOException {
+ return get(cluster, path, EMPTY_HEADER_ARRAY);
+ }
+
+ /**
+ * Send a GET request
+ * @param path the path or URI
+ * @param accept Accept header value
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response get(String path, String accept) throws IOException {
+ return get(cluster, path, accept);
+ }
+
+ /**
+ * Send a GET request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @param accept Accept header value
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response get(Cluster cluster, String path, String accept)
+ throws IOException {
+ Header[] headers = new Header[1];
+ headers[0] = new Header("Accept", accept);
+ return get(cluster, path, headers);
+ }
+
+ /**
+ * Send a GET request
+ * @param path the path or URI
+ * @param headers the HTTP headers to include in the request,
+ * <tt>Accept</tt> must be supplied
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response get(String path, Header[] headers) throws IOException {
+ return get(cluster, path, headers);
+ }
+
+ /**
+ * Send a GET request
+ * @param c the cluster definition
+ * @param path the path or URI
+ * @param headers the HTTP headers to include in the request
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response get(Cluster c, String path, Header[] headers)
+ throws IOException {
+ GetMethod method = new GetMethod();
+ try {
+ int code = execute(c, method, headers, path);
+ headers = method.getResponseHeaders();
+ byte[] body = method.getResponseBody();
+ InputStream in = method.getResponseBodyAsStream();
+ return new Response(code, headers, body, in);
+ } finally {
+ method.releaseConnection();
+ }
+ }
+
+ /**
+ * Send a PUT request
+ * @param path the path or URI
+ * @param contentType the content MIME type
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response put(String path, String contentType, byte[] content)
+ throws IOException {
+ return put(cluster, path, contentType, content);
+ }
+
+ /**
+ * Send a PUT request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @param contentType the content MIME type
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response put(Cluster cluster, String path, String contentType,
+ byte[] content) throws IOException {
+ Header[] headers = new Header[1];
+ headers[0] = new Header("Content-Type", contentType);
+ return put(cluster, path, headers, content);
+ }
+
+ /**
+ * Send a PUT request
+ * @param path the path or URI
+ * @param headers the HTTP headers to include, <tt>Content-Type</tt> must be
+ * supplied
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response put(String path, Header[] headers, byte[] content)
+ throws IOException {
+ return put(cluster, path, headers, content);
+ }
+
+ /**
+ * Send a PUT request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @param headers the HTTP headers to include, <tt>Content-Type</tt> must be
+ * supplied
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response put(Cluster cluster, String path, Header[] headers,
+ byte[] content) throws IOException {
+ PutMethod method = new PutMethod();
+ try {
+ method.setRequestEntity(new ByteArrayRequestEntity(content));
+ int code = execute(cluster, method, headers, path);
+ headers = method.getResponseHeaders();
+ content = method.getResponseBody();
+ return new Response(code, headers, content);
+ } finally {
+ method.releaseConnection();
+ }
+ }
+
+ /**
+ * Send a POST request
+ * @param path the path or URI
+ * @param contentType the content MIME type
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response post(String path, String contentType, byte[] content)
+ throws IOException {
+ return post(cluster, path, contentType, content);
+ }
+
+ /**
+ * Send a POST request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @param contentType the content MIME type
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response post(Cluster cluster, String path, String contentType,
+ byte[] content) throws IOException {
+ Header[] headers = new Header[1];
+ headers[0] = new Header("Content-Type", contentType);
+ return post(cluster, path, headers, content);
+ }
+
+ /**
+ * Send a POST request
+ * @param path the path or URI
+ * @param headers the HTTP headers to include, <tt>Content-Type</tt> must be
+ * supplied
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response post(String path, Header[] headers, byte[] content)
+ throws IOException {
+ return post(cluster, path, headers, content);
+ }
+
+ /**
+ * Send a POST request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @param headers the HTTP headers to include, <tt>Content-Type</tt> must be
+ * supplied
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response post(Cluster cluster, String path, Header[] headers,
+ byte[] content) throws IOException {
+ PostMethod method = new PostMethod();
+ try {
+ method.setRequestEntity(new ByteArrayRequestEntity(content));
+ int code = execute(cluster, method, headers, path);
+ headers = method.getResponseHeaders();
+ content = method.getResponseBody();
+ return new Response(code, headers, content);
+ } finally {
+ method.releaseConnection();
+ }
+ }
+
+ /**
+ * Send a DELETE request
+ * @param path the path or URI
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response delete(String path) throws IOException {
+ return delete(cluster, path);
+ }
+
+ /**
+ * Send a DELETE request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response delete(Cluster cluster, String path) throws IOException {
+ DeleteMethod method = new DeleteMethod();
+ try {
+ int code = execute(cluster, method, null, path);
+ Header[] headers = method.getResponseHeaders();
+ byte[] content = method.getResponseBody();
+ return new Response(code, headers, content);
+ } finally {
+ method.releaseConnection();
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
new file mode 100644
index 0000000..a2de329
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
@@ -0,0 +1,103 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.client;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * A list of 'host:port' addresses of HTTP servers operating as a single
+ * entity, for example multiple redundant web service gateways.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class Cluster {
+ protected List<String> nodes =
+ Collections.synchronizedList(new ArrayList<String>());
+ protected String lastHost;
+
+ /**
+ * Constructor
+ */
+ public Cluster() {}
+
+ /**
+ * Constructor
+ * @param nodes a list of service locations, in 'host:port' format
+ */
+ public Cluster(List<String> nodes) {
+ this.nodes.addAll(nodes);
+ }
+
+ /**
+ * @return true if no locations have been added, false otherwise
+ */
+ public boolean isEmpty() {
+ return nodes.isEmpty();
+ }
+
+ /**
+ * Add a node to the cluster
+ * @param node the service location in 'host:port' format
+ */
+ public Cluster add(String node) {
+ nodes.add(node);
+ return this;
+ }
+
+ /**
+ * Add a node to the cluster
+ * @param name host name
+ * @param port service port
+ */
+ public Cluster add(String name, int port) {
+ StringBuilder sb = new StringBuilder();
+ sb.append(name);
+ sb.append(':');
+ sb.append(port);
+ return add(sb.toString());
+ }
+
+ /**
+ * Remove a node from the cluster
+ * @param node the service location in 'host:port' format
+ */
+ public Cluster remove(String node) {
+ nodes.remove(node);
+ return this;
+ }
+
+ /**
+ * Remove a node from the cluster
+ * @param name host name
+ * @param port service port
+ */
+ public Cluster remove(String name, int port) {
+ StringBuilder sb = new StringBuilder();
+ sb.append(name);
+ sb.append(':');
+ sb.append(port);
+ return remove(sb.toString());
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
new file mode 100644
index 0000000..23da9c8
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
@@ -0,0 +1,390 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.client;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.Unmarshaller;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.rest.Constants;
+import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;
+import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel;
+import org.apache.hadoop.hbase.rest.model.TableListModel;
+import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
+import org.apache.hadoop.hbase.rest.model.VersionModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class RemoteAdmin {
+
+ final Client client;
+ final Configuration conf;
+ final String accessToken;
+ final int maxRetries;
+ final long sleepTime;
+
+ // This unmarshaller is necessary for the /version/cluster resource.
+ // That resource does not support protobufs, so it must be requested
+ // and interpreted as XML.
+ private static volatile Unmarshaller versionClusterUnmarshaller;
+
+ /**
+ * Constructor
+ *
+ * @param client
+ * @param conf
+ */
+ public RemoteAdmin(Client client, Configuration conf) {
+ this(client, conf, null);
+ }
+
+ static Unmarshaller getUnmarshaller() throws JAXBException {
+ if (versionClusterUnmarshaller == null) {
+ RemoteAdmin.versionClusterUnmarshaller = JAXBContext.newInstance(
+ StorageClusterVersionModel.class).createUnmarshaller();
+ }
+ return RemoteAdmin.versionClusterUnmarshaller;
+ }
+
+ /**
+ * Constructor
+ * @param client
+ * @param conf
+ * @param accessToken
+ */
+ public RemoteAdmin(Client client, Configuration conf, String accessToken) {
+ this.client = client;
+ this.conf = conf;
+ this.accessToken = accessToken;
+ this.maxRetries = conf.getInt("hbase.rest.client.max.retries", 10);
+ this.sleepTime = conf.getLong("hbase.rest.client.sleep", 1000);
+ }
+
+ /**
+ * @param tableName name of table to check
+ * @return true if all regions of the table are available
+ * @throws IOException if a remote or network exception occurs
+ */
+ public boolean isTableAvailable(String tableName) throws IOException {
+ return isTableAvailable(Bytes.toBytes(tableName));
+ }
+
+ /**
+ * @return a VersionModel representing the REST API's version
+ * @throws IOException
+ * if the endpoint does not exist, there is a timeout, or some other
+ * general failure mode
+ */
+ public VersionModel getRestVersion() throws IOException {
+
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ if (accessToken != null) {
+ path.append(accessToken);
+ path.append('/');
+ }
+
+ path.append("version/rest");
+
+ int code = 0;
+ for (int i = 0; i < maxRetries; i++) {
+ Response response = client.get(path.toString(),
+ Constants.MIMETYPE_PROTOBUF);
+ code = response.getCode();
+ switch (code) {
+ case 200:
+ VersionModel v = new VersionModel();
+ return (VersionModel) v.getObjectFromMessage(response.getBody());
+ case 404:
+ throw new IOException("REST version not found");
+ case 509:
+ try {
+ Thread.sleep(sleepTime);
+ } catch (InterruptedException e) {
+ }
+ break;
+ default:
+ throw new IOException("get request to " + path.toString()
+ + " returned " + code);
+ }
+ }
+ throw new IOException("get request to " + path.toString() + " timed out");
+ }
+
+ /**
+ * @return a StorageClusterStatusModel representing the cluster's status
+ * @throws IOException if the endpoint does not exist, there is a timeout, or some other general failure mode
+ */
+ public StorageClusterStatusModel getClusterStatus() throws IOException {
+
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ if (accessToken != null) {
+ path.append(accessToken);
+ path.append('/');
+ }
+
+ path.append("status/cluster");
+
+ int code = 0;
+ for (int i = 0; i < maxRetries; i++) {
+ Response response = client.get(path.toString(),
+ Constants.MIMETYPE_PROTOBUF);
+ code = response.getCode();
+ switch (code) {
+ case 200:
+ StorageClusterStatusModel s = new StorageClusterStatusModel();
+ return (StorageClusterStatusModel) s.getObjectFromMessage(response
+ .getBody());
+ case 404:
+ throw new IOException("Cluster version not found");
+ case 509:
+ try {
+ Thread.sleep(sleepTime);
+ } catch (InterruptedException e) {
+ }
+ break;
+ default:
+ throw new IOException("get request to " + path + " returned " + code);
+ }
+ }
+ throw new IOException("get request to " + path + " timed out");
+ }
+
+ /**
+ * @return a StorageClusterVersionModel representing the cluster's version
+ * @throws IOException
+ * if the endpoint does not exist, there is a timeout, or some other
+ * general failure mode
+ */
+ public StorageClusterVersionModel getClusterVersion() throws IOException {
+
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ if (accessToken != null) {
+ path.append(accessToken);
+ path.append('/');
+ }
+
+ path.append("version/cluster");
+
+ int code = 0;
+ for (int i = 0; i < maxRetries; i++) {
+ Response response = client.get(path.toString(), Constants.MIMETYPE_XML);
+ code = response.getCode();
+ switch (code) {
+ case 200:
+ try {
+ return (StorageClusterVersionModel) getUnmarshaller().unmarshal(
+ new ByteArrayInputStream(response.getBody()));
+ } catch (JAXBException jaxbe) {
+ throw new IOException(
+ "Failed to parse StorageClusterVersionModel from XML response", jaxbe);
+ }
+ case 404:
+ throw new IOException("Cluster version not found");
+ case 509:
+ try {
+ Thread.sleep(sleepTime);
+ } catch (InterruptedException e) {
+ }
+ break;
+ default:
+ throw new IOException(path.toString() + " request returned " + code);
+ }
+ }
+ throw new IOException("get request to " + path.toString()
+ + " request timed out");
+ }
+
+ /**
+ * @param tableName name of table to check
+ * @return true if all regions of the table are available
+ * @throws IOException if a remote or network exception occurs
+ */
+ public boolean isTableAvailable(byte[] tableName) throws IOException {
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ if (accessToken != null) {
+ path.append(accessToken);
+ path.append('/');
+ }
+ path.append(Bytes.toStringBinary(tableName));
+ path.append('/');
+ path.append("exists");
+ int code = 0;
+ for (int i = 0; i < maxRetries; i++) {
+ Response response = client.get(path.toString(), Constants.MIMETYPE_PROTOBUF);
+ code = response.getCode();
+ switch (code) {
+ case 200:
+ return true;
+ case 404:
+ return false;
+ case 509:
+ try {
+ Thread.sleep(sleepTime);
+ } catch (InterruptedException e) { }
+ break;
+ default:
+ throw new IOException("get request to " + path.toString() + " returned " + code);
+ }
+ }
+ throw new IOException("get request to " + path.toString() + " timed out");
+ }
+
+ /**
+ * Creates a new table.
+ * @param desc table descriptor for table
+ * @throws IOException if a remote or network exception occurs
+ */
+ public void createTable(HTableDescriptor desc)
+ throws IOException {
+ TableSchemaModel model = new TableSchemaModel(desc);
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ if (accessToken != null) {
+ path.append(accessToken);
+ path.append('/');
+ }
+ path.append(desc.getTableName());
+ path.append('/');
+ path.append("schema");
+ int code = 0;
+ for (int i = 0; i < maxRetries; i++) {
+ Response response = client.put(path.toString(), Constants.MIMETYPE_PROTOBUF,
+ model.createProtobufOutput());
+ code = response.getCode();
+ switch (code) {
+ case 201:
+ return;
+ case 509:
+ try {
+ Thread.sleep(sleepTime);
+ } catch (InterruptedException e) { }
+ break;
+ default:
+ throw new IOException("create request to " + path.toString() + " returned " + code);
+ }
+ }
+ throw new IOException("create request to " + path.toString() + " timed out");
+ }
+
+ /**
+ * Deletes a table.
+ * @param tableName name of table to delete
+ * @throws IOException if a remote or network exception occurs
+ */
+ public void deleteTable(final String tableName) throws IOException {
+ deleteTable(Bytes.toBytes(tableName));
+ }
+
+ /**
+ * Deletes a table.
+ * @param tableName name of table to delete
+ * @throws IOException if a remote or network exception occurs
+ */
+ public void deleteTable(final byte [] tableName) throws IOException {
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ if (accessToken != null) {
+ path.append(accessToken);
+ path.append('/');
+ }
+ path.append(Bytes.toStringBinary(tableName));
+ path.append('/');
+ path.append("schema");
+ int code = 0;
+ for (int i = 0; i < maxRetries; i++) {
+ Response response = client.delete(path.toString());
+ code = response.getCode();
+ switch (code) {
+ case 200:
+ return;
+ case 509:
+ try {
+ Thread.sleep(sleepTime);
+ } catch (InterruptedException e) { }
+ break;
+ default:
+ throw new IOException("delete request to " + path.toString() + " returned " + code);
+ }
+ }
+ throw new IOException("delete request to " + path.toString() + " timed out");
+ }
+
+ /**
+ * @return a TableListModel listing the available tables
+ * @throws IOException
+ * if the endpoint does not exist, there is a timeout, or some other
+ * general failure mode
+ */
+ public TableListModel getTableList() throws IOException {
+
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ if (accessToken != null) {
+ path.append(accessToken);
+ path.append('/');
+ }
+
+ int code = 0;
+ for (int i = 0; i < maxRetries; i++) {
+ Response response = client.get(path.toString(),
+ Constants.MIMETYPE_PROTOBUF);
+ code = response.getCode();
+ switch (code) {
+ case 200:
+ TableListModel t = new TableListModel();
+ return (TableListModel) t.getObjectFromMessage(response.getBody());
+ case 404:
+ throw new IOException("Table list not found");
+ case 509:
+ try {
+ Thread.sleep(sleepTime);
+ } catch (InterruptedException e) {
+ }
+ break;
+ default:
+ throw new IOException("get request to " + path.toString()
+ + " request returned " + code);
+ }
+ }
+ throw new IOException("get request to " + path.toString()
+ + " request timed out");
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
new file mode 100644
index 0000000..fbede44
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
@@ -0,0 +1,825 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.client;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Row;
+import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.io.TimeRange;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.rest.Constants;
+import org.apache.hadoop.hbase.rest.model.CellModel;
+import org.apache.hadoop.hbase.rest.model.CellSetModel;
+import org.apache.hadoop.hbase.rest.model.RowModel;
+import org.apache.hadoop.hbase.rest.model.ScannerModel;
+import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.util.StringUtils;
+
+import com.google.protobuf.Descriptors;
+import com.google.protobuf.Message;
+import com.google.protobuf.Service;
+import com.google.protobuf.ServiceException;
+
+/**
+ * HTable interface to remote tables accessed via REST gateway
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class RemoteHTable implements HTableInterface {
+
+ private static final Log LOG = LogFactory.getLog(RemoteHTable.class);
+
+ final Client client;
+ final Configuration conf;
+ final byte[] name;
+ final int maxRetries;
+ final long sleepTime;
+
+ @SuppressWarnings("rawtypes")
+ protected String buildRowSpec(final byte[] row, final Map familyMap,
+ final long startTime, final long endTime, final int maxVersions) {
+ StringBuilder sb = new StringBuilder();
+ sb.append('/');
+ sb.append(Bytes.toStringBinary(name));
+ sb.append('/');
+ sb.append(Bytes.toStringBinary(row));
+ if (!familyMap.isEmpty()) {
+ Iterator i = familyMap.entrySet().iterator();
+ sb.append('/');
+ while (i.hasNext()) {
+ Map.Entry e = (Map.Entry)i.next();
+ Collection quals = (Collection)e.getValue();
+ if (quals == null || quals.isEmpty()) {
+ // this is an unqualified family. append the family name and NO ':'
+ sb.append(Bytes.toStringBinary((byte[])e.getKey()));
+ } else {
+ Iterator ii = quals.iterator();
+ while (ii.hasNext()) {
+ sb.append(Bytes.toStringBinary((byte[])e.getKey()));
+ sb.append(':');
+ Object o = ii.next();
+ // Puts use byte[] but Deletes use KeyValue
+ if (o instanceof byte[]) {
+ sb.append(Bytes.toStringBinary((byte[])o));
+ } else if (o instanceof KeyValue) {
+ sb.append(Bytes.toStringBinary(((KeyValue)o).getQualifier()));
+ } else {
+ throw new RuntimeException("object type not handled");
+ }
+ if (ii.hasNext()) {
+ sb.append(',');
+ }
+ }
+ }
+ if (i.hasNext()) {
+ sb.append(',');
+ }
+ }
+ }
+ if (startTime >= 0 && endTime != Long.MAX_VALUE) {
+ sb.append('/');
+ sb.append(startTime);
+ if (startTime != endTime) {
+ sb.append(',');
+ sb.append(endTime);
+ }
+ } else if (endTime != Long.MAX_VALUE) {
+ sb.append('/');
+ sb.append(endTime);
+ }
+ if (maxVersions > 1) {
+ sb.append("?v=");
+ sb.append(maxVersions);
+ }
+ return sb.toString();
+ }
+
+ protected String buildMultiRowSpec(final byte[][] rows, int maxVersions) {
+ StringBuilder sb = new StringBuilder();
+ sb.append('/');
+ sb.append(Bytes.toStringBinary(name));
+ sb.append("/multiget/");
+ if (rows == null || rows.length == 0) {
+ return sb.toString();
+ }
+ sb.append("?");
+ for(int i=0; i<rows.length; i++) {
+ byte[] rk = rows[i];
+ if (i != 0) {
+ sb.append('&');
+ }
+ sb.append("row=");
+ sb.append(Bytes.toStringBinary(rk));
+ }
+ sb.append("&v=");
+ sb.append(maxVersions);
+
+ return sb.toString();
+ }
+
+ protected Result[] buildResultFromModel(final CellSetModel model) {
+ List<Result> results = new ArrayList<Result>();
+ for (RowModel row: model.getRows()) {
+ List<Cell> kvs = new ArrayList<Cell>();
+ for (CellModel cell: row.getCells()) {
+ byte[][] split = KeyValue.parseColumn(cell.getColumn());
+ byte[] column = split[0];
+ byte[] qualifier = null;
+ if (split.length == 1) {
+ qualifier = HConstants.EMPTY_BYTE_ARRAY;
+ } else if (split.length == 2) {
+ qualifier = split[1];
+ } else {
+ throw new IllegalArgumentException("Invalid familyAndQualifier provided.");
+ }
+ kvs.add(new KeyValue(row.getKey(), column, qualifier,
+ cell.getTimestamp(), cell.getValue()));
+ }
+ results.add(Result.create(kvs));
+ }
+ return results.toArray(new Result[results.size()]);
+ }
+
+ protected CellSetModel buildModelFromPut(Put put) {
+ RowModel row = new RowModel(put.getRow());
+ long ts = put.getTimeStamp();
+ for (List<Cell> cells: put.getFamilyCellMap().values()) {
+ for (Cell cell: cells) {
+ KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
+ row.addCell(new CellModel(kv.getFamily(), kv.getQualifier(),
+ ts != HConstants.LATEST_TIMESTAMP ? ts : kv.getTimestamp(),
+ kv.getValue()));
+ }
+ }
+ CellSetModel model = new CellSetModel();
+ model.addRow(row);
+ return model;
+ }
+
+ /**
+ * Constructor
+ * @param client
+ * @param name
+ */
+ public RemoteHTable(Client client, String name) {
+ this(client, HBaseConfiguration.create(), Bytes.toBytes(name));
+ }
+
+ /**
+ * Constructor
+ * @param client
+ * @param conf
+ * @param name
+ */
+ public RemoteHTable(Client client, Configuration conf, String name) {
+ this(client, conf, Bytes.toBytes(name));
+ }
+
+ /**
+ * Constructor
+ * @param client
+ * @param conf
+ * @param name
+ */
+ public RemoteHTable(Client client, Configuration conf, byte[] name) {
+ this.client = client;
+ this.conf = conf;
+ this.name = name;
+ this.maxRetries = conf.getInt("hbase.rest.client.max.retries", 10);
+ this.sleepTime = conf.getLong("hbase.rest.client.sleep", 1000);
+ }
+
+ public byte[] getTableName() {
+ return name.clone();
+ }
+
+ @Override
+ public TableName getName() {
+ return TableName.valueOf(name);
+ }
+
+ public Configuration getConfiguration() {
+ return conf;
+ }
+
+ public HTableDescriptor getTableDescriptor() throws IOException {
+ StringBuilder sb = new StringBuilder();
+ sb.append('/');
+ sb.append(Bytes.toStringBinary(name));
+ sb.append('/');
+ sb.append("schema");
+ for (int i = 0; i < maxRetries; i++) {
+ Response response = client.get(sb.toString(), Constants.MIMETYPE_PROTOBUF);
+ int code = response.getCode();
+ switch (code) {
+ case 200:
+ TableSchemaModel schema = new TableSchemaModel();
+ schema.getObjectFromMessage(response.getBody());
+ return schema.getTableDescriptor();
+ case 509:
+ try {
+ Thread.sleep(sleepTime);
+ } catch (InterruptedException e) { }
+ break;
+ default:
+ throw new IOException("schema request returned " + code);
+ }
+ }
+ throw new IOException("schema request timed out");
+ }
+
+ public void close() throws IOException {
+ client.shutdown();
+ }
+
+ public Result get(Get get) throws IOException {
+ TimeRange range = get.getTimeRange();
+ String spec = buildRowSpec(get.getRow(), get.getFamilyMap(),
+ range.getMin(), range.getMax(), get.getMaxVersions());
+ if (get.getFilter() != null) {
+ LOG.warn("filters not supported on gets");
+ }
+ Result[] results = getResults(spec);
+ if (results.length > 0) {
+ if (results.length > 1) {
+ LOG.warn("too many results for get (" + results.length + ")");
+ }
+ return results[0];
+ } else {
+ return new Result();
+ }
+ }
+
+ public Result[] get(List<Get> gets) throws IOException {
+ byte[][] rows = new byte[gets.size()][];
+ int maxVersions = 1;
+ int count = 0;
+
+ for (Get g: gets) {
+ if (count == 0) {
+ maxVersions = g.getMaxVersions();
+ } else if (g.getMaxVersions() != maxVersions) {
+ LOG.warn("MaxVersions on Gets do not match, using the first in the list ("
+ + maxVersions + ")");
+ }
+ if (g.getFilter() != null) {
+ LOG.warn("filters not supported on gets");
+ }
+ rows[count] = g.getRow();
+ count++;
+ }
+
+ String spec = buildMultiRowSpec(rows, maxVersions);
+
+ return getResults(spec);
+ }
+
+ private Result[] getResults(String spec) throws IOException {
+ for (int i = 0; i < maxRetries; i++) {
+ Response response = client.get(spec, Constants.MIMETYPE_PROTOBUF);
+ int code = response.getCode();
+ switch (code) {
+ case 200:
+ CellSetModel model = new CellSetModel();
+ model.getObjectFromMessage(response.getBody());
+ Result[] results = buildResultFromModel(model);
+ if (results.length > 0) {
+ return results;
+ }
+ // fall through
+ case 404:
+ return new Result[0];
+
+ case 509:
+ try {
+ Thread.sleep(sleepTime);
+ } catch (InterruptedException e) { }
+ break;
+ default:
+ throw new IOException("get request returned " + code);
+ }
+ }
+ throw new IOException("get request timed out");
+ }
+
+ public boolean exists(Get get) throws IOException {
+ LOG.warn("exists() is really get(), just use get()");
+ Result result = get(get);
+ return (result != null && !(result.isEmpty()));
+ }
+
+ /**
+ * exists(List) is really a list of get() calls. Just use get().
+ * @param gets list of Get to test for the existence
+ */
+ public Boolean[] exists(List<Get> gets) throws IOException {
+ LOG.warn("exists(List<Get>) is really list of get() calls, just use get()");
+ Boolean[] results = new Boolean[gets.size()];
+ for (int i = 0; i < results.length; i++) {
+ results[i] = exists(gets.get(i));
+ }
+ return results;
+ }
+
+ public void put(Put put) throws IOException {
+ CellSetModel model = buildModelFromPut(put);
+ StringBuilder sb = new StringBuilder();
+ sb.append('/');
+ sb.append(Bytes.toStringBinary(name));
+ sb.append('/');
+ sb.append(Bytes.toStringBinary(put.getRow()));
+ for (int i = 0; i < maxRetries; i++) {
+ Response response = client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF,
+ model.createProtobufOutput());
+ int code = response.getCode();
+ switch (code) {
+ case 200:
+ return;
+ case 509:
+ try {
+ Thread.sleep(sleepTime);
+ } catch (InterruptedException e) { }
+ break;
+ default:
+ throw new IOException("put request failed with " + code);
+ }
+ }
+ throw new IOException("put request timed out");
+ }
+
+ public void put(List<Put> puts) throws IOException {
+ // this is a trick: The gateway accepts multiple rows in a cell set and
+ // ignores the row specification in the URI
+
+ // separate puts by row
+ TreeMap<byte[],List<Cell>> map =
+ new TreeMap<byte[],List<Cell>>(Bytes.BYTES_COMPARATOR);
+ for (Put put: puts) {
+ byte[] row = put.getRow();
+ List<Cell> cells = map.get(row);
+ if (cells == null) {
+ cells = new ArrayList<Cell>();
+ map.put(row, cells);
+ }
+ for (List<Cell> l: put.getFamilyCellMap().values()) {
+ cells.addAll(l);
+ }
+ }
+
+ // build the cell set
+ CellSetModel model = new CellSetModel();
+ for (Map.Entry<byte[], List<Cell>> e: map.entrySet()) {
+ RowModel row = new RowModel(e.getKey());
+ for (Cell cell: e.getValue()) {
+ KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
+ row.addCell(new CellModel(kv));
+ }
+ model.addRow(row);
+ }
+
+ // build path for multiput
+ StringBuilder sb = new StringBuilder();
+ sb.append('/');
+ sb.append(Bytes.toStringBinary(name));
+ sb.append("/$multiput"); // can be any nonexistent row
+ for (int i = 0; i < maxRetries; i++) {
+ Response response = client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF,
+ model.createProtobufOutput());
+ int code = response.getCode();
+ switch (code) {
+ case 200:
+ return;
+ case 509:
+ try {
+ Thread.sleep(sleepTime);
+ } catch (InterruptedException e) { }
+ break;
+ default:
+ throw new IOException("multiput request failed with " + code);
+ }
+ }
+ throw new IOException("multiput request timed out");
+ }
+
+ public void delete(Delete delete) throws IOException {
+ String spec = buildRowSpec(delete.getRow(), delete.getFamilyCellMap(),
+ delete.getTimeStamp(), delete.getTimeStamp(), 1);
+ for (int i = 0; i < maxRetries; i++) {
+ Response response = client.delete(spec);
+ int code = response.getCode();
+ switch (code) {
+ case 200:
+ return;
+ case 509:
+ try {
+ Thread.sleep(sleepTime);
+ } catch (InterruptedException e) { }
+ break;
+ default:
+ throw new IOException("delete request failed with " + code);
+ }
+ }
+ throw new IOException("delete request timed out");
+ }
+
+ public void delete(List<Delete> deletes) throws IOException {
+ for (Delete delete: deletes) {
+ delete(delete);
+ }
+ }
+
+ public void flushCommits() throws IOException {
+ // no-op
+ }
+
+ class Scanner implements ResultScanner {
+
+ String uri;
+
+ public Scanner(Scan scan) throws IOException {
+ ScannerModel model;
+ try {
+ model = ScannerModel.fromScan(scan);
+ } catch (Exception e) {
+ throw new IOException(e);
+ }
+ StringBuilder sb = new StringBuilder();
+ sb.append('/');
+ sb.append(Bytes.toStringBinary(name));
+ sb.append('/');
+ sb.append("scanner");
+ for (int i = 0; i < maxRetries; i++) {
+ Response response = client.post(sb.toString(),
+ Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
+ int code = response.getCode();
+ switch (code) {
+ case 201:
+ uri = response.getLocation();
+ return;
+ case 509:
+ try {
+ Thread.sleep(sleepTime);
+ } catch (InterruptedException e) { }
+ break;
+ default:
+ throw new IOException("scan request failed with " + code);
+ }
+ }
+ throw new IOException("scan request timed out");
+ }
+
+ @Override
+ public Result[] next(int nbRows) throws IOException {
+ StringBuilder sb = new StringBuilder(uri);
+ sb.append("?n=");
+ sb.append(nbRows);
+ for (int i = 0; i < maxRetries; i++) {
+ Response response = client.get(sb.toString(),
+ Constants.MIMETYPE_PROTOBUF);
+ int code = response.getCode();
+ switch (code) {
+ case 200:
+ CellSetModel model = new CellSetModel();
+ model.getObjectFromMessage(response.getBody());
+ return buildResultFromModel(model);
+ case 204:
+ case 206:
+ return null;
+ case 509:
+ try {
+ Thread.sleep(sleepTime);
+ } catch (InterruptedException e) { }
+ break;
+ default:
+ throw new IOException("scanner.next request failed with " + code);
+ }
+ }
+ throw new IOException("scanner.next request timed out");
+ }
+
+ @Override
+ public Result next() throws IOException {
+ Result[] results = next(1);
+ if (results == null || results.length < 1) {
+ return null;
+ }
+ return results[0];
+ }
+
+ class Iter implements Iterator<Result> {
+
+ Result cache;
+
+ public Iter() {
+ try {
+ cache = Scanner.this.next();
+ } catch (IOException e) {
+ LOG.warn(StringUtils.stringifyException(e));
+ }
+ }
+
+ @Override
+ public boolean hasNext() {
+ return cache != null;
+ }
+
+ @Override
+ public Result next() {
+ Result result = cache;
+ try {
+ cache = Scanner.this.next();
+ } catch (IOException e) {
+ LOG.warn(StringUtils.stringifyException(e));
+ cache = null;
+ }
+ return result;
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException("remove() not supported");
+ }
+
+ }
+
+ @Override
+ public Iterator<Result> iterator() {
+ return new Iter();
+ }
+
+ @Override
+ public void close() {
+ try {
+ client.delete(uri);
+ } catch (IOException e) {
+ LOG.warn(StringUtils.stringifyException(e));
+ }
+ }
+
+ }
+
+ public ResultScanner getScanner(Scan scan) throws IOException {
+ return new Scanner(scan);
+ }
+
+ public ResultScanner getScanner(byte[] family) throws IOException {
+ Scan scan = new Scan();
+ scan.addFamily(family);
+ return new Scanner(scan);
+ }
+
+ public ResultScanner getScanner(byte[] family, byte[] qualifier)
+ throws IOException {
+ Scan scan = new Scan();
+ scan.addColumn(family, qualifier);
+ return new Scanner(scan);
+ }
+
+ public boolean isAutoFlush() {
+ return true;
+ }
+
+ public Result getRowOrBefore(byte[] row, byte[] family) throws IOException {
+ throw new IOException("getRowOrBefore not supported");
+ }
+
+ public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
+ byte[] value, Put put) throws IOException {
+ // add the check cell (the value to compare against) to the put
+ put.add(new KeyValue(row, family, qualifier, value));
+
+ CellSetModel model = buildModelFromPut(put);
+ StringBuilder sb = new StringBuilder();
+ sb.append('/');
+ sb.append(Bytes.toStringBinary(name));
+ sb.append('/');
+ sb.append(Bytes.toStringBinary(put.getRow()));
+ sb.append("?check=put");
+
+ for (int i = 0; i < maxRetries; i++) {
+ Response response = client.put(sb.toString(),
+ Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
+ int code = response.getCode();
+ switch (code) {
+ case 200:
+ return true;
+ case 304: // NOT-MODIFIED
+ return false;
+ case 509:
+ try {
+ Thread.sleep(sleepTime);
+ } catch (final InterruptedException e) {
+ }
+ break;
+ default:
+ throw new IOException("checkAndPut request failed with " + code);
+ }
+ }
+ throw new IOException("checkAndPut request timed out");
+ }
+
+ public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
+ byte[] value, Delete delete) throws IOException {
+ Put put = new Put(row);
+ // add the check cell (the value to compare against) to the put
+ put.add(new KeyValue(row, family, qualifier, value));
+ CellSetModel model = buildModelFromPut(put);
+ StringBuilder sb = new StringBuilder();
+ sb.append('/');
+ sb.append(Bytes.toStringBinary(name));
+ sb.append('/');
+ sb.append(Bytes.toStringBinary(row));
+ sb.append("?check=delete");
+
+ for (int i = 0; i < maxRetries; i++) {
+ Response response = client.put(sb.toString(),
+ Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
+ int code = response.getCode();
+ switch (code) {
+ case 200:
+ return true;
+ case 304: // NOT-MODIFIED
+ return false;
+ case 509:
+ try {
+ Thread.sleep(sleepTime);
+ } catch (final InterruptedException e) {
+ }
+ break;
+ default:
+ throw new IOException("checkAndDelete request failed with " + code);
+ }
+ }
+ throw new IOException("checkAndDelete request timed out");
+ }
+
+ public Result increment(Increment increment) throws IOException {
+ throw new IOException("Increment not supported");
+ }
+
+ public Result append(Append append) throws IOException {
+ throw new IOException("Append not supported");
+ }
+
+ public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
+ long amount) throws IOException {
+ throw new IOException("incrementColumnValue not supported");
+ }
+
+ public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
+ long amount, Durability durability) throws IOException {
+ throw new IOException("incrementColumnValue not supported");
+ }
+
+ @Override
+ public void batch(List<? extends Row> actions, Object[] results) throws IOException {
+ throw new IOException("batch not supported");
+ }
+
+ @Override
+ public Object[] batch(List<? extends Row> actions) throws IOException {
+ throw new IOException("batch not supported");
+ }
+
+ @Override
+ public <R> void batchCallback(List<? extends Row> actions, Object[] results,
+ Batch.Callback<R> callback) throws IOException, InterruptedException {
+ throw new IOException("batchCallback not supported");
+ }
+
+ @Override
+ public <R> Object[] batchCallback(List<? extends Row> actions, Batch.Callback<R> callback)
+ throws IOException, InterruptedException {
+ throw new IOException("batchCallback not supported");
+ }
+
+ @Override
+ public CoprocessorRpcChannel coprocessorService(byte[] row) {
+ throw new UnsupportedOperationException("coprocessorService not implemented");
+ }
+
+ @Override
+ public <T extends Service, R> Map<byte[], R> coprocessorService(Class<T> service,
+ byte[] startKey, byte[] endKey, Batch.Call<T, R> callable)
+ throws ServiceException, Throwable {
+ throw new UnsupportedOperationException("coprocessorService not implemented");
+ }
+
+ @Override
+ public <T extends Service, R> void coprocessorService(Class<T> service,
+ byte[] startKey, byte[] endKey, Batch.Call<T, R> callable, Batch.Callback<R> callback)
+ throws ServiceException, Throwable {
+ throw new UnsupportedOperationException("coprocessorService not implemented");
+ }
+
+ @Override
+ public void mutateRow(RowMutations rm) throws IOException {
+ throw new IOException("atomicMutation not supported");
+ }
+
+ @Override
+ public void setAutoFlush(boolean autoFlush) {
+ throw new UnsupportedOperationException("setAutoFlush not implemented");
+ }
+
+ @Override
+ public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) {
+ throw new UnsupportedOperationException("setAutoFlush not implemented");
+ }
+
+ @Override
+ public void setAutoFlushTo(boolean autoFlush) {
+ throw new UnsupportedOperationException("setAutoFlushTo not implemented");
+ }
+
+ @Override
+ public long getWriteBufferSize() {
+ throw new UnsupportedOperationException("getWriteBufferSize not implemented");
+ }
+
+ @Override
+ public void setWriteBufferSize(long writeBufferSize) throws IOException {
+ throw new IOException("setWriteBufferSize not supported");
+ }
+
+ @Override
+ public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
+ long amount, boolean writeToWAL) throws IOException {
+ throw new IOException("incrementColumnValue not supported");
+ }
+
+ @Override
+ public <R extends Message> Map<byte[], R> batchCoprocessorService(
+ Descriptors.MethodDescriptor method, Message request,
+ byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable {
+ throw new UnsupportedOperationException("batchCoprocessorService not implemented");
+ }
+
+ @Override
+ public <R extends Message> void batchCoprocessorService(
+ Descriptors.MethodDescriptor method, Message request,
+ byte[] startKey, byte[] endKey, R responsePrototype, Callback<R> callback)
+ throws ServiceException, Throwable {
+ throw new UnsupportedOperationException("batchCoprocessorService not implemented");
+ }
+
+ @Override
+ public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp,
+ byte[] value, RowMutations mutation) throws IOException {
+ throw new UnsupportedOperationException("checkAndMutate not implemented");
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java
new file mode 100644
index 0000000..871b646
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java
@@ -0,0 +1,155 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.client;
+
+import java.io.InputStream;
+
+import org.apache.commons.httpclient.Header;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * The HTTP result code, response headers, and body of an HTTP response.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class Response {
+ private int code;
+ private Header[] headers;
+ private byte[] body;
+ private InputStream stream;
+
+ /**
+ * Constructor
+ * @param code the HTTP response code
+ */
+ public Response(int code) {
+ this(code, null, null);
+ }
+
+ /**
+ * Constructor
+ * @param code the HTTP response code
+ * @param headers the HTTP response headers
+ */
+ public Response(int code, Header[] headers) {
+ this(code, headers, null);
+ }
+
+ /**
+ * Constructor
+ * @param code the HTTP response code
+ * @param headers the HTTP response headers
+ * @param body the response body, can be null
+ */
+ public Response(int code, Header[] headers, byte[] body) {
+ this.code = code;
+ this.headers = headers;
+ this.body = body;
+ }
+
+ /**
+ * Constructor
+ * @param code the HTTP response code
+ * @param headers the HTTP response headers
+ * @param body the response body, can be null
+ * @param in an InputStream, if the response has one
+ */
+ public Response(int code, Header[] headers, byte[] body, InputStream in) {
+ this.code = code;
+ this.headers = headers;
+ this.body = body;
+ this.stream = in;
+ }
+
+ /**
+ * @return the HTTP response code
+ */
+ public int getCode() {
+ return code;
+ }
+
+ /**
+ * @return the response body as an InputStream, or null if none was set
+ */
+ public InputStream getStream() {
+ return this.stream;
+ }
+
+ /**
+ * @return the HTTP response headers
+ */
+ public Header[] getHeaders() {
+ return headers;
+ }
+
+ public String getHeader(String key) {
+ if (headers == null) {
+ return null;
+ }
+ for (Header header: headers) {
+ if (header.getName().equalsIgnoreCase(key)) {
+ return header.getValue();
+ }
+ }
+ return null;
+ }
+
+ /**
+ * @return the value of the Location header
+ */
+ public String getLocation() {
+ return getHeader("Location");
+ }
+
+ /**
+ * @return true if a response body was sent
+ */
+ public boolean hasBody() {
+ return body != null;
+ }
+
+ /**
+ * @return the HTTP response body
+ */
+ public byte[] getBody() {
+ return body;
+ }
+
+ /**
+ * @param code the HTTP response code
+ */
+ public void setCode(int code) {
+ this.code = code;
+ }
+
+ /**
+ * @param headers the HTTP response headers
+ */
+ public void setHeaders(Header[] headers) {
+ this.headers = headers;
+ }
+
+ /**
+ * @param body the response body
+ */
+ public void setBody(byte[] body) {
+ this.body = body;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java
new file mode 100644
index 0000000..6d68cdd
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.filter;
+
+import static org.apache.hadoop.hbase.rest.Constants.REST_AUTHENTICATION_PRINCIPAL;
+import static org.apache.hadoop.hbase.rest.Constants.REST_DNS_INTERFACE;
+import static org.apache.hadoop.hbase.rest.Constants.REST_DNS_NAMESERVER;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Properties;
+
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.util.Strings;
+import org.apache.hadoop.net.DNS;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+
+public class AuthFilter extends AuthenticationFilter {
+ private static final Log LOG = LogFactory.getLog(AuthFilter.class);
+ private static final String REST_PREFIX = "hbase.rest.authentication.";
+ private static final int REST_PREFIX_LEN = REST_PREFIX.length();
+
+ /**
+ * Returns the configuration to be used by the authentication filter
+ * to initialize the authentication handler.
+ *
+ * This filter retrieves all HBase configuration properties and passes
+ * those starting with REST_PREFIX to the authentication handler. This
+ * makes it possible to plug in different authentication handlers.
+ */
+ @Override
+ protected Properties getConfiguration(
+ String configPrefix, FilterConfig filterConfig) throws ServletException {
+ Properties props = super.getConfiguration(configPrefix, filterConfig);
+ // Set the cookie path to root '/' so it is used for all resources.
+ props.setProperty(AuthenticationFilter.COOKIE_PATH, "/");
+
+ Configuration conf = HBaseConfiguration.create();
+ for (Map.Entry<String, String> entry : conf) {
+ String name = entry.getKey();
+ if (name.startsWith(REST_PREFIX)) {
+ String value = entry.getValue();
+ if (name.equals(REST_AUTHENTICATION_PRINCIPAL)) {
+ try {
+ String machineName = Strings.domainNamePointerToHostName(
+ DNS.getDefaultHost(conf.get(REST_DNS_INTERFACE, "default"),
+ conf.get(REST_DNS_NAMESERVER, "default")));
+ value = SecurityUtil.getServerPrincipal(value, machineName);
+ } catch (IOException ie) {
+ throw new ServletException("Failed to retrieve server principal", ie);
+ }
+ }
+ LOG.debug("Setting property " + name + "=" + value);
+ name = name.substring(REST_PREFIX_LEN);
+ props.setProperty(name, value);
+ }
+ }
+ return props;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java
new file mode 100644
index 0000000..02957e9
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java
@@ -0,0 +1,58 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.filter;
+
+import java.io.IOException;
+import java.util.zip.GZIPInputStream;
+
+import javax.servlet.ServletInputStream;
+import javax.servlet.http.HttpServletRequest;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
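+// ServletInputStream that transparently decompresses a gzip-encoded
+// request body.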
+@InterfaceAudience.Private
+public class GZIPRequestStream extends ServletInputStream
+{
+ private GZIPInputStream in;
+
+ public GZIPRequestStream(HttpServletRequest request) throws IOException {
+ this.in = new GZIPInputStream(request.getInputStream());
+ }
+
+ @Override
+ public int read() throws IOException {
+ return in.read();
+ }
+
+ @Override
+ public int read(byte[] b) throws IOException {
+ return in.read(b);
+ }
+
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ return in.read(b, off, len);
+ }
+
+ @Override
+ public void close() throws IOException {
+ in.close();
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java
new file mode 100644
index 0000000..361e442
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java
@@ -0,0 +1,52 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.filter;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+
+import javax.servlet.ServletInputStream;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletRequestWrapper;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
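+// HttpServletRequestWrapper that serves the request body through a
+// GZIPRequestStream, inflating gzip-encoded content on the fly.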
+@InterfaceAudience.Private
+public class GZIPRequestWrapper extends HttpServletRequestWrapper {
+ private ServletInputStream is;
+ private BufferedReader reader;
+
+ public GZIPRequestWrapper(HttpServletRequest request) throws IOException {
+ super(request);
+ this.is = new GZIPRequestStream(request);
+ this.reader = new BufferedReader(new InputStreamReader(this.is));
+ }
+
+ @Override
+ public ServletInputStream getInputStream() throws IOException {
+ return is;
+ }
+
+ @Override
+ public BufferedReader getReader() throws IOException {
+ return reader;
+ }
+}
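
For context, a wrapper like the one above would typically be installed by a
servlet Filter when a request arrives gzip-encoded. The GzipExampleFilter
below is a hypothetical, minimal sketch for illustration only; it is not part
of this patch and assumes it lives in the same package as GZIPRequestWrapper:

import java.io.IOException;

import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;

public class GzipExampleFilter implements Filter {
  @Override
  public void init(FilterConfig config) {
  }

  @Override
  public void doFilter(ServletRequest req, ServletResponse resp,
      FilterChain chain) throws IOException, ServletException {
    if (req instanceof HttpServletRequest) {
      HttpServletRequest httpReq = (HttpServletRequest) req;
      String encoding = httpReq.getHeader("Content-Encoding");
      if (encoding != null && encoding.toLowerCase().contains("gzip")) {
        // Hand downstream code a request whose body is already inflated.
        req = new GZIPRequestWrapper(httpReq);
      }
    }
    chain.doFilter(req, resp);
  }

  @Override
  public void destroy() {
  }
}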
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java
new file mode 100644
index 0000000..cc74f9c
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java
@@ -0,0 +1,78 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.filter;
+
+import java.io.IOException;
+import java.util.zip.GZIPOutputStream;
+
+import javax.servlet.ServletOutputStream;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class GZIPResponseStream extends ServletOutputStream
+{
+ private HttpServletResponse response;
+ private GZIPOutputStream out;
+
+ public GZIPResponseStream(HttpServletResponse response) throws IOException {
+ this.response = response;
+ this.out = new GZIPOutputStream(response.getOutputStream());
+ response.addHeader("Content-Encoding", "gzip");
+ }
+
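+ // Invoked when the response buffer is reset: drop the gzip stream and, if
+ // the response has not been committed, clear the Content-Encoding header so
+ // an uncompressed body can still be written.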
+ public void resetBuffer() {
+ if (out != null && !response.isCommitted()) {
+ response.setHeader("Content-Encoding", null);
+ }
+ out = null;
+ }
+
+ @Override
+ public void write(int b) throws IOException {
+ out.write(b);
+ }
+
+ @Override
+ public void write(byte[] b) throws IOException {
+ out.write(b);
+ }
+
+ @Override
+ public void write(byte[] b, int off, int len) throws IOException {
+ out.write(b, off, len);
+ }
+
+ @Override
+ public void close() throws IOException {
+ finish();
+ out.close();
+ }
+
+ @Override
+ public void flush() throws IOException {
+ out.flush();
+ }
+
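+ // Flushes any remaining compressed data and writes the gzip trailer without
+ // closing the underlying response stream.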
+ public void finish() throws IOException {
+ out.finish();
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableSchemaMessage.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableSchemaMessage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableSchemaMessage.java
deleted file mode 100644
index 88f9cd3..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableSchemaMessage.java
+++ /dev/null
@@ -1,2125 +0,0 @@
-// Generated by the protocol buffer compiler. DO NOT EDIT!
-// source: TableSchemaMessage.proto
-
-package org.apache.hadoop.hbase.rest.protobuf.generated;
-
-public final class TableSchemaMessage {
- private TableSchemaMessage() {}
- public static void registerAllExtensions(
- com.google.protobuf.ExtensionRegistry registry) {
- }
- public interface TableSchemaOrBuilder
- extends com.google.protobuf.MessageOrBuilder {
-
- // optional string name = 1;
- /**
- * <code>optional string name = 1;</code>
- */
- boolean hasName();
- /**
- * <code>optional string name = 1;</code>
- */
- java.lang.String getName();
- /**
- * <code>optional string name = 1;</code>
- */
- com.google.protobuf.ByteString
- getNameBytes();
-
- // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute>
- getAttrsList();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute getAttrs(int index);
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- int getAttrsCount();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.AttributeOrBuilder>
- getAttrsOrBuilderList();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.AttributeOrBuilder getAttrsOrBuilder(
- int index);
-
- // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema>
- getColumnsList();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema getColumns(int index);
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- int getColumnsCount();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchemaOrBuilder>
- getColumnsOrBuilderList();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchemaOrBuilder getColumnsOrBuilder(
- int index);
-
- // optional bool inMemory = 4;
- /**
- * <code>optional bool inMemory = 4;</code>
- *
- * <pre>
- * optional helpful encodings of commonly used attributes
- * </pre>
- */
- boolean hasInMemory();
- /**
- * <code>optional bool inMemory = 4;</code>
- *
- * <pre>
- * optional helpful encodings of commonly used attributes
- * </pre>
- */
- boolean getInMemory();
-
- // optional bool readOnly = 5;
- /**
- * <code>optional bool readOnly = 5;</code>
- */
- boolean hasReadOnly();
- /**
- * <code>optional bool readOnly = 5;</code>
- */
- boolean getReadOnly();
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema}
- */
- public static final class TableSchema extends
- com.google.protobuf.GeneratedMessage
- implements TableSchemaOrBuilder {
- // Use TableSchema.newBuilder() to construct.
- private TableSchema(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private TableSchema(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final TableSchema defaultInstance;
- public static TableSchema getDefaultInstance() {
- return defaultInstance;
- }
-
- public TableSchema getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private TableSchema(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 10: {
- bitField0_ |= 0x00000001;
- name_ = input.readBytes();
- break;
- }
- case 18: {
- if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
- attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute>();
- mutable_bitField0_ |= 0x00000002;
- }
- attrs_.add(input.readMessage(org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.PARSER, extensionRegistry));
- break;
- }
- case 26: {
- if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
- columns_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema>();
- mutable_bitField0_ |= 0x00000004;
- }
- columns_.add(input.readMessage(org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.PARSER, extensionRegistry));
- break;
- }
- case 32: {
- bitField0_ |= 0x00000002;
- inMemory_ = input.readBool();
- break;
- }
- case 40: {
- bitField0_ |= 0x00000004;
- readOnly_ = input.readBool();
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
- attrs_ = java.util.Collections.unmodifiableList(attrs_);
- }
- if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
- columns_ = java.util.Collections.unmodifiableList(columns_);
- }
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.class, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Builder.class);
- }
-
- public static com.google.protobuf.Parser<TableSchema> PARSER =
- new com.google.protobuf.AbstractParser<TableSchema>() {
- public TableSchema parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new TableSchema(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<TableSchema> getParserForType() {
- return PARSER;
- }
-
- public interface AttributeOrBuilder
- extends com.google.protobuf.MessageOrBuilder {
-
- // required string name = 1;
- /**
- * <code>required string name = 1;</code>
- */
- boolean hasName();
- /**
- * <code>required string name = 1;</code>
- */
- java.lang.String getName();
- /**
- * <code>required string name = 1;</code>
- */
- com.google.protobuf.ByteString
- getNameBytes();
-
- // required string value = 2;
- /**
- * <code>required string value = 2;</code>
- */
- boolean hasValue();
- /**
- * <code>required string value = 2;</code>
- */
- java.lang.String getValue();
- /**
- * <code>required string value = 2;</code>
- */
- com.google.protobuf.ByteString
- getValueBytes();
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute}
- */
- public static final class Attribute extends
- com.google.protobuf.GeneratedMessage
- implements AttributeOrBuilder {
- // Use Attribute.newBuilder() to construct.
- private Attribute(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private Attribute(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final Attribute defaultInstance;
- public static Attribute getDefaultInstance() {
- return defaultInstance;
- }
-
- public Attribute getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private Attribute(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 10: {
- bitField0_ |= 0x00000001;
- name_ = input.readBytes();
- break;
- }
- case 18: {
- bitField0_ |= 0x00000002;
- value_ = input.readBytes();
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_Attribute_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_Attribute_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.class, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder.class);
- }
-
- public static com.google.protobuf.Parser<Attribute> PARSER =
- new com.google.protobuf.AbstractParser<Attribute>() {
- public Attribute parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new Attribute(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<Attribute> getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- // required string name = 1;
- public static final int NAME_FIELD_NUMBER = 1;
- private java.lang.Object name_;
- /**
- * <code>required string name = 1;</code>
- */
- public boolean hasName() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>required string name = 1;</code>
- */
- public java.lang.String getName() {
- java.lang.Object ref = name_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- name_ = s;
- }
- return s;
- }
- }
- /**
- * <code>required string name = 1;</code>
- */
- public com.google.protobuf.ByteString
- getNameBytes() {
- java.lang.Object ref = name_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- name_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- // required string value = 2;
- public static final int VALUE_FIELD_NUMBER = 2;
- private java.lang.Object value_;
- /**
- * <code>required string value = 2;</code>
- */
- public boolean hasValue() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>required string value = 2;</code>
- */
- public java.lang.String getValue() {
- java.lang.Object ref = value_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- value_ = s;
- }
- return s;
- }
- }
- /**
- * <code>required string value = 2;</code>
- */
- public com.google.protobuf.ByteString
- getValueBytes() {
- java.lang.Object ref = value_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- value_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- private void initFields() {
- name_ = "";
- value_ = "";
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized != -1) return isInitialized == 1;
-
- if (!hasName()) {
- memoizedIsInitialized = 0;
- return false;
- }
- if (!hasValue()) {
- memoizedIsInitialized = 0;
- return false;
- }
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBytes(1, getNameBytes());
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeBytes(2, getValueBytes());
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(1, getNameBytes());
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(2, getValueBytes());
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.AttributeOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_Attribute_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_Attribute_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.class, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder.class);
- }
-
- // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- name_ = "";
- bitField0_ = (bitField0_ & ~0x00000001);
- value_ = "";
- bitField0_ = (bitField0_ & ~0x00000002);
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_Attribute_descriptor;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute getDefaultInstanceForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.getDefaultInstance();
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute build() {
- org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute buildPartial() {
- org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute result = new org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.name_ = name_;
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
- to_bitField0_ |= 0x00000002;
- }
- result.value_ = value_;
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute) {
- return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute other) {
- if (other == org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.getDefaultInstance()) return this;
- if (other.hasName()) {
- bitField0_ |= 0x00000001;
- name_ = other.name_;
- onChanged();
- }
- if (other.hasValue()) {
- bitField0_ |= 0x00000002;
- value_ = other.value_;
- onChanged();
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- if (!hasName()) {
-
- return false;
- }
- if (!hasValue()) {
-
- return false;
- }
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- // required string name = 1;
- private java.lang.Object name_ = "";
- /**
- * <code>required string name = 1;</code>
- */
- public boolean hasName() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>required string name = 1;</code>
- */
- public java.lang.String getName() {
- java.lang.Object ref = name_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- name_ = s;
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- * <code>required string name = 1;</code>
- */
- public com.google.protobuf.ByteString
- getNameBytes() {
- java.lang.Object ref = name_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- name_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- * <code>required string name = 1;</code>
- */
- public Builder setName(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- name_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>required string name = 1;</code>
- */
- public Builder clearName() {
- bitField0_ = (bitField0_ & ~0x00000001);
- name_ = getDefaultInstance().getName();
- onChanged();
- return this;
- }
- /**
- * <code>required string name = 1;</code>
- */
- public Builder setNameBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- name_ = value;
- onChanged();
- return this;
- }
-
- // required string value = 2;
- private java.lang.Object value_ = "";
- /**
- * <code>required string value = 2;</code>
- */
- public boolean hasValue() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>required string value = 2;</code>
- */
- public java.lang.String getValue() {
- java.lang.Object ref = value_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- value_ = s;
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- * <code>required string value = 2;</code>
- */
- public com.google.protobuf.ByteString
- getValueBytes() {
- java.lang.Object ref = value_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- value_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- * <code>required string value = 2;</code>
- */
- public Builder setValue(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000002;
- value_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>required string value = 2;</code>
- */
- public Builder clearValue() {
- bitField0_ = (bitField0_ & ~0x00000002);
- value_ = getDefaultInstance().getValue();
- onChanged();
- return this;
- }
- /**
- * <code>required string value = 2;</code>
- */
- public Builder setValueBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000002;
- value_ = value;
- onChanged();
- return this;
- }
-
- // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute)
- }
-
- static {
- defaultInstance = new Attribute(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute)
- }
-
- private int bitField0_;
- // optional string name = 1;
- public static final int NAME_FIELD_NUMBER = 1;
- private java.lang.Object name_;
- /**
- * <code>optional string name = 1;</code>
- */
- public boolean hasName() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public java.lang.String getName() {
- java.lang.Object ref = name_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- name_ = s;
- }
- return s;
- }
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public com.google.protobuf.ByteString
- getNameBytes() {
- java.lang.Object ref = name_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- name_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
- public static final int ATTRS_FIELD_NUMBER = 2;
- private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> attrs_;
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> getAttrsList() {
- return attrs_;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.AttributeOrBuilder>
- getAttrsOrBuilderList() {
- return attrs_;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- public int getAttrsCount() {
- return attrs_.size();
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute getAttrs(int index) {
- return attrs_.get(index);
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.AttributeOrBuilder getAttrsOrBuilder(
- int index) {
- return attrs_.get(index);
- }
-
- // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
- public static final int COLUMNS_FIELD_NUMBER = 3;
- private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema> columns_;
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema> getColumnsList() {
- return columns_;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchemaOrBuilder>
- getColumnsOrBuilderList() {
- return columns_;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- public int getColumnsCount() {
- return columns_.size();
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema getColumns(int index) {
- return columns_.get(index);
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchemaOrBuilder getColumnsOrBuilder(
- int index) {
- return columns_.get(index);
- }
-
- // optional bool inMemory = 4;
- public static final int INMEMORY_FIELD_NUMBER = 4;
- private boolean inMemory_;
- /**
- * <code>optional bool inMemory = 4;</code>
- *
- * <pre>
- * optional helpful encodings of commonly used attributes
- * </pre>
- */
- public boolean hasInMemory() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional bool inMemory = 4;</code>
- *
- * <pre>
- * optional helpful encodings of commonly used attributes
- * </pre>
- */
- public boolean getInMemory() {
- return inMemory_;
- }
-
- // optional bool readOnly = 5;
- public static final int READONLY_FIELD_NUMBER = 5;
- private boolean readOnly_;
- /**
- * <code>optional bool readOnly = 5;</code>
- */
- public boolean hasReadOnly() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
- }
- /**
- * <code>optional bool readOnly = 5;</code>
- */
- public boolean getReadOnly() {
- return readOnly_;
- }
-
- private void initFields() {
- name_ = "";
- attrs_ = java.util.Collections.emptyList();
- columns_ = java.util.Collections.emptyList();
- inMemory_ = false;
- readOnly_ = false;
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized != -1) return isInitialized == 1;
-
- for (int i = 0; i < getAttrsCount(); i++) {
- if (!getAttrs(i).isInitialized()) {
- memoizedIsInitialized = 0;
- return false;
- }
- }
- for (int i = 0; i < getColumnsCount(); i++) {
- if (!getColumns(i).isInitialized()) {
- memoizedIsInitialized = 0;
- return false;
- }
- }
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBytes(1, getNameBytes());
- }
- for (int i = 0; i < attrs_.size(); i++) {
- output.writeMessage(2, attrs_.get(i));
- }
- for (int i = 0; i < columns_.size(); i++) {
- output.writeMessage(3, columns_.get(i));
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeBool(4, inMemory_);
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- output.writeBool(5, readOnly_);
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(1, getNameBytes());
- }
- for (int i = 0; i < attrs_.size(); i++) {
- size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(2, attrs_.get(i));
- }
- for (int i = 0; i < columns_.size(); i++) {
- size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(3, columns_.get(i));
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBoolSize(4, inMemory_);
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBoolSize(5, readOnly_);
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchemaOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.class, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Builder.class);
- }
-
- // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- getAttrsFieldBuilder();
- getColumnsFieldBuilder();
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- name_ = "";
- bitField0_ = (bitField0_ & ~0x00000001);
- if (attrsBuilder_ == null) {
- attrs_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000002);
- } else {
- attrsBuilder_.clear();
- }
- if (columnsBuilder_ == null) {
- columns_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000004);
- } else {
- columnsBuilder_.clear();
- }
- inMemory_ = false;
- bitField0_ = (bitField0_ & ~0x00000008);
- readOnly_ = false;
- bitField0_ = (bitField0_ & ~0x00000010);
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_descriptor;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema getDefaultInstanceForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.getDefaultInstance();
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema build() {
- org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema buildPartial() {
- org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema result = new org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.name_ = name_;
- if (attrsBuilder_ == null) {
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- attrs_ = java.util.Collections.unmodifiableList(attrs_);
- bitField0_ = (bitField0_ & ~0x00000002);
- }
- result.attrs_ = attrs_;
- } else {
- result.attrs_ = attrsBuilder_.build();
- }
- if (columnsBuilder_ == null) {
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- columns_ = java.util.Collections.unmodifiableList(columns_);
- bitField0_ = (bitField0_ & ~0x00000004);
- }
- result.columns_ = columns_;
- } else {
- result.columns_ = columnsBuilder_.build();
- }
- if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
- to_bitField0_ |= 0x00000002;
- }
- result.inMemory_ = inMemory_;
- if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
- to_bitField0_ |= 0x00000004;
- }
- result.readOnly_ = readOnly_;
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema) {
- return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema other) {
- if (other == org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.getDefaultInstance()) return this;
- if (other.hasName()) {
- bitField0_ |= 0x00000001;
- name_ = other.name_;
- onChanged();
- }
- if (attrsBuilder_ == null) {
- if (!other.attrs_.isEmpty()) {
- if (attrs_.isEmpty()) {
- attrs_ = other.attrs_;
- bitField0_ = (bitField0_ & ~0x00000002);
- } else {
- ensureAttrsIsMutable();
- attrs_.addAll(other.attrs_);
- }
- onChanged();
- }
- } else {
- if (!other.attrs_.isEmpty()) {
- if (attrsBuilder_.isEmpty()) {
- attrsBuilder_.dispose();
- attrsBuilder_ = null;
- attrs_ = other.attrs_;
- bitField0_ = (bitField0_ & ~0x00000002);
- attrsBuilder_ =
- com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
- getAttrsFieldBuilder() : null;
- } else {
- attrsBuilder_.addAllMessages(other.attrs_);
- }
- }
- }
- if (columnsBuilder_ == null) {
- if (!other.columns_.isEmpty()) {
- if (columns_.isEmpty()) {
- columns_ = other.columns_;
- bitField0_ = (bitField0_ & ~0x00000004);
- } else {
- ensureColumnsIsMutable();
- columns_.addAll(other.columns_);
- }
- onChanged();
- }
- } else {
- if (!other.columns_.isEmpty()) {
- if (columnsBuilder_.isEmpty()) {
- columnsBuilder_.dispose();
- columnsBuilder_ = null;
- columns_ = other.columns_;
- bitField0_ = (bitField0_ & ~0x00000004);
- columnsBuilder_ =
- com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
- getColumnsFieldBuilder() : null;
- } else {
- columnsBuilder_.addAllMessages(other.columns_);
- }
- }
- }
- if (other.hasInMemory()) {
- setInMemory(other.getInMemory());
- }
- if (other.hasReadOnly()) {
- setReadOnly(other.getReadOnly());
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- for (int i = 0; i < getAttrsCount(); i++) {
- if (!getAttrs(i).isInitialized()) {
-
- return false;
- }
- }
- for (int i = 0; i < getColumnsCount(); i++) {
- if (!getColumns(i).isInitialized()) {
-
- return false;
- }
- }
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- // optional string name = 1;
- private java.lang.Object name_ = "";
- /**
- * <code>optional string name = 1;</code>
- */
- public boolean hasName() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public java.lang.String getName() {
- java.lang.Object ref = name_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- name_ = s;
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public com.google.protobuf.ByteString
- getNameBytes() {
- java.lang.Object ref = name_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- name_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public Builder setName(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- name_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public Builder clearName() {
- bitField0_ = (bitField0_ & ~0x00000001);
- name_ = getDefaultInstance().getName();
- onChanged();
- return this;
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public Builder setNameBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- name_ = value;
- onChanged();
- return this;
- }
-
- // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
- private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> attrs_ =
- java.util.Collections.emptyList();
- private void ensureAttrsIsMutable() {
- if (!((bitField0_ & 0x00000002) == 0x00000002)) {
- attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute>(attrs_);
- bitField0_ |= 0x00000002;
- }
- }
-
- private com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.AttributeOrBuilder> attrsBuilder_;
-
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> getAttrsList() {
- if (attrsBuilder_ == null) {
- return java.util.Collections.unmodifiableList(attrs_);
- } else {
- return attrsBuilder_.getMessageList();
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- public int getAttrsCount() {
- if (attrsBuilder_ == null) {
- return attrs_.size();
- } else {
- return attrsBuilder_.getCount();
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute getAttrs(int index) {
- if (attrsBuilder_ == null) {
- return attrs_.get(index);
- } else {
- return attrsBuilder_.getMessage(index);
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- public Builder setAttrs(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute value) {
- if (attrsBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureAttrsIsMutable();
- attrs_.set(index, value);
- onChanged();
- } else {
- attrsBuilder_.setMessage(index, value);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- public Builder setAttrs(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder builderForValue) {
- if (attrsBuilder_ == null) {
- ensureAttrsIsMutable();
- attrs_.set(index, builderForValue.build());
- onChanged();
- } else {
- attrsBuilder_.setMessage(index, builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- public Builder addAttrs(org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute value) {
- if (attrsBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureAttrsIsMutable();
- attrs_.add(value);
- onChanged();
- } else {
- attrsBuilder_.addMessage(value);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- public Builder addAttrs(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute value) {
- if (attrsBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureAttrsIsMutable();
- attrs_.add(index, value);
- onChanged();
- } else {
- attrsBuilder_.addMessage(index, value);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- public Builder addAttrs(
- org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder builderForValue) {
- if (attrsBuilder_ == null) {
- ensureAttrsIsMutable();
- attrs_.add(builderForValue.build());
- onChanged();
- } else {
- attrsBuilder_.addMessage(builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- public Builder addAttrs(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder builderForValue) {
- if (attrsBuilder_ == null) {
- ensureAttrsIsMutable();
- attrs_.add(index, builderForValue.build());
- onChanged();
- } else {
- attrsBuilder_.addMessage(index, builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- public Builder addAllAttrs(
- java.lang.Iterable<? extends org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> values) {
- if (attrsBuilder_ == null) {
- ensureAttrsIsMutable();
- super.addAll(values, attrs_);
- onChanged();
- } else {
- attrsBuilder_.addAllMessages(values);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- public Builder clearAttrs() {
- if (attrsBuilder_ == null) {
- attrs_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000002);
- onChanged();
- } else {
- attrsBuilder_.clear();
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- public Builder removeAttrs(int index) {
- if (attrsBuilder_ == null) {
- ensureAttrsIsMutable();
- attrs_.remove(index);
- onChanged();
- } else {
- attrsBuilder_.remove(index);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder getAttrsBuilder(
- int index) {
- return getAttrsFieldBuilder().getBuilder(index);
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.AttributeOrBuilder getAttrsOrBuilder(
- int index) {
- if (attrsBuilder_ == null) {
- return attrs_.get(index);
- } else {
- return attrsBuilder_.getMessageOrBuilder(index);
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.AttributeOrBuilder>
- getAttrsOrBuilderList() {
- if (attrsBuilder_ != null) {
- return attrsBuilder_.getMessageOrBuilderList();
- } else {
- return java.util.Collections.unmodifiableList(attrs_);
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder addAttrsBuilder() {
- return getAttrsFieldBuilder().addBuilder(
- org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.getDefaultInstance());
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder addAttrsBuilder(
- int index) {
- return getAttrsFieldBuilder().addBuilder(
- index, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.getDefaultInstance());
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
- */
- public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder>
- getAttrsBuilderList() {
- return getAttrsFieldBuilder().getBuilderList();
- }
- private com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.AttributeOrBuilder>
- getAttrsFieldBuilder() {
- if (attrsBuilder_ == null) {
- attrsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.AttributeOrBuilder>(
- attrs_,
- ((bitField0_ & 0x00000002) == 0x00000002),
- getParentForChildren(),
- isClean());
- attrs_ = null;
- }
- return attrsBuilder_;
- }
-
- // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
- private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema> columns_ =
- java.util.Collections.emptyList();
- private void ensureColumnsIsMutable() {
- if (!((bitField0_ & 0x00000004) == 0x00000004)) {
- columns_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema>(columns_);
- bitField0_ |= 0x00000004;
- }
- }
-
- private com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchemaOrBuilder> columnsBuilder_;
-
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema> getColumnsList() {
- if (columnsBuilder_ == null) {
- return java.util.Collections.unmodifiableList(columns_);
- } else {
- return columnsBuilder_.getMessageList();
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- public int getColumnsCount() {
- if (columnsBuilder_ == null) {
- return columns_.size();
- } else {
- return columnsBuilder_.getCount();
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema getColumns(int index) {
- if (columnsBuilder_ == null) {
- return columns_.get(index);
- } else {
- return columnsBuilder_.getMessage(index);
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- public Builder setColumns(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema value) {
- if (columnsBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureColumnsIsMutable();
- columns_.set(index, value);
- onChanged();
- } else {
- columnsBuilder_.setMessage(index, value);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- public Builder setColumns(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder builderForValue) {
- if (columnsBuilder_ == null) {
- ensureColumnsIsMutable();
- columns_.set(index, builderForValue.build());
- onChanged();
- } else {
- columnsBuilder_.setMessage(index, builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- public Builder addColumns(org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema value) {
- if (columnsBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureColumnsIsMutable();
- columns_.add(value);
- onChanged();
- } else {
- columnsBuilder_.addMessage(value);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- public Builder addColumns(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema value) {
- if (columnsBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureColumnsIsMutable();
- columns_.add(index, value);
- onChanged();
- } else {
- columnsBuilder_.addMessage(index, value);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- public Builder addColumns(
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder builderForValue) {
- if (columnsBuilder_ == null) {
- ensureColumnsIsMutable();
- columns_.add(builderForValue.build());
- onChanged();
- } else {
- columnsBuilder_.addMessage(builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- public Builder addColumns(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder builderForValue) {
- if (columnsBuilder_ == null) {
- ensureColumnsIsMutable();
- columns_.add(index, builderForValue.build());
- onChanged();
- } else {
- columnsBuilder_.addMessage(index, builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- public Builder addAllColumns(
- java.lang.Iterable<? extends org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema> values) {
- if (columnsBuilder_ == null) {
- ensureColumnsIsMutable();
- super.addAll(values, columns_);
- onChanged();
- } else {
- columnsBuilder_.addAllMessages(values);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- public Builder clearColumns() {
- if (columnsBuilder_ == null) {
- columns_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000004);
- onChanged();
- } else {
- columnsBuilder_.clear();
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- public Builder removeColumns(int index) {
- if (columnsBuilder_ == null) {
- ensureColumnsIsMutable();
- columns_.remove(index);
- onChanged();
- } else {
- columnsBuilder_.remove(index);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder getColumnsBuilder(
- int index) {
- return getColumnsFieldBuilder().getBuilder(index);
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchemaOrBuilder getColumnsOrBuilder(
- int index) {
- if (columnsBuilder_ == null) {
- return columns_.get(index);
- } else {
- return columnsBuilder_.getMessageOrBuilder(index);
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchemaOrBuilder>
- getColumnsOrBuilderList() {
- if (columnsBuilder_ != null) {
- return columnsBuilder_.getMessageOrBuilderList();
- } else {
- return java.util.Collections.unmodifiableList(columns_);
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder addColumnsBuilder() {
- return getColumnsFieldBuilder().addBuilder(
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.getDefaultInstance());
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder addColumnsBuilder(
- int index) {
- return getColumnsFieldBuilder().addBuilder(
- index, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.getDefaultInstance());
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
- */
- public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder>
- getColumnsBuilderList() {
- return getColumnsFieldBuilder().getBuilderList();
- }
- private com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchemaOrBuilder>
- getColumnsFieldBuilder() {
- if (columnsBuilder_ == null) {
- columnsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchemaOrBuilder>(
- columns_,
- ((bitField0_ & 0x00000004) == 0x00000004),
- getParentForChildren(),
- isClean());
- columns_ = null;
- }
- return columnsBuilder_;
- }
-
- // optional bool inMemory = 4;
- private boolean inMemory_ ;
- /**
- * <code>optional bool inMemory = 4;</code>
- *
- * <pre>
- * optional helpful encodings of commonly used attributes
- * </pre>
- */
- public boolean hasInMemory() {
- return ((bitField0_ & 0x00000008) == 0x00000008);
- }
- /**
- * <code>optional bool inMemory = 4;</code>
- *
- * <pre>
- * optional helpful encodings of commonly used attributes
- * </pre>
- */
- public boolean getInMemory() {
- return inMemory_;
- }
- /**
- * <code>optional bool inMemory = 4;</code>
- *
- * <pre>
- * optional helpful encodings of commonly used attributes
- * </pre>
- */
- public Builder setInMemory(boolean value) {
- bitField0_ |= 0x00000008;
- inMemory_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional bool inMemory = 4;</code>
- *
- * <pre>
- * optional helpful encodings of commonly used attributes
- * </pre>
- */
- public Builder clearInMemory() {
- bitField0_ = (bitField0_ & ~0x00000008);
- inMemory_ = false;
- onChanged();
- return this;
- }
-
- // optional bool readOnly = 5;
- private boolean readOnly_ ;
- /**
- * <code>optional bool readOnly = 5;</code>
- */
- public boolean hasReadOnly() {
- return ((bitField0_ & 0x00000010) == 0x00000010);
- }
- /**
- * <code>optional bool readOnly = 5;</code>
- */
- public boolean getReadOnly() {
- return readOnly_;
- }
- /**
- * <code>optional bool readOnly = 5;</code>
- */
- public Builder setReadOnly(boolean value) {
- bitField0_ |= 0x00000010;
- readOnly_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional bool readOnly = 5;</code>
- */
- public Builder clearReadOnly() {
- bitField0_ = (bitField0_ & ~0x00000010);
- readOnly_ = false;
- onChanged();
- return this;
- }
-
- // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema)
- }
-
- static {
- defaultInstance = new TableSchema(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema)
- }
-
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_Attribute_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_Attribute_fieldAccessorTable;
-
- public static com.google.protobuf.Descriptors.FileDescriptor
- getDescriptor() {
- return descriptor;
- }
- private static com.google.protobuf.Descriptors.FileDescriptor
- descriptor;
- static {
- java.lang.String[] descriptorData = {
- "\n\030TableSchemaMessage.proto\022/org.apache.h" +
- "adoop.hbase.rest.protobuf.generated\032\031Col" +
- "umnSchemaMessage.proto\"\220\002\n\013TableSchema\022\014" +
- "\n\004name\030\001 \001(\t\022U\n\005attrs\030\002 \003(\0132F.org.apache" +
- ".hadoop.hbase.rest.protobuf.generated.Ta" +
- "bleSchema.Attribute\022N\n\007columns\030\003 \003(\0132=.o" +
- "rg.apache.hadoop.hbase.rest.protobuf.gen" +
- "erated.ColumnSchema\022\020\n\010inMemory\030\004 \001(\010\022\020\n" +
- "\010readOnly\030\005 \001(\010\032(\n\tAttribute\022\014\n\004name\030\001 \002" +
- "(\t\022\r\n\005value\030\002 \002(\t"
- };
- com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
- new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
- public com.google.protobuf.ExtensionRegistry assignDescriptors(
- com.google.protobuf.Descriptors.FileDescriptor root) {
- descriptor = root;
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_descriptor =
- getDescriptor().getMessageTypes().get(0);
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_descriptor,
- new java.lang.String[] { "Name", "Attrs", "Columns", "InMemory", "ReadOnly", });
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_Attribute_descriptor =
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_descriptor.getNestedTypes().get(0);
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_Attribute_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_Attribute_descriptor,
- new java.lang.String[] { "Name", "Value", });
- return null;
- }
- };
- com.google.protobuf.Descriptors.FileDescriptor
- .internalBuildGeneratedFileFrom(descriptorData,
- new com.google.protobuf.Descriptors.FileDescriptor[] {
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.getDescriptor(),
- }, assigner);
- }
-
- // @@protoc_insertion_point(outer_class_scope)
-}
[18/38] HBASE-12197 Move rest to it's on module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
deleted file mode 100644
index 5de6b38..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-import java.util.Map;
-
-import javax.ws.rs.Consumes;
-import javax.ws.rs.DELETE;
-import javax.ws.rs.GET;
-import javax.ws.rs.POST;
-import javax.ws.rs.PUT;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.CacheControl;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.ResponseBuilder;
-import javax.ws.rs.core.UriInfo;
-import javax.xml.namespace.QName;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotEnabledException;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel;
-import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
-import org.apache.hadoop.hbase.util.Bytes;
-
-@InterfaceAudience.Private
-public class SchemaResource extends ResourceBase {
- private static final Log LOG = LogFactory.getLog(SchemaResource.class);
-
- static CacheControl cacheControl;
- static {
- cacheControl = new CacheControl();
- cacheControl.setNoCache(true);
- cacheControl.setNoTransform(false);
- }
-
- TableResource tableResource;
-
- /**
- * Constructor
- * @param tableResource
- * @throws IOException
- */
- public SchemaResource(TableResource tableResource) throws IOException {
- super();
- this.tableResource = tableResource;
- }
-
- private HTableDescriptor getTableSchema() throws IOException,
- TableNotFoundException {
- HTableInterface table = servlet.getTable(tableResource.getName());
- try {
- return table.getTableDescriptor();
- } finally {
- table.close();
- }
- }
-
- @GET
- @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
- MIMETYPE_PROTOBUF_IETF})
- public Response get(final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("GET " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- try {
- ResponseBuilder response =
- Response.ok(new TableSchemaModel(getTableSchema()));
- response.cacheControl(cacheControl);
- servlet.getMetrics().incrementSucessfulGetRequests(1);
- return response.build();
- } catch (Exception e) {
- servlet.getMetrics().incrementFailedGetRequests(1);
- return processException(e);
- }
- }
-
- private Response replace(final byte[] name, final TableSchemaModel model,
- final UriInfo uriInfo, final HBaseAdmin admin) {
- if (servlet.isReadOnly()) {
- return Response.status(Response.Status.FORBIDDEN)
- .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
- .build();
- }
- try {
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
- for (Map.Entry<QName,Object> e: model.getAny().entrySet()) {
- htd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
- }
- for (ColumnSchemaModel family: model.getColumns()) {
- HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
- for (Map.Entry<QName,Object> e: family.getAny().entrySet()) {
- hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
- }
- htd.addFamily(hcd);
- }
- if (admin.tableExists(name)) {
- admin.disableTable(name);
- admin.modifyTable(name, htd);
- admin.enableTable(name);
- servlet.getMetrics().incrementSucessfulPutRequests(1);
- } else try {
- admin.createTable(htd);
- servlet.getMetrics().incrementSucessfulPutRequests(1);
- } catch (TableExistsException e) {
- // race, someone else created a table with the same name
- return Response.status(Response.Status.NOT_MODIFIED)
- .type(MIMETYPE_TEXT).entity("Not modified" + CRLF)
- .build();
- }
- return Response.created(uriInfo.getAbsolutePath()).build();
- } catch (Exception e) {
- servlet.getMetrics().incrementFailedPutRequests(1);
- return processException(e);
- }
- }
-
- private Response update(final byte[] name, final TableSchemaModel model,
- final UriInfo uriInfo, final HBaseAdmin admin) {
- if (servlet.isReadOnly()) {
- return Response.status(Response.Status.FORBIDDEN)
- .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
- .build();
- }
- try {
- HTableDescriptor htd = admin.getTableDescriptor(name);
- admin.disableTable(name);
- try {
- for (ColumnSchemaModel family: model.getColumns()) {
- HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
- for (Map.Entry<QName,Object> e: family.getAny().entrySet()) {
- hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
- }
- if (htd.hasFamily(hcd.getName())) {
- admin.modifyColumn(name, hcd);
- } else {
- admin.addColumn(name, hcd);
- }
- }
- } catch (IOException e) {
- return Response.status(Response.Status.SERVICE_UNAVAILABLE)
- .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
- .build();
- } finally {
- admin.enableTable(tableResource.getName());
- }
- servlet.getMetrics().incrementSucessfulPutRequests(1);
- return Response.ok().build();
- } catch (Exception e) {
- servlet.getMetrics().incrementFailedPutRequests(1);
- return processException(e);
- }
- }
-
- private Response update(final TableSchemaModel model, final boolean replace,
- final UriInfo uriInfo) {
- try {
- byte[] name = Bytes.toBytes(tableResource.getName());
- HBaseAdmin admin = servlet.getAdmin();
- if (replace || !admin.tableExists(name)) {
- return replace(name, model, uriInfo, admin);
- } else {
- return update(name, model, uriInfo, admin);
- }
- } catch (Exception e) {
- servlet.getMetrics().incrementFailedPutRequests(1);
- return processException(e);
- }
- }
-
- @PUT
- @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
- MIMETYPE_PROTOBUF_IETF})
- public Response put(final TableSchemaModel model,
- final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("PUT " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- return update(model, true, uriInfo);
- }
-
- @POST
- @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
- MIMETYPE_PROTOBUF_IETF})
- public Response post(final TableSchemaModel model,
- final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("PUT " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- return update(model, false, uriInfo);
- }
-
- @DELETE
- public Response delete(final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("DELETE " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- if (servlet.isReadOnly()) {
- return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT)
- .entity("Forbidden" + CRLF).build();
- }
- try {
- HBaseAdmin admin = servlet.getAdmin();
- try {
- admin.disableTable(tableResource.getName());
- } catch (TableNotEnabledException e) { /* this is what we want anyway */ }
- admin.deleteTable(tableResource.getName());
- servlet.getMetrics().incrementSucessfulDeleteRequests(1);
- return Response.ok().build();
- } catch (Exception e) {
- servlet.getMetrics().incrementFailedDeleteRequests(1);
- return processException(e);
- }
- }
-}
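As a rough usage sketch (again, not part of the patch): SchemaResource serves a table's schema at <table>/schema in the MIME types listed on its @Produces annotation. Assuming a REST gateway at localhost:8080 and a table named "mytable" — both placeholders — a plain-JDK client could fetch the JSON form like this:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class SchemaGetExample {
      public static void main(String[] args) throws Exception {
        // Host, port, and table name are assumptions for illustration.
        URL url = new URL("http://localhost:8080/mytable/schema");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("Accept", "application/json");
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
          String line;
          while ((line = in.readLine()) != null) {
            System.out.println(line);  // the TableSchemaModel serialized as JSON
          }
        } finally {
          conn.disconnect();
        }
      }
    }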
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
deleted file mode 100644
index a7e52bd..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.CacheControl;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.ResponseBuilder;
-import javax.ws.rs.core.UriInfo;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.ServerLoad;
-import org.apache.hadoop.hbase.RegionLoad;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;
-
-@InterfaceAudience.Private
-public class StorageClusterStatusResource extends ResourceBase {
- private static final Log LOG =
- LogFactory.getLog(StorageClusterStatusResource.class);
-
- static CacheControl cacheControl;
- static {
- cacheControl = new CacheControl();
- cacheControl.setNoCache(true);
- cacheControl.setNoTransform(false);
- }
-
- /**
- * Constructor
- * @throws IOException
- */
- public StorageClusterStatusResource() throws IOException {
- super();
- }
-
- @GET
- @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
- MIMETYPE_PROTOBUF_IETF})
- public Response get(final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("GET " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- try {
- ClusterStatus status = servlet.getAdmin().getClusterStatus();
- StorageClusterStatusModel model = new StorageClusterStatusModel();
- model.setRegions(status.getRegionsCount());
- model.setRequests(status.getRequestsCount());
- model.setAverageLoad(status.getAverageLoad());
- for (ServerName info: status.getServers()) {
- ServerLoad load = status.getLoad(info);
- StorageClusterStatusModel.Node node =
- model.addLiveNode(
- info.getHostname() + ":" +
- Integer.toString(info.getPort()),
- info.getStartcode(), load.getUsedHeapMB(),
- load.getMaxHeapMB());
- node.setRequests(load.getNumberOfRequests());
- for (RegionLoad region: load.getRegionsLoad().values()) {
- node.addRegion(region.getName(), region.getStores(),
- region.getStorefiles(), region.getStorefileSizeMB(),
- region.getMemStoreSizeMB(), region.getStorefileIndexSizeMB(),
- region.getReadRequestsCount(), region.getWriteRequestsCount(),
- region.getRootIndexSizeKB(), region.getTotalStaticIndexSizeKB(),
- region.getTotalStaticBloomSizeKB(), region.getTotalCompactingKVs(),
- region.getCurrentCompactedKVs());
- }
- }
- for (ServerName name: status.getDeadServerNames()) {
- model.addDeadNode(name.toString());
- }
- ResponseBuilder response = Response.ok(model);
- response.cacheControl(cacheControl);
- servlet.getMetrics().incrementSucessfulGetRequests(1);
- return response.build();
- } catch (IOException e) {
- servlet.getMetrics().incrementFailedGetRequests(1);
- return Response.status(Response.Status.SERVICE_UNAVAILABLE)
- .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
- .build();
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
deleted file mode 100644
index 85e81f8..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.CacheControl;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import javax.ws.rs.core.Response.ResponseBuilder;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel;
-
-@InterfaceAudience.Private
-public class StorageClusterVersionResource extends ResourceBase {
- private static final Log LOG =
- LogFactory.getLog(StorageClusterVersionResource.class);
-
- static CacheControl cacheControl;
- static {
- cacheControl = new CacheControl();
- cacheControl.setNoCache(true);
- cacheControl.setNoTransform(false);
- }
-
- /**
- * Constructor
- * @throws IOException
- */
- public StorageClusterVersionResource() throws IOException {
- super();
- }
-
- @GET
- @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON})
- public Response get(final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("GET " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- try {
- StorageClusterVersionModel model = new StorageClusterVersionModel();
- model.setVersion(servlet.getAdmin().getClusterStatus().getHBaseVersion());
- ResponseBuilder response = Response.ok(model);
- response.cacheControl(cacheControl);
- servlet.getMetrics().incrementSucessfulGetRequests(1);
- return response.build();
- } catch (IOException e) {
- servlet.getMetrics().incrementFailedGetRequests(1);
- return Response.status(Response.Status.SERVICE_UNAVAILABLE)
- .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
- .build();
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
deleted file mode 100644
index c458cfa..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-import java.util.List;
-
-import javax.ws.rs.DefaultValue;
-import javax.ws.rs.Encoded;
-import javax.ws.rs.HeaderParam;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.QueryParam;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.UriInfo;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.PrefixFilter;
-import org.apache.hadoop.hbase.util.Bytes;
-
-@InterfaceAudience.Private
-public class TableResource extends ResourceBase {
-
- String table;
- private static final Log LOG = LogFactory.getLog(TableResource.class);
-
- /**
- * Constructor
- * @param table
- * @throws IOException
- */
- public TableResource(String table) throws IOException {
- super();
- this.table = table;
- }
-
- /** @return the table name */
- String getName() {
- return table;
- }
-
- /**
- * @return true if the table exists
- * @throws IOException
- */
- boolean exists() throws IOException {
- return servlet.getAdmin().tableExists(table);
- }
-
- @Path("exists")
- public ExistsResource getExistsResource() throws IOException {
- return new ExistsResource(this);
- }
-
- @Path("regions")
- public RegionsResource getRegionsResource() throws IOException {
- return new RegionsResource(this);
- }
-
- @Path("scanner")
- public ScannerResource getScannerResource() throws IOException {
- return new ScannerResource(this);
- }
-
- @Path("schema")
- public SchemaResource getSchemaResource() throws IOException {
- return new SchemaResource(this);
- }
-
- @Path("multiget")
- public MultiRowResource getMultipleRowResource(
- final @QueryParam("v") String versions) throws IOException {
- return new MultiRowResource(this, versions);
- }
-
- @Path("{rowspec: [^*]+}")
- public RowResource getRowResource(
- // We need the @Encoded decorator so Jersey won't urldecode before
- // the RowSpec constructor has a chance to parse
- final @PathParam("rowspec") @Encoded String rowspec,
- final @QueryParam("v") String versions,
- final @QueryParam("check") String check) throws IOException {
- return new RowResource(this, rowspec, versions, check);
- }
-
- @Path("{suffixglobbingspec: .*\\*/.+}")
- public RowResource getRowResourceWithSuffixGlobbing(
- // We need the @Encoded decorator so Jersey won't urldecode before
- // the RowSpec constructor has a chance to parse
- final @PathParam("suffixglobbingspec") @Encoded String suffixglobbingspec,
- final @QueryParam("v") String versions,
- final @QueryParam("check") String check) throws IOException {
- return new RowResource(this, suffixglobbingspec, versions, check);
- }
-
- @Path("{scanspec: .*[*]$}")
- public TableScanResource getScanResource(
- final @Context UriInfo uriInfo,
- final @PathParam("scanspec") String scanSpec,
- final @HeaderParam("Accept") String contentType,
- @DefaultValue(Integer.MAX_VALUE + "")
- @QueryParam(Constants.SCAN_LIMIT) int userRequestedLimit,
- @DefaultValue("") @QueryParam(Constants.SCAN_START_ROW) String startRow,
- @DefaultValue("") @QueryParam(Constants.SCAN_END_ROW) String endRow,
- @DefaultValue("") @QueryParam(Constants.SCAN_COLUMN) List<String> column,
- @DefaultValue("1") @QueryParam(Constants.SCAN_MAX_VERSIONS) int maxVersions,
- @DefaultValue("-1") @QueryParam(Constants.SCAN_BATCH_SIZE) int batchSize,
- @DefaultValue("0") @QueryParam(Constants.SCAN_START_TIME) long startTime,
- @DefaultValue(Long.MAX_VALUE + "") @QueryParam(Constants.SCAN_END_TIME) long endTime,
- @DefaultValue("true") @QueryParam(Constants.SCAN_BATCH_SIZE) boolean cacheBlocks) {
- try {
- Filter filter = null;
- if (scanSpec.indexOf('*') > 0) {
- String prefix = scanSpec.substring(0, scanSpec.indexOf('*'));
- filter = new PrefixFilter(Bytes.toBytes(prefix));
- }
- LOG.debug("Query parameters : Table Name = > " + this.table + " Start Row => " + startRow
- + " End Row => " + endRow + " Columns => " + column + " Start Time => " + startTime
- + " End Time => " + endTime + " Cache Blocks => " + cacheBlocks + " Max Versions => "
- + maxVersions + " Batch Size => " + batchSize);
- HTableInterface hTable = RESTServlet.getInstance().getTable(this.table);
- Scan tableScan = new Scan();
- tableScan.setBatch(batchSize);
- tableScan.setMaxVersions(maxVersions);
- tableScan.setTimeRange(startTime, endTime);
- tableScan.setStartRow(Bytes.toBytes(startRow));
- tableScan.setStopRow(Bytes.toBytes(endRow));
- for (String csplit : column) {
- String[] familysplit = csplit.trim().split(":");
- if (familysplit.length == 2) {
- if (familysplit[1].length() > 0) {
- LOG.debug("Scan family and column : " + familysplit[0] + " " + familysplit[1]);
- tableScan.addColumn(Bytes.toBytes(familysplit[0]), Bytes.toBytes(familysplit[1]));
- } else {
- tableScan.addFamily(Bytes.toBytes(familysplit[0]));
- LOG.debug("Scan family : " + familysplit[0] + " and empty qualifier.");
- tableScan.addColumn(Bytes.toBytes(familysplit[0]), null);
- }
- } else if (StringUtils.isNotEmpty(familysplit[0])){
- LOG.debug("Scan family : " + familysplit[0]);
- tableScan.addFamily(Bytes.toBytes(familysplit[0]));
- }
- }
- if (filter != null) {
- tableScan.setFilter(filter);
- }
- int fetchSize = this.servlet.getConfiguration().getInt(Constants.SCAN_FETCH_SIZE, 10);
- tableScan.setCaching(fetchSize);
- return new TableScanResource(hTable.getScanner(tableScan), userRequestedLimit);
- } catch (Exception exp) {
- servlet.getMetrics().incrementFailedScanRequests(1);
- processException(exp);
- LOG.warn(exp);
- return null;
- }
- }
-}
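For orientation (not part of the patch): getScanResource is matched for paths ending in '*', and everything before the '*' becomes a PrefixFilter, so a prefix scan is expressed directly in the URL while the Constants.* query parameters tune it. Assuming the usual spellings behind those constants (limit, startrow, endrow, column, maxversions — not verified in this excerpt) and a gateway at localhost:8080, a request might look like:

    GET http://localhost:8080/mytable/row-2014*?limit=100&column=cf:qual&maxversions=2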
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java
deleted file mode 100644
index 5cc2c7b..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-
-import javax.ws.rs.DefaultValue;
-import javax.ws.rs.GET;
-import javax.ws.rs.HeaderParam;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.QueryParam;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.ResponseBuilder;
-import javax.ws.rs.core.UriInfo;
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.rest.model.CellModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
-import org.codehaus.jackson.annotate.JsonIgnore;
-import org.codehaus.jackson.annotate.JsonProperty;
-
-@InterfaceAudience.Private
-public class TableScanResource extends ResourceBase {
-
- private static final Log LOG = LogFactory.getLog(TableScanResource.class);
- TableResource tableResource;
- ResultScanner results;
- int userRequestedLimit;
-
- public TableScanResource(ResultScanner scanner, int userRequestedLimit) throws IOException {
- super();
- this.results = scanner;
- this.userRequestedLimit = userRequestedLimit;
- }
-
- @GET
- @Produces({ Constants.MIMETYPE_XML, Constants.MIMETYPE_JSON })
- public CellSetModelStream get(final @Context UriInfo uriInfo) {
- servlet.getMetrics().incrementRequests(1);
- final int rowsToSend = userRequestedLimit;
- servlet.getMetrics().incrementSucessfulScanRequests(1);
- final Iterator<Result> itr = results.iterator();
- return new CellSetModelStream(new ArrayList<RowModel>() {
- public Iterator<RowModel> iterator() {
- return new Iterator<RowModel>() {
- int count = rowsToSend;
-
- @Override
- public boolean hasNext() {
- if (count > 0) {
- return itr.hasNext();
- } else {
- return false;
- }
- }
-
- @Override
- public void remove() {
- throw new UnsupportedOperationException(
- "Remove method cannot be used in CellSetModelStream");
- }
-
- @Override
- public RowModel next() {
- Result rs = itr.next();
- if ((rs == null) || (count <= 0)) {
- return null;
- }
- byte[] rowKey = rs.getRow();
- RowModel rModel = new RowModel(rowKey);
- List<Cell> kvs = rs.listCells();
- for (Cell kv : kvs) {
- rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv),
- kv.getTimestamp(), CellUtil.cloneValue(kv)));
- }
- count--;
- return rModel;
- }
- };
- }
- });
- }
-
- @GET
- @Produces({ Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF })
- public Response getProtobuf(
- final @Context UriInfo uriInfo,
- final @PathParam("scanspec") String scanSpec,
- final @HeaderParam("Accept") String contentType,
- @DefaultValue(Integer.MAX_VALUE + "") @QueryParam(Constants.SCAN_LIMIT) int userRequestedLimit,
- @DefaultValue("") @QueryParam(Constants.SCAN_START_ROW) String startRow,
- @DefaultValue("") @QueryParam(Constants.SCAN_END_ROW) String endRow,
- @DefaultValue("column") @QueryParam(Constants.SCAN_COLUMN) List<String> column,
- @DefaultValue("1") @QueryParam(Constants.SCAN_MAX_VERSIONS) int maxVersions,
- @DefaultValue("-1") @QueryParam(Constants.SCAN_BATCH_SIZE) int batchSize,
- @DefaultValue("0") @QueryParam(Constants.SCAN_START_TIME) long startTime,
- @DefaultValue(Long.MAX_VALUE + "") @QueryParam(Constants.SCAN_END_TIME) long endTime,
- @DefaultValue("true") @QueryParam(Constants.SCAN_BATCH_SIZE) boolean cacheBlocks) {
- servlet.getMetrics().incrementRequests(1);
- try {
- int fetchSize = this.servlet.getConfiguration().getInt(Constants.SCAN_FETCH_SIZE, 10);
- ProtobufStreamingUtil stream = new ProtobufStreamingUtil(this.results, contentType,
- userRequestedLimit, fetchSize);
- servlet.getMetrics().incrementSucessfulScanRequests(1);
- ResponseBuilder response = Response.ok(stream);
- response.header("content-type", contentType);
- return response.build();
- } catch (Exception exp) {
- servlet.getMetrics().incrementFailedScanRequests(1);
- processException(exp);
- LOG.warn(exp);
- return null;
- }
- }
-
- @XmlRootElement(name = "CellSet")
- @XmlAccessorType(XmlAccessType.FIELD)
- public static class CellSetModelStream {
- // JAXB needs an arraylist for streaming
- @XmlElement(name = "Row")
- @JsonIgnore
- private ArrayList<RowModel> Row;
-
- public CellSetModelStream() {
- }
-
- public CellSetModelStream(final ArrayList<RowModel> rowList) {
- this.Row = rowList;
- }
-
- // jackson needs an iterator for streaming
- @JsonProperty("Row")
- public Iterator<RowModel> getIterator() {
- return Row.iterator();
- }
- }
-}
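The anonymous Iterator inside get() above implements a small, reusable idea: wrap a source iterator with a countdown so that at most userRequestedLimit rows are streamed to the serializer. A minimal standalone sketch of the same pattern, generic and free of HBase types:

    import java.util.Arrays;
    import java.util.Iterator;

    public class LimitIterator {
      // Wrap an iterator so it yields at most 'max' elements.
      static <T> Iterator<T> limit(final Iterator<T> inner, final int max) {
        return new Iterator<T>() {
          private int remaining = max;

          @Override
          public boolean hasNext() {
            return remaining > 0 && inner.hasNext();
          }

          @Override
          public T next() {
            remaining--;
            return inner.next();  // past the end, the inner iterator throws
          }

          @Override
          public void remove() {
            throw new UnsupportedOperationException();
          }
        };
      }

      public static void main(String[] args) {
        Iterator<String> it = limit(Arrays.asList("a", "b", "c").iterator(), 2);
        while (it.hasNext()) {
          System.out.println(it.next());  // prints a, b
        }
      }
    }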
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
deleted file mode 100644
index ae93825..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-
-import javax.servlet.ServletContext;
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.CacheControl;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import javax.ws.rs.core.Response.ResponseBuilder;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.rest.model.VersionModel;
-
-/**
- * Implements REST software version reporting
- * <p>
- * <tt>/version/rest</tt>
- * <p>
- * <tt>/version</tt> (alias for <tt>/version/rest</tt>)
- */
-@InterfaceAudience.Private
-public class VersionResource extends ResourceBase {
-
- private static final Log LOG = LogFactory.getLog(VersionResource.class);
-
- static CacheControl cacheControl;
- static {
- cacheControl = new CacheControl();
- cacheControl.setNoCache(true);
- cacheControl.setNoTransform(false);
- }
-
- /**
- * Constructor
- * @throws IOException
- */
- public VersionResource() throws IOException {
- super();
- }
-
- /**
- * Build a response for a version request.
- * @param context servlet context
- * @param uriInfo (JAX-RS context variable) request URL
- * @return a response for a version request
- */
- @GET
- @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
- MIMETYPE_PROTOBUF_IETF})
- public Response get(final @Context ServletContext context,
- final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("GET " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- ResponseBuilder response = Response.ok(new VersionModel(context));
- response.cacheControl(cacheControl);
- servlet.getMetrics().incrementSucessfulGetRequests(1);
- return response.build();
- }
-
- /**
- * Dispatch to StorageClusterVersionResource
- */
- @Path("cluster")
- public StorageClusterVersionResource getClusterVersionResource()
- throws IOException {
- return new StorageClusterVersionResource();
- }
-
- /**
- * Dispatch <tt>/version/rest</tt> to self.
- */
- @Path("rest")
- public VersionResource getVersionResource() {
- return this;
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
deleted file mode 100644
index ebedf57..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
+++ /dev/null
@@ -1,525 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.client;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Collections;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-import org.apache.commons.httpclient.Header;
-import org.apache.commons.httpclient.HttpClient;
-import org.apache.commons.httpclient.HttpMethod;
-import org.apache.commons.httpclient.HttpVersion;
-import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager;
-import org.apache.commons.httpclient.URI;
-import org.apache.commons.httpclient.methods.ByteArrayRequestEntity;
-import org.apache.commons.httpclient.methods.DeleteMethod;
-import org.apache.commons.httpclient.methods.GetMethod;
-import org.apache.commons.httpclient.methods.HeadMethod;
-import org.apache.commons.httpclient.methods.PostMethod;
-import org.apache.commons.httpclient.methods.PutMethod;
-import org.apache.commons.httpclient.params.HttpClientParams;
-import org.apache.commons.httpclient.params.HttpConnectionManagerParams;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-
-/**
- * A wrapper around HttpClient which provides useful functions and
- * semantics for interacting with the REST gateway.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class Client {
- public static final Header[] EMPTY_HEADER_ARRAY = new Header[0];
-
- private static final Log LOG = LogFactory.getLog(Client.class);
-
- private HttpClient httpClient;
- private Cluster cluster;
- private boolean sslEnabled;
-
- private Map<String, String> extraHeaders;
-
- /**
- * Default Constructor
- */
- public Client() {
- this(null);
- }
-
- private void initialize(Cluster cluster, boolean sslEnabled) {
- this.cluster = cluster;
- this.sslEnabled = sslEnabled;
- MultiThreadedHttpConnectionManager manager =
- new MultiThreadedHttpConnectionManager();
- HttpConnectionManagerParams managerParams = manager.getParams();
- managerParams.setConnectionTimeout(2000); // 2 s
- managerParams.setDefaultMaxConnectionsPerHost(10);
- managerParams.setMaxTotalConnections(100);
- extraHeaders = new ConcurrentHashMap<String, String>();
- this.httpClient = new HttpClient(manager);
- HttpClientParams clientParams = httpClient.getParams();
- clientParams.setVersion(HttpVersion.HTTP_1_1);
- }
-
- /**
- * Constructor
- * @param cluster the cluster definition
- */
- public Client(Cluster cluster) {
- initialize(cluster, false);
- }
-
- /**
- * Constructor
- * @param cluster the cluster definition
- * @param sslEnabled enable SSL or not
- */
- public Client(Cluster cluster, boolean sslEnabled) {
- initialize(cluster, sslEnabled);
- }
-
- /**
- * Shut down the client. Close any open persistent connections.
- */
- public void shutdown() {
- MultiThreadedHttpConnectionManager manager =
- (MultiThreadedHttpConnectionManager) httpClient.getHttpConnectionManager();
- manager.shutdown();
- }
-
- /**
- * @return the wrapped HttpClient
- */
- public HttpClient getHttpClient() {
- return httpClient;
- }
-
- /**
- * Add an extra header. Extra headers are applied to every request until
- * they are removed; if a header is no longer needed, the client must
- * remove it explicitly.
- */
- public void addExtraHeader(final String name, final String value) {
- extraHeaders.put(name, value);
- }
-
- /**
- * Get an extra header value.
- */
- public String getExtraHeader(final String name) {
- return extraHeaders.get(name);
- }
-
- /**
- * Get all extra headers (read-only).
- */
- public Map<String, String> getExtraHeaders() {
- return Collections.unmodifiableMap(extraHeaders);
- }
-
- /**
- * Remove an extra header.
- */
- public void removeExtraHeader(final String name) {
- extraHeaders.remove(name);
- }
-
- /**
- * Execute a transaction method given only the path. Will select at random
- * one of the members of the supplied cluster definition and iterate through
- * the list until a transaction can be successfully completed. The
- * definition of success here is a complete HTTP transaction, irrespective
- * of result code.
- * @param cluster the cluster definition
- * @param method the transaction method
- * @param headers HTTP header values to send
- * @param path the properly urlencoded path
- * @return the HTTP response code
- * @throws IOException
- */
- public int executePathOnly(Cluster cluster, HttpMethod method,
- Header[] headers, String path) throws IOException {
- IOException lastException;
- if (cluster.nodes.size() < 1) {
- throw new IOException("Cluster is empty");
- }
- int start = (int)Math.round((cluster.nodes.size() - 1) * Math.random());
- int i = start;
- do {
- cluster.lastHost = cluster.nodes.get(i);
- try {
- StringBuilder sb = new StringBuilder();
- if (sslEnabled) {
- sb.append("https://");
- } else {
- sb.append("http://");
- }
- sb.append(cluster.lastHost);
- sb.append(path);
- URI uri = new URI(sb.toString(), true);
- return executeURI(method, headers, uri.toString());
- } catch (IOException e) {
- lastException = e;
- }
- // advance with wrap-around so that every node is tried exactly once
- i = (i + 1) % cluster.nodes.size();
- } while (i != start);
- throw lastException;
- }
-
- /**
- * Execute a transaction method given a complete URI.
- * @param method the transaction method
- * @param headers HTTP header values to send
- * @param uri a properly urlencoded URI
- * @return the HTTP response code
- * @throws IOException
- */
- public int executeURI(HttpMethod method, Header[] headers, String uri)
- throws IOException {
- method.setURI(new URI(uri, true));
- for (Map.Entry<String, String> e: extraHeaders.entrySet()) {
- method.addRequestHeader(e.getKey(), e.getValue());
- }
- if (headers != null) {
- for (Header header: headers) {
- method.addRequestHeader(header);
- }
- }
- long startTime = System.currentTimeMillis();
- int code = httpClient.executeMethod(method);
- long endTime = System.currentTimeMillis();
- if (LOG.isDebugEnabled()) {
- LOG.debug(method.getName() + " " + uri + " " + code + " " +
- method.getStatusText() + " in " + (endTime - startTime) + " ms");
- }
- return code;
- }
-
- /**
- * Execute a transaction method. Dispatches to <tt>executePathOnly</tt>
- * when 'path' is a bare path (i.e. it starts with '/'), and to
- * <tt>executeURI</tt> when a complete URI is passed instead.
- * @param cluster the cluster definition
- * @param method the HTTP method
- * @param headers HTTP header values to send
- * @param path the properly urlencoded path or URI
- * @return the HTTP response code
- * @throws IOException
- */
- public int execute(Cluster cluster, HttpMethod method, Header[] headers,
- String path) throws IOException {
- if (path.startsWith("/")) {
- return executePathOnly(cluster, method, headers, path);
- }
- return executeURI(method, headers, path);
- }
-
- /**
- * @return the cluster definition
- */
- public Cluster getCluster() {
- return cluster;
- }
-
- /**
- * @param cluster the cluster definition
- */
- public void setCluster(Cluster cluster) {
- this.cluster = cluster;
- }
-
- /**
- * Send a HEAD request
- * @param path the path or URI
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response head(String path) throws IOException {
- return head(cluster, path, null);
- }
-
- /**
- * Send a HEAD request
- * @param cluster the cluster definition
- * @param path the path or URI
- * @param headers the HTTP headers to include in the request
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response head(Cluster cluster, String path, Header[] headers)
- throws IOException {
- HeadMethod method = new HeadMethod();
- try {
- int code = execute(cluster, method, null, path);
- headers = method.getResponseHeaders();
- return new Response(code, headers, null);
- } finally {
- method.releaseConnection();
- }
- }
-
- /**
- * Send a GET request
- * @param path the path or URI
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response get(String path) throws IOException {
- return get(cluster, path);
- }
-
- /**
- * Send a GET request
- * @param cluster the cluster definition
- * @param path the path or URI
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response get(Cluster cluster, String path) throws IOException {
- return get(cluster, path, EMPTY_HEADER_ARRAY);
- }
-
- /**
- * Send a GET request
- * @param path the path or URI
- * @param accept Accept header value
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response get(String path, String accept) throws IOException {
- return get(cluster, path, accept);
- }
-
- /**
- * Send a GET request
- * @param cluster the cluster definition
- * @param path the path or URI
- * @param accept Accept header value
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response get(Cluster cluster, String path, String accept)
- throws IOException {
- Header[] headers = new Header[1];
- headers[0] = new Header("Accept", accept);
- return get(cluster, path, headers);
- }
-
- /**
- * Send a GET request
- * @param path the path or URI
- * @param headers the HTTP headers to include in the request,
- * <tt>Accept</tt> must be supplied
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response get(String path, Header[] headers) throws IOException {
- return get(cluster, path, headers);
- }
-
- /**
- * Send a GET request
- * @param c the cluster definition
- * @param path the path or URI
- * @param headers the HTTP headers to include in the request
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response get(Cluster c, String path, Header[] headers)
- throws IOException {
- GetMethod method = new GetMethod();
- try {
- int code = execute(c, method, headers, path);
- headers = method.getResponseHeaders();
- byte[] body = method.getResponseBody();
- InputStream in = method.getResponseBodyAsStream();
- return new Response(code, headers, body, in);
- } finally {
- method.releaseConnection();
- }
- }
-
- /**
- * Send a PUT request
- * @param path the path or URI
- * @param contentType the content MIME type
- * @param content the content bytes
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response put(String path, String contentType, byte[] content)
- throws IOException {
- return put(cluster, path, contentType, content);
- }
-
- /**
- * Send a PUT request
- * @param cluster the cluster definition
- * @param path the path or URI
- * @param contentType the content MIME type
- * @param content the content bytes
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response put(Cluster cluster, String path, String contentType,
- byte[] content) throws IOException {
- Header[] headers = new Header[1];
- headers[0] = new Header("Content-Type", contentType);
- return put(cluster, path, headers, content);
- }
-
- /**
- * Send a PUT request
- * @param path the path or URI
- * @param headers the HTTP headers to include, <tt>Content-Type</tt> must be
- * supplied
- * @param content the content bytes
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response put(String path, Header[] headers, byte[] content)
- throws IOException {
- return put(cluster, path, headers, content);
- }
-
- /**
- * Send a PUT request
- * @param cluster the cluster definition
- * @param path the path or URI
- * @param headers the HTTP headers to include, <tt>Content-Type</tt> must be
- * supplied
- * @param content the content bytes
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response put(Cluster cluster, String path, Header[] headers,
- byte[] content) throws IOException {
- PutMethod method = new PutMethod();
- try {
- method.setRequestEntity(new ByteArrayRequestEntity(content));
- int code = execute(cluster, method, headers, path);
- headers = method.getResponseHeaders();
- content = method.getResponseBody();
- return new Response(code, headers, content);
- } finally {
- method.releaseConnection();
- }
- }
-
- /**
- * Send a POST request
- * @param path the path or URI
- * @param contentType the content MIME type
- * @param content the content bytes
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response post(String path, String contentType, byte[] content)
- throws IOException {
- return post(cluster, path, contentType, content);
- }
-
- /**
- * Send a POST request
- * @param cluster the cluster definition
- * @param path the path or URI
- * @param contentType the content MIME type
- * @param content the content bytes
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response post(Cluster cluster, String path, String contentType,
- byte[] content) throws IOException {
- Header[] headers = new Header[1];
- headers[0] = new Header("Content-Type", contentType);
- return post(cluster, path, headers, content);
- }
-
- /**
- * Send a POST request
- * @param path the path or URI
- * @param headers the HTTP headers to include, <tt>Content-Type</tt> must be
- * supplied
- * @param content the content bytes
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response post(String path, Header[] headers, byte[] content)
- throws IOException {
- return post(cluster, path, headers, content);
- }
-
- /**
- * Send a POST request
- * @param cluster the cluster definition
- * @param path the path or URI
- * @param headers the HTTP headers to include, <tt>Content-Type</tt> must be
- * supplied
- * @param content the content bytes
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response post(Cluster cluster, String path, Header[] headers,
- byte[] content) throws IOException {
- PostMethod method = new PostMethod();
- try {
- method.setRequestEntity(new ByteArrayRequestEntity(content));
- int code = execute(cluster, method, headers, path);
- headers = method.getResponseHeaders();
- content = method.getResponseBody();
- return new Response(code, headers, content);
- } finally {
- method.releaseConnection();
- }
- }
-
- /**
- * Send a DELETE request
- * @param path the path or URI
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response delete(String path) throws IOException {
- return delete(cluster, path);
- }
-
- /**
- * Send a DELETE request
- * @param cluster the cluster definition
- * @param path the path or URI
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response delete(Cluster cluster, String path) throws IOException {
- DeleteMethod method = new DeleteMethod();
- try {
- int code = execute(cluster, method, null, path);
- Header[] headers = method.getResponseHeaders();
- byte[] content = method.getResponseBody();
- return new Response(code, headers, content);
- } finally {
- method.releaseConnection();
- }
- }
-}
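
A short usage sketch of the extra-header and PUT plumbing above (path,
header name, and payload are illustrative; Cluster is the class removed
below):

    Client client = new Client(new Cluster().add("gateway.example.com", 8080));
    // Extra headers apply to every request until removed explicitly.
    client.addExtraHeader("X-Trace-Id", "debug-42");
    Response r = client.put("/mytable/schema", Constants.MIMETYPE_XML,
        Bytes.toBytes("<TableSchema name=\"mytable\"/>"));
    System.out.println("PUT returned " + r.getCode());
    client.removeExtraHeader("X-Trace-Id");
    client.shutdown();
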
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
deleted file mode 100644
index a2de329..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.client;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-
-/**
- * A list of 'host:port' addresses of HTTP servers operating as a single
- * entity, for example multiple redundant web service gateways.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class Cluster {
- protected List<String> nodes =
- Collections.synchronizedList(new ArrayList<String>());
- protected String lastHost;
-
- /**
- * Constructor
- */
- public Cluster() {}
-
- /**
- * Constructor
- * @param nodes a list of service locations, in 'host:port' format
- */
- public Cluster(List<String> nodes) {
- this.nodes.addAll(nodes); // the parameter shadows the field; qualify with 'this'
- }
-
- /**
- * @return true if no locations have been added, false otherwise
- */
- public boolean isEmpty() {
- return nodes.isEmpty();
- }
-
- /**
- * Add a node to the cluster
- * @param node the service location in 'host:port' format
- */
- public Cluster add(String node) {
- nodes.add(node);
- return this;
- }
-
- /**
- * Add a node to the cluster
- * @param name host name
- * @param port service port
- */
- public Cluster add(String name, int port) {
- StringBuilder sb = new StringBuilder();
- sb.append(name);
- sb.append(':');
- sb.append(port);
- return add(sb.toString());
- }
-
- /**
- * Remove a node from the cluster
- * @param node the service location in 'host:port' format
- */
- public Cluster remove(String node) {
- nodes.remove(node);
- return this;
- }
-
- /**
- * Remove a node from the cluster
- * @param name host name
- * @param port service port
- */
- public Cluster remove(String name, int port) {
- StringBuilder sb = new StringBuilder();
- sb.append(name);
- sb.append(':');
- sb.append(port);
- return remove(sb.toString());
- }
-}
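
Since add(...) and remove(...) both return this, cluster definitions can be
built fluently; a small sketch (hostnames illustrative):

    Cluster cluster = new Cluster()
        .add("gw1.example.com", 8080)
        .add("gw2.example.com", 8080);
    // Take a gateway out of rotation without rebuilding the list:
    cluster.remove("gw2.example.com", 8080);
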
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
deleted file mode 100644
index 23da9c8..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
+++ /dev/null
@@ -1,390 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.client;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Unmarshaller;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.rest.Constants;
-import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;
-import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel;
-import org.apache.hadoop.hbase.rest.model.TableListModel;
-import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
-import org.apache.hadoop.hbase.rest.model.VersionModel;
-import org.apache.hadoop.hbase.util.Bytes;
-
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class RemoteAdmin {
-
- final Client client;
- final Configuration conf;
- final String accessToken;
- final int maxRetries;
- final long sleepTime;
-
- // This unmarshaller handles the /version/cluster resource, which does not
- // support protobufs and therefore must be requested and interpreted as XML.
- private static volatile Unmarshaller versionClusterUnmarshaller;
-
- /**
- * Constructor
- *
- * @param client the REST client to wrap
- * @param conf the HBase configuration
- */
- public RemoteAdmin(Client client, Configuration conf) {
- this(client, conf, null);
- }
-
- static Unmarshaller getUnmarshaller() throws JAXBException {
- if (versionClusterUnmarshaller == null) {
- RemoteAdmin.versionClusterUnmarshaller = JAXBContext.newInstance(
- StorageClusterVersionModel.class).createUnmarshaller();
- }
- return RemoteAdmin.versionClusterUnmarshaller;
- }
-
- /**
- * Constructor
- * @param client the REST client to wrap
- * @param conf the HBase configuration
- * @param accessToken optional token prepended as the first path segment
- */
- public RemoteAdmin(Client client, Configuration conf, String accessToken) {
- this.client = client;
- this.conf = conf;
- this.accessToken = accessToken;
- this.maxRetries = conf.getInt("hbase.rest.client.max.retries", 10);
- this.sleepTime = conf.getLong("hbase.rest.client.sleep", 1000);
- }
-
- /**
- * @param tableName name of table to check
- * @return true if all regions of the table are available
- * @throws IOException if a remote or network exception occurs
- */
- public boolean isTableAvailable(String tableName) throws IOException {
- return isTableAvailable(Bytes.toBytes(tableName));
- }
-
- /**
- * @return a VersionModel describing the REST API's version
- * @throws IOException
- * if the endpoint does not exist, there is a timeout, or some other
- * general failure mode
- */
- public VersionModel getRestVersion() throws IOException {
-
- StringBuilder path = new StringBuilder();
- path.append('/');
- if (accessToken != null) {
- path.append(accessToken);
- path.append('/');
- }
-
- path.append("version/rest");
-
- int code = 0;
- for (int i = 0; i < maxRetries; i++) {
- Response response = client.get(path.toString(),
- Constants.MIMETYPE_PROTOBUF);
- code = response.getCode();
- switch (code) {
- case 200:
- VersionModel v = new VersionModel();
- return (VersionModel) v.getObjectFromMessage(response.getBody());
- case 404:
- throw new IOException("REST version not found");
- case 509:
- try {
- Thread.sleep(sleepTime);
- } catch (InterruptedException e) {
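- // ignore the interrupt and fall through to the next retry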
- }
- break;
- default:
- throw new IOException("get request to " + path.toString()
- + " returned " + code);
- }
- }
- throw new IOException("get request to " + path.toString() + " timed out");
- }
-
- /**
- * @return a StorageClusterStatusModel describing the cluster's status
- * @throws IOException if the endpoint does not exist, there is a timeout,
- * or some other general failure mode
- */
- public StorageClusterStatusModel getClusterStatus() throws IOException {
-
- StringBuilder path = new StringBuilder();
- path.append('/');
- if (accessToken != null) {
- path.append(accessToken);
- path.append('/');
- }
-
- path.append("status/cluster");
-
- int code = 0;
- for (int i = 0; i < maxRetries; i++) {
- Response response = client.get(path.toString(),
- Constants.MIMETYPE_PROTOBUF);
- code = response.getCode();
- switch (code) {
- case 200:
- StorageClusterStatusModel s = new StorageClusterStatusModel();
- return (StorageClusterStatusModel) s.getObjectFromMessage(response
- .getBody());
- case 404:
- throw new IOException("Cluster version not found");
- case 509:
- try {
- Thread.sleep(sleepTime);
- } catch (InterruptedException e) {
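- // ignore the interrupt and fall through to the next retry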
- }
- break;
- default:
- throw new IOException("get request to " + path + " returned " + code);
- }
- }
- throw new IOException("get request to " + path + " timed out");
- }
-
- /**
- * @return a StorageClusterVersionModel describing the cluster's version
- * @throws IOException
- * if the endpoint does not exist, there is a timeout, or some other
- * general failure mode
- */
- public StorageClusterVersionModel getClusterVersion() throws IOException {
-
- StringBuilder path = new StringBuilder();
- path.append('/');
- if (accessToken != null) {
- path.append(accessToken);
- path.append('/');
- }
-
- path.append("version/cluster");
-
- int code = 0;
- for (int i = 0; i < maxRetries; i++) {
- Response response = client.get(path.toString(), Constants.MIMETYPE_XML);
- code = response.getCode();
- switch (code) {
- case 200:
- try {
- return (StorageClusterVersionModel) getUnmarshaller().unmarshal(
- new ByteArrayInputStream(response.getBody()));
- } catch (JAXBException jaxbe) {
- throw new IOException(
- "Issue parsing StorageClusterVersionModel object in XML form: "
- + jaxbe.getLocalizedMessage());
- }
- case 404:
- throw new IOException("Cluster version not found");
- case 509:
- try {
- Thread.sleep(sleepTime);
- } catch (InterruptedException e) {
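- // ignore the interrupt and fall through to the next retry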
- }
- break;
- default:
- throw new IOException(path.toString() + " request returned " + code);
- }
- }
- throw new IOException("get request to " + path.toString()
- + " request timed out");
- }
-
- /**
- * @param tableName name of table to check
- * @return true if all regions of the table are available
- * @throws IOException if a remote or network exception occurs
- */
- public boolean isTableAvailable(byte[] tableName) throws IOException {
- StringBuilder path = new StringBuilder();
- path.append('/');
- if (accessToken != null) {
- path.append(accessToken);
- path.append('/');
- }
- path.append(Bytes.toStringBinary(tableName));
- path.append('/');
- path.append("exists");
- int code = 0;
- for (int i = 0; i < maxRetries; i++) {
- Response response = client.get(path.toString(), Constants.MIMETYPE_PROTOBUF);
- code = response.getCode();
- switch (code) {
- case 200:
- return true;
- case 404:
- return false;
- case 509:
- try {
- Thread.sleep(sleepTime);
- } catch (InterruptedException e) { }
- break;
- default:
- throw new IOException("get request to " + path.toString() + " returned " + code);
- }
- }
- throw new IOException("get request to " + path.toString() + " timed out");
- }
-
- /**
- * Creates a new table.
- * @param desc table descriptor for table
- * @throws IOException if a remote or network exception occurs
- */
- public void createTable(HTableDescriptor desc)
- throws IOException {
- TableSchemaModel model = new TableSchemaModel(desc);
- StringBuilder path = new StringBuilder();
- path.append('/');
- if (accessToken != null) {
- path.append(accessToken);
- path.append('/');
- }
- path.append(desc.getTableName());
- path.append('/');
- path.append("schema");
- int code = 0;
- for (int i = 0; i < maxRetries; i++) {
- Response response = client.put(path.toString(), Constants.MIMETYPE_PROTOBUF,
- model.createProtobufOutput());
- code = response.getCode();
- switch (code) {
- case 201:
- return;
- case 509:
- try {
- Thread.sleep(sleepTime);
- } catch (InterruptedException e) { }
- break;
- default:
- throw new IOException("create request to " + path.toString() + " returned " + code);
- }
- }
- throw new IOException("create request to " + path.toString() + " timed out");
- }
-
- /**
- * Deletes a table.
- * @param tableName name of table to delete
- * @throws IOException if a remote or network exception occurs
- */
- public void deleteTable(final String tableName) throws IOException {
- deleteTable(Bytes.toBytes(tableName));
- }
-
- /**
- * Deletes a table.
- * @param tableName name of table to delete
- * @throws IOException if a remote or network exception occurs
- */
- public void deleteTable(final byte [] tableName) throws IOException {
- StringBuilder path = new StringBuilder();
- path.append('/');
- if (accessToken != null) {
- path.append(accessToken);
- path.append('/');
- }
- path.append(Bytes.toStringBinary(tableName));
- path.append('/');
- path.append("schema");
- int code = 0;
- for (int i = 0; i < maxRetries; i++) {
- Response response = client.delete(path.toString());
- code = response.getCode();
- switch (code) {
- case 200:
- return;
- case 509:
- try {
- Thread.sleep(sleepTime);
- } catch (InterruptedException e) { }
- break;
- default:
- throw new IOException("delete request to " + path.toString() + " returned " + code);
- }
- }
- throw new IOException("delete request to " + path.toString() + " timed out");
- }
-
- /**
- * @return a TableListModel listing the cluster's tables
- * @throws IOException
- * if the endpoint does not exist, there is a timeout, or some other
- * general failure mode
- */
- public TableListModel getTableList() throws IOException {
-
- StringBuilder path = new StringBuilder();
- path.append('/');
- if (accessToken != null) {
- path.append(accessToken);
- path.append('/');
- }
-
- int code = 0;
- for (int i = 0; i < maxRetries; i++) {
- Response response = client.get(path.toString(),
- Constants.MIMETYPE_PROTOBUF);
- code = response.getCode();
- switch (code) {
- case 200:
- TableListModel t = new TableListModel();
- return (TableListModel) t.getObjectFromMessage(response.getBody());
- case 404:
- throw new IOException("Table list not found");
- case 509:
- try {
- Thread.sleep(sleepTime);
- } catch (InterruptedException e) {
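- // ignore the interrupt and fall through to the next retry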
- }
- break;
- default:
- throw new IOException("get request to " + path.toString()
- + " request returned " + code);
- }
- }
- throw new IOException("get request to " + path.toString()
- + " request timed out");
- }
-}
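
All of the admin calls above share the same retry loop: a 509 response is
retried up to hbase.rest.client.max.retries times, sleeping
hbase.rest.client.sleep milliseconds between attempts. A minimal usage
sketch (table name and settings are illustrative; 'client' is a configured
rest Client as in the earlier sketches):

    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.rest.client.max.retries", 5);
    conf.setLong("hbase.rest.client.sleep", 500);
    RemoteAdmin admin = new RemoteAdmin(client, conf);
    if (!admin.isTableAvailable("mytable")) {
      admin.createTable(new HTableDescriptor(TableName.valueOf("mytable")));
    }
    System.out.println(admin.getRestVersion());
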
[12/38] HBASE-12197 Move rest to it's on module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ColumnSchemaMessage.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ColumnSchemaMessage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ColumnSchemaMessage.java
deleted file mode 100644
index f5f6a95..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ColumnSchemaMessage.java
+++ /dev/null
@@ -1,1904 +0,0 @@
-// Generated by the protocol buffer compiler. DO NOT EDIT!
-// source: ColumnSchemaMessage.proto
-
-package org.apache.hadoop.hbase.rest.protobuf.generated;
-
-public final class ColumnSchemaMessage {
- private ColumnSchemaMessage() {}
- public static void registerAllExtensions(
- com.google.protobuf.ExtensionRegistry registry) {
- }
- public interface ColumnSchemaOrBuilder
- extends com.google.protobuf.MessageOrBuilder {
-
- // optional string name = 1;
- /**
- * <code>optional string name = 1;</code>
- */
- boolean hasName();
- /**
- * <code>optional string name = 1;</code>
- */
- java.lang.String getName();
- /**
- * <code>optional string name = 1;</code>
- */
- com.google.protobuf.ByteString
- getNameBytes();
-
- // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute>
- getAttrsList();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute getAttrs(int index);
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- int getAttrsCount();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.AttributeOrBuilder>
- getAttrsOrBuilderList();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.AttributeOrBuilder getAttrsOrBuilder(
- int index);
-
- // optional int32 ttl = 3;
- /**
- * <code>optional int32 ttl = 3;</code>
- *
- * <pre>
- * optional helpful encodings of commonly used attributes
- * </pre>
- */
- boolean hasTtl();
- /**
- * <code>optional int32 ttl = 3;</code>
- *
- * <pre>
- * optional helpful encodings of commonly used attributes
- * </pre>
- */
- int getTtl();
-
- // optional int32 maxVersions = 4;
- /**
- * <code>optional int32 maxVersions = 4;</code>
- */
- boolean hasMaxVersions();
- /**
- * <code>optional int32 maxVersions = 4;</code>
- */
- int getMaxVersions();
-
- // optional string compression = 5;
- /**
- * <code>optional string compression = 5;</code>
- */
- boolean hasCompression();
- /**
- * <code>optional string compression = 5;</code>
- */
- java.lang.String getCompression();
- /**
- * <code>optional string compression = 5;</code>
- */
- com.google.protobuf.ByteString
- getCompressionBytes();
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema}
- */
- public static final class ColumnSchema extends
- com.google.protobuf.GeneratedMessage
- implements ColumnSchemaOrBuilder {
- // Use ColumnSchema.newBuilder() to construct.
- private ColumnSchema(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private ColumnSchema(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final ColumnSchema defaultInstance;
- public static ColumnSchema getDefaultInstance() {
- return defaultInstance;
- }
-
- public ColumnSchema getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private ColumnSchema(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 10: {
- bitField0_ |= 0x00000001;
- name_ = input.readBytes();
- break;
- }
- case 18: {
- if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
- attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute>();
- mutable_bitField0_ |= 0x00000002;
- }
- attrs_.add(input.readMessage(org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.PARSER, extensionRegistry));
- break;
- }
- case 24: {
- bitField0_ |= 0x00000002;
- ttl_ = input.readInt32();
- break;
- }
- case 32: {
- bitField0_ |= 0x00000004;
- maxVersions_ = input.readInt32();
- break;
- }
- case 42: {
- bitField0_ |= 0x00000008;
- compression_ = input.readBytes();
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
- attrs_ = java.util.Collections.unmodifiableList(attrs_);
- }
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.class, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder.class);
- }
-
- public static com.google.protobuf.Parser<ColumnSchema> PARSER =
- new com.google.protobuf.AbstractParser<ColumnSchema>() {
- public ColumnSchema parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new ColumnSchema(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<ColumnSchema> getParserForType() {
- return PARSER;
- }
-
- public interface AttributeOrBuilder
- extends com.google.protobuf.MessageOrBuilder {
-
- // required string name = 1;
- /**
- * <code>required string name = 1;</code>
- */
- boolean hasName();
- /**
- * <code>required string name = 1;</code>
- */
- java.lang.String getName();
- /**
- * <code>required string name = 1;</code>
- */
- com.google.protobuf.ByteString
- getNameBytes();
-
- // required string value = 2;
- /**
- * <code>required string value = 2;</code>
- */
- boolean hasValue();
- /**
- * <code>required string value = 2;</code>
- */
- java.lang.String getValue();
- /**
- * <code>required string value = 2;</code>
- */
- com.google.protobuf.ByteString
- getValueBytes();
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute}
- */
- public static final class Attribute extends
- com.google.protobuf.GeneratedMessage
- implements AttributeOrBuilder {
- // Use Attribute.newBuilder() to construct.
- private Attribute(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private Attribute(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final Attribute defaultInstance;
- public static Attribute getDefaultInstance() {
- return defaultInstance;
- }
-
- public Attribute getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private Attribute(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 10: {
- bitField0_ |= 0x00000001;
- name_ = input.readBytes();
- break;
- }
- case 18: {
- bitField0_ |= 0x00000002;
- value_ = input.readBytes();
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_Attribute_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_Attribute_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.class, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder.class);
- }
-
- public static com.google.protobuf.Parser<Attribute> PARSER =
- new com.google.protobuf.AbstractParser<Attribute>() {
- public Attribute parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new Attribute(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<Attribute> getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- // required string name = 1;
- public static final int NAME_FIELD_NUMBER = 1;
- private java.lang.Object name_;
- /**
- * <code>required string name = 1;</code>
- */
- public boolean hasName() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>required string name = 1;</code>
- */
- public java.lang.String getName() {
- java.lang.Object ref = name_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- name_ = s;
- }
- return s;
- }
- }
- /**
- * <code>required string name = 1;</code>
- */
- public com.google.protobuf.ByteString
- getNameBytes() {
- java.lang.Object ref = name_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- name_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- // required string value = 2;
- public static final int VALUE_FIELD_NUMBER = 2;
- private java.lang.Object value_;
- /**
- * <code>required string value = 2;</code>
- */
- public boolean hasValue() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>required string value = 2;</code>
- */
- public java.lang.String getValue() {
- java.lang.Object ref = value_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- value_ = s;
- }
- return s;
- }
- }
- /**
- * <code>required string value = 2;</code>
- */
- public com.google.protobuf.ByteString
- getValueBytes() {
- java.lang.Object ref = value_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- value_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- private void initFields() {
- name_ = "";
- value_ = "";
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized != -1) return isInitialized == 1;
-
- if (!hasName()) {
- memoizedIsInitialized = 0;
- return false;
- }
- if (!hasValue()) {
- memoizedIsInitialized = 0;
- return false;
- }
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBytes(1, getNameBytes());
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeBytes(2, getValueBytes());
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(1, getNameBytes());
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(2, getValueBytes());
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.AttributeOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_Attribute_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_Attribute_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.class, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder.class);
- }
-
- // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- name_ = "";
- bitField0_ = (bitField0_ & ~0x00000001);
- value_ = "";
- bitField0_ = (bitField0_ & ~0x00000002);
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_Attribute_descriptor;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute getDefaultInstanceForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.getDefaultInstance();
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute build() {
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute buildPartial() {
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute result = new org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.name_ = name_;
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
- to_bitField0_ |= 0x00000002;
- }
- result.value_ = value_;
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute) {
- return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute other) {
- if (other == org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.getDefaultInstance()) return this;
- if (other.hasName()) {
- bitField0_ |= 0x00000001;
- name_ = other.name_;
- onChanged();
- }
- if (other.hasValue()) {
- bitField0_ |= 0x00000002;
- value_ = other.value_;
- onChanged();
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- if (!hasName()) {
-
- return false;
- }
- if (!hasValue()) {
-
- return false;
- }
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- // required string name = 1;
- private java.lang.Object name_ = "";
- /**
- * <code>required string name = 1;</code>
- */
- public boolean hasName() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>required string name = 1;</code>
- */
- public java.lang.String getName() {
- java.lang.Object ref = name_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- name_ = s;
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- * <code>required string name = 1;</code>
- */
- public com.google.protobuf.ByteString
- getNameBytes() {
- java.lang.Object ref = name_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- name_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- * <code>required string name = 1;</code>
- */
- public Builder setName(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- name_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>required string name = 1;</code>
- */
- public Builder clearName() {
- bitField0_ = (bitField0_ & ~0x00000001);
- name_ = getDefaultInstance().getName();
- onChanged();
- return this;
- }
- /**
- * <code>required string name = 1;</code>
- */
- public Builder setNameBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- name_ = value;
- onChanged();
- return this;
- }
-
- // required string value = 2;
- private java.lang.Object value_ = "";
- /**
- * <code>required string value = 2;</code>
- */
- public boolean hasValue() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>required string value = 2;</code>
- */
- public java.lang.String getValue() {
- java.lang.Object ref = value_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- value_ = s;
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- * <code>required string value = 2;</code>
- */
- public com.google.protobuf.ByteString
- getValueBytes() {
- java.lang.Object ref = value_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- value_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- * <code>required string value = 2;</code>
- */
- public Builder setValue(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000002;
- value_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>required string value = 2;</code>
- */
- public Builder clearValue() {
- bitField0_ = (bitField0_ & ~0x00000002);
- value_ = getDefaultInstance().getValue();
- onChanged();
- return this;
- }
- /**
- * <code>required string value = 2;</code>
- */
- public Builder setValueBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000002;
- value_ = value;
- onChanged();
- return this;
- }
-
- // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute)
- }
-
- static {
- defaultInstance = new Attribute(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute)
- }
-
- private int bitField0_;
- // optional string name = 1;
- public static final int NAME_FIELD_NUMBER = 1;
- private java.lang.Object name_;
- /**
- * <code>optional string name = 1;</code>
- */
- public boolean hasName() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public java.lang.String getName() {
- java.lang.Object ref = name_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- name_ = s;
- }
- return s;
- }
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public com.google.protobuf.ByteString
- getNameBytes() {
- java.lang.Object ref = name_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- name_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
- public static final int ATTRS_FIELD_NUMBER = 2;
- private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute> attrs_;
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute> getAttrsList() {
- return attrs_;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.AttributeOrBuilder>
- getAttrsOrBuilderList() {
- return attrs_;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- public int getAttrsCount() {
- return attrs_.size();
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute getAttrs(int index) {
- return attrs_.get(index);
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.AttributeOrBuilder getAttrsOrBuilder(
- int index) {
- return attrs_.get(index);
- }
-
- // optional int32 ttl = 3;
- public static final int TTL_FIELD_NUMBER = 3;
- private int ttl_;
- /**
- * <code>optional int32 ttl = 3;</code>
- *
- * <pre>
- * optional helpful encodings of commonly used attributes
- * </pre>
- */
- public boolean hasTtl() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional int32 ttl = 3;</code>
- *
- * <pre>
- * optional helpful encodings of commonly used attributes
- * </pre>
- */
- public int getTtl() {
- return ttl_;
- }
-
- // optional int32 maxVersions = 4;
- public static final int MAXVERSIONS_FIELD_NUMBER = 4;
- private int maxVersions_;
- /**
- * <code>optional int32 maxVersions = 4;</code>
- */
- public boolean hasMaxVersions() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
- }
- /**
- * <code>optional int32 maxVersions = 4;</code>
- */
- public int getMaxVersions() {
- return maxVersions_;
- }
-
- // optional string compression = 5;
- public static final int COMPRESSION_FIELD_NUMBER = 5;
- private java.lang.Object compression_;
- /**
- * <code>optional string compression = 5;</code>
- */
- public boolean hasCompression() {
- return ((bitField0_ & 0x00000008) == 0x00000008);
- }
- /**
- * <code>optional string compression = 5;</code>
- */
- public java.lang.String getCompression() {
- java.lang.Object ref = compression_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- compression_ = s;
- }
- return s;
- }
- }
- /**
- * <code>optional string compression = 5;</code>
- */
- public com.google.protobuf.ByteString
- getCompressionBytes() {
- java.lang.Object ref = compression_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- compression_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- private void initFields() {
- name_ = "";
- attrs_ = java.util.Collections.emptyList();
- ttl_ = 0;
- maxVersions_ = 0;
- compression_ = "";
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized != -1) return isInitialized == 1;
-
- for (int i = 0; i < getAttrsCount(); i++) {
- if (!getAttrs(i).isInitialized()) {
- memoizedIsInitialized = 0;
- return false;
- }
- }
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBytes(1, getNameBytes());
- }
- for (int i = 0; i < attrs_.size(); i++) {
- output.writeMessage(2, attrs_.get(i));
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeInt32(3, ttl_);
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- output.writeInt32(4, maxVersions_);
- }
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
- output.writeBytes(5, getCompressionBytes());
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(1, getNameBytes());
- }
- for (int i = 0; i < attrs_.size(); i++) {
- size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(2, attrs_.get(i));
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt32Size(3, ttl_);
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt32Size(4, maxVersions_);
- }
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(5, getCompressionBytes());
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchemaOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.class, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder.class);
- }
-
- // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- getAttrsFieldBuilder();
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- name_ = "";
- bitField0_ = (bitField0_ & ~0x00000001);
- if (attrsBuilder_ == null) {
- attrs_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000002);
- } else {
- attrsBuilder_.clear();
- }
- ttl_ = 0;
- bitField0_ = (bitField0_ & ~0x00000004);
- maxVersions_ = 0;
- bitField0_ = (bitField0_ & ~0x00000008);
- compression_ = "";
- bitField0_ = (bitField0_ & ~0x00000010);
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_descriptor;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema getDefaultInstanceForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.getDefaultInstance();
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema build() {
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema buildPartial() {
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema result = new org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.name_ = name_;
- if (attrsBuilder_ == null) {
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- attrs_ = java.util.Collections.unmodifiableList(attrs_);
- bitField0_ = (bitField0_ & ~0x00000002);
- }
- result.attrs_ = attrs_;
- } else {
- result.attrs_ = attrsBuilder_.build();
- }
- if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
- to_bitField0_ |= 0x00000002;
- }
- result.ttl_ = ttl_;
- if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
- to_bitField0_ |= 0x00000004;
- }
- result.maxVersions_ = maxVersions_;
- if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
- to_bitField0_ |= 0x00000008;
- }
- result.compression_ = compression_;
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema) {
- return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema other) {
- if (other == org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.getDefaultInstance()) return this;
- if (other.hasName()) {
- bitField0_ |= 0x00000001;
- name_ = other.name_;
- onChanged();
- }
- if (attrsBuilder_ == null) {
- if (!other.attrs_.isEmpty()) {
- if (attrs_.isEmpty()) {
- attrs_ = other.attrs_;
- bitField0_ = (bitField0_ & ~0x00000002);
- } else {
- ensureAttrsIsMutable();
- attrs_.addAll(other.attrs_);
- }
- onChanged();
- }
- } else {
- if (!other.attrs_.isEmpty()) {
- if (attrsBuilder_.isEmpty()) {
- attrsBuilder_.dispose();
- attrsBuilder_ = null;
- attrs_ = other.attrs_;
- bitField0_ = (bitField0_ & ~0x00000002);
- attrsBuilder_ =
- com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
- getAttrsFieldBuilder() : null;
- } else {
- attrsBuilder_.addAllMessages(other.attrs_);
- }
- }
- }
- if (other.hasTtl()) {
- setTtl(other.getTtl());
- }
- if (other.hasMaxVersions()) {
- setMaxVersions(other.getMaxVersions());
- }
- if (other.hasCompression()) {
- bitField0_ |= 0x00000010;
- compression_ = other.compression_;
- onChanged();
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- for (int i = 0; i < getAttrsCount(); i++) {
- if (!getAttrs(i).isInitialized()) {
-
- return false;
- }
- }
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- // optional string name = 1;
- private java.lang.Object name_ = "";
- /**
- * <code>optional string name = 1;</code>
- */
- public boolean hasName() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public java.lang.String getName() {
- java.lang.Object ref = name_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- name_ = s;
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public com.google.protobuf.ByteString
- getNameBytes() {
- java.lang.Object ref = name_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- name_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public Builder setName(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- name_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public Builder clearName() {
- bitField0_ = (bitField0_ & ~0x00000001);
- name_ = getDefaultInstance().getName();
- onChanged();
- return this;
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public Builder setNameBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- name_ = value;
- onChanged();
- return this;
- }
-
- // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
- private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute> attrs_ =
- java.util.Collections.emptyList();
- private void ensureAttrsIsMutable() {
- if (!((bitField0_ & 0x00000002) == 0x00000002)) {
- attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute>(attrs_);
- bitField0_ |= 0x00000002;
- }
- }
-
- private com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.AttributeOrBuilder> attrsBuilder_;
-
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute> getAttrsList() {
- if (attrsBuilder_ == null) {
- return java.util.Collections.unmodifiableList(attrs_);
- } else {
- return attrsBuilder_.getMessageList();
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- public int getAttrsCount() {
- if (attrsBuilder_ == null) {
- return attrs_.size();
- } else {
- return attrsBuilder_.getCount();
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute getAttrs(int index) {
- if (attrsBuilder_ == null) {
- return attrs_.get(index);
- } else {
- return attrsBuilder_.getMessage(index);
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- public Builder setAttrs(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute value) {
- if (attrsBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureAttrsIsMutable();
- attrs_.set(index, value);
- onChanged();
- } else {
- attrsBuilder_.setMessage(index, value);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- public Builder setAttrs(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder builderForValue) {
- if (attrsBuilder_ == null) {
- ensureAttrsIsMutable();
- attrs_.set(index, builderForValue.build());
- onChanged();
- } else {
- attrsBuilder_.setMessage(index, builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- public Builder addAttrs(org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute value) {
- if (attrsBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureAttrsIsMutable();
- attrs_.add(value);
- onChanged();
- } else {
- attrsBuilder_.addMessage(value);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- public Builder addAttrs(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute value) {
- if (attrsBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureAttrsIsMutable();
- attrs_.add(index, value);
- onChanged();
- } else {
- attrsBuilder_.addMessage(index, value);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- public Builder addAttrs(
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder builderForValue) {
- if (attrsBuilder_ == null) {
- ensureAttrsIsMutable();
- attrs_.add(builderForValue.build());
- onChanged();
- } else {
- attrsBuilder_.addMessage(builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- public Builder addAttrs(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder builderForValue) {
- if (attrsBuilder_ == null) {
- ensureAttrsIsMutable();
- attrs_.add(index, builderForValue.build());
- onChanged();
- } else {
- attrsBuilder_.addMessage(index, builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- public Builder addAllAttrs(
- java.lang.Iterable<? extends org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute> values) {
- if (attrsBuilder_ == null) {
- ensureAttrsIsMutable();
- super.addAll(values, attrs_);
- onChanged();
- } else {
- attrsBuilder_.addAllMessages(values);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- public Builder clearAttrs() {
- if (attrsBuilder_ == null) {
- attrs_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000002);
- onChanged();
- } else {
- attrsBuilder_.clear();
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- public Builder removeAttrs(int index) {
- if (attrsBuilder_ == null) {
- ensureAttrsIsMutable();
- attrs_.remove(index);
- onChanged();
- } else {
- attrsBuilder_.remove(index);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder getAttrsBuilder(
- int index) {
- return getAttrsFieldBuilder().getBuilder(index);
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.AttributeOrBuilder getAttrsOrBuilder(
- int index) {
- if (attrsBuilder_ == null) {
- return attrs_.get(index);
- } else {
- return attrsBuilder_.getMessageOrBuilder(index);
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.AttributeOrBuilder>
- getAttrsOrBuilderList() {
- if (attrsBuilder_ != null) {
- return attrsBuilder_.getMessageOrBuilderList();
- } else {
- return java.util.Collections.unmodifiableList(attrs_);
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder addAttrsBuilder() {
- return getAttrsFieldBuilder().addBuilder(
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.getDefaultInstance());
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder addAttrsBuilder(
- int index) {
- return getAttrsFieldBuilder().addBuilder(
- index, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.getDefaultInstance());
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;</code>
- */
- public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder>
- getAttrsBuilderList() {
- return getAttrsFieldBuilder().getBuilderList();
- }
- private com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.AttributeOrBuilder>
- getAttrsFieldBuilder() {
- if (attrsBuilder_ == null) {
- attrsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.AttributeOrBuilder>(
- attrs_,
- ((bitField0_ & 0x00000002) == 0x00000002),
- getParentForChildren(),
- isClean());
- attrs_ = null;
- }
- return attrsBuilder_;
- }
-
- // optional int32 ttl = 3;
- private int ttl_ ;
- /**
- * <code>optional int32 ttl = 3;</code>
- *
- * <pre>
- * optional helpful encodings of commonly used attributes
- * </pre>
- */
- public boolean hasTtl() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
- }
- /**
- * <code>optional int32 ttl = 3;</code>
- *
- * <pre>
- * optional helpful encodings of commonly used attributes
- * </pre>
- */
- public int getTtl() {
- return ttl_;
- }
- /**
- * <code>optional int32 ttl = 3;</code>
- *
- * <pre>
- * optional helpful encodings of commonly used attributes
- * </pre>
- */
- public Builder setTtl(int value) {
- bitField0_ |= 0x00000004;
- ttl_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional int32 ttl = 3;</code>
- *
- * <pre>
- * optional helpful encodings of commonly used attributes
- * </pre>
- */
- public Builder clearTtl() {
- bitField0_ = (bitField0_ & ~0x00000004);
- ttl_ = 0;
- onChanged();
- return this;
- }
-
- // optional int32 maxVersions = 4;
- private int maxVersions_ ;
- /**
- * <code>optional int32 maxVersions = 4;</code>
- */
- public boolean hasMaxVersions() {
- return ((bitField0_ & 0x00000008) == 0x00000008);
- }
- /**
- * <code>optional int32 maxVersions = 4;</code>
- */
- public int getMaxVersions() {
- return maxVersions_;
- }
- /**
- * <code>optional int32 maxVersions = 4;</code>
- */
- public Builder setMaxVersions(int value) {
- bitField0_ |= 0x00000008;
- maxVersions_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional int32 maxVersions = 4;</code>
- */
- public Builder clearMaxVersions() {
- bitField0_ = (bitField0_ & ~0x00000008);
- maxVersions_ = 0;
- onChanged();
- return this;
- }
-
- // optional string compression = 5;
- private java.lang.Object compression_ = "";
- /**
- * <code>optional string compression = 5;</code>
- */
- public boolean hasCompression() {
- return ((bitField0_ & 0x00000010) == 0x00000010);
- }
- /**
- * <code>optional string compression = 5;</code>
- */
- public java.lang.String getCompression() {
- java.lang.Object ref = compression_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- compression_ = s;
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- * <code>optional string compression = 5;</code>
- */
- public com.google.protobuf.ByteString
- getCompressionBytes() {
- java.lang.Object ref = compression_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- compression_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- * <code>optional string compression = 5;</code>
- */
- public Builder setCompression(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000010;
- compression_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional string compression = 5;</code>
- */
- public Builder clearCompression() {
- bitField0_ = (bitField0_ & ~0x00000010);
- compression_ = getDefaultInstance().getCompression();
- onChanged();
- return this;
- }
- /**
- * <code>optional string compression = 5;</code>
- */
- public Builder setCompressionBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000010;
- compression_ = value;
- onChanged();
- return this;
- }
-
- // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema)
- }
-
- static {
- defaultInstance = new ColumnSchema(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema)
- }
-
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_Attribute_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_Attribute_fieldAccessorTable;
-
- public static com.google.protobuf.Descriptors.FileDescriptor
- getDescriptor() {
- return descriptor;
- }
- private static com.google.protobuf.Descriptors.FileDescriptor
- descriptor;
- static {
- java.lang.String[] descriptorData = {
- "\n\031ColumnSchemaMessage.proto\022/org.apache." +
- "hadoop.hbase.rest.protobuf.generated\"\325\001\n" +
- "\014ColumnSchema\022\014\n\004name\030\001 \001(\t\022V\n\005attrs\030\002 \003" +
- "(\0132G.org.apache.hadoop.hbase.rest.protob" +
- "uf.generated.ColumnSchema.Attribute\022\013\n\003t" +
- "tl\030\003 \001(\005\022\023\n\013maxVersions\030\004 \001(\005\022\023\n\013compres" +
- "sion\030\005 \001(\t\032(\n\tAttribute\022\014\n\004name\030\001 \002(\t\022\r\n" +
- "\005value\030\002 \002(\t"
- };
- com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
- new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
- public com.google.protobuf.ExtensionRegistry assignDescriptors(
- com.google.protobuf.Descriptors.FileDescriptor root) {
- descriptor = root;
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_descriptor =
- getDescriptor().getMessageTypes().get(0);
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_descriptor,
- new java.lang.String[] { "Name", "Attrs", "Ttl", "MaxVersions", "Compression", });
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_Attribute_descriptor =
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_descriptor.getNestedTypes().get(0);
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_Attribute_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_Attribute_descriptor,
- new java.lang.String[] { "Name", "Value", });
- return null;
- }
- };
- com.google.protobuf.Descriptors.FileDescriptor
- .internalBuildGeneratedFileFrom(descriptorData,
- new com.google.protobuf.Descriptors.FileDescriptor[] {
- }, assigner);
- }
-
- // @@protoc_insertion_point(outer_class_scope)
-}
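
The file removed above is protoc-generated Java for the ColumnSchema message; the same REST protobuf sources presumably reappear under the new hbase-rest module elsewhere in this series. As a reference for the generated API shown above, here is a minimal round-trip sketch; the concrete values ("cf", "GZ", "BLOCKSIZE", "16384") and the class name ColumnSchemaRoundTrip are illustrative assumptions, not taken from this diff.

import org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema;

public class ColumnSchemaRoundTrip {
  public static void main(String[] args) throws Exception {
    // Build a message with the generated builder. Attribute.name and
    // Attribute.value are required fields; the ColumnSchema fields are optional.
    ColumnSchema schema = ColumnSchema.newBuilder()
        .setName("cf")                       // illustrative family name
        .setTtl(86400)
        .setMaxVersions(3)
        .setCompression("GZ")
        .addAttrs(ColumnSchema.Attribute.newBuilder()
            .setName("BLOCKSIZE")
            .setValue("16384"))
        .build();

    // Serialize to the wire format, then parse it back with the
    // parseFrom(byte[]) overload declared above.
    byte[] wire = schema.toByteArray();
    ColumnSchema parsed = ColumnSchema.parseFrom(wire);
    System.out.println(parsed.getName() + " ttl=" + parsed.getTtl());
  }
}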
[20/38] HBASE-12197 Move rest to its own module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java
new file mode 100644
index 0000000..0f852ca
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java
@@ -0,0 +1,145 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.util.Iterator;
+
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestStorageClusterStatusModel extends TestModelBase<StorageClusterStatusModel> {
+
+ public TestStorageClusterStatusModel() throws Exception {
+ super(StorageClusterStatusModel.class);
+
+ AS_XML =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>" +
+ "<ClusterStatus averageLoad=\"1.0\" regions=\"2\" requests=\"0\">" +
+ "<DeadNodes/><LiveNodes>" +
+ "<Node heapSizeMB=\"128\" maxHeapSizeMB=\"1024\" name=\"test1\" requests=\"0\" startCode=\"1245219839331\">" +
+ "<Region currentCompactedKVs=\"1\" memstoreSizeMB=\"0\" name=\"aGJhc2U6cm9vdCwsMA==\" readRequestsCount=\"1\" " +
+ "rootIndexSizeKB=\"1\" storefileIndexSizeMB=\"0\" storefileSizeMB=\"0\" storefiles=\"1\" stores=\"1\" " +
+ "totalCompactingKVs=\"1\" totalStaticBloomSizeKB=\"1\" totalStaticIndexSizeKB=\"1\" writeRequestsCount=\"2\"/>" +
+ "</Node>" +
+ "<Node heapSizeMB=\"512\" maxHeapSizeMB=\"1024\" name=\"test2\" requests=\"0\" startCode=\"1245239331198\">" +
+ "<Region currentCompactedKVs=\"1\" memstoreSizeMB=\"0\" name=\"aGJhc2U6bWV0YSwsMTI0NjAwMDA0MzcyNA==\" " +
+ "readRequestsCount=\"1\" rootIndexSizeKB=\"1\" storefileIndexSizeMB=\"0\" storefileSizeMB=\"0\" " +
+ "storefiles=\"1\" stores=\"1\" totalCompactingKVs=\"1\" totalStaticBloomSizeKB=\"1\" " +
+ "totalStaticIndexSizeKB=\"1\" writeRequestsCount=\"2\"/></Node></LiveNodes></ClusterStatus>";
+
+ AS_PB =
+ "Cj8KBXRlc3QxEOO6i+eeJBgAIIABKIAIMicKDWhiYXNlOnJvb3QsLDAQARgBIAAoADAAOAFAAkgB" +
+ "UAFYAWABaAEKSwoFdGVzdDIQ/pKx8J4kGAAggAQogAgyMwoZaGJhc2U6bWV0YSwsMTI0NjAwMDA0" +
+ "MzcyNBABGAEgACgAMAA4AUACSAFQAVgBYAFoARgCIAApAAAAAAAA8D8=";
+
+
+ // Using Jackson breaks JSON backward compatibility for this representation,
+ // but the original representation was itself broken: it emitted only a
+ // single Node element, so the old format was never correct to begin with.
+ AS_JSON =
+ "{\"regions\":2,\"requests\":0,\"averageLoad\":1.0,\"LiveNodes\":[{\"name\":\"test1\"," +
+ "\"Region\":[{\"name\":\"aGJhc2U6cm9vdCwsMA==\",\"stores\":1,\"storefiles\":1," +
+ "\"storefileSizeMB\":0,\"memstoreSizeMB\":0,\"storefileIndexSizeMB\":0," +
+ "\"readRequestsCount\":1,\"writeRequestsCount\":2,\"rootIndexSizeKB\":1," +
+ "\"totalStaticIndexSizeKB\":1,\"totalStaticBloomSizeKB\":1,\"totalCompactingKVs\":1," +
+ "\"currentCompactedKVs\":1}],\"requests\":0,\"startCode\":1245219839331," +
+ "\"heapSizeMB\":128,\"maxHeapSizeMB\":1024},{\"name\":\"test2\"," +
+ "\"Region\":[{\"name\":\"aGJhc2U6bWV0YSwsMTI0NjAwMDA0MzcyNA==\",\"stores\":1," +
+ "\"storefiles\":1,\"storefileSizeMB\":0,\"memstoreSizeMB\":0,\"storefileIndexSizeMB\":0," +
+ "\"readRequestsCount\":1,\"writeRequestsCount\":2,\"rootIndexSizeKB\":1," +
+ "\"totalStaticIndexSizeKB\":1,\"totalStaticBloomSizeKB\":1,\"totalCompactingKVs\":1," +
+ "\"currentCompactedKVs\":1}],\"requests\":0,\"startCode\":1245239331198," +
+ "\"heapSizeMB\":512,\"maxHeapSizeMB\":1024}],\"DeadNodes\":[]}";
+ }
+
+ protected StorageClusterStatusModel buildTestModel() {
+ StorageClusterStatusModel model = new StorageClusterStatusModel();
+ model.setRegions(2);
+ model.setRequests(0);
+ model.setAverageLoad(1.0);
+ model.addLiveNode("test1", 1245219839331L, 128, 1024)
+ .addRegion(Bytes.toBytes("hbase:root,,0"), 1, 1, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1);
+ model.addLiveNode("test2", 1245239331198L, 512, 1024)
+ .addRegion(Bytes.toBytes(TableName.META_TABLE_NAME + ",,1246000043724"), 1, 1, 0, 0, 0,
+ 1, 2, 1, 1, 1, 1, 1);
+ return model;
+ }
+
+ protected void checkModel(StorageClusterStatusModel model) {
+ assertEquals(model.getRegions(), 2);
+ assertEquals(model.getRequests(), 0);
+ assertEquals(model.getAverageLoad(), 1.0);
+ Iterator<StorageClusterStatusModel.Node> nodes =
+ model.getLiveNodes().iterator();
+ StorageClusterStatusModel.Node node = nodes.next();
+ assertEquals(node.getName(), "test1");
+ assertEquals(node.getStartCode(), 1245219839331L);
+ assertEquals(node.getHeapSizeMB(), 128);
+ assertEquals(node.getMaxHeapSizeMB(), 1024);
+ Iterator<StorageClusterStatusModel.Node.Region> regions =
+ node.getRegions().iterator();
+ StorageClusterStatusModel.Node.Region region = regions.next();
+ assertTrue(Bytes.toString(region.getName()).equals(
+ "hbase:root,,0"));
+ assertEquals(region.getStores(), 1);
+ assertEquals(region.getStorefiles(), 1);
+ assertEquals(region.getStorefileSizeMB(), 0);
+ assertEquals(region.getMemstoreSizeMB(), 0);
+ assertEquals(region.getStorefileIndexSizeMB(), 0);
+ assertEquals(region.getReadRequestsCount(), 1);
+ assertEquals(region.getWriteRequestsCount(), 2);
+ assertEquals(region.getRootIndexSizeKB(), 1);
+ assertEquals(region.getTotalStaticIndexSizeKB(), 1);
+ assertEquals(region.getTotalStaticBloomSizeKB(), 1);
+ assertEquals(region.getTotalCompactingKVs(), 1);
+ assertEquals(region.getCurrentCompactedKVs(), 1);
+ assertFalse(regions.hasNext());
+ node = nodes.next();
+ assertEquals(node.getName(), "test2");
+ assertEquals(node.getStartCode(), 1245239331198L);
+ assertEquals(node.getHeapSizeMB(), 512);
+ assertEquals(node.getMaxHeapSizeMB(), 1024);
+ regions = node.getRegions().iterator();
+ region = regions.next();
+ assertEquals(Bytes.toString(region.getName()),
+ TableName.META_TABLE_NAME + ",,1246000043724");
+ assertEquals(region.getStores(), 1);
+ assertEquals(region.getStorefiles(), 1);
+ assertEquals(region.getStorefileSizeMB(), 0);
+ assertEquals(region.getMemstoreSizeMB(), 0);
+ assertEquals(region.getStorefileIndexSizeMB(), 0);
+ assertEquals(region.getReadRequestsCount(), 1);
+ assertEquals(region.getWriteRequestsCount(), 2);
+ assertEquals(region.getRootIndexSizeKB(), 1);
+ assertEquals(region.getTotalStaticIndexSizeKB(), 1);
+ assertEquals(region.getTotalStaticBloomSizeKB(), 1);
+ assertEquals(region.getTotalCompactingKVs(), 1);
+ assertEquals(region.getCurrentCompactedKVs(), 1);
+
+ assertFalse(regions.hasNext());
+ assertFalse(nodes.hasNext());
+ }
+}
+
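
TestModelBase, the shared superclass these model tests extend, is not part of this diff; presumably its testFromXML/testFromPB hooks deserialize the AS_XML/AS_PB constants and feed the result to checkModel. A minimal sketch of the XML leg with JAXB, under that assumption (the class name StatusModelXmlRoundTrip is hypothetical):

import java.io.StringReader;
import java.io.StringWriter;

import javax.xml.bind.JAXBContext;

import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;

public class StatusModelXmlRoundTrip {
  public static void main(String[] args) throws Exception {
    JAXBContext context = JAXBContext.newInstance(StorageClusterStatusModel.class);

    // Build a model the same way buildTestModel() does above.
    StorageClusterStatusModel model = new StorageClusterStatusModel();
    model.setRegions(2);
    model.setRequests(0);
    model.setAverageLoad(1.0);
    model.addLiveNode("test1", 1245219839331L, 128, 1024);

    // Marshal to XML, then unmarshal back into a fresh instance.
    StringWriter writer = new StringWriter();
    context.createMarshaller().marshal(model, writer);
    StorageClusterStatusModel parsed = (StorageClusterStatusModel)
        context.createUnmarshaller().unmarshal(new StringReader(writer.toString()));
    System.out.println("round-tripped node: " +
        parsed.getLiveNodes().iterator().next().getName());
  }
}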
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java
new file mode 100644
index 0000000..bd4fa1f
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java
@@ -0,0 +1,60 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.StringReader;
+import java.io.StringWriter;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import junit.framework.TestCase;
+import org.apache.hadoop.hbase.SmallTests;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestStorageClusterVersionModel extends TestModelBase<StorageClusterVersionModel> {
+ private static final String VERSION = "0.0.1-testing";
+
+ public TestStorageClusterVersionModel() throws Exception {
+ super(StorageClusterVersionModel.class);
+ AS_XML =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>"+
+ "<ClusterVersion>" + VERSION + "</ClusterVersion>";
+
+ AS_JSON = "\"0.0.1-testing\"";
+ }
+
+ protected StorageClusterVersionModel buildTestModel() {
+ StorageClusterVersionModel model = new StorageClusterVersionModel();
+ model.setVersion(VERSION);
+ return model;
+ }
+
+ protected void checkModel(StorageClusterVersionModel model) {
+ assertEquals(model.getVersion(), VERSION);
+ }
+
+ @Override
+ public void testFromPB() throws Exception {
+ // no-op: this model has no protobuf representation to test
+ }
+}
+
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java
new file mode 100644
index 0000000..dadb9ad
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java
@@ -0,0 +1,96 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.Iterator;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import junit.framework.TestCase;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestTableInfoModel extends TestModelBase<TableInfoModel> {
+ private static final String TABLE = "testtable";
+ private static final byte[] START_KEY = Bytes.toBytes("abracadbra");
+ private static final byte[] END_KEY = Bytes.toBytes("zzyzx");
+ private static final long ID = 8731042424L;
+ private static final String LOCATION = "testhost:9876";
+
+ public TestTableInfoModel() throws Exception {
+ super(TableInfoModel.class);
+ AS_XML =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><TableInfo " +
+ "name=\"testtable\"><Region endKey=\"enp5eng=\" id=\"8731042424\" " +
+ "location=\"testhost:9876\" " +
+ "name=\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\" " +
+ "startKey=\"YWJyYWNhZGJyYQ==\"/></TableInfo>";
+
+ AS_PB =
+ "Cgl0ZXN0dGFibGUSSQofdGVzdHRhYmxlLGFicmFjYWRicmEsODczMTA0MjQyNBIKYWJyYWNhZGJy" +
+ "YRoFenp5engg+MSkwyAqDXRlc3Rob3N0Ojk4NzY=";
+
+ AS_JSON =
+ "{\"name\":\"testtable\",\"Region\":[{\"endKey\":\"enp5eng=\",\"id\":8731042424," +
+ "\"location\":\"testhost:9876\",\"" +
+ "name\":\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\",\"" +
+ "startKey\":\"YWJyYWNhZGJyYQ==\"}]}";
+ }
+
+ protected TableInfoModel buildTestModel() {
+ TableInfoModel model = new TableInfoModel();
+ model.setName(TABLE);
+ model.add(new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION));
+ return model;
+ }
+
+ protected void checkModel(TableInfoModel model) {
+ assertEquals(model.getName(), TABLE);
+ Iterator<TableRegionModel> regions = model.getRegions().iterator();
+ TableRegionModel region = regions.next();
+ assertTrue(Bytes.equals(region.getStartKey(), START_KEY));
+ assertTrue(Bytes.equals(region.getEndKey(), END_KEY));
+ assertEquals(region.getId(), ID);
+ assertEquals(region.getLocation(), LOCATION);
+ assertFalse(regions.hasNext());
+ }
+
+ public void testBuildModel() throws Exception {
+ checkModel(buildTestModel());
+ }
+
+ public void testFromXML() throws Exception {
+ checkModel(fromXML(AS_XML));
+ }
+
+ public void testFromPB() throws Exception {
+ checkModel(fromPB(AS_PB));
+ }
+
+}
+
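
The startKey and endKey attributes in the AS_XML and AS_JSON constants above are Base64 over the raw key bytes: YWJyYWNhZGJyYQ== decodes to "abracadbra" and enp5eng= to "zzyzx". The tests import org.apache.hadoop.hbase.util.Base64; the self-contained sketch below assumes Java 8's java.util.Base64 instead:

import java.util.Base64;

public class DecodeRegionKeys {
  public static void main(String[] args) {
    // The XML/JSON attribute values are Base64-encoded row keys.
    byte[] start = Base64.getDecoder().decode("YWJyYWNhZGJyYQ==");
    byte[] end = Base64.getDecoder().decode("enp5eng=");
    System.out.println(new String(start)); // prints: abracadbra
    System.out.println(new String(end));   // prints: zzyzx
  }
}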
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java
new file mode 100644
index 0000000..4cb9194
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java
@@ -0,0 +1,73 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.Iterator;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.Base64;
+
+import junit.framework.TestCase;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestTableListModel extends TestModelBase<TableListModel> {
+ private static final String TABLE1 = "table1";
+ private static final String TABLE2 = "table2";
+ private static final String TABLE3 = "table3";
+
+ public TestTableListModel() throws Exception {
+ super(TableListModel.class);
+ AS_XML =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><TableList><table " +
+ "name=\"table1\"/><table name=\"table2\"/><table name=\"table3\"/></TableList>";
+
+ AS_PB = "CgZ0YWJsZTEKBnRhYmxlMgoGdGFibGUz";
+
+ AS_JSON =
+ "{\"table\":[{\"name\":\"table1\"},{\"name\":\"table2\"},{\"name\":\"table3\"}]}";
+ }
+
+ protected TableListModel buildTestModel() {
+ TableListModel model = new TableListModel();
+ model.add(new TableModel(TABLE1));
+ model.add(new TableModel(TABLE2));
+ model.add(new TableModel(TABLE3));
+ return model;
+ }
+
+ protected void checkModel(TableListModel model) {
+ Iterator<TableModel> tables = model.getTables().iterator();
+ TableModel table = tables.next();
+ assertEquals(table.getName(), TABLE1);
+ table = tables.next();
+ assertEquals(table.getName(), TABLE2);
+ table = tables.next();
+ assertEquals(table.getName(), TABLE3);
+ assertFalse(tables.hasNext());
+ }
+}
+
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java
new file mode 100644
index 0000000..5c4b1a9
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java
@@ -0,0 +1,93 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.StringReader;
+import java.io.StringWriter;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import junit.framework.TestCase;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestTableRegionModel extends TestModelBase<TableRegionModel> {
+ private static final String TABLE = "testtable";
+ private static final byte[] START_KEY = Bytes.toBytes("abracadbra");
+ private static final byte[] END_KEY = Bytes.toBytes("zzyzx");
+ private static final long ID = 8731042424L;
+ private static final String LOCATION = "testhost:9876";
+
+ public TestTableRegionModel() throws Exception {
+ super(TableRegionModel.class);
+
+ AS_XML =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><Region endKey=\"enp5eng=\" " +
+ "id=\"8731042424\" location=\"testhost:9876\" " +
+ "name=\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\" " +
+ "startKey=\"YWJyYWNhZGJyYQ==\"/>";
+
+ AS_JSON =
+ "{\"endKey\":\"enp5eng=\",\"id\":8731042424,\"location\":\"testhost:9876\"," +
+ "\"name\":\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\",\"" +
+ "startKey\":\"YWJyYWNhZGJyYQ==\"}";
+ }
+
+ protected TableRegionModel buildTestModel() {
+ TableRegionModel model =
+ new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION);
+ return model;
+ }
+
+ protected void checkModel(TableRegionModel model) {
+ assertTrue(Bytes.equals(model.getStartKey(), START_KEY));
+ assertTrue(Bytes.equals(model.getEndKey(), END_KEY));
+ assertEquals(model.getId(), ID);
+ assertEquals(model.getLocation(), LOCATION);
+ assertEquals(model.getName(),
+ TABLE + "," + Bytes.toString(START_KEY) + "," + Long.toString(ID) +
+ ".ad9860f031282c46ed431d7af8f94aca.");
+ }
+
+ public void testGetName() {
+ TableRegionModel model = buildTestModel();
+ String modelName = model.getName();
+ HRegionInfo hri = new HRegionInfo(TableName.valueOf(TABLE),
+ START_KEY, END_KEY, false, ID);
+ assertEquals(modelName, hri.getRegionNameAsString());
+ }
+
+ public void testSetName() {
+ TableRegionModel model = buildTestModel();
+ String name = model.getName();
+ model.setName(name);
+ assertEquals(name, model.getName());
+ }
+
+ @Override
+ public void testFromPB() throws Exception {
+ // no-op: this model has no protobuf representation to test
+ }
+}
+
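
The hard-coded suffix .ad9860f031282c46ed431d7af8f94aca. in the expected region names above is the region's encoded name, which HRegionInfo derives from the rest of the region name (an MD5 hash for current-format region names). A short sketch that reproduces it with the same HRegionInfo API testGetName() uses; the class name PrintEncodedName is hypothetical:

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class PrintEncodedName {
  public static void main(String[] args) {
    HRegionInfo hri = new HRegionInfo(TableName.valueOf("testtable"),
        Bytes.toBytes("abracadbra"), Bytes.toBytes("zzyzx"), false, 8731042424L);
    // getRegionNameAsString() ends with the encoded-name suffix that the
    // expected strings in the tests above hard-code.
    System.out.println(hri.getRegionNameAsString());
    System.out.println(hri.getEncodedName()); // expected: ad9860f031282c46ed431d7af8f94aca
  }
}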
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java
new file mode 100644
index 0000000..b725f7b
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java
@@ -0,0 +1,117 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.Iterator;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.Base64;
+
+import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
+
+import junit.framework.TestCase;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestTableSchemaModel extends TestModelBase<TableSchemaModel> {
+
+ public static final String TABLE_NAME = "testTable";
+ private static final boolean IS_META = false;
+ private static final boolean IS_ROOT = false;
+ private static final boolean READONLY = false;
+
+ TestColumnSchemaModel testColumnSchemaModel;
+
+ private JAXBContext context;
+
+ public TestTableSchemaModel() throws Exception {
+ super(TableSchemaModel.class);
+ testColumnSchemaModel = new TestColumnSchemaModel();
+
+ AS_XML =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>" +
+ "<TableSchema name=\"testTable\" IS_META=\"false\" IS_ROOT=\"false\" READONLY=\"false\">" +
+ "<ColumnSchema name=\"testcolumn\" BLOCKSIZE=\"16384\" BLOOMFILTER=\"NONE\" " +
+ "BLOCKCACHE=\"true\" COMPRESSION=\"GZ\" VERSIONS=\"1\" TTL=\"86400\" IN_MEMORY=\"false\"/>" +
+ "</TableSchema>";
+
+ AS_PB =
+ "Cgl0ZXN0VGFibGUSEAoHSVNfTUVUQRIFZmFsc2USEAoHSVNfUk9PVBIFZmFsc2USEQoIUkVBRE9O" +
+ "TFkSBWZhbHNlGpcBCgp0ZXN0Y29sdW1uEhIKCUJMT0NLU0laRRIFMTYzODQSEwoLQkxPT01GSUxU" +
+ "RVISBE5PTkUSEgoKQkxPQ0tDQUNIRRIEdHJ1ZRIRCgtDT01QUkVTU0lPThICR1oSDQoIVkVSU0lP" +
+ "TlMSATESDAoDVFRMEgU4NjQwMBISCglJTl9NRU1PUlkSBWZhbHNlGICjBSABKgJHWigA";
+
+ AS_JSON =
+ "{\"name\":\"testTable\",\"IS_META\":\"false\",\"IS_ROOT\":\"false\"," +
+ "\"READONLY\":\"false\",\"ColumnSchema\":[{\"name\":\"testcolumn\"," +
+ "\"BLOCKSIZE\":\"16384\",\"BLOOMFILTER\":\"NONE\",\"BLOCKCACHE\":\"true\"," +
+ "\"COMPRESSION\":\"GZ\",\"VERSIONS\":\"1\",\"TTL\":\"86400\",\"IN_MEMORY\":\"false\"}]}";
+ }
+
+ protected TableSchemaModel buildTestModel() {
+ return buildTestModel(TABLE_NAME);
+ }
+
+ public TableSchemaModel buildTestModel(String name) {
+ TableSchemaModel model = new TableSchemaModel();
+ model.setName(name);
+ model.__setIsMeta(IS_META);
+ model.__setIsRoot(IS_ROOT);
+ model.__setReadOnly(READONLY);
+ model.addColumnFamily(testColumnSchemaModel.buildTestModel());
+ return model;
+ }
+
+ protected void checkModel(TableSchemaModel model) {
+ checkModel(model, TABLE_NAME);
+ }
+
+ public void checkModel(TableSchemaModel model, String tableName) {
+ assertEquals(model.getName(), tableName);
+ assertEquals(model.__getIsMeta(), IS_META);
+ assertEquals(model.__getIsRoot(), IS_ROOT);
+ assertEquals(model.__getReadOnly(), READONLY);
+ Iterator<ColumnSchemaModel> families = model.getColumns().iterator();
+ assertTrue(families.hasNext());
+ ColumnSchemaModel family = families.next();
+ testColumnSchemaModel.checkModel(family);
+ assertFalse(families.hasNext());
+ }
+
+ public void testBuildModel() throws Exception {
+ checkModel(buildTestModel());
+ }
+
+ public void testFromXML() throws Exception {
+ checkModel(fromXML(AS_XML));
+ }
+
+ public void testFromPB() throws Exception {
+ checkModel(fromPB(AS_PB));
+ }
+
+}
+
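
The testFromXML case above presumably runs through JAXB inside TestModelBase; a minimal sketch of the same unmarshalling done stand-alone, assuming only the model class and the AS_XML payload:

import java.io.StringReader;
import javax.xml.bind.JAXBContext;
import org.apache.hadoop.hbase.rest.model.TableSchemaModel;

public class SchemaXmlSketch {
  public static void main(String[] args) throws Exception {
    String xml =
        "<TableSchema name=\"testTable\" IS_META=\"false\" IS_ROOT=\"false\" READONLY=\"false\">" +
        "<ColumnSchema name=\"testcolumn\" BLOCKSIZE=\"16384\" BLOOMFILTER=\"NONE\" " +
        "BLOCKCACHE=\"true\" COMPRESSION=\"GZ\" VERSIONS=\"1\" TTL=\"86400\" IN_MEMORY=\"false\"/>" +
        "</TableSchema>";
    JAXBContext ctx = JAXBContext.newInstance(TableSchemaModel.class);
    TableSchemaModel model =
        (TableSchemaModel) ctx.createUnmarshaller().unmarshal(new StringReader(xml));
    System.out.println(model.getName());           // testTable
    System.out.println(model.getColumns().size()); // 1
  }
}
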
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java
new file mode 100644
index 0000000..553bb35
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java
@@ -0,0 +1,80 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.StringWriter;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.Base64;
+
+import junit.framework.TestCase;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestVersionModel extends TestModelBase<VersionModel> {
+ private static final String REST_VERSION = "0.0.1";
+ private static final String OS_VERSION =
+ "Linux 2.6.18-128.1.6.el5.centos.plusxen amd64";
+ private static final String JVM_VERSION =
+ "Sun Microsystems Inc. 1.6.0_13-11.3-b02";
+ private static final String JETTY_VERSION = "6.1.14";
+ private static final String JERSEY_VERSION = "1.1.0-ea";
+
+ public TestVersionModel() throws Exception {
+ super(VersionModel.class);
+ AS_XML =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><Version JVM=\"Sun " +
+ "Microsystems Inc. 1.6.0_13-11.3-b02\" Jersey=\"1.1.0-ea\" " +
+ "OS=\"Linux 2.6.18-128.1.6.el5.centos.plusxen amd64\" REST=\"0.0.1\" Server=\"6.1.14\"/>";
+
+ AS_PB =
+ "CgUwLjAuMRInU3VuIE1pY3Jvc3lzdGVtcyBJbmMuIDEuNi4wXzEzLTExLjMtYjAyGi1MaW51eCAy" +
+ "LjYuMTgtMTI4LjEuNi5lbDUuY2VudG9zLnBsdXN4ZW4gYW1kNjQiBjYuMS4xNCoIMS4xLjAtZWE=";
+
+ AS_JSON =
+ "{\"JVM\":\"Sun Microsystems Inc. 1.6.0_13-11.3-b02\",\"Jersey\":\"1.1.0-ea\"," +
+ "\"OS\":\"Linux 2.6.18-128.1.6.el5.centos.plusxen amd64\",\"" +
+ "REST\":\"0.0.1\",\"Server\":\"6.1.14\"}";
+ }
+
+ protected VersionModel buildTestModel() {
+ VersionModel model = new VersionModel();
+ model.setRESTVersion(REST_VERSION);
+ model.setOSVersion(OS_VERSION);
+ model.setJVMVersion(JVM_VERSION);
+ model.setServerVersion(JETTY_VERSION);
+ model.setJerseyVersion(JERSEY_VERSION);
+ return model;
+ }
+
+ protected void checkModel(VersionModel model) {
+ assertEquals(model.getRESTVersion(), REST_VERSION);
+ assertEquals(model.getOSVersion(), OS_VERSION);
+ assertEquals(model.getJVMVersion(), JVM_VERSION);
+ assertEquals(model.getServerVersion(), JETTY_VERSION);
+ assertEquals(model.getJerseyVersion(), JERSEY_VERSION);
+ }
+}
+
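
The fields above mirror what a running gateway reports from its /version endpoint; a hedged sketch of fetching that document, assuming a gateway on localhost:8080 (the values returned reflect the live JVM, OS, and server, not the fixtures here):

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class VersionSketch {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:8080/version");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    BufferedReader reader =
        new BufferedReader(new InputStreamReader(conn.getInputStream()));
    // Body is shaped like AS_JSON above: {"JVM":"...","Jersey":"...","OS":"...",...}
    System.out.println(reader.readLine());
    reader.close();
  }
}
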
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/resources/hbase-site.xml
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/resources/hbase-site.xml b/hbase-rest/src/test/resources/hbase-site.xml
new file mode 100644
index 0000000..8c8312c
--- /dev/null
+++ b/hbase-rest/src/test/resources/hbase-site.xml
@@ -0,0 +1,146 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+ <property>
+ <name>hbase.regionserver.msginterval</name>
+ <value>1000</value>
+ <description>Interval between messages from the RegionServer to HMaster
+ in milliseconds. Default is 15. Set this value low if you want unit
+ tests to be responsive.
+ </description>
+ </property>
+ <property>
+ <name>hbase.server.thread.wakefrequency</name>
+ <value>1000</value>
+ <description>Time to sleep in between searches for work (in milliseconds).
+ Used as sleep interval by service threads such as hbase:meta scanner and log roller.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.event.waiting.time</name>
+ <value>50</value>
+ <description>Time to sleep between checks to see if a table event took place.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.handler.count</name>
+ <value>5</value>
+ </property>
+ <property>
+ <name>hbase.master.info.port</name>
+ <value>-1</value>
+ <description>The port for the hbase master web UI
+ Set to -1 if you do not want the info server to run.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.port</name>
+ <value>0</value>
+ <description>Always have masters and regionservers come up on port '0' so we don't clash over
+ default ports.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.port</name>
+ <value>0</value>
+ <description>Always have masters and regionservers come up on port '0' so we don't clash over
+ default ports.
+ </description>
+ </property>
+ <property>
+ <name>hbase.ipc.client.fallback-to-simple-auth-allowed</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>hbase.regionserver.info.port</name>
+ <value>-1</value>
+ <description>The port for the hbase regionserver web UI
+ Set to -1 if you do not want the info server to run.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.info.port.auto</name>
+ <value>true</value>
+ <description>Info server auto port bind. Enables automatic port
+ search if hbase.regionserver.info.port is already in use.
+ Enabled for testing to run multiple tests on one machine.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.lease.thread.wakefrequency</name>
+ <value>3000</value>
+ <description>The interval between checks for expired region server leases.
+ This value has been reduced due to the other reduced values above so that
+ the master will notice a dead region server sooner. The default is 15 seconds.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.safemode</name>
+ <value>false</value>
+ <description>
+ Turn on/off safe mode in region server. Always on for production, always off
+ for tests.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hregion.max.filesize</name>
+ <value>67108864</value>
+ <description>
+ Maximum desired file size for an HRegion. If filesize exceeds
+ value + (value / 2), the HRegion is split in two. Default: 256M.
+
+ Keep the maximum filesize small so we split more often in tests.
+ </description>
+ </property>
+ <property>
+ <name>hadoop.log.dir</name>
+ <value>${user.dir}/../logs</value>
+ </property>
+ <property>
+ <name>hbase.zookeeper.property.clientPort</name>
+ <value>21818</value>
+ <description>Property from ZooKeeper's config zoo.cfg.
+ The port at which the clients will connect.
+ </description>
+ </property>
+ <property>
+ <name>hbase.defaults.for.version.skip</name>
+ <value>true</value>
+ <description>
+ Set to true to skip the 'hbase.defaults.for.version' check.
+ Setting this to true can be useful in contexts other than
+ the other side of a maven generation; i.e. running in an
+ IDE. You'll want to set this boolean to true to avoid
+ seeing the RuntimeException complaint: "hbase-default.xml file
+ seems to be for an old version of HBase (@@@VERSION@@@), this
+ version is X.X.X-SNAPSHOT"
+ </description>
+ </property>
+ <property>
+ <name>hbase.table.sanity.checks</name>
+ <value>false</value>
+ <description>Skip sanity checks in tests
+ </description>
+ </property>
+</configuration>
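
The overrides above take effect because HBaseConfiguration layers any hbase-site.xml found on the classpath over the bundled hbase-default.xml; a minimal sketch of reading the values back, assuming this test file is first on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TestConfSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // 1000 with the test hbase-site.xml above (the shipped default differs)
    System.out.println(conf.getInt("hbase.regionserver.msginterval", -1));
    // 21818, the test ZooKeeper client port declared above
    System.out.println(conf.getInt("hbase.zookeeper.property.clientPort", -1));
  }
}
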
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/resources/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/resources/hdfs-site.xml b/hbase-rest/src/test/resources/hdfs-site.xml
new file mode 100644
index 0000000..03be0c7
--- /dev/null
+++ b/hbase-rest/src/test/resources/hdfs-site.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+
+ <!-- hadoop-2.0.5+'s HDFS-4305 by default enforces a min block size
+ of 1024*1024. Many unit tests that use the hlog use smaller
+ blocks. Set this config to 0 so those tests pass. -->
+ <property>
+ <name>dfs.namenode.fs-limits.min-block-size</name>
+ <value>0</value>
+ </property>
+</configuration>
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/resources/log4j.properties b/hbase-rest/src/test/resources/log4j.properties
new file mode 100644
index 0000000..6ee91ef
--- /dev/null
+++ b/hbase-rest/src/test/resources/log4j.properties
@@ -0,0 +1,66 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hbase.root.logger=INFO,console
+hbase.log.dir=.
+hbase.log.file=hbase.log
+
+# Define the root logger to the system property "hbase.root.logger".
+log4j.rootLogger=${hbase.root.logger}
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
+
+# Roll over at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+# Debugging Pattern format
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n
+
+# Custom Logging levels
+
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+
+log4j.logger.org.apache.hadoop=WARN
+log4j.logger.org.apache.zookeeper=ERROR
+log4j.logger.org.apache.hadoop.hbase=DEBUG
+
+# These two settings are workarounds against spurious logs from the minicluster.
+# See HBASE-4709
+log4j.logger.org.apache.hadoop.metrics2.impl.MetricsSystemImpl=ERROR
+log4j.logger.org.apache.hadoop.metrics2.util.MBeans=ERROR
+# Enable this to get detailed connection error/retry logging.
+# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE
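
As the header comment notes, hbase.root.logger and the other defaults are resolved from system properties before the file's values apply; a hedged sketch of raising verbosity for a single run, assuming it executes before log4j initializes:

public class LogLevelSketch {
  public static void main(String[] args) {
    // log4j substitutes ${hbase.root.logger} from system properties, so this
    // overrides the INFO,console default declared at the top of the file.
    System.setProperty("hbase.root.logger", "DEBUG,console");
    // ...the first use of a Logger after this point picks up the override
  }
}
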
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/resources/mapred-queues.xml
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/resources/mapred-queues.xml b/hbase-rest/src/test/resources/mapred-queues.xml
new file mode 100644
index 0000000..43f3e2a
--- /dev/null
+++ b/hbase-rest/src/test/resources/mapred-queues.xml
@@ -0,0 +1,75 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<!-- This is the template for queue configuration. The format supports nesting of
+ queues within queues - a feature called hierarchical queues. All queues are
+ defined within the 'queues' tag which is the top level element for this
+ XML document.
+ The 'aclsEnabled' attribute should be set to true, if ACLs should be checked
+ on queue operations such as submitting jobs, killing jobs etc. -->
+<queues aclsEnabled="false">
+
+ <!-- Configuration for a queue is specified by defining a 'queue' element. -->
+ <queue>
+
+ <!-- Name of a queue. Queue name cannot contain a ':' -->
+ <name>default</name>
+
+ <!-- properties for a queue, typically used by schedulers,
+ can be defined here -->
+ <properties>
+ </properties>
+
+ <!-- State of the queue. If running, the queue will accept new jobs.
+ If stopped, the queue will not accept new jobs. -->
+ <state>running</state>
+
+ <!-- Specifies the ACLs to check for submitting jobs to this queue.
+ If set to '*', it allows all users to submit jobs to the queue.
+ For specifying a list of users and groups the format to use is
+ user1,user2 group1,group2 -->
+ <acl-submit-job>*</acl-submit-job>
+
+ <!-- Specifies the ACLs to check for modifying jobs in this queue.
+ Modifications include killing jobs, tasks of jobs or changing
+ priorities.
+ If set to '*', it allows all users to modify jobs in this queue.
+ For specifying a list of users and groups the format to use is
+ user1,user2 group1,group2 -->
+ <acl-administer-jobs>*</acl-administer-jobs>
+ </queue>
+
+ <!-- Here is a sample of a hierarchical queue configuration
+ where q2 is a child of q1. In this example, q2 is a leaf level
+ queue as it has no queues configured within it. Currently, ACLs
+ and state are only supported for the leaf level queues.
+ Note also the usage of properties for the queue q2.
+ <queue>
+ <name>q1</name>
+ <queue>
+ <name>q2</name>
+ <properties>
+ <property key="capacity" value="20"/>
+ <property key="user-limit" value="30"/>
+ </properties>
+ </queue>
+ </queue>
+ -->
+</queues>
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/resources/mapred-site.xml
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/resources/mapred-site.xml b/hbase-rest/src/test/resources/mapred-site.xml
new file mode 100644
index 0000000..787ffb7
--- /dev/null
+++ b/hbase-rest/src/test/resources/mapred-site.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+<property>
+ <name>mapred.map.child.java.opts</name>
+ <value>-Djava.awt.headless=true</value>
+</property>
+
+<property>
+ <name>mapred.reduce.child.java.opts</name>
+ <value>-Djava.awt.headless=true</value>
+</property>
+</configuration>
+
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/resources/zoo.cfg
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/resources/zoo.cfg b/hbase-rest/src/test/resources/zoo.cfg
new file mode 100644
index 0000000..a7b8ec8
--- /dev/null
+++ b/hbase-rest/src/test/resources/zoo.cfg
@@ -0,0 +1,43 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# The number of milliseconds of each tick
+tickTime=2000
+# The number of ticks that the initial
+# synchronization phase can take
+initLimit=10
+# The number of ticks that can pass between
+# sending a request and getting an acknowledgement
+syncLimit=5
+# the directory where the snapshot is stored.
+# do not use /tmp for storage; /tmp here is
+# just for example's sake.
+dataDir=/tmp/hbase-test-zookeeper-deleteme
+# the port at which the clients will connect
+clientPort=9999
+#
+# Be sure to read the maintenance section of the
+# administrator guide before turning on autopurge.
+#
+# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
+#
+# The number of snapshots to retain in dataDir
+autopurge.snapRetainCount=3
+# Purge task interval in hours
+# Set to "0" to disable auto purge feature
+autopurge.purgeInterval=1
+
+server.1=i-am-a-test-server:7999:8999
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index 0d0b282..80eafd5 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -124,8 +124,6 @@
<jspcompiler uriroot="${src.webapps}/master" outputdir="${generated.sources}/java" package="org.apache.hadoop.hbase.generated.master" webxml="${build.webapps}/master/WEB-INF/web.xml"/>
<mkdir dir="${build.webapps}/regionserver/WEB-INF"/>
<jspcompiler uriroot="${src.webapps}/regionserver" outputdir="${generated.sources}/java" package="org.apache.hadoop.hbase.generated.regionserver" webxml="${build.webapps}/regionserver/WEB-INF/web.xml"/>
- <mkdir dir="${build.webapps}/rest/WEB-INF"/>
- <jspcompiler uriroot="${src.webapps}/rest" outputdir="${generated.sources}/java" package="org.apache.hadoop.hbase.generated.rest" webxml="${build.webapps}/rest/WEB-INF/web.xml"/>
</target>
</configuration>
<goals>
@@ -424,27 +422,6 @@
<groupId>org.jamon</groupId>
<artifactId>jamon-runtime</artifactId>
</dependency>
- <!-- REST dependencies -->
- <dependency>
- <groupId>com.google.protobuf</groupId>
- <artifactId>protobuf-java</artifactId>
- </dependency>
- <dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-core</artifactId>
- </dependency>
- <dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-json</artifactId>
- </dependency>
- <dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-server</artifactId>
- </dependency>
- <dependency>
- <groupId>javax.xml.bind</groupId>
- <artifactId>jaxb-api</artifactId>
- </dependency>
<!-- tracing Dependencies -->
<dependency>
<groupId>org.cloudera.htrace</groupId>
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/Constants.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/Constants.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/Constants.java
deleted file mode 100644
index f3dba9a..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/Constants.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-
-/**
- * Common constants for org.apache.hadoop.hbase.rest
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public interface Constants {
- // All constants in a public interface are 'public static final'
-
- String VERSION_STRING = "0.0.3";
-
- int DEFAULT_MAX_AGE = 60 * 60 * 4; // 4 hours
-
- int DEFAULT_LISTEN_PORT = 8080;
-
- String MIMETYPE_TEXT = "text/plain";
- String MIMETYPE_HTML = "text/html";
- String MIMETYPE_XML = "text/xml";
- String MIMETYPE_BINARY = "application/octet-stream";
- String MIMETYPE_PROTOBUF = "application/x-protobuf";
- String MIMETYPE_PROTOBUF_IETF = "application/protobuf";
- String MIMETYPE_JSON = "application/json";
-
- String CRLF = "\r\n";
-
- String REST_KEYTAB_FILE = "hbase.rest.keytab.file";
- String REST_KERBEROS_PRINCIPAL = "hbase.rest.kerberos.principal";
- String REST_AUTHENTICATION_TYPE = "hbase.rest.authentication.type";
- String REST_AUTHENTICATION_PRINCIPAL = "hbase.rest.authentication.kerberos.principal";
-
- String REST_SSL_ENABLED = "hbase.rest.ssl.enabled";
- String REST_SSL_KEYSTORE_STORE = "hbase.rest.ssl.keystore.store";
- String REST_SSL_KEYSTORE_PASSWORD = "hbase.rest.ssl.keystore.password";
- String REST_SSL_KEYSTORE_KEYPASSWORD = "hbase.rest.ssl.keystore.keypassword";
-
- String REST_DNS_NAMESERVER = "hbase.rest.dns.nameserver";
- String REST_DNS_INTERFACE = "hbase.rest.dns.interface";
-
- String FILTER_CLASSES = "hbase.rest.filter.classes";
- String SCAN_START_ROW = "startrow";
- String SCAN_END_ROW = "endrow";
- String SCAN_COLUMN = "column";
- String SCAN_START_TIME = "starttime";
- String SCAN_END_TIME = "endtime";
- String SCAN_MAX_VERSIONS = "maxversions";
- String SCAN_BATCH_SIZE = "batchsize";
- String SCAN_LIMIT = "limit";
- String SCAN_FETCH_SIZE = "hbase.rest.scan.fetchsize";
-
- String ROW_KEYS_PARAM_NAME = "row";
- /** If this query parameter is present when processing row or scanner resources,
- it disables server side block caching */
- String NOCACHE_PARAM_NAME = "nocache";
-}
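
Because Constants is an interface, every field above is implicitly public static final, and classes that declare 'implements Constants' (as ResourceBase and RESTServer do) can use the names unqualified; a small sketch:

import org.apache.hadoop.hbase.rest.Constants;

public class ConstantsSketch implements Constants {
  public static void main(String[] args) {
    // Inherited interface fields, usable without the Constants. prefix.
    System.out.println(MIMETYPE_PROTOBUF);   // application/x-protobuf
    System.out.println(DEFAULT_LISTEN_PORT); // 8080
    System.out.println(SCAN_MAX_VERSIONS);   // maxversions
  }
}
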
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java
deleted file mode 100644
index 90b3302..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.CacheControl;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import javax.ws.rs.core.Response.ResponseBuilder;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-@InterfaceAudience.Private
-public class ExistsResource extends ResourceBase {
-
- static CacheControl cacheControl;
- static {
- cacheControl = new CacheControl();
- cacheControl.setNoCache(true);
- cacheControl.setNoTransform(false);
- }
-
- TableResource tableResource;
-
- /**
- * Constructor
- * @param tableResource
- * @throws IOException
- */
- public ExistsResource(TableResource tableResource) throws IOException {
- super();
- this.tableResource = tableResource;
- }
-
- @GET
- @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
- MIMETYPE_PROTOBUF_IETF, MIMETYPE_BINARY})
- public Response get(final @Context UriInfo uriInfo) {
- try {
- if (!tableResource.exists()) {
- return Response.status(Response.Status.NOT_FOUND)
- .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
- .build();
- }
- } catch (IOException e) {
- return Response.status(Response.Status.SERVICE_UNAVAILABLE)
- .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
- .build();
- }
- ResponseBuilder response = Response.ok();
- response.cacheControl(cacheControl);
- return response.build();
- }
-}
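
ExistsResource answers a plain GET with 200 when the table exists and 404 otherwise, with response caching disabled; a hedged client-side sketch, assuming the conventional <table>/exists mapping and a gateway on localhost:8080 ('mytable' is a hypothetical table):

import java.net.HttpURLConnection;
import java.net.URL;

public class ExistsSketch {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:8080/mytable/exists");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "text/plain");
    System.out.println(conn.getResponseCode()); // 200 if 'mytable' exists, else 404
  }
}
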
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java
deleted file mode 100644
index e31037a..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
-
-import org.apache.hadoop.hbase.rest.MetricsRESTSource;
-
-@InterfaceAudience.Private
-public class MetricsREST {
-
- public MetricsRESTSource getSource() {
- return source;
- }
-
- private MetricsRESTSource source;
-
- public MetricsREST() {
- source = CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class);
- }
-
- /**
- * @param inc How much to add to requests.
- */
- public void incrementRequests(final int inc) {
- source.incrementRequests(inc);
- }
-
- /**
- * @param inc How much to add to sucessfulGetCount.
- */
- public void incrementSucessfulGetRequests(final int inc) {
- source.incrementSucessfulGetRequests(inc);
- }
-
- /**
- * @param inc How much to add to sucessfulPutCount.
- */
- public void incrementSucessfulPutRequests(final int inc) {
- source.incrementSucessfulPutRequests(inc);
- }
-
- /**
- * @param inc How much to add to failedPutCount.
- */
- public void incrementFailedPutRequests(final int inc) {
- source.incrementFailedPutRequests(inc);
- }
-
- /**
- * @param inc How much to add to failedGetCount.
- */
- public void incrementFailedGetRequests(final int inc) {
- source.incrementFailedGetRequests(inc);
- }
-
- /**
- * @param inc How much to add to sucessfulDeleteCount.
- */
- public void incrementSucessfulDeleteRequests(final int inc) {
- source.incrementSucessfulDeleteRequests(inc);
- }
-
- /**
- * @param inc How much to add to failedDeleteCount.
- */
- public void incrementFailedDeleteRequests(final int inc) {
- source.incrementFailedDeleteRequests(inc);
- }
-
- /**
- * @param inc How much to add to sucessfulScanCount.
- */
- public synchronized void incrementSucessfulScanRequests(final int inc) {
- source.incrementSucessfulScanRequests(inc);
- }
-
- /**
- * @param inc How much to add to failedScanCount.
- */
- public void incrementFailedScanRequests(final int inc) {
- source.incrementFailedScanRequests(inc);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
deleted file mode 100644
index c88ac91..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.MultivaluedMap;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.rest.model.CellModel;
-import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
-
-@InterfaceAudience.Private
-public class MultiRowResource extends ResourceBase implements Constants {
- private static final Log LOG = LogFactory.getLog(MultiRowResource.class);
-
- TableResource tableResource;
- Integer versions = null;
-
- /**
- * Constructor
- *
- * @param tableResource
- * @param versions
- * @throws java.io.IOException
- */
- public MultiRowResource(TableResource tableResource, String versions) throws IOException {
- super();
- this.tableResource = tableResource;
-
- if (versions != null) {
- this.versions = Integer.valueOf(versions);
-
- }
- }
-
- @GET
- @Produces({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF })
- public Response get(final @Context UriInfo uriInfo) {
- MultivaluedMap<String, String> params = uriInfo.getQueryParameters();
-
- servlet.getMetrics().incrementRequests(1);
- try {
- CellSetModel model = new CellSetModel();
- for (String rk : params.get(ROW_KEYS_PARAM_NAME)) {
- RowSpec rowSpec = new RowSpec(rk);
-
- if (this.versions != null) {
- rowSpec.setMaxVersions(this.versions);
- }
- ResultGenerator generator =
- ResultGenerator.fromRowSpec(this.tableResource.getName(), rowSpec, null,
- !params.containsKey(NOCACHE_PARAM_NAME));
- Cell value = null;
- RowModel rowModel = new RowModel(rk);
- if (generator.hasNext()) {
- while ((value = generator.next()) != null) {
- rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil
- .cloneQualifier(value), value.getTimestamp(), CellUtil.cloneValue(value)));
- }
- model.addRow(rowModel);
- } else {
- LOG.trace("The row : " + rk + " not found in the table.");
- }
- }
-
- if (model.getRows().size() == 0) {
- //If no rows found.
- servlet.getMetrics().incrementFailedGetRequests(1);
- return Response.status(Response.Status.NOT_FOUND)
- .type(MIMETYPE_TEXT).entity("No rows found." + CRLF)
- .build();
- } else {
- servlet.getMetrics().incrementSucessfulGetRequests(1);
- return Response.ok(model).build();
- }
- } catch (Exception e) {
- servlet.getMetrics().incrementFailedGetRequests(1);
- return processException(e);
- }
- }
-}
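
MultiRowResource reads its row keys from the repeated 'row' query parameter (ROW_KEYS_PARAM_NAME) and skips server-side block caching when 'nocache' is present; a hedged sketch of the matching request, assuming the multiget mapping under the table path and a local gateway ('mytable' and the row keys are hypothetical):

import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class MultiGetSketch {
  public static void main(String[] args) throws Exception {
    URL url = new URL(
        "http://localhost:8080/mytable/multiget?row=row1&row=row2&nocache");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    int rc = conn.getResponseCode(); // 200 with rows, 404 when none were found
    if (rc == HttpURLConnection.HTTP_OK) {
      InputStream in = conn.getInputStream();
      byte[] buf = new byte[4096];
      for (int n; (n = in.read(buf)) > 0; ) {
        System.out.write(buf, 0, n);
      }
      in.close();
    }
  }
}
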
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java
deleted file mode 100644
index bbaf1f7..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * Common interface for models capable of supporting protobuf marshalling
- * and unmarshalling. Hooks up to the ProtobufMessageBodyConsumer and
- * ProtobufMessageBodyProducer adapters.
- */
-@InterfaceAudience.Private
-public interface ProtobufMessageHandler {
- /**
- * @return the protobuf representation of the model
- */
- byte[] createProtobufOutput();
-
- /**
- * Initialize the model from a protobuf representation.
- * @param message the raw bytes of the protobuf message
- * @return reference to self for convenience
- * @throws IOException
- */
- ProtobufMessageHandler getObjectFromMessage(byte[] message)
- throws IOException;
-}
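
A minimal sketch of an implementing model, with a hypothetical byte-array payload standing in for the generated protobuf Message the real models wrap:

import java.io.IOException;

import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;

public class ExampleModel implements ProtobufMessageHandler {
  private byte[] payload = new byte[0]; // stand-in for real protobuf-backed state

  @Override
  public byte[] createProtobufOutput() {
    return payload.clone(); // real models serialize a generated Message here
  }

  @Override
  public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException {
    this.payload = message.clone(); // real models parse the bytes into their fields
    return this;
  }
}
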
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingUtil.java
deleted file mode 100644
index 93bb940..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingUtil.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.List;
-
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.StreamingOutput;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.rest.model.CellModel;
-import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
-import org.apache.hadoop.hbase.util.Bytes;
-
-
-public class ProtobufStreamingUtil implements StreamingOutput {
-
- private static final Log LOG = LogFactory.getLog(ProtobufStreamingUtil.class);
- private String contentType;
- private ResultScanner resultScanner;
- private int limit;
- private int fetchSize;
-
- protected ProtobufStreamingUtil(ResultScanner scanner, String type, int limit, int fetchSize) {
- this.resultScanner = scanner;
- this.contentType = type;
- this.limit = limit;
- this.fetchSize = fetchSize;
- LOG.debug("Created ScanStreamingUtil with content type = " + this.contentType + " user limit : "
- + this.limit + " scan fetch size : " + this.fetchSize);
- }
-
- @Override
- public void write(OutputStream outStream) throws IOException, WebApplicationException {
- Result[] rowsToSend;
- if(limit < fetchSize){
- rowsToSend = this.resultScanner.next(limit);
- writeToStream(createModelFromResults(rowsToSend), this.contentType, outStream);
- } else {
- int count = limit;
- while (count > 0) {
- if (count < fetchSize) {
- rowsToSend = this.resultScanner.next(count);
- } else {
- rowsToSend = this.resultScanner.next(this.fetchSize);
- }
- if(rowsToSend.length == 0){
- break;
- }
- count = count - rowsToSend.length;
- writeToStream(createModelFromResults(rowsToSend), this.contentType, outStream);
- }
- }
- }
-
- private void writeToStream(CellSetModel model, String contentType, OutputStream outStream)
- throws IOException {
- byte[] objectBytes = model.createProtobufOutput();
- outStream.write(Bytes.toBytes((short)objectBytes.length));
- outStream.write(objectBytes);
- outStream.flush();
- LOG.trace("Wrote " + model.getRows().size() + " rows to stream successfully.");
- }
-
- private CellSetModel createModelFromResults(Result[] results) {
- CellSetModel cellSetModel = new CellSetModel();
- for (Result rs : results) {
- byte[] rowKey = rs.getRow();
- RowModel rModel = new RowModel(rowKey);
- List<Cell> kvs = rs.listCells();
- for (Cell kv : kvs) {
- rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), kv
- .getTimestamp(), CellUtil.cloneValue(kv)));
- }
- cellSetModel.addRow(rModel);
- }
- return cellSetModel;
- }
-}
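
writeToStream frames every CellSetModel chunk as a two-byte big-endian length (the Bytes.toBytes((short)...) call) followed by the protobuf bytes, so frames are capped at Short.MAX_VALUE bytes; a hedged sketch of the matching read side:

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.hbase.rest.model.CellSetModel;

public class FrameReaderSketch {
  // Reads the short-length-prefixed CellSetModel frames written above.
  static void readFrames(InputStream in) throws IOException {
    DataInputStream din = new DataInputStream(in);
    while (true) {
      int len;
      try {
        len = din.readShort(); // matches Bytes.toBytes((short) objectBytes.length)
      } catch (EOFException eof) {
        break; // clean end of stream
      }
      byte[] frame = new byte[len];
      din.readFully(frame);
      CellSetModel model = new CellSetModel();
      model.getObjectFromMessage(frame);
      System.out.println("rows in frame: " + model.getRows().size());
    }
  }
}
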
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
deleted file mode 100644
index 878b30a..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
+++ /dev/null
@@ -1,253 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.PosixParser;
-import org.apache.commons.lang.ArrayUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.rest.filter.AuthFilter;
-import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.util.HttpServerUtil;
-import org.apache.hadoop.hbase.util.InfoServer;
-import org.apache.hadoop.hbase.util.Strings;
-import org.apache.hadoop.hbase.util.VersionInfo;
-import org.apache.hadoop.net.DNS;
-import org.mortbay.jetty.Connector;
-import org.mortbay.jetty.Server;
-import org.mortbay.jetty.nio.SelectChannelConnector;
-import org.mortbay.jetty.security.SslSelectChannelConnector;
-import org.mortbay.jetty.servlet.Context;
-import org.mortbay.jetty.servlet.FilterHolder;
-import org.mortbay.jetty.servlet.ServletHolder;
-import org.mortbay.thread.QueuedThreadPool;
-
-import com.google.common.base.Preconditions;
-import com.sun.jersey.api.json.JSONConfiguration;
-import com.sun.jersey.spi.container.servlet.ServletContainer;
-
-/**
- * Main class for launching REST gateway as a servlet hosted by Jetty.
- * <p>
- * The following options are supported:
- * <ul>
- * <li>-p --port : service port</li>
- * <li>-ro --readonly : server mode</li>
- * </ul>
- */
-@InterfaceAudience.Private
-public class RESTServer implements Constants {
-
- private static void printUsageAndExit(Options options, int exitCode) {
- HelpFormatter formatter = new HelpFormatter();
- formatter.printHelp("bin/hbase rest start", "", options,
- "\nTo run the REST server as a daemon, execute " +
- "bin/hbase-daemon.sh start|stop rest [--infoport <port>] [-p <port>] [-ro]\n", true);
- System.exit(exitCode);
- }
-
- /**
- * The main method for the HBase rest server.
- * @param args command-line arguments
- * @throws Exception exception
- */
- public static void main(String[] args) throws Exception {
- Log LOG = LogFactory.getLog("RESTServer");
-
- VersionInfo.logVersion();
- FilterHolder authFilter = null;
- Configuration conf = HBaseConfiguration.create();
- Class<? extends ServletContainer> containerClass = ServletContainer.class;
- UserProvider userProvider = UserProvider.instantiate(conf);
- // login the server principal (if using secure Hadoop)
- if (userProvider.isHadoopSecurityEnabled() && userProvider.isHBaseSecurityEnabled()) {
- String machineName = Strings.domainNamePointerToHostName(
- DNS.getDefaultHost(conf.get(REST_DNS_INTERFACE, "default"),
- conf.get(REST_DNS_NAMESERVER, "default")));
- String keytabFilename = conf.get(REST_KEYTAB_FILE);
- Preconditions.checkArgument(keytabFilename != null && !keytabFilename.isEmpty(),
- REST_KEYTAB_FILE + " should be set if security is enabled");
- String principalConfig = conf.get(REST_KERBEROS_PRINCIPAL);
- Preconditions.checkArgument(principalConfig != null && !principalConfig.isEmpty(),
- REST_KERBEROS_PRINCIPAL + " should be set if security is enabled");
- userProvider.login(REST_KEYTAB_FILE, REST_KERBEROS_PRINCIPAL, machineName);
- if (conf.get(REST_AUTHENTICATION_TYPE) != null) {
- containerClass = RESTServletContainer.class;
- authFilter = new FilterHolder();
- authFilter.setClassName(AuthFilter.class.getName());
- authFilter.setName("AuthenticationFilter");
- }
- }
-
- RESTServlet servlet = RESTServlet.getInstance(conf, userProvider);
-
- Options options = new Options();
- options.addOption("p", "port", true, "Port to bind to [default: 8080]");
- options.addOption("ro", "readonly", false, "Respond only to GET HTTP " +
- "method requests [default: false]");
- options.addOption(null, "infoport", true, "Port for web UI");
-
- CommandLine commandLine = null;
- try {
- commandLine = new PosixParser().parse(options, args);
- } catch (ParseException e) {
- LOG.error("Could not parse: ", e);
- printUsageAndExit(options, -1);
- }
-
- // check for user-defined port setting, if so override the conf
- if (commandLine != null && commandLine.hasOption("port")) {
- String val = commandLine.getOptionValue("port");
- servlet.getConfiguration()
- .setInt("hbase.rest.port", Integer.valueOf(val));
- LOG.debug("port set to " + val);
- }
-
- // check if server should only process GET requests, if so override the conf
- if (commandLine != null && commandLine.hasOption("readonly")) {
- servlet.getConfiguration().setBoolean("hbase.rest.readonly", true);
- LOG.debug("readonly set to true");
- }
-
- // check for user-defined info server port setting, if so override the conf
- if (commandLine != null && commandLine.hasOption("infoport")) {
- String val = commandLine.getOptionValue("infoport");
- servlet.getConfiguration()
- .setInt("hbase.rest.info.port", Integer.valueOf(val));
- LOG.debug("Web UI port set to " + val);
- }
-
- @SuppressWarnings("unchecked")
- List<String> remainingArgs = commandLine != null ?
- commandLine.getArgList() : new ArrayList<String>();
- if (remainingArgs.size() != 1) {
- printUsageAndExit(options, 1);
- }
-
- String command = remainingArgs.get(0);
- if ("start".equals(command)) {
- // continue and start container
- } else if ("stop".equals(command)) {
- System.exit(1);
- } else {
- printUsageAndExit(options, 1);
- }
-
- // set up the Jersey servlet container for Jetty
- ServletHolder sh = new ServletHolder(containerClass);
- sh.setInitParameter(
- "com.sun.jersey.config.property.resourceConfigClass",
- ResourceConfig.class.getCanonicalName());
- sh.setInitParameter("com.sun.jersey.config.property.packages",
- "jetty");
- // The servlet holder below is instantiated only to handle the case
- // of /status/cluster returning arrays of nodes (live/dead). Without
- // it, the node arrays in the response are collapsed to single nodes.
- // We want to be able to treat the node lists as POJOs in the response
- // to the /status/cluster servlet call, but not change the behavior
- // for any of the other servlets. Hence we don't use this servlet
- // holder for all servlets / paths.
- ServletHolder shPojoMap = new ServletHolder(containerClass);
- @SuppressWarnings("unchecked")
- Map<String, String> shInitMap = sh.getInitParameters();
- for (Entry<String, String> e : shInitMap.entrySet()) {
- shPojoMap.setInitParameter(e.getKey(), e.getValue());
- }
- shPojoMap.setInitParameter(JSONConfiguration.FEATURE_POJO_MAPPING, "true");
-
- // set up Jetty and run the embedded server
-
- Server server = new Server();
-
- Connector connector = new SelectChannelConnector();
- if(conf.getBoolean(REST_SSL_ENABLED, false)) {
- SslSelectChannelConnector sslConnector = new SslSelectChannelConnector();
- String keystore = conf.get(REST_SSL_KEYSTORE_STORE);
- String password = HBaseConfiguration.getPassword(conf,
- REST_SSL_KEYSTORE_PASSWORD, null);
- String keyPassword = HBaseConfiguration.getPassword(conf,
- REST_SSL_KEYSTORE_KEYPASSWORD, password);
- sslConnector.setKeystore(keystore);
- sslConnector.setPassword(password);
- sslConnector.setKeyPassword(keyPassword);
- connector = sslConnector;
- }
- connector.setPort(servlet.getConfiguration().getInt("hbase.rest.port", 8080));
- connector.setHost(servlet.getConfiguration().get("hbase.rest.host", "0.0.0.0"));
-
- server.addConnector(connector);
-
- // Set the default max thread number to 100 to limit
- // the number of concurrent requests so that the REST server doesn't OOM easily.
- // Jetty sets the default max thread number to 250 if we don't set it.
- //
- // Our default min thread number of 2 is the same as that used by Jetty.
- int maxThreads = servlet.getConfiguration().getInt("hbase.rest.threads.max", 100);
- int minThreads = servlet.getConfiguration().getInt("hbase.rest.threads.min", 2);
- QueuedThreadPool threadPool = new QueuedThreadPool(maxThreads);
- threadPool.setMinThreads(minThreads);
- server.setThreadPool(threadPool);
-
- server.setSendServerVersion(false);
- server.setSendDateHeader(false);
- server.setStopAtShutdown(true);
- // set up context
- Context context = new Context(server, "/", Context.SESSIONS);
- context.addServlet(shPojoMap, "/status/cluster");
- context.addServlet(sh, "/*");
- if (authFilter != null) {
- context.addFilter(authFilter, "/*", 1);
- }
-
- // Load filters from configuration.
- String[] filterClasses = servlet.getConfiguration().getStrings(FILTER_CLASSES,
- ArrayUtils.EMPTY_STRING_ARRAY);
- for (String filter : filterClasses) {
- filter = filter.trim();
- context.addFilter(Class.forName(filter), "/*", 0);
- }
- HttpServerUtil.constrainHttpMethods(context);
-
- // Put up info server.
- int port = conf.getInt("hbase.rest.info.port", 8085);
- if (port >= 0) {
- conf.setLong("startcode", System.currentTimeMillis());
- String a = conf.get("hbase.rest.info.bindAddress", "0.0.0.0");
- InfoServer infoServer = new InfoServer("rest", a, port, false, conf);
- infoServer.setAttribute("hbase.conf", conf);
- infoServer.start();
- }
-
- // start server
- server.start();
- server.join();
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java
deleted file mode 100644
index a98663e..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.util.ConnectionCache;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.ProxyUsers;
-
-/**
- * Singleton class encapsulating global REST servlet state and functions.
- */
-@InterfaceAudience.Private
-public class RESTServlet implements Constants {
- private static RESTServlet INSTANCE;
- private final Configuration conf;
- private final MetricsREST metrics = new MetricsREST();
- private final ConnectionCache connectionCache;
- private final UserGroupInformation realUser;
-
- static final String CLEANUP_INTERVAL = "hbase.rest.connection.cleanup-interval";
- static final String MAX_IDLETIME = "hbase.rest.connection.max-idletime";
- static final String HBASE_REST_SUPPORT_PROXYUSER = "hbase.rest.support.proxyuser";
-
- UserGroupInformation getRealUser() {
- return realUser;
- }
-
- /**
- * @return the RESTServlet singleton instance
- */
- public synchronized static RESTServlet getInstance() {
- assert(INSTANCE != null);
- return INSTANCE;
- }
-
- /**
- * @param conf existing configuration to use in the REST servlet
- * @param userProvider the login user provider
- * @return the RESTServlet singleton instance
- * @throws IOException
- */
- public synchronized static RESTServlet getInstance(Configuration conf,
- UserProvider userProvider) throws IOException {
- if (INSTANCE == null) {
- INSTANCE = new RESTServlet(conf, userProvider);
- }
- return INSTANCE;
- }
-
- public synchronized static void stop() {
- if (INSTANCE != null) INSTANCE = null;
- }
-
- /**
- * Constructor with existing configuration
- * @param conf existing configuration
- * @param userProvider the login user provider
- * @throws IOException
- */
- RESTServlet(final Configuration conf,
- final UserProvider userProvider) throws IOException {
- this.realUser = userProvider.getCurrent().getUGI();
- this.conf = conf;
-
- int cleanInterval = conf.getInt(CLEANUP_INTERVAL, 10 * 1000);
- int maxIdleTime = conf.getInt(MAX_IDLETIME, 10 * 60 * 1000);
- connectionCache = new ConnectionCache(
- conf, userProvider, cleanInterval, maxIdleTime);
- if (supportsProxyuser()) {
- ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
- }
- }
-
- HBaseAdmin getAdmin() throws IOException {
- return connectionCache.getAdmin();
- }
-
- /**
- * Caller closes the table afterwards.
- */
- HTableInterface getTable(String tableName) throws IOException {
- return connectionCache.getTable(tableName);
- }
-
- Configuration getConfiguration() {
- return conf;
- }
-
- MetricsREST getMetrics() {
- return metrics;
- }
-
- /**
- * Helper method to determine if the server should
- * respond only to GET HTTP method requests.
- * @return boolean for server read-only state
- */
- boolean isReadOnly() {
- return getConfiguration().getBoolean("hbase.rest.readonly", false);
- }
-
- void setEffectiveUser(String effectiveUser) {
- connectionCache.setEffectiveUser(effectiveUser);
- }
-
- boolean supportsProxyuser() {
- return conf.getBoolean(HBASE_REST_SUPPORT_PROXYUSER, false);
- }
-}
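
RESTServlet above is a lazily initialized singleton: the first caller must supply the Configuration and UserProvider through getInstance(conf, userProvider), and every later caller uses the no-arg getInstance(), which asserts that initialization already happened. Below is a minimal sketch of the same idiom, with java.util.Properties standing in for the HBase Configuration; AppContext and its members are illustrative names, not HBase API.

import java.util.Properties;

public class AppContext {
  private static AppContext INSTANCE;
  private final Properties conf;

  private AppContext(Properties conf) {
    this.conf = conf;
  }

  // The first caller supplies the configuration and wins the initialization.
  public static synchronized AppContext getInstance(Properties conf) {
    if (INSTANCE == null) {
      INSTANCE = new AppContext(conf);
    }
    return INSTANCE;
  }

  // Later callers must only run after initialization, hence the assert.
  public static synchronized AppContext getInstance() {
    assert INSTANCE != null;
    return INSTANCE;
  }

  public static synchronized void stop() {
    INSTANCE = null;
  }

  public String get(String key, String defaultValue) {
    return conf.getProperty(key, defaultValue);
  }
}

Startup code calls AppContext.getInstance(props) once; request handlers then reach the shared state through AppContext.getInstance(), exactly as the REST resources reach RESTServlet.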
[29/38] HBASE-12197 Move rest to its own module
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java
new file mode 100644
index 0000000..05ff7a3
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java
@@ -0,0 +1,3955 @@
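
The file below is protoc output, so it is long but mechanical: each message becomes an immutable class plus a Builder, with hasX()/getX() accessors backed by presence bits. Assuming the standard generated-builder API (the field values here are purely illustrative), a typical build/serialize/parse round trip looks like:

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus;

public class StatusRoundTrip {
  public static void main(String[] args) throws Exception {
    // Region's only required field is name; everything else is optional.
    StorageClusterStatus.Region region = StorageClusterStatus.Region.newBuilder()
        .setName(ByteString.copyFromUtf8("hbase:meta,,1246000043724"))
        .setStores(1)
        .setStorefiles(1)
        .setReadRequestsCount(1L)
        .setWriteRequestsCount(2L)
        .build();

    StorageClusterStatus.Node node = StorageClusterStatus.Node.newBuilder()
        .setName("test1")              // required "name:port" string
        .setStartCode(1245219839331L)
        .setHeapSizeMB(128)
        .setMaxHeapSizeMB(1024)
        .addRegions(region)
        .build();

    StorageClusterStatus status = StorageClusterStatus.newBuilder()
        .addLiveNodes(node)
        .setRegions(2)
        .setRequests(0)
        .setAverageLoad(1.0)
        .build();

    // Serialization and parsing exercise the generated writeTo()/PARSER code below.
    byte[] wire = status.toByteArray();
    StorageClusterStatus parsed = StorageClusterStatus.parseFrom(wire);
    System.out.println(parsed.getLiveNodes(0).getName());   // prints test1
  }
}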
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: StorageClusterStatusMessage.proto
+
+package org.apache.hadoop.hbase.rest.protobuf.generated;
+
+public final class StorageClusterStatusMessage {
+ private StorageClusterStatusMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface StorageClusterStatusOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
+ *
+ * <pre>
+ * node status
+ * </pre>
+ */
+ java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node>
+ getLiveNodesList();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
+ *
+ * <pre>
+ * node status
+ * </pre>
+ */
+ org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node getLiveNodes(int index);
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
+ *
+ * <pre>
+ * node status
+ * </pre>
+ */
+ int getLiveNodesCount();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
+ *
+ * <pre>
+ * node status
+ * </pre>
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder>
+ getLiveNodesOrBuilderList();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
+ *
+ * <pre>
+ * node status
+ * </pre>
+ */
+ org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder getLiveNodesOrBuilder(
+ int index);
+
+ // repeated string deadNodes = 2;
+ /**
+ * <code>repeated string deadNodes = 2;</code>
+ */
+ java.util.List<java.lang.String>
+ getDeadNodesList();
+ /**
+ * <code>repeated string deadNodes = 2;</code>
+ */
+ int getDeadNodesCount();
+ /**
+ * <code>repeated string deadNodes = 2;</code>
+ */
+ java.lang.String getDeadNodes(int index);
+ /**
+ * <code>repeated string deadNodes = 2;</code>
+ */
+ com.google.protobuf.ByteString
+ getDeadNodesBytes(int index);
+
+ // optional int32 regions = 3;
+ /**
+ * <code>optional int32 regions = 3;</code>
+ *
+ * <pre>
+ * summary statistics
+ * </pre>
+ */
+ boolean hasRegions();
+ /**
+ * <code>optional int32 regions = 3;</code>
+ *
+ * <pre>
+ * summary statistics
+ * </pre>
+ */
+ int getRegions();
+
+ // optional int32 requests = 4;
+ /**
+ * <code>optional int32 requests = 4;</code>
+ */
+ boolean hasRequests();
+ /**
+ * <code>optional int32 requests = 4;</code>
+ */
+ int getRequests();
+
+ // optional double averageLoad = 5;
+ /**
+ * <code>optional double averageLoad = 5;</code>
+ */
+ boolean hasAverageLoad();
+ /**
+ * <code>optional double averageLoad = 5;</code>
+ */
+ double getAverageLoad();
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus}
+ */
+ public static final class StorageClusterStatus extends
+ com.google.protobuf.GeneratedMessage
+ implements StorageClusterStatusOrBuilder {
+ // Use StorageClusterStatus.newBuilder() to construct.
+ private StorageClusterStatus(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private StorageClusterStatus(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final StorageClusterStatus defaultInstance;
+ public static StorageClusterStatus getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public StorageClusterStatus getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private StorageClusterStatus(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+ liveNodes_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node>();
+ mutable_bitField0_ |= 0x00000001;
+ }
+ liveNodes_.add(input.readMessage(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.PARSER, extensionRegistry));
+ break;
+ }
+ case 18: {
+ if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ deadNodes_ = new com.google.protobuf.LazyStringArrayList();
+ mutable_bitField0_ |= 0x00000002;
+ }
+ deadNodes_.add(input.readBytes());
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000001;
+ regions_ = input.readInt32();
+ break;
+ }
+ case 32: {
+ bitField0_ |= 0x00000002;
+ requests_ = input.readInt32();
+ break;
+ }
+ case 41: {
+ bitField0_ |= 0x00000004;
+ averageLoad_ = input.readDouble();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+ liveNodes_ = java.util.Collections.unmodifiableList(liveNodes_);
+ }
+ if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ deadNodes_ = new com.google.protobuf.UnmodifiableLazyStringList(deadNodes_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.class, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<StorageClusterStatus> PARSER =
+ new com.google.protobuf.AbstractParser<StorageClusterStatus>() {
+ public StorageClusterStatus parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new StorageClusterStatus(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<StorageClusterStatus> getParserForType() {
+ return PARSER;
+ }
+
+ public interface RegionOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required bytes name = 1;
+ /**
+ * <code>required bytes name = 1;</code>
+ */
+ boolean hasName();
+ /**
+ * <code>required bytes name = 1;</code>
+ */
+ com.google.protobuf.ByteString getName();
+
+ // optional int32 stores = 2;
+ /**
+ * <code>optional int32 stores = 2;</code>
+ */
+ boolean hasStores();
+ /**
+ * <code>optional int32 stores = 2;</code>
+ */
+ int getStores();
+
+ // optional int32 storefiles = 3;
+ /**
+ * <code>optional int32 storefiles = 3;</code>
+ */
+ boolean hasStorefiles();
+ /**
+ * <code>optional int32 storefiles = 3;</code>
+ */
+ int getStorefiles();
+
+ // optional int32 storefileSizeMB = 4;
+ /**
+ * <code>optional int32 storefileSizeMB = 4;</code>
+ */
+ boolean hasStorefileSizeMB();
+ /**
+ * <code>optional int32 storefileSizeMB = 4;</code>
+ */
+ int getStorefileSizeMB();
+
+ // optional int32 memstoreSizeMB = 5;
+ /**
+ * <code>optional int32 memstoreSizeMB = 5;</code>
+ */
+ boolean hasMemstoreSizeMB();
+ /**
+ * <code>optional int32 memstoreSizeMB = 5;</code>
+ */
+ int getMemstoreSizeMB();
+
+ // optional int32 storefileIndexSizeMB = 6;
+ /**
+ * <code>optional int32 storefileIndexSizeMB = 6;</code>
+ */
+ boolean hasStorefileIndexSizeMB();
+ /**
+ * <code>optional int32 storefileIndexSizeMB = 6;</code>
+ */
+ int getStorefileIndexSizeMB();
+
+ // optional int64 readRequestsCount = 7;
+ /**
+ * <code>optional int64 readRequestsCount = 7;</code>
+ */
+ boolean hasReadRequestsCount();
+ /**
+ * <code>optional int64 readRequestsCount = 7;</code>
+ */
+ long getReadRequestsCount();
+
+ // optional int64 writeRequestsCount = 8;
+ /**
+ * <code>optional int64 writeRequestsCount = 8;</code>
+ */
+ boolean hasWriteRequestsCount();
+ /**
+ * <code>optional int64 writeRequestsCount = 8;</code>
+ */
+ long getWriteRequestsCount();
+
+ // optional int32 rootIndexSizeKB = 9;
+ /**
+ * <code>optional int32 rootIndexSizeKB = 9;</code>
+ */
+ boolean hasRootIndexSizeKB();
+ /**
+ * <code>optional int32 rootIndexSizeKB = 9;</code>
+ */
+ int getRootIndexSizeKB();
+
+ // optional int32 totalStaticIndexSizeKB = 10;
+ /**
+ * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
+ */
+ boolean hasTotalStaticIndexSizeKB();
+ /**
+ * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
+ */
+ int getTotalStaticIndexSizeKB();
+
+ // optional int32 totalStaticBloomSizeKB = 11;
+ /**
+ * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
+ */
+ boolean hasTotalStaticBloomSizeKB();
+ /**
+ * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
+ */
+ int getTotalStaticBloomSizeKB();
+
+ // optional int64 totalCompactingKVs = 12;
+ /**
+ * <code>optional int64 totalCompactingKVs = 12;</code>
+ */
+ boolean hasTotalCompactingKVs();
+ /**
+ * <code>optional int64 totalCompactingKVs = 12;</code>
+ */
+ long getTotalCompactingKVs();
+
+ // optional int64 currentCompactedKVs = 13;
+ /**
+ * <code>optional int64 currentCompactedKVs = 13;</code>
+ */
+ boolean hasCurrentCompactedKVs();
+ /**
+ * <code>optional int64 currentCompactedKVs = 13;</code>
+ */
+ long getCurrentCompactedKVs();
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region}
+ */
+ public static final class Region extends
+ com.google.protobuf.GeneratedMessage
+ implements RegionOrBuilder {
+ // Use Region.newBuilder() to construct.
+ private Region(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Region(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Region defaultInstance;
+ public static Region getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Region getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Region(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ name_ = input.readBytes();
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ stores_ = input.readInt32();
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ storefiles_ = input.readInt32();
+ break;
+ }
+ case 32: {
+ bitField0_ |= 0x00000008;
+ storefileSizeMB_ = input.readInt32();
+ break;
+ }
+ case 40: {
+ bitField0_ |= 0x00000010;
+ memstoreSizeMB_ = input.readInt32();
+ break;
+ }
+ case 48: {
+ bitField0_ |= 0x00000020;
+ storefileIndexSizeMB_ = input.readInt32();
+ break;
+ }
+ case 56: {
+ bitField0_ |= 0x00000040;
+ readRequestsCount_ = input.readInt64();
+ break;
+ }
+ case 64: {
+ bitField0_ |= 0x00000080;
+ writeRequestsCount_ = input.readInt64();
+ break;
+ }
+ case 72: {
+ bitField0_ |= 0x00000100;
+ rootIndexSizeKB_ = input.readInt32();
+ break;
+ }
+ case 80: {
+ bitField0_ |= 0x00000200;
+ totalStaticIndexSizeKB_ = input.readInt32();
+ break;
+ }
+ case 88: {
+ bitField0_ |= 0x00000400;
+ totalStaticBloomSizeKB_ = input.readInt32();
+ break;
+ }
+ case 96: {
+ bitField0_ |= 0x00000800;
+ totalCompactingKVs_ = input.readInt64();
+ break;
+ }
+ case 104: {
+ bitField0_ |= 0x00001000;
+ currentCompactedKVs_ = input.readInt64();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.class, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Region> PARSER =
+ new com.google.protobuf.AbstractParser<Region>() {
+ public Region parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Region(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Region> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required bytes name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private com.google.protobuf.ByteString name_;
+ /**
+ * <code>required bytes name = 1;</code>
+ */
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required bytes name = 1;</code>
+ */
+ public com.google.protobuf.ByteString getName() {
+ return name_;
+ }
+
+ // optional int32 stores = 2;
+ public static final int STORES_FIELD_NUMBER = 2;
+ private int stores_;
+ /**
+ * <code>optional int32 stores = 2;</code>
+ */
+ public boolean hasStores() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional int32 stores = 2;</code>
+ */
+ public int getStores() {
+ return stores_;
+ }
+
+ // optional int32 storefiles = 3;
+ public static final int STOREFILES_FIELD_NUMBER = 3;
+ private int storefiles_;
+ /**
+ * <code>optional int32 storefiles = 3;</code>
+ */
+ public boolean hasStorefiles() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional int32 storefiles = 3;</code>
+ */
+ public int getStorefiles() {
+ return storefiles_;
+ }
+
+ // optional int32 storefileSizeMB = 4;
+ public static final int STOREFILESIZEMB_FIELD_NUMBER = 4;
+ private int storefileSizeMB_;
+ /**
+ * <code>optional int32 storefileSizeMB = 4;</code>
+ */
+ public boolean hasStorefileSizeMB() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional int32 storefileSizeMB = 4;</code>
+ */
+ public int getStorefileSizeMB() {
+ return storefileSizeMB_;
+ }
+
+ // optional int32 memstoreSizeMB = 5;
+ public static final int MEMSTORESIZEMB_FIELD_NUMBER = 5;
+ private int memstoreSizeMB_;
+ /**
+ * <code>optional int32 memstoreSizeMB = 5;</code>
+ */
+ public boolean hasMemstoreSizeMB() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * <code>optional int32 memstoreSizeMB = 5;</code>
+ */
+ public int getMemstoreSizeMB() {
+ return memstoreSizeMB_;
+ }
+
+ // optional int32 storefileIndexSizeMB = 6;
+ public static final int STOREFILEINDEXSIZEMB_FIELD_NUMBER = 6;
+ private int storefileIndexSizeMB_;
+ /**
+ * <code>optional int32 storefileIndexSizeMB = 6;</code>
+ */
+ public boolean hasStorefileIndexSizeMB() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * <code>optional int32 storefileIndexSizeMB = 6;</code>
+ */
+ public int getStorefileIndexSizeMB() {
+ return storefileIndexSizeMB_;
+ }
+
+ // optional int64 readRequestsCount = 7;
+ public static final int READREQUESTSCOUNT_FIELD_NUMBER = 7;
+ private long readRequestsCount_;
+ /**
+ * <code>optional int64 readRequestsCount = 7;</code>
+ */
+ public boolean hasReadRequestsCount() {
+ return ((bitField0_ & 0x00000040) == 0x00000040);
+ }
+ /**
+ * <code>optional int64 readRequestsCount = 7;</code>
+ */
+ public long getReadRequestsCount() {
+ return readRequestsCount_;
+ }
+
+ // optional int64 writeRequestsCount = 8;
+ public static final int WRITEREQUESTSCOUNT_FIELD_NUMBER = 8;
+ private long writeRequestsCount_;
+ /**
+ * <code>optional int64 writeRequestsCount = 8;</code>
+ */
+ public boolean hasWriteRequestsCount() {
+ return ((bitField0_ & 0x00000080) == 0x00000080);
+ }
+ /**
+ * <code>optional int64 writeRequestsCount = 8;</code>
+ */
+ public long getWriteRequestsCount() {
+ return writeRequestsCount_;
+ }
+
+ // optional int32 rootIndexSizeKB = 9;
+ public static final int ROOTINDEXSIZEKB_FIELD_NUMBER = 9;
+ private int rootIndexSizeKB_;
+ /**
+ * <code>optional int32 rootIndexSizeKB = 9;</code>
+ */
+ public boolean hasRootIndexSizeKB() {
+ return ((bitField0_ & 0x00000100) == 0x00000100);
+ }
+ /**
+ * <code>optional int32 rootIndexSizeKB = 9;</code>
+ */
+ public int getRootIndexSizeKB() {
+ return rootIndexSizeKB_;
+ }
+
+ // optional int32 totalStaticIndexSizeKB = 10;
+ public static final int TOTALSTATICINDEXSIZEKB_FIELD_NUMBER = 10;
+ private int totalStaticIndexSizeKB_;
+ /**
+ * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
+ */
+ public boolean hasTotalStaticIndexSizeKB() {
+ return ((bitField0_ & 0x00000200) == 0x00000200);
+ }
+ /**
+ * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
+ */
+ public int getTotalStaticIndexSizeKB() {
+ return totalStaticIndexSizeKB_;
+ }
+
+ // optional int32 totalStaticBloomSizeKB = 11;
+ public static final int TOTALSTATICBLOOMSIZEKB_FIELD_NUMBER = 11;
+ private int totalStaticBloomSizeKB_;
+ /**
+ * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
+ */
+ public boolean hasTotalStaticBloomSizeKB() {
+ return ((bitField0_ & 0x00000400) == 0x00000400);
+ }
+ /**
+ * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
+ */
+ public int getTotalStaticBloomSizeKB() {
+ return totalStaticBloomSizeKB_;
+ }
+
+ // optional int64 totalCompactingKVs = 12;
+ public static final int TOTALCOMPACTINGKVS_FIELD_NUMBER = 12;
+ private long totalCompactingKVs_;
+ /**
+ * <code>optional int64 totalCompactingKVs = 12;</code>
+ */
+ public boolean hasTotalCompactingKVs() {
+ return ((bitField0_ & 0x00000800) == 0x00000800);
+ }
+ /**
+ * <code>optional int64 totalCompactingKVs = 12;</code>
+ */
+ public long getTotalCompactingKVs() {
+ return totalCompactingKVs_;
+ }
+
+ // optional int64 currentCompactedKVs = 13;
+ public static final int CURRENTCOMPACTEDKVS_FIELD_NUMBER = 13;
+ private long currentCompactedKVs_;
+ /**
+ * <code>optional int64 currentCompactedKVs = 13;</code>
+ */
+ public boolean hasCurrentCompactedKVs() {
+ return ((bitField0_ & 0x00001000) == 0x00001000);
+ }
+ /**
+ * <code>optional int64 currentCompactedKVs = 13;</code>
+ */
+ public long getCurrentCompactedKVs() {
+ return currentCompactedKVs_;
+ }
+
+ private void initFields() {
+ name_ = com.google.protobuf.ByteString.EMPTY;
+ stores_ = 0;
+ storefiles_ = 0;
+ storefileSizeMB_ = 0;
+ memstoreSizeMB_ = 0;
+ storefileIndexSizeMB_ = 0;
+ readRequestsCount_ = 0L;
+ writeRequestsCount_ = 0L;
+ rootIndexSizeKB_ = 0;
+ totalStaticIndexSizeKB_ = 0;
+ totalStaticBloomSizeKB_ = 0;
+ totalCompactingKVs_ = 0L;
+ currentCompactedKVs_ = 0L;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, name_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeInt32(2, stores_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeInt32(3, storefiles_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeInt32(4, storefileSizeMB_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ output.writeInt32(5, memstoreSizeMB_);
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ output.writeInt32(6, storefileIndexSizeMB_);
+ }
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ output.writeInt64(7, readRequestsCount_);
+ }
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
+ output.writeInt64(8, writeRequestsCount_);
+ }
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ output.writeInt32(9, rootIndexSizeKB_);
+ }
+ if (((bitField0_ & 0x00000200) == 0x00000200)) {
+ output.writeInt32(10, totalStaticIndexSizeKB_);
+ }
+ if (((bitField0_ & 0x00000400) == 0x00000400)) {
+ output.writeInt32(11, totalStaticBloomSizeKB_);
+ }
+ if (((bitField0_ & 0x00000800) == 0x00000800)) {
+ output.writeInt64(12, totalCompactingKVs_);
+ }
+ if (((bitField0_ & 0x00001000) == 0x00001000)) {
+ output.writeInt64(13, currentCompactedKVs_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, name_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(2, stores_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(3, storefiles_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(4, storefileSizeMB_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(5, memstoreSizeMB_);
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(6, storefileIndexSizeMB_);
+ }
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(7, readRequestsCount_);
+ }
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(8, writeRequestsCount_);
+ }
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(9, rootIndexSizeKB_);
+ }
+ if (((bitField0_ & 0x00000200) == 0x00000200)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(10, totalStaticIndexSizeKB_);
+ }
+ if (((bitField0_ & 0x00000400) == 0x00000400)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(11, totalStaticBloomSizeKB_);
+ }
+ if (((bitField0_ & 0x00000800) == 0x00000800)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(12, totalCompactingKVs_);
+ }
+ if (((bitField0_ & 0x00001000) == 0x00001000)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(13, currentCompactedKVs_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.class, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ name_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ stores_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ storefiles_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ storefileSizeMB_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ memstoreSizeMB_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000010);
+ storefileIndexSizeMB_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000020);
+ readRequestsCount_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000040);
+ writeRequestsCount_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000080);
+ rootIndexSizeKB_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000100);
+ totalStaticIndexSizeKB_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000200);
+ totalStaticBloomSizeKB_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000400);
+ totalCompactingKVs_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000800);
+ currentCompactedKVs_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00001000);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region build() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region buildPartial() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region result = new org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.name_ = name_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.stores_ = stores_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.storefiles_ = storefiles_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.storefileSizeMB_ = storefileSizeMB_;
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+ to_bitField0_ |= 0x00000010;
+ }
+ result.memstoreSizeMB_ = memstoreSizeMB_;
+ if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+ to_bitField0_ |= 0x00000020;
+ }
+ result.storefileIndexSizeMB_ = storefileIndexSizeMB_;
+ if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
+ to_bitField0_ |= 0x00000040;
+ }
+ result.readRequestsCount_ = readRequestsCount_;
+ if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
+ to_bitField0_ |= 0x00000080;
+ }
+ result.writeRequestsCount_ = writeRequestsCount_;
+ if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
+ to_bitField0_ |= 0x00000100;
+ }
+ result.rootIndexSizeKB_ = rootIndexSizeKB_;
+ if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
+ to_bitField0_ |= 0x00000200;
+ }
+ result.totalStaticIndexSizeKB_ = totalStaticIndexSizeKB_;
+ if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
+ to_bitField0_ |= 0x00000400;
+ }
+ result.totalStaticBloomSizeKB_ = totalStaticBloomSizeKB_;
+ if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
+ to_bitField0_ |= 0x00000800;
+ }
+ result.totalCompactingKVs_ = totalCompactingKVs_;
+ if (((from_bitField0_ & 0x00001000) == 0x00001000)) {
+ to_bitField0_ |= 0x00001000;
+ }
+ result.currentCompactedKVs_ = currentCompactedKVs_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region) {
+ return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region other) {
+ if (other == org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ setName(other.getName());
+ }
+ if (other.hasStores()) {
+ setStores(other.getStores());
+ }
+ if (other.hasStorefiles()) {
+ setStorefiles(other.getStorefiles());
+ }
+ if (other.hasStorefileSizeMB()) {
+ setStorefileSizeMB(other.getStorefileSizeMB());
+ }
+ if (other.hasMemstoreSizeMB()) {
+ setMemstoreSizeMB(other.getMemstoreSizeMB());
+ }
+ if (other.hasStorefileIndexSizeMB()) {
+ setStorefileIndexSizeMB(other.getStorefileIndexSizeMB());
+ }
+ if (other.hasReadRequestsCount()) {
+ setReadRequestsCount(other.getReadRequestsCount());
+ }
+ if (other.hasWriteRequestsCount()) {
+ setWriteRequestsCount(other.getWriteRequestsCount());
+ }
+ if (other.hasRootIndexSizeKB()) {
+ setRootIndexSizeKB(other.getRootIndexSizeKB());
+ }
+ if (other.hasTotalStaticIndexSizeKB()) {
+ setTotalStaticIndexSizeKB(other.getTotalStaticIndexSizeKB());
+ }
+ if (other.hasTotalStaticBloomSizeKB()) {
+ setTotalStaticBloomSizeKB(other.getTotalStaticBloomSizeKB());
+ }
+ if (other.hasTotalCompactingKVs()) {
+ setTotalCompactingKVs(other.getTotalCompactingKVs());
+ }
+ if (other.hasCurrentCompactedKVs()) {
+ setCurrentCompactedKVs(other.getCurrentCompactedKVs());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasName()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required bytes name = 1;
+ private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * <code>required bytes name = 1;</code>
+ */
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required bytes name = 1;</code>
+ */
+ public com.google.protobuf.ByteString getName() {
+ return name_;
+ }
+ /**
+ * <code>required bytes name = 1;</code>
+ */
+ public Builder setName(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required bytes name = 1;</code>
+ */
+ public Builder clearName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ name_ = getDefaultInstance().getName();
+ onChanged();
+ return this;
+ }
+
+ // optional int32 stores = 2;
+ private int stores_ ;
+ /**
+ * <code>optional int32 stores = 2;</code>
+ */
+ public boolean hasStores() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional int32 stores = 2;</code>
+ */
+ public int getStores() {
+ return stores_;
+ }
+ /**
+ * <code>optional int32 stores = 2;</code>
+ */
+ public Builder setStores(int value) {
+ bitField0_ |= 0x00000002;
+ stores_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int32 stores = 2;</code>
+ */
+ public Builder clearStores() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ stores_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // optional int32 storefiles = 3;
+ private int storefiles_ ;
+ /**
+ * <code>optional int32 storefiles = 3;</code>
+ */
+ public boolean hasStorefiles() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional int32 storefiles = 3;</code>
+ */
+ public int getStorefiles() {
+ return storefiles_;
+ }
+ /**
+ * <code>optional int32 storefiles = 3;</code>
+ */
+ public Builder setStorefiles(int value) {
+ bitField0_ |= 0x00000004;
+ storefiles_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int32 storefiles = 3;</code>
+ */
+ public Builder clearStorefiles() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ storefiles_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // optional int32 storefileSizeMB = 4;
+ private int storefileSizeMB_ ;
+ /**
+ * <code>optional int32 storefileSizeMB = 4;</code>
+ */
+ public boolean hasStorefileSizeMB() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional int32 storefileSizeMB = 4;</code>
+ */
+ public int getStorefileSizeMB() {
+ return storefileSizeMB_;
+ }
+ /**
+ * <code>optional int32 storefileSizeMB = 4;</code>
+ */
+ public Builder setStorefileSizeMB(int value) {
+ bitField0_ |= 0x00000008;
+ storefileSizeMB_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int32 storefileSizeMB = 4;</code>
+ */
+ public Builder clearStorefileSizeMB() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ storefileSizeMB_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // optional int32 memstoreSizeMB = 5;
+ private int memstoreSizeMB_ ;
+ /**
+ * <code>optional int32 memstoreSizeMB = 5;</code>
+ */
+ public boolean hasMemstoreSizeMB() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * <code>optional int32 memstoreSizeMB = 5;</code>
+ */
+ public int getMemstoreSizeMB() {
+ return memstoreSizeMB_;
+ }
+ /**
+ * <code>optional int32 memstoreSizeMB = 5;</code>
+ */
+ public Builder setMemstoreSizeMB(int value) {
+ bitField0_ |= 0x00000010;
+ memstoreSizeMB_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int32 memstoreSizeMB = 5;</code>
+ */
+ public Builder clearMemstoreSizeMB() {
+ bitField0_ = (bitField0_ & ~0x00000010);
+ memstoreSizeMB_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // optional int32 storefileIndexSizeMB = 6;
+ private int storefileIndexSizeMB_ ;
+ /**
+ * <code>optional int32 storefileIndexSizeMB = 6;</code>
+ */
+ public boolean hasStorefileIndexSizeMB() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * <code>optional int32 storefileIndexSizeMB = 6;</code>
+ */
+ public int getStorefileIndexSizeMB() {
+ return storefileIndexSizeMB_;
+ }
+ /**
+ * <code>optional int32 storefileIndexSizeMB = 6;</code>
+ */
+ public Builder setStorefileIndexSizeMB(int value) {
+ bitField0_ |= 0x00000020;
+ storefileIndexSizeMB_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int32 storefileIndexSizeMB = 6;</code>
+ */
+ public Builder clearStorefileIndexSizeMB() {
+ bitField0_ = (bitField0_ & ~0x00000020);
+ storefileIndexSizeMB_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // optional int64 readRequestsCount = 7;
+ private long readRequestsCount_ ;
+ /**
+ * <code>optional int64 readRequestsCount = 7;</code>
+ */
+ public boolean hasReadRequestsCount() {
+ return ((bitField0_ & 0x00000040) == 0x00000040);
+ }
+ /**
+ * <code>optional int64 readRequestsCount = 7;</code>
+ */
+ public long getReadRequestsCount() {
+ return readRequestsCount_;
+ }
+ /**
+ * <code>optional int64 readRequestsCount = 7;</code>
+ */
+ public Builder setReadRequestsCount(long value) {
+ bitField0_ |= 0x00000040;
+ readRequestsCount_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int64 readRequestsCount = 7;</code>
+ */
+ public Builder clearReadRequestsCount() {
+ bitField0_ = (bitField0_ & ~0x00000040);
+ readRequestsCount_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional int64 writeRequestsCount = 8;
+ private long writeRequestsCount_ ;
+ /**
+ * <code>optional int64 writeRequestsCount = 8;</code>
+ */
+ public boolean hasWriteRequestsCount() {
+ return ((bitField0_ & 0x00000080) == 0x00000080);
+ }
+ /**
+ * <code>optional int64 writeRequestsCount = 8;</code>
+ */
+ public long getWriteRequestsCount() {
+ return writeRequestsCount_;
+ }
+ /**
+ * <code>optional int64 writeRequestsCount = 8;</code>
+ */
+ public Builder setWriteRequestsCount(long value) {
+ bitField0_ |= 0x00000080;
+ writeRequestsCount_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int64 writeRequestsCount = 8;</code>
+ */
+ public Builder clearWriteRequestsCount() {
+ bitField0_ = (bitField0_ & ~0x00000080);
+ writeRequestsCount_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional int32 rootIndexSizeKB = 9;
+ private int rootIndexSizeKB_ ;
+ /**
+ * <code>optional int32 rootIndexSizeKB = 9;</code>
+ */
+ public boolean hasRootIndexSizeKB() {
+ return ((bitField0_ & 0x00000100) == 0x00000100);
+ }
+ /**
+ * <code>optional int32 rootIndexSizeKB = 9;</code>
+ */
+ public int getRootIndexSizeKB() {
+ return rootIndexSizeKB_;
+ }
+ /**
+ * <code>optional int32 rootIndexSizeKB = 9;</code>
+ */
+ public Builder setRootIndexSizeKB(int value) {
+ bitField0_ |= 0x00000100;
+ rootIndexSizeKB_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int32 rootIndexSizeKB = 9;</code>
+ */
+ public Builder clearRootIndexSizeKB() {
+ bitField0_ = (bitField0_ & ~0x00000100);
+ rootIndexSizeKB_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // optional int32 totalStaticIndexSizeKB = 10;
+ private int totalStaticIndexSizeKB_ ;
+ /**
+ * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
+ */
+ public boolean hasTotalStaticIndexSizeKB() {
+ return ((bitField0_ & 0x00000200) == 0x00000200);
+ }
+ /**
+ * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
+ */
+ public int getTotalStaticIndexSizeKB() {
+ return totalStaticIndexSizeKB_;
+ }
+ /**
+ * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
+ */
+ public Builder setTotalStaticIndexSizeKB(int value) {
+ bitField0_ |= 0x00000200;
+ totalStaticIndexSizeKB_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
+ */
+ public Builder clearTotalStaticIndexSizeKB() {
+ bitField0_ = (bitField0_ & ~0x00000200);
+ totalStaticIndexSizeKB_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // optional int32 totalStaticBloomSizeKB = 11;
+ private int totalStaticBloomSizeKB_ ;
+ /**
+ * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
+ */
+ public boolean hasTotalStaticBloomSizeKB() {
+ return ((bitField0_ & 0x00000400) == 0x00000400);
+ }
+ /**
+ * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
+ */
+ public int getTotalStaticBloomSizeKB() {
+ return totalStaticBloomSizeKB_;
+ }
+ /**
+ * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
+ */
+ public Builder setTotalStaticBloomSizeKB(int value) {
+ bitField0_ |= 0x00000400;
+ totalStaticBloomSizeKB_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
+ */
+ public Builder clearTotalStaticBloomSizeKB() {
+ bitField0_ = (bitField0_ & ~0x00000400);
+ totalStaticBloomSizeKB_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // optional int64 totalCompactingKVs = 12;
+ private long totalCompactingKVs_ ;
+ /**
+ * <code>optional int64 totalCompactingKVs = 12;</code>
+ */
+ public boolean hasTotalCompactingKVs() {
+ return ((bitField0_ & 0x00000800) == 0x00000800);
+ }
+ /**
+ * <code>optional int64 totalCompactingKVs = 12;</code>
+ */
+ public long getTotalCompactingKVs() {
+ return totalCompactingKVs_;
+ }
+ /**
+ * <code>optional int64 totalCompactingKVs = 12;</code>
+ */
+ public Builder setTotalCompactingKVs(long value) {
+ bitField0_ |= 0x00000800;
+ totalCompactingKVs_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int64 totalCompactingKVs = 12;</code>
+ */
+ public Builder clearTotalCompactingKVs() {
+ bitField0_ = (bitField0_ & ~0x00000800);
+ totalCompactingKVs_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional int64 currentCompactedKVs = 13;
+ private long currentCompactedKVs_ ;
+ /**
+ * <code>optional int64 currentCompactedKVs = 13;</code>
+ */
+ public boolean hasCurrentCompactedKVs() {
+ return ((bitField0_ & 0x00001000) == 0x00001000);
+ }
+ /**
+ * <code>optional int64 currentCompactedKVs = 13;</code>
+ */
+ public long getCurrentCompactedKVs() {
+ return currentCompactedKVs_;
+ }
+ /**
+ * <code>optional int64 currentCompactedKVs = 13;</code>
+ */
+ public Builder setCurrentCompactedKVs(long value) {
+ bitField0_ |= 0x00001000;
+ currentCompactedKVs_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int64 currentCompactedKVs = 13;</code>
+ */
+ public Builder clearCurrentCompactedKVs() {
+ bitField0_ = (bitField0_ & ~0x00001000);
+ currentCompactedKVs_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region)
+ }
+
+ static {
+ defaultInstance = new Region(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region)
+ }
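
A note on the parsing switches in this file: the case labels are precomputed protobuf tags, (fieldNumber << 3) | wireType, which is why Region's required bytes name (field 1, length-delimited) matches case 10 while its varint fields land on multiples of 8. A dependency-free illustration:

public class TagMath {
  // Wire types from the protobuf encoding spec.
  static final int VARINT = 0, FIXED64 = 1, LENGTH_DELIMITED = 2, FIXED32 = 5;

  static int tag(int fieldNumber, int wireType) {
    return (fieldNumber << 3) | wireType;
  }

  public static void main(String[] args) {
    System.out.println(tag(1, LENGTH_DELIMITED)); // 10  -> case 10: name (bytes)
    System.out.println(tag(3, VARINT));           // 24  -> case 24: storefiles (int32)
    System.out.println(tag(13, VARINT));          // 104 -> case 104: currentCompactedKVs
    System.out.println(tag(5, FIXED64));          // 41  -> case 41: averageLoad (double, outer message)
  }
}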
+
+ public interface NodeOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string name = 1;
+ /**
+ * <code>required string name = 1;</code>
+ *
+ * <pre>
+ * name:port
+ * </pre>
+ */
+ boolean hasName();
+ /**
+ * <code>required string name = 1;</code>
+ *
+ * <pre>
+ * name:port
+ * </pre>
+ */
+ java.lang.String getName();
+ /**
+ * <code>required string name = 1;</code>
+ *
+ * <pre>
+ * name:port
+ * </pre>
+ */
+ com.google.protobuf.ByteString
+ getNameBytes();
+
+ // optional int64 startCode = 2;
+ /**
+ * <code>optional int64 startCode = 2;</code>
+ */
+ boolean hasStartCode();
+ /**
+ * <code>optional int64 startCode = 2;</code>
+ */
+ long getStartCode();
+
+ // optional int32 requests = 3;
+ /**
+ * <code>optional int32 requests = 3;</code>
+ */
+ boolean hasRequests();
+ /**
+ * <code>optional int32 requests = 3;</code>
+ */
+ int getRequests();
+
+ // optional int32 heapSizeMB = 4;
+ /**
+ * <code>optional int32 heapSizeMB = 4;</code>
+ */
+ boolean hasHeapSizeMB();
+ /**
+ * <code>optional int32 heapSizeMB = 4;</code>
+ */
+ int getHeapSizeMB();
+
+ // optional int32 maxHeapSizeMB = 5;
+ /**
+ * <code>optional int32 maxHeapSizeMB = 5;</code>
+ */
+ boolean hasMaxHeapSizeMB();
+ /**
+ * <code>optional int32 maxHeapSizeMB = 5;</code>
+ */
+ int getMaxHeapSizeMB();
+
+ // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
+ */
+ java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region>
+ getRegionsList();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
+ */
+ org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getRegions(int index);
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
+ */
+ int getRegionsCount();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder>
+ getRegionsOrBuilderList();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
+ */
+ org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder getRegionsOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node}
+ */
+ public static final class Node extends
+ com.google.protobuf.GeneratedMessage
+ implements NodeOrBuilder {
+ // Use Node.newBuilder() to construct.
+ private Node(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Node(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Node defaultInstance;
+ public static Node getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Node getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Node(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ name_ = input.readBytes();
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ startCode_ = input.readInt64();
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ requests_ = input.readInt32();
+ break;
+ }
+ case 32: {
+ bitField0_ |= 0x00000008;
+ heapSizeMB_ = input.readInt32();
+ break;
+ }
+ case 40: {
+ bitField0_ |= 0x00000010;
+ maxHeapSizeMB_ = input.readInt32();
+ break;
+ }
+ case 50: {
+ if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
+ regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region>();
+ mutable_bitField0_ |= 0x00000020;
+ }
+ regions_.add(input.readMessage(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
+ regions_ = java.util.Collections.unmodifiableList(regions_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.class, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Node> PARSER =
+ new com.google.protobuf.AbstractParser<Node>() {
+ public Node parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Node(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Node> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private java.lang.Object name_;
+ /**
+ * <code>required string name = 1;</code>
+ *
+ * <pre>
+ * name:port
+ * </pre>
+ */
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required string name = 1;</code>
+ *
+ * <pre>
+ * name:port
+ * </pre>
+ */
+ public java.lang.String getName() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ name_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>required string name = 1;</code>
+ *
+ * <pre>
+ * name:port
+ * </pre>
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional int64 startCode = 2;
+ public static final int STARTCODE_FIELD_NUMBER = 2;
+ private long startCode_;
+ /**
+ * <code>optional int64 startCode = 2;</code>
+ */
+ public boolean hasStartCode() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional int64 startCode = 2;</code>
+ */
+ public long getStartCode() {
+ return startCode_;
+ }
+
+ // optional int32 requests = 3;
+ public static final int REQUESTS_FIELD_NUMBER = 3;
+ private int requests_;
+ /**
+ * <code>optional int32 requests = 3;</code>
+ */
+ public boolean hasRequests() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional int32 requests = 3;</code>
+ */
+ public int getRequests() {
+ return requests_;
+ }
+
+ // optional int32 heapSizeMB = 4;
+ public static final int HEAPSIZEMB_FIELD_NUMBER = 4;
+ private int heapSizeMB_;
+ /**
+ * <code>optional int32 heapSizeMB = 4;</code>
+ */
+ public boolean hasHeapSizeMB() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional int32 heapSizeMB = 4;</code>
+ */
+ public int getHeapSizeMB() {
+ return heapSizeMB_;
+ }
+
+ // optional int32 maxHeapSizeMB = 5;
+ public static final int MAXHEAPSIZEMB_FIELD_NUMBER = 5;
+ private int maxHeapSizeMB_;
+ /**
+ * <code>optional int32 maxHeapSizeMB = 5;</code>
+ */
+ public boolean hasMaxHeapSizeMB() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * <code>optional int32 maxHeapSizeMB = 5;</code>
+ */
+ public int getMaxHeapSizeMB() {
+ return maxHeapSizeMB_;
+ }
+
+ // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
+ public static final int REGIONS_FIELD_NUMBER = 6;
+ private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> regions_;
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> getRegionsList() {
+ return regions_;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder>
+ getRegionsOrBuilderList() {
+ return regions_;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
+ */
+ public int getRegionsCount() {
+ return regions_.size();
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getRegions(int index) {
+ return regions_.get(index);
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder getRegionsOrBuilder(
+ int index) {
+ return regions_.get(index);
+ }
+
+ private void initFields() {
+ name_ = "";
+ startCode_ = 0L;
+ requests_ = 0;
+ heapSizeMB_ = 0;
+ maxHeapSizeMB_ = 0;
+ regions_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getRegionsCount(); i++) {
+ if (!getRegions(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeInt64(2, startCode_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeInt32(3, requests_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeInt32(4, heapSizeMB_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ output.writeInt32(5, maxHeapSizeMB_);
+ }
+ for (int i = 0; i < regions_.size(); i++) {
+ output.writeMessage(6, regions_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(2, startCode_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(3, requests_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(4, heapSizeMB_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(5, maxHeapSizeMB_);
+ }
+ for (int i = 0; i < regions_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(6, regions_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.class, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getRegionsFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ name_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ startCode_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ requests_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ heapSizeMB_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ maxHeapSizeMB_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000010);
+ if (regionsBuilder_ == null) {
+ regions_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000020);
+ } else {
+ regionsBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node build() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node buildPartial() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node result = new org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.name_ = name_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.startCode_ = startCode_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.requests_ = requests_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.heapSizeMB_ = heapSizeMB_;
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+ to_bitField0_ |= 0x00000010;
+ }
+ result.maxHeapSizeMB_ = maxHeapSizeMB_;
+ if (regionsBuilder_ == null) {
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ regions_ = java.util.Collections.unmodifiableList(regions_);
+ bitField0_ = (bitField0_ & ~0x00000020);
+ }
+ result.regions_ = regions_;
+ } else {
+ result.regions_ = regionsBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node) {
+ return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node other) {
+ if (other == org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ bitField0_ |= 0x00000001;
+ name_ = other.name_;
+ onChanged();
+ }
+ if (other.hasStartCode()) {
+ setStartCode(other.getStartCode());
+ }
+ if (other.hasRequests()) {
+ setRequests(other.getRequests());
+ }
+ if (other.hasHeapSizeMB()) {
+ setHeapSizeMB(other.getHeapSizeMB());
+ }
+ if (other.hasMaxHeapSizeMB()) {
+ setMaxHeapSizeMB(other.getMaxHeapSizeMB());
+ }
+ if (regionsBuilder_ == null) {
+ if (!other.regions_.isEmpty()) {
+ if (regions_.isEmpty()) {
+ regions_ = other.regions_;
+ bitField0_ = (bitField0_ & ~0x00000020);
+ } else {
+ ensureRegionsIsMutable();
+ regions_.addAll(other.regions_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.regions_.isEmpty()) {
+ if (regionsBuilder_.isEmpty()) {
+ regionsBuilder_.dispose();
+ regionsBuilder_ = null;
+ regions_ = other.regions_;
+ bitField0_ = (bitField0_ & ~0x00000020);
+
<TRUNCATED>
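
The generated Node message above follows the stock protobuf-java 2.x shape: presence bits in bitField0_, a static PARSER, and a nested Builder whose build() enforces the single required field (name). A minimal round-trip sketch against the generated class as shown in the diff (the host:port string and numeric values are illustrative only, not taken from any real cluster):

  import org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus;

  // Build a Node; name ("name:port") is required, the remaining fields are optional.
  StorageClusterStatus.Node node = StorageClusterStatus.Node.newBuilder()
      .setName("rs1.example.com:60020")
      .setStartCode(1400000000000L)
      .setRequests(0)
      .setHeapSizeMB(128)
      .setMaxHeapSizeMB(1024)
      .build();                 // would throw if name were unset (isInitialized() check)

  // Serialize, then parse back through the static PARSER-backed parseFrom.
  byte[] wire = node.toByteArray();
  StorageClusterStatus.Node copy = StorageClusterStatus.Node.parseFrom(wire);
  assert copy.getName().equals("rs1.example.com:60020");
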
[11/38] HBASE-12197 Move rest to its own module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ScannerMessage.java
----------------------------------------------------------------------
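
The part below removes the generated ScannerMessage.java from hbase-server; per the commit subject it is being relocated to the REST module rather than dropped. Its Scanner message carries the REST scanner-creation parameters: optional row bounds, repeated columns, a time range, batch and caching sizes, a filter string, visibility labels, and a block-cache hint. A minimal construction sketch, assuming the same generated API remains available from its new location (row keys and sizes below are made up for illustration):

  import com.google.protobuf.ByteString;
  import org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner;

  Scanner scanner = Scanner.newBuilder()
      .setStartRow(ByteString.copyFromUtf8("row-0000"))
      .setEndRow(ByteString.copyFromUtf8("row-9999"))
      .addColumns(ByteString.copyFromUtf8("cf:qual"))  // repeated bytes columns = 3
      .setBatch(100)
      .setCaching(1000)          // REST scanner caching (field 9)
      .setCacheBlocks(false)     // server-side block caching hint (field 11)
      .build();                  // every field is optional, so build() cannot fail

  byte[] wire = scanner.toByteArray();
  Scanner parsed = Scanner.parseFrom(wire);
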
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ScannerMessage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ScannerMessage.java
deleted file mode 100644
index ef9d1b9..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ScannerMessage.java
+++ /dev/null
@@ -1,1578 +0,0 @@
-// Generated by the protocol buffer compiler. DO NOT EDIT!
-// source: ScannerMessage.proto
-
-package org.apache.hadoop.hbase.rest.protobuf.generated;
-
-public final class ScannerMessage {
- private ScannerMessage() {}
- public static void registerAllExtensions(
- com.google.protobuf.ExtensionRegistry registry) {
- }
- public interface ScannerOrBuilder
- extends com.google.protobuf.MessageOrBuilder {
-
- // optional bytes startRow = 1;
- /**
- * <code>optional bytes startRow = 1;</code>
- */
- boolean hasStartRow();
- /**
- * <code>optional bytes startRow = 1;</code>
- */
- com.google.protobuf.ByteString getStartRow();
-
- // optional bytes endRow = 2;
- /**
- * <code>optional bytes endRow = 2;</code>
- */
- boolean hasEndRow();
- /**
- * <code>optional bytes endRow = 2;</code>
- */
- com.google.protobuf.ByteString getEndRow();
-
- // repeated bytes columns = 3;
- /**
- * <code>repeated bytes columns = 3;</code>
- */
- java.util.List<com.google.protobuf.ByteString> getColumnsList();
- /**
- * <code>repeated bytes columns = 3;</code>
- */
- int getColumnsCount();
- /**
- * <code>repeated bytes columns = 3;</code>
- */
- com.google.protobuf.ByteString getColumns(int index);
-
- // optional int32 batch = 4;
- /**
- * <code>optional int32 batch = 4;</code>
- */
- boolean hasBatch();
- /**
- * <code>optional int32 batch = 4;</code>
- */
- int getBatch();
-
- // optional int64 startTime = 5;
- /**
- * <code>optional int64 startTime = 5;</code>
- */
- boolean hasStartTime();
- /**
- * <code>optional int64 startTime = 5;</code>
- */
- long getStartTime();
-
- // optional int64 endTime = 6;
- /**
- * <code>optional int64 endTime = 6;</code>
- */
- boolean hasEndTime();
- /**
- * <code>optional int64 endTime = 6;</code>
- */
- long getEndTime();
-
- // optional int32 maxVersions = 7;
- /**
- * <code>optional int32 maxVersions = 7;</code>
- */
- boolean hasMaxVersions();
- /**
- * <code>optional int32 maxVersions = 7;</code>
- */
- int getMaxVersions();
-
- // optional string filter = 8;
- /**
- * <code>optional string filter = 8;</code>
- */
- boolean hasFilter();
- /**
- * <code>optional string filter = 8;</code>
- */
- java.lang.String getFilter();
- /**
- * <code>optional string filter = 8;</code>
- */
- com.google.protobuf.ByteString
- getFilterBytes();
-
- // optional int32 caching = 9;
- /**
- * <code>optional int32 caching = 9;</code>
- *
- * <pre>
- * specifies REST scanner caching
- * </pre>
- */
- boolean hasCaching();
- /**
- * <code>optional int32 caching = 9;</code>
- *
- * <pre>
- * specifies REST scanner caching
- * </pre>
- */
- int getCaching();
-
- // repeated string labels = 10;
- /**
- * <code>repeated string labels = 10;</code>
- */
- java.util.List<java.lang.String>
- getLabelsList();
- /**
- * <code>repeated string labels = 10;</code>
- */
- int getLabelsCount();
- /**
- * <code>repeated string labels = 10;</code>
- */
- java.lang.String getLabels(int index);
- /**
- * <code>repeated string labels = 10;</code>
- */
- com.google.protobuf.ByteString
- getLabelsBytes(int index);
-
- // optional bool cacheBlocks = 11;
- /**
- * <code>optional bool cacheBlocks = 11;</code>
- *
- * <pre>
- * server side block caching hint
- * </pre>
- */
- boolean hasCacheBlocks();
- /**
- * <code>optional bool cacheBlocks = 11;</code>
- *
- * <pre>
- * server side block caching hint
- * </pre>
- */
- boolean getCacheBlocks();
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.Scanner}
- */
- public static final class Scanner extends
- com.google.protobuf.GeneratedMessage
- implements ScannerOrBuilder {
- // Use Scanner.newBuilder() to construct.
- private Scanner(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private Scanner(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final Scanner defaultInstance;
- public static Scanner getDefaultInstance() {
- return defaultInstance;
- }
-
- public Scanner getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private Scanner(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 10: {
- bitField0_ |= 0x00000001;
- startRow_ = input.readBytes();
- break;
- }
- case 18: {
- bitField0_ |= 0x00000002;
- endRow_ = input.readBytes();
- break;
- }
- case 26: {
- if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
- columns_ = new java.util.ArrayList<com.google.protobuf.ByteString>();
- mutable_bitField0_ |= 0x00000004;
- }
- columns_.add(input.readBytes());
- break;
- }
- case 32: {
- bitField0_ |= 0x00000004;
- batch_ = input.readInt32();
- break;
- }
- case 40: {
- bitField0_ |= 0x00000008;
- startTime_ = input.readInt64();
- break;
- }
- case 48: {
- bitField0_ |= 0x00000010;
- endTime_ = input.readInt64();
- break;
- }
- case 56: {
- bitField0_ |= 0x00000020;
- maxVersions_ = input.readInt32();
- break;
- }
- case 66: {
- bitField0_ |= 0x00000040;
- filter_ = input.readBytes();
- break;
- }
- case 72: {
- bitField0_ |= 0x00000080;
- caching_ = input.readInt32();
- break;
- }
- case 82: {
- if (!((mutable_bitField0_ & 0x00000200) == 0x00000200)) {
- labels_ = new com.google.protobuf.LazyStringArrayList();
- mutable_bitField0_ |= 0x00000200;
- }
- labels_.add(input.readBytes());
- break;
- }
- case 88: {
- bitField0_ |= 0x00000100;
- cacheBlocks_ = input.readBool();
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
- columns_ = java.util.Collections.unmodifiableList(columns_);
- }
- if (((mutable_bitField0_ & 0x00000200) == 0x00000200)) {
- labels_ = new com.google.protobuf.UnmodifiableLazyStringList(labels_);
- }
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.class, org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.Builder.class);
- }
-
- public static com.google.protobuf.Parser<Scanner> PARSER =
- new com.google.protobuf.AbstractParser<Scanner>() {
- public Scanner parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new Scanner(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<Scanner> getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- // optional bytes startRow = 1;
- public static final int STARTROW_FIELD_NUMBER = 1;
- private com.google.protobuf.ByteString startRow_;
- /**
- * <code>optional bytes startRow = 1;</code>
- */
- public boolean hasStartRow() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional bytes startRow = 1;</code>
- */
- public com.google.protobuf.ByteString getStartRow() {
- return startRow_;
- }
-
- // optional bytes endRow = 2;
- public static final int ENDROW_FIELD_NUMBER = 2;
- private com.google.protobuf.ByteString endRow_;
- /**
- * <code>optional bytes endRow = 2;</code>
- */
- public boolean hasEndRow() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional bytes endRow = 2;</code>
- */
- public com.google.protobuf.ByteString getEndRow() {
- return endRow_;
- }
-
- // repeated bytes columns = 3;
- public static final int COLUMNS_FIELD_NUMBER = 3;
- private java.util.List<com.google.protobuf.ByteString> columns_;
- /**
- * <code>repeated bytes columns = 3;</code>
- */
- public java.util.List<com.google.protobuf.ByteString>
- getColumnsList() {
- return columns_;
- }
- /**
- * <code>repeated bytes columns = 3;</code>
- */
- public int getColumnsCount() {
- return columns_.size();
- }
- /**
- * <code>repeated bytes columns = 3;</code>
- */
- public com.google.protobuf.ByteString getColumns(int index) {
- return columns_.get(index);
- }
-
- // optional int32 batch = 4;
- public static final int BATCH_FIELD_NUMBER = 4;
- private int batch_;
- /**
- * <code>optional int32 batch = 4;</code>
- */
- public boolean hasBatch() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
- }
- /**
- * <code>optional int32 batch = 4;</code>
- */
- public int getBatch() {
- return batch_;
- }
-
- // optional int64 startTime = 5;
- public static final int STARTTIME_FIELD_NUMBER = 5;
- private long startTime_;
- /**
- * <code>optional int64 startTime = 5;</code>
- */
- public boolean hasStartTime() {
- return ((bitField0_ & 0x00000008) == 0x00000008);
- }
- /**
- * <code>optional int64 startTime = 5;</code>
- */
- public long getStartTime() {
- return startTime_;
- }
-
- // optional int64 endTime = 6;
- public static final int ENDTIME_FIELD_NUMBER = 6;
- private long endTime_;
- /**
- * <code>optional int64 endTime = 6;</code>
- */
- public boolean hasEndTime() {
- return ((bitField0_ & 0x00000010) == 0x00000010);
- }
- /**
- * <code>optional int64 endTime = 6;</code>
- */
- public long getEndTime() {
- return endTime_;
- }
-
- // optional int32 maxVersions = 7;
- public static final int MAXVERSIONS_FIELD_NUMBER = 7;
- private int maxVersions_;
- /**
- * <code>optional int32 maxVersions = 7;</code>
- */
- public boolean hasMaxVersions() {
- return ((bitField0_ & 0x00000020) == 0x00000020);
- }
- /**
- * <code>optional int32 maxVersions = 7;</code>
- */
- public int getMaxVersions() {
- return maxVersions_;
- }
-
- // optional string filter = 8;
- public static final int FILTER_FIELD_NUMBER = 8;
- private java.lang.Object filter_;
- /**
- * <code>optional string filter = 8;</code>
- */
- public boolean hasFilter() {
- return ((bitField0_ & 0x00000040) == 0x00000040);
- }
- /**
- * <code>optional string filter = 8;</code>
- */
- public java.lang.String getFilter() {
- java.lang.Object ref = filter_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- filter_ = s;
- }
- return s;
- }
- }
- /**
- * <code>optional string filter = 8;</code>
- */
- public com.google.protobuf.ByteString
- getFilterBytes() {
- java.lang.Object ref = filter_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- filter_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- // optional int32 caching = 9;
- public static final int CACHING_FIELD_NUMBER = 9;
- private int caching_;
- /**
- * <code>optional int32 caching = 9;</code>
- *
- * <pre>
- * specifies REST scanner caching
- * </pre>
- */
- public boolean hasCaching() {
- return ((bitField0_ & 0x00000080) == 0x00000080);
- }
- /**
- * <code>optional int32 caching = 9;</code>
- *
- * <pre>
- * specifies REST scanner caching
- * </pre>
- */
- public int getCaching() {
- return caching_;
- }
-
- // repeated string labels = 10;
- public static final int LABELS_FIELD_NUMBER = 10;
- private com.google.protobuf.LazyStringList labels_;
- /**
- * <code>repeated string labels = 10;</code>
- */
- public java.util.List<java.lang.String>
- getLabelsList() {
- return labels_;
- }
- /**
- * <code>repeated string labels = 10;</code>
- */
- public int getLabelsCount() {
- return labels_.size();
- }
- /**
- * <code>repeated string labels = 10;</code>
- */
- public java.lang.String getLabels(int index) {
- return labels_.get(index);
- }
- /**
- * <code>repeated string labels = 10;</code>
- */
- public com.google.protobuf.ByteString
- getLabelsBytes(int index) {
- return labels_.getByteString(index);
- }
-
- // optional bool cacheBlocks = 11;
- public static final int CACHEBLOCKS_FIELD_NUMBER = 11;
- private boolean cacheBlocks_;
- /**
- * <code>optional bool cacheBlocks = 11;</code>
- *
- * <pre>
- * server side block caching hint
- * </pre>
- */
- public boolean hasCacheBlocks() {
- return ((bitField0_ & 0x00000100) == 0x00000100);
- }
- /**
- * <code>optional bool cacheBlocks = 11;</code>
- *
- * <pre>
- * server side block caching hint
- * </pre>
- */
- public boolean getCacheBlocks() {
- return cacheBlocks_;
- }
-
- private void initFields() {
- startRow_ = com.google.protobuf.ByteString.EMPTY;
- endRow_ = com.google.protobuf.ByteString.EMPTY;
- columns_ = java.util.Collections.emptyList();
- batch_ = 0;
- startTime_ = 0L;
- endTime_ = 0L;
- maxVersions_ = 0;
- filter_ = "";
- caching_ = 0;
- labels_ = com.google.protobuf.LazyStringArrayList.EMPTY;
- cacheBlocks_ = false;
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized != -1) return isInitialized == 1;
-
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBytes(1, startRow_);
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeBytes(2, endRow_);
- }
- for (int i = 0; i < columns_.size(); i++) {
- output.writeBytes(3, columns_.get(i));
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- output.writeInt32(4, batch_);
- }
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
- output.writeInt64(5, startTime_);
- }
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
- output.writeInt64(6, endTime_);
- }
- if (((bitField0_ & 0x00000020) == 0x00000020)) {
- output.writeInt32(7, maxVersions_);
- }
- if (((bitField0_ & 0x00000040) == 0x00000040)) {
- output.writeBytes(8, getFilterBytes());
- }
- if (((bitField0_ & 0x00000080) == 0x00000080)) {
- output.writeInt32(9, caching_);
- }
- for (int i = 0; i < labels_.size(); i++) {
- output.writeBytes(10, labels_.getByteString(i));
- }
- if (((bitField0_ & 0x00000100) == 0x00000100)) {
- output.writeBool(11, cacheBlocks_);
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(1, startRow_);
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(2, endRow_);
- }
- {
- int dataSize = 0;
- for (int i = 0; i < columns_.size(); i++) {
- dataSize += com.google.protobuf.CodedOutputStream
- .computeBytesSizeNoTag(columns_.get(i));
- }
- size += dataSize;
- size += 1 * getColumnsList().size();
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt32Size(4, batch_);
- }
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt64Size(5, startTime_);
- }
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt64Size(6, endTime_);
- }
- if (((bitField0_ & 0x00000020) == 0x00000020)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt32Size(7, maxVersions_);
- }
- if (((bitField0_ & 0x00000040) == 0x00000040)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(8, getFilterBytes());
- }
- if (((bitField0_ & 0x00000080) == 0x00000080)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt32Size(9, caching_);
- }
- {
- int dataSize = 0;
- for (int i = 0; i < labels_.size(); i++) {
- dataSize += com.google.protobuf.CodedOutputStream
- .computeBytesSizeNoTag(labels_.getByteString(i));
- }
- size += dataSize;
- size += 1 * getLabelsList().size();
- }
- if (((bitField0_ & 0x00000100) == 0x00000100)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBoolSize(11, cacheBlocks_);
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.Scanner}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.ScannerOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.class, org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.Builder.class);
- }
-
- // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- startRow_ = com.google.protobuf.ByteString.EMPTY;
- bitField0_ = (bitField0_ & ~0x00000001);
- endRow_ = com.google.protobuf.ByteString.EMPTY;
- bitField0_ = (bitField0_ & ~0x00000002);
- columns_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000004);
- batch_ = 0;
- bitField0_ = (bitField0_ & ~0x00000008);
- startTime_ = 0L;
- bitField0_ = (bitField0_ & ~0x00000010);
- endTime_ = 0L;
- bitField0_ = (bitField0_ & ~0x00000020);
- maxVersions_ = 0;
- bitField0_ = (bitField0_ & ~0x00000040);
- filter_ = "";
- bitField0_ = (bitField0_ & ~0x00000080);
- caching_ = 0;
- bitField0_ = (bitField0_ & ~0x00000100);
- labels_ = com.google.protobuf.LazyStringArrayList.EMPTY;
- bitField0_ = (bitField0_ & ~0x00000200);
- cacheBlocks_ = false;
- bitField0_ = (bitField0_ & ~0x00000400);
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_descriptor;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner getDefaultInstanceForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.getDefaultInstance();
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner build() {
- org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner buildPartial() {
- org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner result = new org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.startRow_ = startRow_;
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
- to_bitField0_ |= 0x00000002;
- }
- result.endRow_ = endRow_;
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- columns_ = java.util.Collections.unmodifiableList(columns_);
- bitField0_ = (bitField0_ & ~0x00000004);
- }
- result.columns_ = columns_;
- if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
- to_bitField0_ |= 0x00000004;
- }
- result.batch_ = batch_;
- if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
- to_bitField0_ |= 0x00000008;
- }
- result.startTime_ = startTime_;
- if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
- to_bitField0_ |= 0x00000010;
- }
- result.endTime_ = endTime_;
- if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
- to_bitField0_ |= 0x00000020;
- }
- result.maxVersions_ = maxVersions_;
- if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
- to_bitField0_ |= 0x00000040;
- }
- result.filter_ = filter_;
- if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
- to_bitField0_ |= 0x00000080;
- }
- result.caching_ = caching_;
- if (((bitField0_ & 0x00000200) == 0x00000200)) {
- labels_ = new com.google.protobuf.UnmodifiableLazyStringList(
- labels_);
- bitField0_ = (bitField0_ & ~0x00000200);
- }
- result.labels_ = labels_;
- if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
- to_bitField0_ |= 0x00000100;
- }
- result.cacheBlocks_ = cacheBlocks_;
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner) {
- return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner other) {
- if (other == org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.getDefaultInstance()) return this;
- if (other.hasStartRow()) {
- setStartRow(other.getStartRow());
- }
- if (other.hasEndRow()) {
- setEndRow(other.getEndRow());
- }
- if (!other.columns_.isEmpty()) {
- if (columns_.isEmpty()) {
- columns_ = other.columns_;
- bitField0_ = (bitField0_ & ~0x00000004);
- } else {
- ensureColumnsIsMutable();
- columns_.addAll(other.columns_);
- }
- onChanged();
- }
- if (other.hasBatch()) {
- setBatch(other.getBatch());
- }
- if (other.hasStartTime()) {
- setStartTime(other.getStartTime());
- }
- if (other.hasEndTime()) {
- setEndTime(other.getEndTime());
- }
- if (other.hasMaxVersions()) {
- setMaxVersions(other.getMaxVersions());
- }
- if (other.hasFilter()) {
- bitField0_ |= 0x00000080;
- filter_ = other.filter_;
- onChanged();
- }
- if (other.hasCaching()) {
- setCaching(other.getCaching());
- }
- if (!other.labels_.isEmpty()) {
- if (labels_.isEmpty()) {
- labels_ = other.labels_;
- bitField0_ = (bitField0_ & ~0x00000200);
- } else {
- ensureLabelsIsMutable();
- labels_.addAll(other.labels_);
- }
- onChanged();
- }
- if (other.hasCacheBlocks()) {
- setCacheBlocks(other.getCacheBlocks());
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- // optional bytes startRow = 1;
- private com.google.protobuf.ByteString startRow_ = com.google.protobuf.ByteString.EMPTY;
- /**
- * <code>optional bytes startRow = 1;</code>
- */
- public boolean hasStartRow() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional bytes startRow = 1;</code>
- */
- public com.google.protobuf.ByteString getStartRow() {
- return startRow_;
- }
- /**
- * <code>optional bytes startRow = 1;</code>
- */
- public Builder setStartRow(com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- startRow_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional bytes startRow = 1;</code>
- */
- public Builder clearStartRow() {
- bitField0_ = (bitField0_ & ~0x00000001);
- startRow_ = getDefaultInstance().getStartRow();
- onChanged();
- return this;
- }
-
- // optional bytes endRow = 2;
- private com.google.protobuf.ByteString endRow_ = com.google.protobuf.ByteString.EMPTY;
- /**
- * <code>optional bytes endRow = 2;</code>
- */
- public boolean hasEndRow() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional bytes endRow = 2;</code>
- */
- public com.google.protobuf.ByteString getEndRow() {
- return endRow_;
- }
- /**
- * <code>optional bytes endRow = 2;</code>
- */
- public Builder setEndRow(com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000002;
- endRow_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional bytes endRow = 2;</code>
- */
- public Builder clearEndRow() {
- bitField0_ = (bitField0_ & ~0x00000002);
- endRow_ = getDefaultInstance().getEndRow();
- onChanged();
- return this;
- }
-
- // repeated bytes columns = 3;
- private java.util.List<com.google.protobuf.ByteString> columns_ = java.util.Collections.emptyList();
- private void ensureColumnsIsMutable() {
- if (!((bitField0_ & 0x00000004) == 0x00000004)) {
- columns_ = new java.util.ArrayList<com.google.protobuf.ByteString>(columns_);
- bitField0_ |= 0x00000004;
- }
- }
- /**
- * <code>repeated bytes columns = 3;</code>
- */
- public java.util.List<com.google.protobuf.ByteString>
- getColumnsList() {
- return java.util.Collections.unmodifiableList(columns_);
- }
- /**
- * <code>repeated bytes columns = 3;</code>
- */
- public int getColumnsCount() {
- return columns_.size();
- }
- /**
- * <code>repeated bytes columns = 3;</code>
- */
- public com.google.protobuf.ByteString getColumns(int index) {
- return columns_.get(index);
- }
- /**
- * <code>repeated bytes columns = 3;</code>
- */
- public Builder setColumns(
- int index, com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureColumnsIsMutable();
- columns_.set(index, value);
- onChanged();
- return this;
- }
- /**
- * <code>repeated bytes columns = 3;</code>
- */
- public Builder addColumns(com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureColumnsIsMutable();
- columns_.add(value);
- onChanged();
- return this;
- }
- /**
- * <code>repeated bytes columns = 3;</code>
- */
- public Builder addAllColumns(
- java.lang.Iterable<? extends com.google.protobuf.ByteString> values) {
- ensureColumnsIsMutable();
- super.addAll(values, columns_);
- onChanged();
- return this;
- }
- /**
- * <code>repeated bytes columns = 3;</code>
- */
- public Builder clearColumns() {
- columns_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000004);
- onChanged();
- return this;
- }
-
- // optional int32 batch = 4;
- private int batch_ ;
- /**
- * <code>optional int32 batch = 4;</code>
- */
- public boolean hasBatch() {
- return ((bitField0_ & 0x00000008) == 0x00000008);
- }
- /**
- * <code>optional int32 batch = 4;</code>
- */
- public int getBatch() {
- return batch_;
- }
- /**
- * <code>optional int32 batch = 4;</code>
- */
- public Builder setBatch(int value) {
- bitField0_ |= 0x00000008;
- batch_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional int32 batch = 4;</code>
- */
- public Builder clearBatch() {
- bitField0_ = (bitField0_ & ~0x00000008);
- batch_ = 0;
- onChanged();
- return this;
- }
-
- // optional int64 startTime = 5;
- private long startTime_ ;
- /**
- * <code>optional int64 startTime = 5;</code>
- */
- public boolean hasStartTime() {
- return ((bitField0_ & 0x00000010) == 0x00000010);
- }
- /**
- * <code>optional int64 startTime = 5;</code>
- */
- public long getStartTime() {
- return startTime_;
- }
- /**
- * <code>optional int64 startTime = 5;</code>
- */
- public Builder setStartTime(long value) {
- bitField0_ |= 0x00000010;
- startTime_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional int64 startTime = 5;</code>
- */
- public Builder clearStartTime() {
- bitField0_ = (bitField0_ & ~0x00000010);
- startTime_ = 0L;
- onChanged();
- return this;
- }
-
- // optional int64 endTime = 6;
- private long endTime_ ;
- /**
- * <code>optional int64 endTime = 6;</code>
- */
- public boolean hasEndTime() {
- return ((bitField0_ & 0x00000020) == 0x00000020);
- }
- /**
- * <code>optional int64 endTime = 6;</code>
- */
- public long getEndTime() {
- return endTime_;
- }
- /**
- * <code>optional int64 endTime = 6;</code>
- */
- public Builder setEndTime(long value) {
- bitField0_ |= 0x00000020;
- endTime_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional int64 endTime = 6;</code>
- */
- public Builder clearEndTime() {
- bitField0_ = (bitField0_ & ~0x00000020);
- endTime_ = 0L;
- onChanged();
- return this;
- }
-
- // optional int32 maxVersions = 7;
- private int maxVersions_ ;
- /**
- * <code>optional int32 maxVersions = 7;</code>
- */
- public boolean hasMaxVersions() {
- return ((bitField0_ & 0x00000040) == 0x00000040);
- }
- /**
- * <code>optional int32 maxVersions = 7;</code>
- */
- public int getMaxVersions() {
- return maxVersions_;
- }
- /**
- * <code>optional int32 maxVersions = 7;</code>
- */
- public Builder setMaxVersions(int value) {
- bitField0_ |= 0x00000040;
- maxVersions_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional int32 maxVersions = 7;</code>
- */
- public Builder clearMaxVersions() {
- bitField0_ = (bitField0_ & ~0x00000040);
- maxVersions_ = 0;
- onChanged();
- return this;
- }
-
- // optional string filter = 8;
- private java.lang.Object filter_ = "";
- /**
- * <code>optional string filter = 8;</code>
- */
- public boolean hasFilter() {
- return ((bitField0_ & 0x00000080) == 0x00000080);
- }
- /**
- * <code>optional string filter = 8;</code>
- */
- public java.lang.String getFilter() {
- java.lang.Object ref = filter_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- filter_ = s;
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- * <code>optional string filter = 8;</code>
- */
- public com.google.protobuf.ByteString
- getFilterBytes() {
- java.lang.Object ref = filter_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- filter_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- * <code>optional string filter = 8;</code>
- */
- public Builder setFilter(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000080;
- filter_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional string filter = 8;</code>
- */
- public Builder clearFilter() {
- bitField0_ = (bitField0_ & ~0x00000080);
- filter_ = getDefaultInstance().getFilter();
- onChanged();
- return this;
- }
- /**
- * <code>optional string filter = 8;</code>
- */
- public Builder setFilterBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000080;
- filter_ = value;
- onChanged();
- return this;
- }
-
- // optional int32 caching = 9;
- private int caching_ ;
- /**
- * <code>optional int32 caching = 9;</code>
- *
- * <pre>
- * specifies REST scanner caching
- * </pre>
- */
- public boolean hasCaching() {
- return ((bitField0_ & 0x00000100) == 0x00000100);
- }
- /**
- * <code>optional int32 caching = 9;</code>
- *
- * <pre>
- * specifies REST scanner caching
- * </pre>
- */
- public int getCaching() {
- return caching_;
- }
- /**
- * <code>optional int32 caching = 9;</code>
- *
- * <pre>
- * specifies REST scanner caching
- * </pre>
- */
- public Builder setCaching(int value) {
- bitField0_ |= 0x00000100;
- caching_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional int32 caching = 9;</code>
- *
- * <pre>
- * specifies REST scanner caching
- * </pre>
- */
- public Builder clearCaching() {
- bitField0_ = (bitField0_ & ~0x00000100);
- caching_ = 0;
- onChanged();
- return this;
- }
-
- // repeated string labels = 10;
- private com.google.protobuf.LazyStringList labels_ = com.google.protobuf.LazyStringArrayList.EMPTY;
- private void ensureLabelsIsMutable() {
- if (!((bitField0_ & 0x00000200) == 0x00000200)) {
- labels_ = new com.google.protobuf.LazyStringArrayList(labels_);
- bitField0_ |= 0x00000200;
- }
- }
- /**
- * <code>repeated string labels = 10;</code>
- */
- public java.util.List<java.lang.String>
- getLabelsList() {
- return java.util.Collections.unmodifiableList(labels_);
- }
- /**
- * <code>repeated string labels = 10;</code>
- */
- public int getLabelsCount() {
- return labels_.size();
- }
- /**
- * <code>repeated string labels = 10;</code>
- */
- public java.lang.String getLabels(int index) {
- return labels_.get(index);
- }
- /**
- * <code>repeated string labels = 10;</code>
- */
- public com.google.protobuf.ByteString
- getLabelsBytes(int index) {
- return labels_.getByteString(index);
- }
- /**
- * <code>repeated string labels = 10;</code>
- */
- public Builder setLabels(
- int index, java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureLabelsIsMutable();
- labels_.set(index, value);
- onChanged();
- return this;
- }
- /**
- * <code>repeated string labels = 10;</code>
- */
- public Builder addLabels(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureLabelsIsMutable();
- labels_.add(value);
- onChanged();
- return this;
- }
- /**
- * <code>repeated string labels = 10;</code>
- */
- public Builder addAllLabels(
- java.lang.Iterable<java.lang.String> values) {
- ensureLabelsIsMutable();
- super.addAll(values, labels_);
- onChanged();
- return this;
- }
- /**
- * <code>repeated string labels = 10;</code>
- */
- public Builder clearLabels() {
- labels_ = com.google.protobuf.LazyStringArrayList.EMPTY;
- bitField0_ = (bitField0_ & ~0x00000200);
- onChanged();
- return this;
- }
- /**
- * <code>repeated string labels = 10;</code>
- */
- public Builder addLabelsBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureLabelsIsMutable();
- labels_.add(value);
- onChanged();
- return this;
- }
-
- // optional bool cacheBlocks = 11;
- private boolean cacheBlocks_ ;
- /**
- * <code>optional bool cacheBlocks = 11;</code>
- *
- * <pre>
- * server side block caching hint
- * </pre>
- */
- public boolean hasCacheBlocks() {
- return ((bitField0_ & 0x00000400) == 0x00000400);
- }
- /**
- * <code>optional bool cacheBlocks = 11;</code>
- *
- * <pre>
- * server side block caching hint
- * </pre>
- */
- public boolean getCacheBlocks() {
- return cacheBlocks_;
- }
- /**
- * <code>optional bool cacheBlocks = 11;</code>
- *
- * <pre>
- * server side block caching hint
- * </pre>
- */
- public Builder setCacheBlocks(boolean value) {
- bitField0_ |= 0x00000400;
- cacheBlocks_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional bool cacheBlocks = 11;</code>
- *
- * <pre>
- * server side block caching hint
- * </pre>
- */
- public Builder clearCacheBlocks() {
- bitField0_ = (bitField0_ & ~0x00000400);
- cacheBlocks_ = false;
- onChanged();
- return this;
- }
-
- // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.Scanner)
- }
-
- static {
- defaultInstance = new Scanner(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.Scanner)
- }
-
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_fieldAccessorTable;
-
- public static com.google.protobuf.Descriptors.FileDescriptor
- getDescriptor() {
- return descriptor;
- }
- private static com.google.protobuf.Descriptors.FileDescriptor
- descriptor;
- static {
- java.lang.String[] descriptorData = {
- "\n\024ScannerMessage.proto\022/org.apache.hadoo" +
- "p.hbase.rest.protobuf.generated\"\312\001\n\007Scan" +
- "ner\022\020\n\010startRow\030\001 \001(\014\022\016\n\006endRow\030\002 \001(\014\022\017\n" +
- "\007columns\030\003 \003(\014\022\r\n\005batch\030\004 \001(\005\022\021\n\tstartTi" +
- "me\030\005 \001(\003\022\017\n\007endTime\030\006 \001(\003\022\023\n\013maxVersions" +
- "\030\007 \001(\005\022\016\n\006filter\030\010 \001(\t\022\017\n\007caching\030\t \001(\005\022" +
- "\016\n\006labels\030\n \003(\t\022\023\n\013cacheBlocks\030\013 \001(\010"
- };
- com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
- new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
- public com.google.protobuf.ExtensionRegistry assignDescriptors(
- com.google.protobuf.Descriptors.FileDescriptor root) {
- descriptor = root;
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_descriptor =
- getDescriptor().getMessageTypes().get(0);
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_descriptor,
- new java.lang.String[] { "StartRow", "EndRow", "Columns", "Batch", "StartTime", "EndTime", "MaxVersions", "Filter", "Caching", "Labels", "CacheBlocks", });
- return null;
- }
- };
- com.google.protobuf.Descriptors.FileDescriptor
- .internalBuildGeneratedFileFrom(descriptorData,
- new com.google.protobuf.Descriptors.FileDescriptor[] {
- }, assigner);
- }
-
- // @@protoc_insertion_point(outer_class_scope)
-}
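The hunk above deletes ScannerMessage.java from hbase-server; companion parts of this series re-create it under the new hbase-rest module, so the generated Scanner builder API itself is unchanged by the move. For reference, a minimal sketch of driving that builder: the setter names and field tags are taken from the listing above, while the wrapper class and the sample values are illustrative assumptions.

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner;

public class ScannerBuilderSketch {
  public static void main(String[] args) throws Exception {
    long now = System.currentTimeMillis();
    // Field tags and setters match the generated builder deleted above.
    Scanner scanner = Scanner.newBuilder()
        .setStartRow(ByteString.copyFromUtf8("row-000")) // startRow = 1
        .setBatch(100)                                   // batch = 4
        .setStartTime(0L)                                // startTime = 5
        .setEndTime(now)                                 // endTime = 6
        .setMaxVersions(3)                               // maxVersions = 7
        .setCaching(1000)                                // caching = 9, REST scanner caching
        .addLabels("public")                             // labels = 10
        .setCacheBlocks(false)                           // cacheBlocks = 11, block caching hint
        .build();

    // Round-trip through the protobuf wire format; toByteArray() and
    // parseFrom(byte[]) come with every generated protobuf-java 2.x message.
    Scanner copy = Scanner.parseFrom(scanner.toByteArray());
    System.out.println(copy.getCaching() + " " + copy.getLabelsList());
  }
}

Since the patch only relocates the source file, the Java package name and the wire format encoded in the descriptorData block at the end of the file are unchanged, so callers of the old hbase-server artifact keep interoperating with the relocated classes.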
[09/38] HBASE-12197 Move rest to its own module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableInfoMessage.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableInfoMessage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableInfoMessage.java
deleted file mode 100644
index 421c0ec..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableInfoMessage.java
+++ /dev/null
@@ -1,1802 +0,0 @@
-// Generated by the protocol buffer compiler. DO NOT EDIT!
-// source: TableInfoMessage.proto
-
-package org.apache.hadoop.hbase.rest.protobuf.generated;
-
-public final class TableInfoMessage {
- private TableInfoMessage() {}
- public static void registerAllExtensions(
- com.google.protobuf.ExtensionRegistry registry) {
- }
- public interface TableInfoOrBuilder
- extends com.google.protobuf.MessageOrBuilder {
-
- // required string name = 1;
- /**
- * <code>required string name = 1;</code>
- */
- boolean hasName();
- /**
- * <code>required string name = 1;</code>
- */
- java.lang.String getName();
- /**
- * <code>required string name = 1;</code>
- */
- com.google.protobuf.ByteString
- getNameBytes();
-
- // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region>
- getRegionsList();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region getRegions(int index);
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- int getRegionsCount();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.RegionOrBuilder>
- getRegionsOrBuilderList();
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.RegionOrBuilder getRegionsOrBuilder(
- int index);
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo}
- */
- public static final class TableInfo extends
- com.google.protobuf.GeneratedMessage
- implements TableInfoOrBuilder {
- // Use TableInfo.newBuilder() to construct.
- private TableInfo(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private TableInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final TableInfo defaultInstance;
- public static TableInfo getDefaultInstance() {
- return defaultInstance;
- }
-
- public TableInfo getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private TableInfo(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 10: {
- bitField0_ |= 0x00000001;
- name_ = input.readBytes();
- break;
- }
- case 18: {
- if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
- regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region>();
- mutable_bitField0_ |= 0x00000002;
- }
- regions_.add(input.readMessage(org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.PARSER, extensionRegistry));
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
- regions_ = java.util.Collections.unmodifiableList(regions_);
- }
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.class, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Builder.class);
- }
-
- public static com.google.protobuf.Parser<TableInfo> PARSER =
- new com.google.protobuf.AbstractParser<TableInfo>() {
- public TableInfo parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new TableInfo(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<TableInfo> getParserForType() {
- return PARSER;
- }
-
- public interface RegionOrBuilder
- extends com.google.protobuf.MessageOrBuilder {
-
- // required string name = 1;
- /**
- * <code>required string name = 1;</code>
- */
- boolean hasName();
- /**
- * <code>required string name = 1;</code>
- */
- java.lang.String getName();
- /**
- * <code>required string name = 1;</code>
- */
- com.google.protobuf.ByteString
- getNameBytes();
-
- // optional bytes startKey = 2;
- /**
- * <code>optional bytes startKey = 2;</code>
- */
- boolean hasStartKey();
- /**
- * <code>optional bytes startKey = 2;</code>
- */
- com.google.protobuf.ByteString getStartKey();
-
- // optional bytes endKey = 3;
- /**
- * <code>optional bytes endKey = 3;</code>
- */
- boolean hasEndKey();
- /**
- * <code>optional bytes endKey = 3;</code>
- */
- com.google.protobuf.ByteString getEndKey();
-
- // optional int64 id = 4;
- /**
- * <code>optional int64 id = 4;</code>
- */
- boolean hasId();
- /**
- * <code>optional int64 id = 4;</code>
- */
- long getId();
-
- // optional string location = 5;
- /**
- * <code>optional string location = 5;</code>
- */
- boolean hasLocation();
- /**
- * <code>optional string location = 5;</code>
- */
- java.lang.String getLocation();
- /**
- * <code>optional string location = 5;</code>
- */
- com.google.protobuf.ByteString
- getLocationBytes();
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region}
- */
- public static final class Region extends
- com.google.protobuf.GeneratedMessage
- implements RegionOrBuilder {
- // Use Region.newBuilder() to construct.
- private Region(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private Region(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final Region defaultInstance;
- public static Region getDefaultInstance() {
- return defaultInstance;
- }
-
- public Region getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private Region(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 10: {
- bitField0_ |= 0x00000001;
- name_ = input.readBytes();
- break;
- }
- case 18: {
- bitField0_ |= 0x00000002;
- startKey_ = input.readBytes();
- break;
- }
- case 26: {
- bitField0_ |= 0x00000004;
- endKey_ = input.readBytes();
- break;
- }
- case 32: {
- bitField0_ |= 0x00000008;
- id_ = input.readInt64();
- break;
- }
- case 42: {
- bitField0_ |= 0x00000010;
- location_ = input.readBytes();
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_Region_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_Region_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.class, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder.class);
- }
-
- public static com.google.protobuf.Parser<Region> PARSER =
- new com.google.protobuf.AbstractParser<Region>() {
- public Region parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new Region(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<Region> getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- // required string name = 1;
- public static final int NAME_FIELD_NUMBER = 1;
- private java.lang.Object name_;
- /**
- * <code>required string name = 1;</code>
- */
- public boolean hasName() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>required string name = 1;</code>
- */
- public java.lang.String getName() {
- java.lang.Object ref = name_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- name_ = s;
- }
- return s;
- }
- }
- /**
- * <code>required string name = 1;</code>
- */
- public com.google.protobuf.ByteString
- getNameBytes() {
- java.lang.Object ref = name_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- name_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- // optional bytes startKey = 2;
- public static final int STARTKEY_FIELD_NUMBER = 2;
- private com.google.protobuf.ByteString startKey_;
- /**
- * <code>optional bytes startKey = 2;</code>
- */
- public boolean hasStartKey() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional bytes startKey = 2;</code>
- */
- public com.google.protobuf.ByteString getStartKey() {
- return startKey_;
- }
-
- // optional bytes endKey = 3;
- public static final int ENDKEY_FIELD_NUMBER = 3;
- private com.google.protobuf.ByteString endKey_;
- /**
- * <code>optional bytes endKey = 3;</code>
- */
- public boolean hasEndKey() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
- }
- /**
- * <code>optional bytes endKey = 3;</code>
- */
- public com.google.protobuf.ByteString getEndKey() {
- return endKey_;
- }
-
- // optional int64 id = 4;
- public static final int ID_FIELD_NUMBER = 4;
- private long id_;
- /**
- * <code>optional int64 id = 4;</code>
- */
- public boolean hasId() {
- return ((bitField0_ & 0x00000008) == 0x00000008);
- }
- /**
- * <code>optional int64 id = 4;</code>
- */
- public long getId() {
- return id_;
- }
-
- // optional string location = 5;
- public static final int LOCATION_FIELD_NUMBER = 5;
- private java.lang.Object location_;
- /**
- * <code>optional string location = 5;</code>
- */
- public boolean hasLocation() {
- return ((bitField0_ & 0x00000010) == 0x00000010);
- }
- /**
- * <code>optional string location = 5;</code>
- */
- public java.lang.String getLocation() {
- java.lang.Object ref = location_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- location_ = s;
- }
- return s;
- }
- }
- /**
- * <code>optional string location = 5;</code>
- */
- public com.google.protobuf.ByteString
- getLocationBytes() {
- java.lang.Object ref = location_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- location_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- private void initFields() {
- name_ = "";
- startKey_ = com.google.protobuf.ByteString.EMPTY;
- endKey_ = com.google.protobuf.ByteString.EMPTY;
- id_ = 0L;
- location_ = "";
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized != -1) return isInitialized == 1;
-
- if (!hasName()) {
- memoizedIsInitialized = 0;
- return false;
- }
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBytes(1, getNameBytes());
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeBytes(2, startKey_);
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- output.writeBytes(3, endKey_);
- }
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
- output.writeInt64(4, id_);
- }
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
- output.writeBytes(5, getLocationBytes());
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(1, getNameBytes());
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(2, startKey_);
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(3, endKey_);
- }
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt64Size(4, id_);
- }
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(5, getLocationBytes());
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.RegionOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_Region_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_Region_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.class, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder.class);
- }
-
- // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- name_ = "";
- bitField0_ = (bitField0_ & ~0x00000001);
- startKey_ = com.google.protobuf.ByteString.EMPTY;
- bitField0_ = (bitField0_ & ~0x00000002);
- endKey_ = com.google.protobuf.ByteString.EMPTY;
- bitField0_ = (bitField0_ & ~0x00000004);
- id_ = 0L;
- bitField0_ = (bitField0_ & ~0x00000008);
- location_ = "";
- bitField0_ = (bitField0_ & ~0x00000010);
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_Region_descriptor;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region getDefaultInstanceForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.getDefaultInstance();
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region build() {
- org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region buildPartial() {
- org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region result = new org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.name_ = name_;
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
- to_bitField0_ |= 0x00000002;
- }
- result.startKey_ = startKey_;
- if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
- to_bitField0_ |= 0x00000004;
- }
- result.endKey_ = endKey_;
- if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
- to_bitField0_ |= 0x00000008;
- }
- result.id_ = id_;
- if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
- to_bitField0_ |= 0x00000010;
- }
- result.location_ = location_;
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region) {
- return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region other) {
- if (other == org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.getDefaultInstance()) return this;
- if (other.hasName()) {
- bitField0_ |= 0x00000001;
- name_ = other.name_;
- onChanged();
- }
- if (other.hasStartKey()) {
- setStartKey(other.getStartKey());
- }
- if (other.hasEndKey()) {
- setEndKey(other.getEndKey());
- }
- if (other.hasId()) {
- setId(other.getId());
- }
- if (other.hasLocation()) {
- bitField0_ |= 0x00000010;
- location_ = other.location_;
- onChanged();
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- if (!hasName()) {
-
- return false;
- }
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- // required string name = 1;
- private java.lang.Object name_ = "";
- /**
- * <code>required string name = 1;</code>
- */
- public boolean hasName() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>required string name = 1;</code>
- */
- public java.lang.String getName() {
- java.lang.Object ref = name_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- name_ = s;
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- * <code>required string name = 1;</code>
- */
- public com.google.protobuf.ByteString
- getNameBytes() {
- java.lang.Object ref = name_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- name_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- * <code>required string name = 1;</code>
- */
- public Builder setName(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- name_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>required string name = 1;</code>
- */
- public Builder clearName() {
- bitField0_ = (bitField0_ & ~0x00000001);
- name_ = getDefaultInstance().getName();
- onChanged();
- return this;
- }
- /**
- * <code>required string name = 1;</code>
- */
- public Builder setNameBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- name_ = value;
- onChanged();
- return this;
- }
-
- // optional bytes startKey = 2;
- private com.google.protobuf.ByteString startKey_ = com.google.protobuf.ByteString.EMPTY;
- /**
- * <code>optional bytes startKey = 2;</code>
- */
- public boolean hasStartKey() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional bytes startKey = 2;</code>
- */
- public com.google.protobuf.ByteString getStartKey() {
- return startKey_;
- }
- /**
- * <code>optional bytes startKey = 2;</code>
- */
- public Builder setStartKey(com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000002;
- startKey_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional bytes startKey = 2;</code>
- */
- public Builder clearStartKey() {
- bitField0_ = (bitField0_ & ~0x00000002);
- startKey_ = getDefaultInstance().getStartKey();
- onChanged();
- return this;
- }
-
- // optional bytes endKey = 3;
- private com.google.protobuf.ByteString endKey_ = com.google.protobuf.ByteString.EMPTY;
- /**
- * <code>optional bytes endKey = 3;</code>
- */
- public boolean hasEndKey() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
- }
- /**
- * <code>optional bytes endKey = 3;</code>
- */
- public com.google.protobuf.ByteString getEndKey() {
- return endKey_;
- }
- /**
- * <code>optional bytes endKey = 3;</code>
- */
- public Builder setEndKey(com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000004;
- endKey_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional bytes endKey = 3;</code>
- */
- public Builder clearEndKey() {
- bitField0_ = (bitField0_ & ~0x00000004);
- endKey_ = getDefaultInstance().getEndKey();
- onChanged();
- return this;
- }
-
- // optional int64 id = 4;
- private long id_ ;
- /**
- * <code>optional int64 id = 4;</code>
- */
- public boolean hasId() {
- return ((bitField0_ & 0x00000008) == 0x00000008);
- }
- /**
- * <code>optional int64 id = 4;</code>
- */
- public long getId() {
- return id_;
- }
- /**
- * <code>optional int64 id = 4;</code>
- */
- public Builder setId(long value) {
- bitField0_ |= 0x00000008;
- id_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional int64 id = 4;</code>
- */
- public Builder clearId() {
- bitField0_ = (bitField0_ & ~0x00000008);
- id_ = 0L;
- onChanged();
- return this;
- }
-
- // optional string location = 5;
- private java.lang.Object location_ = "";
- /**
- * <code>optional string location = 5;</code>
- */
- public boolean hasLocation() {
- return ((bitField0_ & 0x00000010) == 0x00000010);
- }
- /**
- * <code>optional string location = 5;</code>
- */
- public java.lang.String getLocation() {
- java.lang.Object ref = location_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- location_ = s;
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- * <code>optional string location = 5;</code>
- */
- public com.google.protobuf.ByteString
- getLocationBytes() {
- java.lang.Object ref = location_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- location_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- * <code>optional string location = 5;</code>
- */
- public Builder setLocation(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000010;
- location_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional string location = 5;</code>
- */
- public Builder clearLocation() {
- bitField0_ = (bitField0_ & ~0x00000010);
- location_ = getDefaultInstance().getLocation();
- onChanged();
- return this;
- }
- /**
- * <code>optional string location = 5;</code>
- */
- public Builder setLocationBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000010;
- location_ = value;
- onChanged();
- return this;
- }
-
- // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region)
- }
-
- static {
- defaultInstance = new Region(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region)
- }
-
- private int bitField0_;
- // required string name = 1;
- public static final int NAME_FIELD_NUMBER = 1;
- private java.lang.Object name_;
- /**
- * <code>required string name = 1;</code>
- */
- public boolean hasName() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>required string name = 1;</code>
- */
- public java.lang.String getName() {
- java.lang.Object ref = name_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- name_ = s;
- }
- return s;
- }
- }
- /**
- * <code>required string name = 1;</code>
- */
- public com.google.protobuf.ByteString
- getNameBytes() {
- java.lang.Object ref = name_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- name_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
- public static final int REGIONS_FIELD_NUMBER = 2;
- private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region> regions_;
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region> getRegionsList() {
- return regions_;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.RegionOrBuilder>
- getRegionsOrBuilderList() {
- return regions_;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- public int getRegionsCount() {
- return regions_.size();
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region getRegions(int index) {
- return regions_.get(index);
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.RegionOrBuilder getRegionsOrBuilder(
- int index) {
- return regions_.get(index);
- }
-
- private void initFields() {
- name_ = "";
- regions_ = java.util.Collections.emptyList();
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized != -1) return isInitialized == 1;
-
- if (!hasName()) {
- memoizedIsInitialized = 0;
- return false;
- }
- for (int i = 0; i < getRegionsCount(); i++) {
- if (!getRegions(i).isInitialized()) {
- memoizedIsInitialized = 0;
- return false;
- }
- }
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBytes(1, getNameBytes());
- }
- for (int i = 0; i < regions_.size(); i++) {
- output.writeMessage(2, regions_.get(i));
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(1, getNameBytes());
- }
- for (int i = 0; i < regions_.size(); i++) {
- size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(2, regions_.get(i));
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfoOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.class, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Builder.class);
- }
-
- // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- getRegionsFieldBuilder();
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- name_ = "";
- bitField0_ = (bitField0_ & ~0x00000001);
- if (regionsBuilder_ == null) {
- regions_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000002);
- } else {
- regionsBuilder_.clear();
- }
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_descriptor;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo getDefaultInstanceForType() {
- return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.getDefaultInstance();
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo build() {
- org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo buildPartial() {
- org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo result = new org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.name_ = name_;
- if (regionsBuilder_ == null) {
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- regions_ = java.util.Collections.unmodifiableList(regions_);
- bitField0_ = (bitField0_ & ~0x00000002);
- }
- result.regions_ = regions_;
- } else {
- result.regions_ = regionsBuilder_.build();
- }
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo) {
- return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo other) {
- if (other == org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.getDefaultInstance()) return this;
- if (other.hasName()) {
- bitField0_ |= 0x00000001;
- name_ = other.name_;
- onChanged();
- }
- if (regionsBuilder_ == null) {
- if (!other.regions_.isEmpty()) {
- if (regions_.isEmpty()) {
- regions_ = other.regions_;
- bitField0_ = (bitField0_ & ~0x00000002);
- } else {
- ensureRegionsIsMutable();
- regions_.addAll(other.regions_);
- }
- onChanged();
- }
- } else {
- if (!other.regions_.isEmpty()) {
- if (regionsBuilder_.isEmpty()) {
- regionsBuilder_.dispose();
- regionsBuilder_ = null;
- regions_ = other.regions_;
- bitField0_ = (bitField0_ & ~0x00000002);
- regionsBuilder_ =
- com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
- getRegionsFieldBuilder() : null;
- } else {
- regionsBuilder_.addAllMessages(other.regions_);
- }
- }
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- if (!hasName()) {
-
- return false;
- }
- for (int i = 0; i < getRegionsCount(); i++) {
- if (!getRegions(i).isInitialized()) {
-
- return false;
- }
- }
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- // required string name = 1;
- private java.lang.Object name_ = "";
- /**
- * <code>required string name = 1;</code>
- */
- public boolean hasName() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>required string name = 1;</code>
- */
- public java.lang.String getName() {
- java.lang.Object ref = name_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- name_ = s;
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- * <code>required string name = 1;</code>
- */
- public com.google.protobuf.ByteString
- getNameBytes() {
- java.lang.Object ref = name_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- name_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- * <code>required string name = 1;</code>
- */
- public Builder setName(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- name_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>required string name = 1;</code>
- */
- public Builder clearName() {
- bitField0_ = (bitField0_ & ~0x00000001);
- name_ = getDefaultInstance().getName();
- onChanged();
- return this;
- }
- /**
- * <code>required string name = 1;</code>
- */
- public Builder setNameBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- name_ = value;
- onChanged();
- return this;
- }
-
- // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
- private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region> regions_ =
- java.util.Collections.emptyList();
- private void ensureRegionsIsMutable() {
- if (!((bitField0_ & 0x00000002) == 0x00000002)) {
- regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region>(regions_);
- bitField0_ |= 0x00000002;
- }
- }
-
- private com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.RegionOrBuilder> regionsBuilder_;
-
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region> getRegionsList() {
- if (regionsBuilder_ == null) {
- return java.util.Collections.unmodifiableList(regions_);
- } else {
- return regionsBuilder_.getMessageList();
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- public int getRegionsCount() {
- if (regionsBuilder_ == null) {
- return regions_.size();
- } else {
- return regionsBuilder_.getCount();
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region getRegions(int index) {
- if (regionsBuilder_ == null) {
- return regions_.get(index);
- } else {
- return regionsBuilder_.getMessage(index);
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- public Builder setRegions(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region value) {
- if (regionsBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureRegionsIsMutable();
- regions_.set(index, value);
- onChanged();
- } else {
- regionsBuilder_.setMessage(index, value);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- public Builder setRegions(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder builderForValue) {
- if (regionsBuilder_ == null) {
- ensureRegionsIsMutable();
- regions_.set(index, builderForValue.build());
- onChanged();
- } else {
- regionsBuilder_.setMessage(index, builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- public Builder addRegions(org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region value) {
- if (regionsBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureRegionsIsMutable();
- regions_.add(value);
- onChanged();
- } else {
- regionsBuilder_.addMessage(value);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- public Builder addRegions(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region value) {
- if (regionsBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureRegionsIsMutable();
- regions_.add(index, value);
- onChanged();
- } else {
- regionsBuilder_.addMessage(index, value);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- public Builder addRegions(
- org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder builderForValue) {
- if (regionsBuilder_ == null) {
- ensureRegionsIsMutable();
- regions_.add(builderForValue.build());
- onChanged();
- } else {
- regionsBuilder_.addMessage(builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- public Builder addRegions(
- int index, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder builderForValue) {
- if (regionsBuilder_ == null) {
- ensureRegionsIsMutable();
- regions_.add(index, builderForValue.build());
- onChanged();
- } else {
- regionsBuilder_.addMessage(index, builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- public Builder addAllRegions(
- java.lang.Iterable<? extends org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region> values) {
- if (regionsBuilder_ == null) {
- ensureRegionsIsMutable();
- super.addAll(values, regions_);
- onChanged();
- } else {
- regionsBuilder_.addAllMessages(values);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- public Builder clearRegions() {
- if (regionsBuilder_ == null) {
- regions_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000002);
- onChanged();
- } else {
- regionsBuilder_.clear();
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- public Builder removeRegions(int index) {
- if (regionsBuilder_ == null) {
- ensureRegionsIsMutable();
- regions_.remove(index);
- onChanged();
- } else {
- regionsBuilder_.remove(index);
- }
- return this;
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder getRegionsBuilder(
- int index) {
- return getRegionsFieldBuilder().getBuilder(index);
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.RegionOrBuilder getRegionsOrBuilder(
- int index) {
- if (regionsBuilder_ == null) {
- return regions_.get(index); } else {
- return regionsBuilder_.getMessageOrBuilder(index);
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.RegionOrBuilder>
- getRegionsOrBuilderList() {
- if (regionsBuilder_ != null) {
- return regionsBuilder_.getMessageOrBuilderList();
- } else {
- return java.util.Collections.unmodifiableList(regions_);
- }
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder addRegionsBuilder() {
- return getRegionsFieldBuilder().addBuilder(
- org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.getDefaultInstance());
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- public org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder addRegionsBuilder(
- int index) {
- return getRegionsFieldBuilder().addBuilder(
- index, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.getDefaultInstance());
- }
- /**
- * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;</code>
- */
- public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder>
- getRegionsBuilderList() {
- return getRegionsFieldBuilder().getBuilderList();
- }
- private com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.RegionOrBuilder>
- getRegionsFieldBuilder() {
- if (regionsBuilder_ == null) {
- regionsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.RegionOrBuilder>(
- regions_,
- ((bitField0_ & 0x00000002) == 0x00000002),
- getParentForChildren(),
- isClean());
- regions_ = null;
- }
- return regionsBuilder_;
- }
-
- // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo)
- }
-
- static {
- defaultInstance = new TableInfo(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo)
- }
-
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_Region_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_Region_fieldAccessorTable;
-
- public static com.google.protobuf.Descriptors.FileDescriptor
- getDescriptor() {
- return descriptor;
- }
- private static com.google.protobuf.Descriptors.FileDescriptor
- descriptor;
- static {
- java.lang.String[] descriptorData = {
- "\n\026TableInfoMessage.proto\022/org.apache.had" +
- "oop.hbase.rest.protobuf.generated\"\305\001\n\tTa" +
- "bleInfo\022\014\n\004name\030\001 \002(\t\022R\n\007regions\030\002 \003(\0132A" +
- ".org.apache.hadoop.hbase.rest.protobuf.g" +
- "enerated.TableInfo.Region\032V\n\006Region\022\014\n\004n" +
- "ame\030\001 \002(\t\022\020\n\010startKey\030\002 \001(\014\022\016\n\006endKey\030\003 " +
- "\001(\014\022\n\n\002id\030\004 \001(\003\022\020\n\010location\030\005 \001(\t"
- };
- com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
- new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
- public com.google.protobuf.ExtensionRegistry assignDescriptors(
- com.google.protobuf.Descriptors.FileDescriptor root) {
- descriptor = root;
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_descriptor =
- getDescriptor().getMessageTypes().get(0);
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_descriptor,
- new java.lang.String[] { "Name", "Regions", });
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_Region_descriptor =
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_descriptor.getNestedTypes().get(0);
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_Region_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_Region_descriptor,
- new java.lang.String[] { "Name", "StartKey", "EndKey", "Id", "Location", });
- return null;
- }
- };
- com.google.protobuf.Descriptors.FileDescriptor
- .internalBuildGeneratedFileFrom(descriptorData,
- new com.google.protobuf.Descriptors.FileDescriptor[] {
- }, assigner);
- }
-
- // @@protoc_insertion_point(outer_class_scope)
-}
[34/38] HBASE-12197 Move rest to its own module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java
new file mode 100644
index 0000000..4321a8e
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java
@@ -0,0 +1,78 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import org.codehaus.jackson.annotate.JsonValue;
+
+import java.io.Serializable;
+
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlValue;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * Simple representation of the version of the storage cluster.
+ *
+ * <pre>
+ * <complexType name="StorageClusterVersion">
+ * <attribute name="version" type="string"></attribute>
+ * </complexType>
+ * </pre>
+ */
+@XmlRootElement(name="ClusterVersion")
+@InterfaceAudience.Private
+public class StorageClusterVersionModel implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private String version;
+
+ /**
+ * @return the storage cluster version
+ */
+ @XmlValue
+ public String getVersion() {
+ return version;
+ }
+
+ /**
+ * @param version the storage cluster version
+ */
+ public void setVersion(String version) {
+ this.version = version;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @JsonValue
+ @Override
+ public String toString() {
+ return version;
+ }
+
+ // needed for Jackson deserialization
+ private static StorageClusterVersionModel valueOf(String value) {
+ StorageClusterVersionModel versionModel
+ = new StorageClusterVersionModel();
+ versionModel.setVersion(value);
+ return versionModel;
+ }
+}
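
A minimal usage sketch for the model above (hypothetical version string; assumes the hbase-rest classes and a JAXB runtime on the classpath):

    import java.io.StringWriter;
    import javax.xml.bind.JAXBContext;
    import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel;

    public class ClusterVersionSketch {
      public static void main(String[] args) throws Exception {
        StorageClusterVersionModel model = new StorageClusterVersionModel();
        model.setVersion("0.98.7");  // hypothetical cluster version
        // @XmlValue maps the field to the element text, so this prints
        // <ClusterVersion>0.98.7</ClusterVersion> (plus the XML prolog)
        StringWriter out = new StringWriter();
        JAXBContext.newInstance(StorageClusterVersionModel.class)
            .createMarshaller().marshal(model, out);
        System.out.println(out);
      }
    }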
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java
new file mode 100644
index 0000000..700e766
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java
@@ -0,0 +1,159 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo;
+
+/**
+ * Representation of a list of table regions.
+ *
+ * <pre>
+ * <complexType name="TableInfo">
+ * <sequence>
+ * <element name="region" type="tns:TableRegion"
+ * maxOccurs="unbounded" minOccurs="1"></element>
+ * </sequence>
+ * <attribute name="name" type="string"></attribute>
+ * </complexType>
+ * </pre>
+ */
+@XmlRootElement(name="TableInfo")
+@InterfaceAudience.Private
+public class TableInfoModel implements Serializable, ProtobufMessageHandler {
+ private static final long serialVersionUID = 1L;
+
+ private String name;
+ private List<TableRegionModel> regions = new ArrayList<TableRegionModel>();
+
+ /**
+ * Default constructor
+ */
+ public TableInfoModel() {}
+
+ /**
+ * Constructor
+ * @param name the table name
+ */
+ public TableInfoModel(String name) {
+ this.name = name;
+ }
+
+ /**
+ * Add a region model to the list
+ * @param region the region
+ */
+ public void add(TableRegionModel region) {
+ regions.add(region);
+ }
+
+ /**
+ * @param index the index
+ * @return the region model
+ */
+ public TableRegionModel get(int index) {
+ return regions.get(index);
+ }
+
+ /**
+ * @return the table name
+ */
+ @XmlAttribute
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * @return the regions
+ */
+ @XmlElement(name="Region")
+ public List<TableRegionModel> getRegions() {
+ return regions;
+ }
+
+ /**
+ * @param name the table name
+ */
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ /**
+ * @param regions the regions to set
+ */
+ public void setRegions(List<TableRegionModel> regions) {
+ this.regions = regions;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ for(TableRegionModel aRegion : regions) {
+ sb.append(aRegion.toString());
+ sb.append('\n');
+ }
+ return sb.toString();
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ TableInfo.Builder builder = TableInfo.newBuilder();
+ builder.setName(name);
+ for (TableRegionModel aRegion: regions) {
+ TableInfo.Region.Builder regionBuilder = TableInfo.Region.newBuilder();
+ regionBuilder.setName(aRegion.getName());
+ regionBuilder.setId(aRegion.getId());
+ regionBuilder.setStartKey(ByteStringer.wrap(aRegion.getStartKey()));
+ regionBuilder.setEndKey(ByteStringer.wrap(aRegion.getEndKey()));
+ regionBuilder.setLocation(aRegion.getLocation());
+ builder.addRegions(regionBuilder);
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException {
+ TableInfo.Builder builder = TableInfo.newBuilder();
+ builder.mergeFrom(message);
+ setName(builder.getName());
+ for (TableInfo.Region region: builder.getRegionsList()) {
+ add(new TableRegionModel(builder.getName(), region.getId(),
+ region.getStartKey().toByteArray(),
+ region.getEndKey().toByteArray(),
+ region.getLocation()));
+ }
+ return this;
+ }
+}
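
A minimal round-trip sketch for TableInfoModel's protobuf path (hypothetical table name, keys, and location; assumes the hbase-rest classes on the classpath):

    import org.apache.hadoop.hbase.rest.model.TableInfoModel;
    import org.apache.hadoop.hbase.rest.model.TableRegionModel;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TableInfoSketch {
      public static void main(String[] args) throws Exception {
        TableInfoModel info = new TableInfoModel("testtable");  // hypothetical
        info.add(new TableRegionModel("testtable", 1245219839331L,
            Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), "host1:60020"));
        // Serialize to the TableInfo protobuf and parse it back.
        byte[] wire = info.createProtobufOutput();
        TableInfoModel copy =
            (TableInfoModel) new TableInfoModel().getObjectFromMessage(wire);
        System.out.print(copy);  // toString() prints one region per line
      }
    }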
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java
new file mode 100644
index 0000000..596adac
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java
@@ -0,0 +1,113 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlElementRef;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList;
+
+/**
+ * Simple representation of a list of table names.
+ */
+@XmlRootElement(name="TableList")
+@InterfaceAudience.Private
+public class TableListModel implements Serializable, ProtobufMessageHandler {
+
+ private static final long serialVersionUID = 1L;
+
+ private List<TableModel> tables = new ArrayList<TableModel>();
+
+ /**
+ * Default constructor
+ */
+ public TableListModel() {}
+
+ /**
+ * Add the table name model to the list
+ * @param table the table model
+ */
+ public void add(TableModel table) {
+ tables.add(table);
+ }
+
+ /**
+ * @param index the index
+ * @return the table model
+ */
+ public TableModel get(int index) {
+ return tables.get(index);
+ }
+
+ /**
+ * @return the tables
+ */
+ @XmlElementRef(name="table")
+ public List<TableModel> getTables() {
+ return tables;
+ }
+
+ /**
+ * @param tables the tables to set
+ */
+ public void setTables(List<TableModel> tables) {
+ this.tables = tables;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ for(TableModel aTable : tables) {
+ sb.append(aTable.toString());
+ sb.append('\n');
+ }
+ return sb.toString();
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ TableList.Builder builder = TableList.newBuilder();
+ for (TableModel aTable : tables) {
+ builder.addName(aTable.getName());
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException {
+ TableList.Builder builder = TableList.newBuilder();
+ builder.mergeFrom(message);
+ for (String table: builder.getNameList()) {
+ this.add(new TableModel(table));
+ }
+ return this;
+ }
+}
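
A minimal sketch of the same protobuf round trip for the table list (hypothetical table names):

    import org.apache.hadoop.hbase.rest.model.TableListModel;
    import org.apache.hadoop.hbase.rest.model.TableModel;

    public class TableListSketch {
      public static void main(String[] args) throws Exception {
        TableListModel list = new TableListModel();
        list.add(new TableModel("table1"));  // hypothetical table names
        list.add(new TableModel("table2"));
        byte[] wire = list.createProtobufOutput();
        TableListModel copy =
            (TableListModel) new TableListModel().getObjectFromMessage(wire);
        System.out.print(copy);  // one table name per line, per toString()
      }
    }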
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java
new file mode 100644
index 0000000..0fb0d6e
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java
@@ -0,0 +1,84 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.Serializable;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * Simple representation of a table name.
+ *
+ * <pre>
+ * <complexType name="Table">
+ * <sequence>
+ * <element name="name" type="string"></element>
+ * </sequence>
+ * </complexType>
+ * </pre>
+ */
+@XmlRootElement(name="table")
+@InterfaceAudience.Private
+public class TableModel implements Serializable {
+
+ private static final long serialVersionUID = 1L;
+
+ private String name;
+
+ /**
+ * Default constructor
+ */
+ public TableModel() {}
+
+ /**
+ * Constructor
+ * @param name the table name
+ */
+ public TableModel(String name) {
+ super();
+ this.name = name;
+ }
+
+ /**
+ * @return the name
+ */
+ @XmlAttribute
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * @param name the name to set
+ */
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ return this.name;
+ }
+}
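
A minimal JAXB sketch for this model (hypothetical table name; assumes a JAXB runtime on the classpath):

    import java.io.StringWriter;
    import javax.xml.bind.JAXBContext;
    import org.apache.hadoop.hbase.rest.model.TableModel;

    public class TableModelSketch {
      public static void main(String[] args) throws Exception {
        StringWriter out = new StringWriter();
        JAXBContext.newInstance(TableModel.class).createMarshaller()
            .marshal(new TableModel("users"), out);  // hypothetical name
        // @XmlRootElement(name="table") plus @XmlAttribute yield
        // <table name="users"/> (plus the XML prolog)
        System.out.println(out);
      }
    }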
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java
new file mode 100644
index 0000000..d9b2b65
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java
@@ -0,0 +1,196 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.Serializable;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Representation of a region of a table and its current location on the
+ * storage cluster.
+ *
+ * <pre>
+ * <complexType name="TableRegion">
+ * <attribute name="name" type="string"></attribute>
+ * <attribute name="id" type="int"></attribute>
+ * <attribute name="startKey" type="base64Binary"></attribute>
+ * <attribute name="endKey" type="base64Binary"></attribute>
+ * <attribute name="location" type="string"></attribute>
+ * </complexType>
+ * </pre>
+ */
+@XmlRootElement(name="Region")
+@InterfaceAudience.Private
+public class TableRegionModel implements Serializable {
+
+ private static final long serialVersionUID = 1L;
+
+ private String table;
+ private long id;
+ private byte[] startKey;
+ private byte[] endKey;
+ private String location;
+
+ /**
+ * Constructor
+ */
+ public TableRegionModel() {}
+
+ /**
+ * Constructor
+ * @param table the table name
+ * @param id the encoded id of the region
+ * @param startKey the start key of the region
+ * @param endKey the end key of the region
+ */
+ public TableRegionModel(String table, long id, byte[] startKey,
+ byte[] endKey) {
+ this(table, id, startKey, endKey, null);
+ }
+
+ /**
+ * Constructor
+ * @param table the table name
+ * @param id the encoded id of the region
+ * @param startKey the start key of the region
+ * @param endKey the end key of the region
+ * @param location the name and port of the region server hosting the region
+ */
+ public TableRegionModel(String table, long id, byte[] startKey,
+ byte[] endKey, String location) {
+ this.table = table;
+ this.id = id;
+ this.startKey = startKey;
+ this.endKey = endKey;
+ this.location = location;
+ }
+
+ /**
+ * @return the region name
+ */
+ @XmlAttribute
+ public String getName() {
+ byte [] tableNameAsBytes = Bytes.toBytes(this.table);
+ TableName tableName = TableName.valueOf(tableNameAsBytes);
+ byte [] nameAsBytes = HRegionInfo.createRegionName(
+ tableName, this.startKey, this.id, !tableName.isSystemTable());
+ return Bytes.toString(nameAsBytes);
+ }
+
+ /**
+ * @return the encoded region id
+ */
+ @XmlAttribute
+ public long getId() {
+ return id;
+ }
+
+ /**
+ * @return the start key
+ */
+ @XmlAttribute
+ public byte[] getStartKey() {
+ return startKey;
+ }
+
+ /**
+ * @return the end key
+ */
+ @XmlAttribute
+ public byte[] getEndKey() {
+ return endKey;
+ }
+
+ /**
+ * @return the name and port of the region server hosting the region
+ */
+ @XmlAttribute
+ public String getLocation() {
+ return location;
+ }
+
+ /**
+ * @param name the region's printable name
+ */
+ public void setName(String name) {
+ String[] split = name.split(",");
+ this.table = split[0];
+ this.startKey = Bytes.toBytes(split[1]);
+ String tail = split[2];
+ split = tail.split("\\.");
+ id = Long.valueOf(split[0]);
+ }
+
+ /**
+ * @param id the region's encoded id
+ */
+ public void setId(long id) {
+ this.id = id;
+ }
+
+ /**
+ * @param startKey the start key
+ */
+ public void setStartKey(byte[] startKey) {
+ this.startKey = startKey;
+ }
+
+ /**
+ * @param endKey the end key
+ */
+ public void setEndKey(byte[] endKey) {
+ this.endKey = endKey;
+ }
+
+ /**
+ * @param location the name and port of the region server hosting the region
+ */
+ public void setLocation(String location) {
+ this.location = location;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append(getName());
+ sb.append(" [\n id=");
+ sb.append(id);
+ sb.append("\n startKey='");
+ sb.append(Bytes.toString(startKey));
+ sb.append("'\n endKey='");
+ sb.append(Bytes.toString(endKey));
+ if (location != null) {
+ sb.append("'\n location='");
+ sb.append(location);
+ }
+ sb.append("'\n]\n");
+ return sb.toString();
+ }
+}
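
A minimal sketch of how getName() composes the region name (hypothetical table, keys, and location; assumes the hbase-rest classes on the classpath):

    import org.apache.hadoop.hbase.rest.model.TableRegionModel;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TableRegionSketch {
      public static void main(String[] args) {
        TableRegionModel region = new TableRegionModel("testtable",
            1245219839331L, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"),
            "host1:60020");  // hypothetical values
        // getName() delegates to HRegionInfo.createRegionName, yielding the
        // canonical form, e.g. testtable,aaa,1245219839331.<md5>.
        System.out.println(region.getName());
        System.out.print(region);  // the multi-line toString() shown above
      }
    }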
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java
new file mode 100644
index 0000000..d843e79
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java
@@ -0,0 +1,361 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import javax.xml.bind.annotation.XmlAnyAttribute;
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.namespace.QName;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema;
+import org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.codehaus.jackson.annotate.JsonAnyGetter;
+import org.codehaus.jackson.annotate.JsonAnySetter;
+import org.codehaus.jackson.annotate.JsonIgnore;
+
+/**
+ * A representation of HBase table descriptors.
+ *
+ * <pre>
+ * <complexType name="TableSchema">
+ * <sequence>
+ * <element name="column" type="tns:ColumnSchema"
+ * maxOccurs="unbounded" minOccurs="1"></element>
+ * </sequence>
+ * <attribute name="name" type="string"></attribute>
+ * <anyAttribute></anyAttribute>
+ * </complexType>
+ * </pre>
+ */
+@XmlRootElement(name="TableSchema")
+@InterfaceAudience.Private
+public class TableSchemaModel implements Serializable, ProtobufMessageHandler {
+ private static final long serialVersionUID = 1L;
+ private static final QName IS_META = new QName(HTableDescriptor.IS_META);
+ private static final QName IS_ROOT = new QName(HTableDescriptor.IS_ROOT);
+ private static final QName READONLY = new QName(HTableDescriptor.READONLY);
+ private static final QName TTL = new QName(HColumnDescriptor.TTL);
+ private static final QName VERSIONS = new QName(HConstants.VERSIONS);
+ private static final QName COMPRESSION =
+ new QName(HColumnDescriptor.COMPRESSION);
+
+ private String name;
+ private Map<QName,Object> attrs = new LinkedHashMap<QName,Object>();
+ private List<ColumnSchemaModel> columns = new ArrayList<ColumnSchemaModel>();
+
+ /**
+ * Default constructor.
+ */
+ public TableSchemaModel() {}
+
+ /**
+ * Constructor
+ * @param htd the table descriptor
+ */
+ public TableSchemaModel(HTableDescriptor htd) {
+ setName(htd.getTableName().getNameAsString());
+ for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
+ htd.getValues().entrySet()) {
+ addAttribute(Bytes.toString(e.getKey().get()),
+ Bytes.toString(e.getValue().get()));
+ }
+ for (HColumnDescriptor hcd: htd.getFamilies()) {
+ ColumnSchemaModel columnModel = new ColumnSchemaModel();
+ columnModel.setName(hcd.getNameAsString());
+ for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
+ hcd.getValues().entrySet()) {
+ columnModel.addAttribute(Bytes.toString(e.getKey().get()),
+ Bytes.toString(e.getValue().get()));
+ }
+ addColumnFamily(columnModel);
+ }
+ }
+
+ /**
+ * Add an attribute to the table descriptor
+ * @param name attribute name
+ * @param value attribute value
+ */
+ @JsonAnySetter
+ public void addAttribute(String name, Object value) {
+ attrs.put(new QName(name), value);
+ }
+
+ /**
+ * Return a table descriptor value as a string. Calls toString() on the
+ * object stored in the descriptor value map.
+ * @param name the attribute name
+ * @return the attribute value
+ */
+ public String getAttribute(String name) {
+ Object o = attrs.get(new QName(name));
+ return o != null ? o.toString() : null;
+ }
+
+ /**
+ * Add a column family to the table descriptor
+ * @param family the column family model
+ */
+ public void addColumnFamily(ColumnSchemaModel family) {
+ columns.add(family);
+ }
+
+ /**
+ * Retrieve the column family at the given index from the table descriptor
+ * @param index the index
+ * @return the column family model
+ */
+ public ColumnSchemaModel getColumnFamily(int index) {
+ return columns.get(index);
+ }
+
+ /**
+ * @return the table name
+ */
+ @XmlAttribute
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * @return the map for holding unspecified (user) attributes
+ */
+ @XmlAnyAttribute
+ @JsonAnyGetter
+ public Map<QName,Object> getAny() {
+ return attrs;
+ }
+
+ /**
+ * @return the columns
+ */
+ @XmlElement(name="ColumnSchema")
+ public List<ColumnSchemaModel> getColumns() {
+ return columns;
+ }
+
+ /**
+ * @param name the table name
+ */
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ /**
+ * @param columns the columns to set
+ */
+ public void setColumns(List<ColumnSchemaModel> columns) {
+ this.columns = columns;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("{ NAME=> '");
+ sb.append(name);
+ sb.append('\'');
+ for (Map.Entry<QName,Object> e: attrs.entrySet()) {
+ sb.append(", ");
+ sb.append(e.getKey().getLocalPart());
+ sb.append(" => '");
+ sb.append(e.getValue().toString());
+ sb.append('\'');
+ }
+ sb.append(", COLUMNS => [ ");
+ Iterator<ColumnSchemaModel> i = columns.iterator();
+ while (i.hasNext()) {
+ ColumnSchemaModel family = i.next();
+ sb.append(family.toString());
+ if (i.hasNext()) {
+ sb.append(',');
+ }
+ sb.append(' ');
+ }
+ sb.append("] }");
+ return sb.toString();
+ }
+
+ // getters and setters for common schema attributes
+
+ // cannot be standard bean type getters and setters, otherwise this would
+ // confuse JAXB
+
+ /**
+ * @return true if IS_META attribute exists and is true
+ */
+ public boolean __getIsMeta() {
+ Object o = attrs.get(IS_META);
+ return o != null ? Boolean.valueOf(o.toString()) : false;
+ }
+
+ /**
+ * @return true if IS_ROOT attribute exists and is true
+ */
+ public boolean __getIsRoot() {
+ Object o = attrs.get(IS_ROOT);
+ return o != null ? Boolean.valueOf(o.toString()) : false;
+ }
+
+ /**
+ * @return true if READONLY attribute exists and is true
+ */
+ public boolean __getReadOnly() {
+ Object o = attrs.get(READONLY);
+ return o != null ?
+ Boolean.valueOf(o.toString()) : HTableDescriptor.DEFAULT_READONLY;
+ }
+
+ /**
+ * @param value desired value of IS_META attribute
+ */
+ public void __setIsMeta(boolean value) {
+ attrs.put(IS_META, Boolean.toString(value));
+ }
+
+ /**
+ * @param value desired value of IS_ROOT attribute
+ */
+ public void __setIsRoot(boolean value) {
+ attrs.put(IS_ROOT, Boolean.toString(value));
+ }
+
+ /**
+ * @param value desired value of READONLY attribute
+ */
+ public void __setReadOnly(boolean value) {
+ attrs.put(READONLY, Boolean.toString(value));
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ TableSchema.Builder builder = TableSchema.newBuilder();
+ builder.setName(name);
+ for (Map.Entry<QName, Object> e: attrs.entrySet()) {
+ TableSchema.Attribute.Builder attrBuilder =
+ TableSchema.Attribute.newBuilder();
+ attrBuilder.setName(e.getKey().getLocalPart());
+ attrBuilder.setValue(e.getValue().toString());
+ builder.addAttrs(attrBuilder);
+ }
+ for (ColumnSchemaModel family: columns) {
+ Map<QName, Object> familyAttrs = family.getAny();
+ ColumnSchema.Builder familyBuilder = ColumnSchema.newBuilder();
+ familyBuilder.setName(family.getName());
+ for (Map.Entry<QName, Object> e: familyAttrs.entrySet()) {
+ ColumnSchema.Attribute.Builder attrBuilder =
+ ColumnSchema.Attribute.newBuilder();
+ attrBuilder.setName(e.getKey().getLocalPart());
+ attrBuilder.setValue(e.getValue().toString());
+ familyBuilder.addAttrs(attrBuilder);
+ }
+ if (familyAttrs.containsKey(TTL)) {
+ familyBuilder.setTtl(
+ Integer.valueOf(familyAttrs.get(TTL).toString()));
+ }
+ if (familyAttrs.containsKey(VERSIONS)) {
+ familyBuilder.setMaxVersions(
+ Integer.valueOf(familyAttrs.get(VERSIONS).toString()));
+ }
+ if (familyAttrs.containsKey(COMPRESSION)) {
+ familyBuilder.setCompression(familyAttrs.get(COMPRESSION).toString());
+ }
+ builder.addColumns(familyBuilder);
+ }
+ if (attrs.containsKey(READONLY)) {
+ builder.setReadOnly(
+ Boolean.valueOf(attrs.get(READONLY).toString()));
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException {
+ TableSchema.Builder builder = TableSchema.newBuilder();
+ builder.mergeFrom(message);
+ this.setName(builder.getName());
+ for (TableSchema.Attribute attr: builder.getAttrsList()) {
+ this.addAttribute(attr.getName(), attr.getValue());
+ }
+ if (builder.hasReadOnly()) {
+ this.addAttribute(HTableDescriptor.READONLY, builder.getReadOnly());
+ }
+ for (ColumnSchema family: builder.getColumnsList()) {
+ ColumnSchemaModel familyModel = new ColumnSchemaModel();
+ familyModel.setName(family.getName());
+ for (ColumnSchema.Attribute attr: family.getAttrsList()) {
+ familyModel.addAttribute(attr.getName(), attr.getValue());
+ }
+ if (family.hasTtl()) {
+ familyModel.addAttribute(HColumnDescriptor.TTL, family.getTtl());
+ }
+ if (family.hasMaxVersions()) {
+ familyModel.addAttribute(HConstants.VERSIONS,
+ family.getMaxVersions());
+ }
+ if (family.hasCompression()) {
+ familyModel.addAttribute(HColumnDescriptor.COMPRESSION,
+ family.getCompression());
+ }
+ this.addColumnFamily(familyModel);
+ }
+ return this;
+ }
+
+ /**
+ * @return a table descriptor
+ */
+ @JsonIgnore
+ public HTableDescriptor getTableDescriptor() {
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(getName()));
+ for (Map.Entry<QName, Object> e: getAny().entrySet()) {
+ htd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
+ }
+ for (ColumnSchemaModel column: getColumns()) {
+ HColumnDescriptor hcd = new HColumnDescriptor(column.getName());
+ for (Map.Entry<QName, Object> e: column.getAny().entrySet()) {
+ hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
+ }
+ htd.addFamily(hcd);
+ }
+ return htd;
+ }
+
+}
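
A minimal sketch of building the schema model from a descriptor and round-tripping it over protobuf (hypothetical table and family names; uses the 0.98-era HTableDescriptor/HColumnDescriptor API):

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.rest.model.TableSchemaModel;

    public class TableSchemaSketch {
      public static void main(String[] args) throws Exception {
        HTableDescriptor htd =
            new HTableDescriptor(TableName.valueOf("users"));  // hypothetical
        htd.addFamily(new HColumnDescriptor("info"));
        TableSchemaModel schema = new TableSchemaModel(htd);
        // Protobuf round trip, then back to a descriptor.
        byte[] wire = schema.createProtobufOutput();
        TableSchemaModel copy =
            (TableSchemaModel) new TableSchemaModel().getObjectFromMessage(wire);
        HTableDescriptor rebuilt = copy.getTableDescriptor();
        System.out.println(rebuilt.getTableName());
      }
    }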
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java
new file mode 100644
index 0000000..0938803
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java
@@ -0,0 +1,209 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+import javax.servlet.ServletContext;
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.rest.RESTServlet;
+import org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version;
+
+import com.sun.jersey.spi.container.servlet.ServletContainer;
+
+/**
+ * A representation of the collection of versions of the REST gateway software
+ * components.
+ * <ul>
+ * <li>restVersion: REST gateway revision</li>
+ * <li>jvmVersion: the JVM vendor and version information</li>
+ * <li>osVersion: the OS type, version, and hardware architecture</li>
+ * <li>serverVersion: the name and version of the servlet container</li>
+ * <li>jerseyVersion: the version of the embedded Jersey framework</li>
+ * </ul>
+ */
+@XmlRootElement(name="Version")
+@InterfaceAudience.Private
+public class VersionModel implements Serializable, ProtobufMessageHandler {
+
+ private static final long serialVersionUID = 1L;
+
+ private String restVersion;
+ private String jvmVersion;
+ private String osVersion;
+ private String serverVersion;
+ private String jerseyVersion;
+
+ /**
+ * Default constructor. Do not use.
+ */
+ public VersionModel() {}
+
+ /**
+ * Constructor
+ * @param context the servlet context
+ */
+ public VersionModel(ServletContext context) {
+ restVersion = RESTServlet.VERSION_STRING;
+ jvmVersion = System.getProperty("java.vm.vendor") + ' ' +
+ System.getProperty("java.version") + '-' +
+ System.getProperty("java.vm.version");
+ osVersion = System.getProperty("os.name") + ' ' +
+ System.getProperty("os.version") + ' ' +
+ System.getProperty("os.arch");
+ serverVersion = context.getServerInfo();
+ jerseyVersion = ServletContainer.class.getPackage()
+ .getImplementationVersion();
+ }
+
+ /**
+ * @return the REST gateway version
+ */
+ @XmlAttribute(name="REST")
+ public String getRESTVersion() {
+ return restVersion;
+ }
+
+ /**
+ * @return the JVM vendor and version
+ */
+ @XmlAttribute(name="JVM")
+ public String getJVMVersion() {
+ return jvmVersion;
+ }
+
+ /**
+ * @return the OS name, version, and hardware architecture
+ */
+ @XmlAttribute(name="OS")
+ public String getOSVersion() {
+ return osVersion;
+ }
+
+ /**
+ * @return the servlet container version
+ */
+ @XmlAttribute(name="Server")
+ public String getServerVersion() {
+ return serverVersion;
+ }
+
+ /**
+ * @return the version of the embedded Jersey framework
+ */
+ @XmlAttribute(name="Jersey")
+ public String getJerseyVersion() {
+ return jerseyVersion;
+ }
+
+ /**
+ * @param version the REST gateway version string
+ */
+ public void setRESTVersion(String version) {
+ this.restVersion = version;
+ }
+
+ /**
+ * @param version the OS version string
+ */
+ public void setOSVersion(String version) {
+ this.osVersion = version;
+ }
+
+ /**
+ * @param version the JVM version string
+ */
+ public void setJVMVersion(String version) {
+ this.jvmVersion = version;
+ }
+
+ /**
+ * @param version the servlet container version string
+ */
+ public void setServerVersion(String version) {
+ this.serverVersion = version;
+ }
+
+ /**
+ * @param version the Jersey framework version string
+ */
+ public void setJerseyVersion(String version) {
+ this.jerseyVersion = version;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("rest ");
+ sb.append(restVersion);
+ sb.append(" [JVM: ");
+ sb.append(jvmVersion);
+ sb.append("] [OS: ");
+ sb.append(osVersion);
+ sb.append("] [Server: ");
+ sb.append(serverVersion);
+ sb.append("] [Jersey: ");
+ sb.append(jerseyVersion);
+ sb.append("]\n");
+ return sb.toString();
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ Version.Builder builder = Version.newBuilder();
+ builder.setRestVersion(restVersion);
+ builder.setJvmVersion(jvmVersion);
+ builder.setOsVersion(osVersion);
+ builder.setServerVersion(serverVersion);
+ builder.setJerseyVersion(jerseyVersion);
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException {
+ Version.Builder builder = Version.newBuilder();
+ builder.mergeFrom(message);
+ if (builder.hasRestVersion()) {
+ restVersion = builder.getRestVersion();
+ }
+ if (builder.hasJvmVersion()) {
+ jvmVersion = builder.getJvmVersion();
+ }
+ if (builder.hasOsVersion()) {
+ osVersion = builder.getOsVersion();
+ }
+ if (builder.hasServerVersion()) {
+ serverVersion = builder.getServerVersion();
+ }
+ if (builder.hasJerseyVersion()) {
+ jerseyVersion = builder.getJerseyVersion();
+ }
+ return this;
+ }
+}
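
A minimal sketch using the setters, since constructing from a ServletContext requires a running container (all values hypothetical):

    import org.apache.hadoop.hbase.rest.model.VersionModel;

    public class VersionSketch {
      public static void main(String[] args) throws Exception {
        // Outside a servlet container there is no ServletContext, so the
        // setters stand in for the VersionModel(ServletContext) constructor.
        VersionModel v = new VersionModel();
        v.setRESTVersion("0.0.2");            // hypothetical values
        v.setJVMVersion("Oracle 1.7.0-21.0");
        v.setOSVersion("Linux 3.2 amd64");
        v.setServerVersion("jetty/6.1.26");
        v.setJerseyVersion("1.8");
        byte[] wire = v.createProtobufOutput();
        VersionModel copy =
            (VersionModel) new VersionModel().getObjectFromMessage(wire);
        System.out.print(copy);
      }
    }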
[23/38] HBASE-12197 Move rest to its own module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java
new file mode 100644
index 0000000..c14f3e2
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java
@@ -0,0 +1,585 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.StringWriter;
+import java.net.URLEncoder;
+import java.util.List;
+
+import javax.xml.bind.JAXBException;
+
+import org.apache.commons.httpclient.Header;
+import org.apache.hadoop.hbase.CompatibilityFactory;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.rest.client.Response;
+import org.apache.hadoop.hbase.rest.model.CellModel;
+import org.apache.hadoop.hbase.rest.model.CellSetModel;
+import org.apache.hadoop.hbase.rest.model.RowModel;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.test.MetricsAssertHelper;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestGetAndPutResource extends RowResourceBase {
+
+ private static final MetricsAssertHelper METRICS_ASSERT =
+ CompatibilityFactory.getInstance(MetricsAssertHelper.class);
+
+ @Test
+ public void testForbidden() throws IOException, JAXBException {
+ conf.set("hbase.rest.readonly", "true");
+
+ Response response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ assertEquals(response.getCode(), 403);
+ response = putValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ assertEquals(response.getCode(), 403);
+ response = checkAndPutValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1, VALUE_2);
+ assertEquals(response.getCode(), 403);
+ response = checkAndPutValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1, VALUE_2);
+ assertEquals(response.getCode(), 403);
+ response = deleteValue(TABLE, ROW_1, COLUMN_1);
+ assertEquals(response.getCode(), 403);
+ response = checkAndDeletePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ assertEquals(response.getCode(), 403);
+ response = deleteRow(TABLE, ROW_1);
+ assertEquals(response.getCode(), 403);
+
+ conf.set("hbase.rest.readonly", "false");
+
+ response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ assertEquals(response.getCode(), 200);
+ response = putValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ assertEquals(response.getCode(), 200);
+ response = checkAndPutValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1, VALUE_2);
+ assertEquals(response.getCode(), 200);
+ response = checkAndPutValuePB(TABLE, ROW_1, COLUMN_1, VALUE_2, VALUE_3);
+ assertEquals(response.getCode(), 200);
+ response = deleteValue(TABLE, ROW_1, COLUMN_1);
+ assertEquals(response.getCode(), 200);
+ response = deleteRow(TABLE, ROW_1);
+ assertEquals(response.getCode(), 200);
+ }
+
+ @Test
+ public void testSingleCellGetPutXML() throws IOException, JAXBException {
+ Response response = getValueXML(TABLE, ROW_1, COLUMN_1);
+ assertEquals(response.getCode(), 404);
+
+ response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ assertEquals(response.getCode(), 200);
+ checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_2);
+ assertEquals(response.getCode(), 200);
+ checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_2);
+ response = checkAndPutValueXML(TABLE, ROW_1, COLUMN_1, VALUE_2, VALUE_3);
+ assertEquals(response.getCode(), 200);
+ checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_3);
+ response = checkAndDeleteXML(TABLE, ROW_1, COLUMN_1, VALUE_3);
+ assertEquals(response.getCode(), 200);
+
+ response = deleteRow(TABLE, ROW_1);
+ assertEquals(response.getCode(), 200);
+ }
+
+ @Test
+ public void testSingleCellGetPutPB() throws IOException, JAXBException {
+ Response response = getValuePB(TABLE, ROW_1, COLUMN_1);
+ assertEquals(response.getCode(), 404);
+
+ response = putValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ assertEquals(response.getCode(), 200);
+ checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_2);
+ assertEquals(response.getCode(), 200);
+ checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_2);
+
+ response = checkAndPutValuePB(TABLE, ROW_1, COLUMN_1, VALUE_2, VALUE_3);
+ assertEquals(response.getCode(), 200);
+ checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_3);
+ response = checkAndPutValueXML(TABLE, ROW_1, COLUMN_1, VALUE_3, VALUE_4);
+ assertEquals(response.getCode(), 200);
+ checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_4);
+
+ response = deleteRow(TABLE, ROW_1);
+ assertEquals(response.getCode(), 200);
+ }
+
+ @Test
+ public void testSingleCellGetPutBinary() throws IOException {
+ final String path = "/" + TABLE + "/" + ROW_3 + "/" + COLUMN_1;
+ final byte[] body = Bytes.toBytes(VALUE_3);
+ Response response = client.put(path, Constants.MIMETYPE_BINARY, body);
+ assertEquals(response.getCode(), 200);
+ Thread.yield();
+
+ response = client.get(path, Constants.MIMETYPE_BINARY);
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_BINARY, response.getHeader("content-type"));
+ assertTrue(Bytes.equals(response.getBody(), body));
+ boolean foundTimestampHeader = false;
+ for (Header header: response.getHeaders()) {
+ if (header.getName().equals("X-Timestamp")) {
+ foundTimestampHeader = true;
+ break;
+ }
+ }
+ assertTrue(foundTimestampHeader);
+
+ response = deleteRow(TABLE, ROW_3);
+ assertEquals(response.getCode(), 200);
+ }
+
+ @Test
+ public void testSingleCellGetJSON() throws IOException, JAXBException {
+ final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1;
+ Response response = client.put(path, Constants.MIMETYPE_BINARY,
+ Bytes.toBytes(VALUE_4));
+ assertEquals(response.getCode(), 200);
+ Thread.yield();
+ response = client.get(path, Constants.MIMETYPE_JSON);
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
+ response = deleteRow(TABLE, ROW_4);
+ assertEquals(response.getCode(), 200);
+ }
+
+ @Test
+ public void testLatestCellGetJSON() throws IOException, JAXBException {
+ final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1;
+ CellSetModel cellSetModel = new CellSetModel();
+ RowModel rowModel = new RowModel(ROW_4);
+ CellModel cellOne = new CellModel(Bytes.toBytes(COLUMN_1), 1L,
+ Bytes.toBytes(VALUE_1));
+ CellModel cellTwo = new CellModel(Bytes.toBytes(COLUMN_1), 2L,
+ Bytes.toBytes(VALUE_2));
+ rowModel.addCell(cellOne);
+ rowModel.addCell(cellTwo);
+ cellSetModel.addRow(rowModel);
+ String jsonString = jsonMapper.writeValueAsString(cellSetModel);
+ Response response = client.put(path, Constants.MIMETYPE_JSON,
+ Bytes.toBytes(jsonString));
+ assertEquals(response.getCode(), 200);
+ Thread.yield();
+ response = client.get(path, Constants.MIMETYPE_JSON);
+ assertEquals(response.getCode(), 200);
+ assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
+ CellSetModel cellSet = jsonMapper.readValue(response.getBody(), CellSetModel.class);
+ assertTrue(cellSet.getRows().size() == 1);
+ assertTrue(cellSet.getRows().get(0).getCells().size() == 1);
+ CellModel cell = cellSet.getRows().get(0).getCells().get(0);
+ assertEquals(VALUE_2 , Bytes.toString(cell.getValue()));
+ assertEquals(2L , cell.getTimestamp());
+ response = deleteRow(TABLE, ROW_4);
+ assertEquals(response.getCode(), 200);
+ }
+
+ @Test
+ public void testURLEncodedKey() throws IOException, JAXBException {
+ String urlKey = "http://example.com/foo";
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ path.append(TABLE);
+ path.append('/');
+ path.append(URLEncoder.encode(urlKey, HConstants.UTF8_ENCODING));
+ path.append('/');
+ path.append(COLUMN_1);
+ Response response;
+ response = putValueXML(path.toString(), TABLE, urlKey, COLUMN_1,
+ VALUE_1);
+ assertEquals(response.getCode(), 200);
+ checkValueXML(path.toString(), TABLE, urlKey, COLUMN_1, VALUE_1);
+ }
+
+ @Test
+ public void testNoSuchCF() throws IOException, JAXBException {
+ final String goodPath = "/" + TABLE + "/" + ROW_1 + "/" + CFA+":";
+ final String badPath = "/" + TABLE + "/" + ROW_1 + "/" + "BAD";
+ Response response = client.post(goodPath, Constants.MIMETYPE_BINARY,
+ Bytes.toBytes(VALUE_1));
+ assertEquals(response.getCode(), 200);
+ assertEquals(client.get(goodPath, Constants.MIMETYPE_BINARY).getCode(),
+ 200);
+ assertEquals(client.get(badPath, Constants.MIMETYPE_BINARY).getCode(),
+ 404);
+ assertEquals(client.get(goodPath, Constants.MIMETYPE_BINARY).getCode(),
+ 200);
+ }
+
+ @Test
+ public void testMultiCellGetPutXML() throws IOException, JAXBException {
+ String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row
+
+ CellSetModel cellSetModel = new CellSetModel();
+ RowModel rowModel = new RowModel(ROW_1);
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
+ Bytes.toBytes(VALUE_1)));
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
+ Bytes.toBytes(VALUE_2)));
+ cellSetModel.addRow(rowModel);
+ rowModel = new RowModel(ROW_2);
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
+ Bytes.toBytes(VALUE_3)));
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
+ Bytes.toBytes(VALUE_4)));
+ cellSetModel.addRow(rowModel);
+ StringWriter writer = new StringWriter();
+ xmlMarshaller.marshal(cellSetModel, writer);
+ Response response = client.put(path, Constants.MIMETYPE_XML,
+ Bytes.toBytes(writer.toString()));
+ assertEquals(200, response.getCode());
+ Thread.yield();
+
+ // make sure the fake row was not actually created
+ response = client.get(path, Constants.MIMETYPE_XML);
+ assertEquals(404, response.getCode());
+
+ // check that all of the values were created
+ checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ checkValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2);
+ checkValueXML(TABLE, ROW_2, COLUMN_1, VALUE_3);
+ checkValueXML(TABLE, ROW_2, COLUMN_2, VALUE_4);
+
+ response = deleteRow(TABLE, ROW_1);
+ assertEquals(200, response.getCode());
+ response = deleteRow(TABLE, ROW_2);
+ assertEquals(200, response.getCode());
+ }
+
+ @Test
+ public void testMultiCellGetPutPB() throws IOException {
+ String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row
+
+ CellSetModel cellSetModel = new CellSetModel();
+ RowModel rowModel = new RowModel(ROW_1);
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
+ Bytes.toBytes(VALUE_1)));
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
+ Bytes.toBytes(VALUE_2)));
+ cellSetModel.addRow(rowModel);
+ rowModel = new RowModel(ROW_2);
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
+ Bytes.toBytes(VALUE_3)));
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
+ Bytes.toBytes(VALUE_4)));
+ cellSetModel.addRow(rowModel);
+ Response response = client.put(path, Constants.MIMETYPE_PROTOBUF,
+ cellSetModel.createProtobufOutput());
+ assertEquals(200, response.getCode());
+ Thread.yield();
+
+ // make sure the fake row was not actually created
+ response = client.get(path, Constants.MIMETYPE_PROTOBUF);
+ assertEquals(404, response.getCode());
+
+ // check that all of the values were created
+ checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ checkValuePB(TABLE, ROW_1, COLUMN_2, VALUE_2);
+ checkValuePB(TABLE, ROW_2, COLUMN_1, VALUE_3);
+ checkValuePB(TABLE, ROW_2, COLUMN_2, VALUE_4);
+
+ response = deleteRow(TABLE, ROW_1);
+ assertEquals(200, response.getCode());
+ response = deleteRow(TABLE, ROW_2);
+ assertEquals(200, response.getCode());
+ }
+
+ @Test
+ public void testStartEndRowGetPutXML() throws IOException, JAXBException {
+ String[] rows = { ROW_1, ROW_2, ROW_3 };
+ String[] values = { VALUE_1, VALUE_2, VALUE_3 };
+ Response response = null;
+ for (int i = 0; i < rows.length; i++) {
+ response = putValueXML(TABLE, rows[i], COLUMN_1, values[i]);
+ assertEquals(200, response.getCode());
+ checkValueXML(TABLE, rows[i], COLUMN_1, values[i]);
+ }
+ response = getValueXML(TABLE, rows[0], rows[2], COLUMN_1);
+ assertEquals(200, response.getCode());
+ CellSetModel cellSet = (CellSetModel)
+ xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
+ assertEquals(2, cellSet.getRows().size());
+ for (int i = 0; i < cellSet.getRows().size(); i++) {
+ RowModel rowModel = cellSet.getRows().get(i);
+ for (CellModel cell: rowModel.getCells()) {
+ assertEquals(COLUMN_1, Bytes.toString(cell.getColumn()));
+ assertEquals(values[i], Bytes.toString(cell.getValue()));
+ }
+ }
+ for (String row : rows) {
+ response = deleteRow(TABLE, row);
+ assertEquals(200, response.getCode());
+ }
+ }
+
+ @Test
+ public void testInvalidCheckParam() throws IOException, JAXBException {
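+ // The check= query parameter selects a check-and-mutate mode; an
+ // unrecognized value should be rejected with 400 Bad Request.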
+ CellSetModel cellSetModel = new CellSetModel();
+ RowModel rowModel = new RowModel(ROW_1);
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
+ Bytes.toBytes(VALUE_1)));
+ cellSetModel.addRow(rowModel);
+ StringWriter writer = new StringWriter();
+ xmlMarshaller.marshal(cellSetModel, writer);
+
+ final String path = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1 + "?check=blah";
+
+ Response response = client.put(path, Constants.MIMETYPE_XML,
+ Bytes.toBytes(writer.toString()));
+ assertEquals(400, response.getCode());
+ }
+
+ @Test
+ public void testInvalidColumnPut() throws IOException, JAXBException {
+ String dummyColumn = "doesnot:exist";
+ CellSetModel cellSetModel = new CellSetModel();
+ RowModel rowModel = new RowModel(ROW_1);
+ rowModel.addCell(new CellModel(Bytes.toBytes(dummyColumn),
+ Bytes.toBytes(VALUE_1)));
+ cellSetModel.addRow(rowModel);
+ StringWriter writer = new StringWriter();
+ xmlMarshaller.marshal(cellSetModel, writer);
+
+ final String path = "/" + TABLE + "/" + ROW_1 + "/" + dummyColumn;
+
+ Response response = client.put(path, Constants.MIMETYPE_XML,
+ Bytes.toBytes(writer.toString()));
+ assertEquals(404, response.getCode());
+ }
+
+ @Test
+ public void testMultiCellGetJson() throws IOException, JAXBException {
+ String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row
+
+ CellSetModel cellSetModel = new CellSetModel();
+ RowModel rowModel = new RowModel(ROW_1);
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
+ Bytes.toBytes(VALUE_1)));
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
+ Bytes.toBytes(VALUE_2)));
+ cellSetModel.addRow(rowModel);
+ rowModel = new RowModel(ROW_2);
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
+ Bytes.toBytes(VALUE_3)));
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
+ Bytes.toBytes(VALUE_4)));
+ cellSetModel.addRow(rowModel);
+ String jsonString = jsonMapper.writeValueAsString(cellSetModel);
+
+ Response response = client.put(path, Constants.MIMETYPE_JSON,
+ Bytes.toBytes(jsonString));
+ assertEquals(200, response.getCode());
+ Thread.yield();
+
+ // make sure the fake row was not actually created
+ response = client.get(path, Constants.MIMETYPE_JSON);
+ assertEquals(404, response.getCode());
+
+ // check that all of the values were created
+ checkValueJSON(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ checkValueJSON(TABLE, ROW_1, COLUMN_2, VALUE_2);
+ checkValueJSON(TABLE, ROW_2, COLUMN_1, VALUE_3);
+ checkValueJSON(TABLE, ROW_2, COLUMN_2, VALUE_4);
+
+ response = deleteRow(TABLE, ROW_1);
+ assertEquals(200, response.getCode());
+ response = deleteRow(TABLE, ROW_2);
+ assertEquals(200, response.getCode());
+ }
+
+ @Test
+ public void testMetrics() throws IOException, JAXBException {
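+ // Drive one put, one get and one delete, then verify the REST servlet's
+ // request and per-operation success counters have advanced.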
+ final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1;
+ Response response = client.put(path, Constants.MIMETYPE_BINARY,
+ Bytes.toBytes(VALUE_4));
+ assertEquals(200, response.getCode());
+ Thread.yield();
+ response = client.get(path, Constants.MIMETYPE_JSON);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
+ response = deleteRow(TABLE, ROW_4);
+ assertEquals(200, response.getCode());
+
+ UserProvider userProvider = UserProvider.instantiate(conf);
+ METRICS_ASSERT.assertCounterGt("requests", 2l,
+ RESTServlet.getInstance(conf, userProvider).getMetrics().getSource());
+
+ METRICS_ASSERT.assertCounterGt("successfulGet", 0l,
+ RESTServlet.getInstance(conf, userProvider).getMetrics().getSource());
+
+ METRICS_ASSERT.assertCounterGt("successfulPut", 0l,
+ RESTServlet.getInstance(conf, userProvider).getMetrics().getSource());
+
+ METRICS_ASSERT.assertCounterGt("successfulDelete", 0l,
+ RESTServlet.getInstance(conf, userProvider).getMetrics().getSource());
+ }
+
+ @Test
+ public void testMultiColumnGetXML() throws Exception {
+ String path = "/" + TABLE + "/fakerow";
+ CellSetModel cellSetModel = new CellSetModel();
+ RowModel rowModel = new RowModel(ROW_1);
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1)));
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2)));
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_3), Bytes.toBytes(VALUE_2)));
+ cellSetModel.addRow(rowModel);
+ StringWriter writer = new StringWriter();
+ xmlMarshaller.marshal(cellSetModel, writer);
+
+ Response response = client.put(path, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString()));
+ assertEquals(200, response.getCode());
+ Thread.yield();
+
+ // make sure the fake row was not actually created
+ response = client.get(path, Constants.MIMETYPE_XML);
+ assertEquals(404, response.getCode());
+
+ // Try getting all the column values at once.
+ path = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1 + "," + COLUMN_2 + "," + COLUMN_3;
+ response = client.get(path, Constants.MIMETYPE_XML);
+ assertEquals(200, response.getCode());
+ CellSetModel cellSet = (CellSetModel)
+ xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
+ assertEquals(1, cellSet.getRows().size());
+ assertEquals(3, cellSet.getRows().get(0).getCells().size());
+ List<CellModel> cells = cellSet.getRows().get(0).getCells();
+
+ assertTrue(containsCellModel(cells, COLUMN_1, VALUE_1));
+ assertTrue(containsCellModel(cells, COLUMN_2, VALUE_2));
+ assertTrue(containsCellModel(cells, COLUMN_3, VALUE_2));
+ response = deleteRow(TABLE, ROW_1);
+ assertEquals(200, response.getCode());
+ }
+
+ private boolean containsCellModel(List<CellModel> cells, String column, String value) {
+ for (CellModel cell : cells) {
+ if (Bytes.toString(cell.getColumn()).equals(column)
+ && Bytes.toString(cell.getValue()).equals(value)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @Test
+ public void testSuffixGlobbingXMLWithNewScanner() throws IOException, JAXBException {
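+ // Both ROW_1 and ROW_2 share the "testrow" prefix, so a GET for
+ // "testrow*" should return the cells of both rows.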
+ String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row
+
+ CellSetModel cellSetModel = new CellSetModel();
+ RowModel rowModel = new RowModel(ROW_1);
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
+ Bytes.toBytes(VALUE_1)));
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
+ Bytes.toBytes(VALUE_2)));
+ cellSetModel.addRow(rowModel);
+ rowModel = new RowModel(ROW_2);
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
+ Bytes.toBytes(VALUE_3)));
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
+ Bytes.toBytes(VALUE_4)));
+ cellSetModel.addRow(rowModel);
+ StringWriter writer = new StringWriter();
+ xmlMarshaller.marshal(cellSetModel, writer);
+ Response response = client.put(path, Constants.MIMETYPE_XML,
+ Bytes.toBytes(writer.toString()));
+ assertEquals(200, response.getCode());
+ Thread.yield();
+
+ // make sure the fake row was not actually created
+ response = client.get(path, Constants.MIMETYPE_XML);
+ assertEquals(404, response.getCode());
+
+ // fetch both rows at once with a suffix glob on the row key
+ StringBuilder query = new StringBuilder();
+ query.append('/');
+ query.append(TABLE);
+ query.append('/');
+ query.append("testrow*");
+ response = client.get(query.toString(), Constants.MIMETYPE_XML);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
+ CellSetModel cellSet = (CellSetModel)
+ xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
+ assertEquals(2, cellSet.getRows().size());
+
+ response = deleteRow(TABLE, ROW_1);
+ assertEquals(200, response.getCode());
+ response = deleteRow(TABLE, ROW_2);
+ assertEquals(200, response.getCode());
+ }
+
+ @Test
+ public void testSuffixGlobbingXML() throws IOException, JAXBException {
+ String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row
+
+ CellSetModel cellSetModel = new CellSetModel();
+ RowModel rowModel = new RowModel(ROW_1);
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
+ Bytes.toBytes(VALUE_1)));
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
+ Bytes.toBytes(VALUE_2)));
+ cellSetModel.addRow(rowModel);
+ rowModel = new RowModel(ROW_2);
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
+ Bytes.toBytes(VALUE_3)));
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
+ Bytes.toBytes(VALUE_4)));
+ cellSetModel.addRow(rowModel);
+ StringWriter writer = new StringWriter();
+ xmlMarshaller.marshal(cellSetModel, writer);
+ Response response = client.put(path, Constants.MIMETYPE_XML,
+ Bytes.toBytes(writer.toString()));
+ assertEquals(200, response.getCode());
+ Thread.yield();
+
+ // make sure the fake row was not actually created
+ response = client.get(path, Constants.MIMETYPE_XML);
+ assertEquals(404, response.getCode());
+
+ // fetch COLUMN_1 of both rows at once with a suffix glob on the row key
+ StringBuilder query = new StringBuilder();
+ query.append('/');
+ query.append(TABLE);
+ query.append('/');
+ query.append("testrow*");
+ query.append('/');
+ query.append(COLUMN_1);
+ response = client.get(query.toString(), Constants.MIMETYPE_XML);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
+ CellSetModel cellSet = (CellSetModel)
+ xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
+ List<RowModel> rows = cellSet.getRows();
+ assertEquals(2, rows.size());
+ for (RowModel row : rows) {
+ assertEquals(1, row.getCells().size());
+ assertEquals(COLUMN_1, Bytes.toString(row.getCells().get(0).getColumn()));
+ }
+ response = deleteRow(TABLE, ROW_1);
+ assertEquals(200, response.getCode());
+ response = deleteRow(TABLE, ROW_2);
+ assertEquals(200, response.getCode());
+ }
+}
+
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java
new file mode 100644
index 0000000..23da0ec
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java
@@ -0,0 +1,161 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.util.zip.GZIPInputStream;
+import java.util.zip.GZIPOutputStream;
+
+import org.apache.commons.httpclient.Header;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.rest.client.Client;
+import org.apache.hadoop.hbase.rest.client.Cluster;
+import org.apache.hadoop.hbase.rest.client.Response;
+import org.apache.hadoop.hbase.rest.filter.GzipFilter;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestGzipFilter {
+ private static final String TABLE = "TestGzipFilter";
+ private static final String CFA = "a";
+ private static final String COLUMN_1 = CFA + ":1";
+ private static final String COLUMN_2 = CFA + ":2";
+ private static final String ROW_1 = "testrow1";
+ private static final byte[] VALUE_1 = Bytes.toBytes("testvalue1");
+
+ private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private static final HBaseRESTTestingUtility REST_TEST_UTIL =
+ new HBaseRESTTestingUtility();
+ private static Client client;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ TEST_UTIL.startMiniCluster();
+ REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
+ client = new Client(new Cluster().add("localhost",
+ REST_TEST_UTIL.getServletPort()));
+ HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+ if (admin.tableExists(TABLE)) {
+ return;
+ }
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
+ htd.addFamily(new HColumnDescriptor(CFA));
+ admin.createTable(htd);
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ REST_TEST_UTIL.shutdownServletContainer();
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ @Test
+ public void testGzipFilter() throws Exception {
+ String path = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1;
+
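+ // gzip VALUE_1 up front; the same compressed payload exercises the
+ // request-decompression side of the filter below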
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ GZIPOutputStream os = new GZIPOutputStream(bos);
+ os.write(VALUE_1);
+ os.close();
+ byte[] value_1_gzip = bos.toByteArray();
+
+ // input side filter
+
+ Header[] headers = new Header[2];
+ headers[0] = new Header("Content-Type", Constants.MIMETYPE_BINARY);
+ headers[1] = new Header("Content-Encoding", "gzip");
+ Response response = client.put(path, headers, value_1_gzip);
+ assertEquals(200, response.getCode());
+
+ HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
+ Get get = new Get(Bytes.toBytes(ROW_1));
+ get.addColumn(Bytes.toBytes(CFA), Bytes.toBytes("1"));
+ Result result = table.get(get);
+ byte[] value = result.getValue(Bytes.toBytes(CFA), Bytes.toBytes("1"));
+ assertNotNull(value);
+ assertTrue(Bytes.equals(value, VALUE_1));
+
+ // output side filter
+
+ headers[0] = new Header("Accept", Constants.MIMETYPE_BINARY);
+ headers[1] = new Header("Accept-Encoding", "gzip");
+ response = client.get(path, headers);
+ assertEquals(200, response.getCode());
+ ByteArrayInputStream bis = new ByteArrayInputStream(response.getBody());
+ GZIPInputStream is = new GZIPInputStream(bis);
+ value = new byte[VALUE_1.length];
+ // GZIPInputStream.read() may return fewer bytes than requested, so loop
+ int read = 0, n;
+ while (read < VALUE_1.length
+ && (n = is.read(value, read, VALUE_1.length - read)) > 0) {
+ read += n;
+ }
+ assertEquals(VALUE_1.length, read);
+ assertTrue(Bytes.equals(value, VALUE_1));
+ is.close();
+ table.close();
+
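+ // run the scanner checks from here so they see the row written above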
+ testScannerResultCodes();
+ }
+
+ @Test
+ public void testErrorNotGzipped() throws Exception {
+ Header[] headers = new Header[2];
+ headers[0] = new Header("Accept", Constants.MIMETYPE_BINARY);
+ headers[1] = new Header("Accept-Encoding", "gzip");
+ Response response = client.get("/" + TABLE + "/" + ROW_1 + "/" + COLUMN_2, headers);
+ assertEquals(404, response.getCode());
+ String contentEncoding = response.getHeader("Content-Encoding");
+ assertTrue(contentEncoding == null || !contentEncoding.contains("gzip"));
+ response = client.get("/" + TABLE, headers);
+ assertEquals(405, response.getCode());
+ contentEncoding = response.getHeader("Content-Encoding");
+ assertTrue(contentEncoding == null || !contentEncoding.contains("gzip"));
+ }
+
+ void testScannerResultCodes() throws Exception {
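+ // not annotated @Test: invoked from testGzipFilter once data exists.
+ // Expect 201 on scanner creation, 200 while rows remain, 204 when done.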
+ Header[] headers = new Header[3];
+ headers[0] = new Header("Content-Type", Constants.MIMETYPE_XML);
+ headers[1] = new Header("Accept", Constants.MIMETYPE_JSON);
+ headers[2] = new Header("Accept-Encoding", "gzip");
+ Response response = client.post("/" + TABLE + "/scanner", headers,
+ "<Scanner/>".getBytes());
+ assertEquals(response.getCode(), 201);
+ String scannerUrl = response.getLocation();
+ assertNotNull(scannerUrl);
+ response = client.get(scannerUrl);
+ assertEquals(response.getCode(), 200);
+ response = client.get(scannerUrl);
+ assertEquals(response.getCode(), 204);
+ }
+
+}
+
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java
new file mode 100644
index 0000000..0c999b8
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java
@@ -0,0 +1,181 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.rest.client.Client;
+import org.apache.hadoop.hbase.rest.client.Cluster;
+import org.apache.hadoop.hbase.rest.client.Response;
+import org.apache.hadoop.hbase.rest.model.CellModel;
+import org.apache.hadoop.hbase.rest.model.CellSetModel;
+import org.apache.hadoop.hbase.rest.model.RowModel;
+import org.apache.hadoop.hbase.rest.provider.JacksonProvider;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import javax.ws.rs.core.MediaType;
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.Marshaller;
+import javax.xml.bind.Unmarshaller;
+import java.io.IOException;
+
+import static org.junit.Assert.assertEquals;
+
+
+@Category(MediumTests.class)
+public class TestMultiRowResource {
+
+ private static final String TABLE = "TestRowResource";
+ private static final String CFA = "a";
+ private static final String CFB = "b";
+ private static final String COLUMN_1 = CFA + ":1";
+ private static final String COLUMN_2 = CFB + ":2";
+ private static final String ROW_1 = "testrow5";
+ private static final String VALUE_1 = "testvalue5";
+ private static final String ROW_2 = "testrow6";
+ private static final String VALUE_2 = "testvalue6";
+
+
+ private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility();
+
+ private static Client client;
+ private static JAXBContext context;
+ private static Marshaller marshaller;
+ private static Unmarshaller unmarshaller;
+ private static Configuration conf;
+
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ conf = TEST_UTIL.getConfiguration();
+ TEST_UTIL.startMiniCluster();
+ REST_TEST_UTIL.startServletContainer(conf);
+ context = JAXBContext.newInstance(
+ CellModel.class,
+ CellSetModel.class,
+ RowModel.class);
+ marshaller = context.createMarshaller();
+ unmarshaller = context.createUnmarshaller();
+ client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
+ HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+ if (admin.tableExists(TABLE)) {
+ return;
+ }
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
+ htd.addFamily(new HColumnDescriptor(CFA));
+ htd.addFamily(new HColumnDescriptor(CFB));
+ admin.createTable(htd);
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ REST_TEST_UTIL.shutdownServletContainer();
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+
+ @Test
+ public void testMultiCellGetJSON() throws IOException, JAXBException {
+ String row_5_url = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1;
+ String row_6_url = "/" + TABLE + "/" + ROW_2 + "/" + COLUMN_2;
+
+
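+ // /<table>/multiget?row=<r1>&row=<r2> fetches several rows in one request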
+ StringBuilder path = new StringBuilder();
+ path.append("/");
+ path.append(TABLE);
+ path.append("/multiget/?row=");
+ path.append(ROW_1);
+ path.append("&row=");
+ path.append(ROW_2);
+
+ client.post(row_5_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_1));
+ client.post(row_6_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_2));
+
+
+ Response response = client.get(path.toString(), Constants.MIMETYPE_JSON);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
+
+ client.delete(row_5_url);
+ client.delete(row_6_url);
+
+ }
+
+ @Test
+ public void testMultiCellGetXML() throws IOException, JAXBException {
+ String row_5_url = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1;
+ String row_6_url = "/" + TABLE + "/" + ROW_2 + "/" + COLUMN_2;
+
+
+ StringBuilder path = new StringBuilder();
+ path.append("/");
+ path.append(TABLE);
+ path.append("/multiget/?row=");
+ path.append(ROW_1);
+ path.append("&row=");
+ path.append(ROW_2);
+
+ client.post(row_5_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_1));
+ client.post(row_6_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_2));
+
+
+ Response response = client.get(path.toString(), Constants.MIMETYPE_XML);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
+
+ client.delete(row_5_url);
+ client.delete(row_6_url);
+
+ }
+
+ @Test
+ public void testMultiCellGetJSONNotFound() throws IOException, JAXBException {
+ String row_5_url = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1;
+
+ StringBuilder path = new StringBuilder();
+ path.append("/");
+ path.append(TABLE);
+ path.append("/multiget/?row=");
+ path.append(ROW_1);
+ path.append("&row=");
+ path.append(ROW_2);
+
+ client.post(row_5_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_1));
+ Response response = client.get(path.toString(), Constants.MIMETYPE_JSON);
+ assertEquals(200, response.getCode());
+ ObjectMapper mapper = new JacksonProvider().locateMapper(CellSetModel.class,
+ MediaType.APPLICATION_JSON_TYPE);
+ CellSetModel cellSet = mapper.readValue(response.getBody(), CellSetModel.class);
+ assertEquals(1, cellSet.getRows().size());
+ assertEquals(ROW_1, Bytes.toString(cellSet.getRows().get(0).getKey()));
+ assertEquals(VALUE_1, Bytes.toString(cellSet.getRows().get(0).getCells().get(0).getValue()));
+ client.delete(row_5_url);
+ }
+
+}
+
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestResourceFilter.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestResourceFilter.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestResourceFilter.java
new file mode 100644
index 0000000..70d425c
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestResourceFilter.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest;
+
+import static org.junit.Assert.assertEquals;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.rest.client.Client;
+import org.apache.hadoop.hbase.rest.client.Cluster;
+import org.apache.hadoop.hbase.rest.client.Response;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestResourceFilter {
+
+ private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private static final HBaseRESTTestingUtility REST_TEST_UTIL =
+ new HBaseRESTTestingUtility();
+ private static Client client;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ TEST_UTIL.getConfiguration().set(Constants.FILTER_CLASSES, DummyFilter.class.getName());
+ TEST_UTIL.startMiniCluster();
+ REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
+ client = new Client(new Cluster().add("localhost",
+ REST_TEST_UTIL.getServletPort()));
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ REST_TEST_UTIL.shutdownServletContainer();
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ @Test
+ public void testFilter() throws Exception {
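+ // DummyFilter is installed via Constants.FILTER_CLASSES above; it is
+ // expected to reject this request, so even a valid path returns 404.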
+ String path = "/status/cluster";
+ Response response = client.get(path);
+ assertEquals(404, response.getCode());
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java
new file mode 100644
index 0000000..84aa994
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java
@@ -0,0 +1,354 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.StringWriter;
+import java.util.Iterator;
+import java.util.Random;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.Marshaller;
+import javax.xml.bind.Unmarshaller;
+
+import org.apache.commons.httpclient.Header;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.rest.client.Client;
+import org.apache.hadoop.hbase.rest.client.Cluster;
+import org.apache.hadoop.hbase.rest.client.Response;
+import org.apache.hadoop.hbase.rest.model.CellModel;
+import org.apache.hadoop.hbase.rest.model.CellSetModel;
+import org.apache.hadoop.hbase.rest.model.RowModel;
+import org.apache.hadoop.hbase.rest.model.ScannerModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import static org.junit.Assert.*;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestScannerResource {
+ private static final String TABLE = "TestScannerResource";
+ private static final String NONEXISTENT_TABLE = "ThisTableDoesNotExist";
+ private static final String CFA = "a";
+ private static final String CFB = "b";
+ private static final String COLUMN_1 = CFA + ":1";
+ private static final String COLUMN_2 = CFB + ":2";
+
+ private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private static final HBaseRESTTestingUtility REST_TEST_UTIL =
+ new HBaseRESTTestingUtility();
+ private static Client client;
+ private static JAXBContext context;
+ private static Marshaller marshaller;
+ private static Unmarshaller unmarshaller;
+ private static int expectedRows1;
+ private static int expectedRows2;
+ private static Configuration conf;
+
+ static int insertData(Configuration conf, String tableName, String column, double prob)
+ throws IOException {
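+ // Write rows keyed 'aaa'..'yyy', each included with probability prob;
+ // every cell's value is its own row key. Returns the number of rows written.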
+ Random rng = new Random();
+ int count = 0;
+ HTable table = new HTable(conf, tableName);
+ byte[] k = new byte[3];
+ byte [][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(column));
+ for (byte b1 = 'a'; b1 < 'z'; b1++) {
+ for (byte b2 = 'a'; b2 < 'z'; b2++) {
+ for (byte b3 = 'a'; b3 < 'z'; b3++) {
+ if (rng.nextDouble() < prob) {
+ k[0] = b1;
+ k[1] = b2;
+ k[2] = b3;
+ Put put = new Put(k);
+ put.setDurability(Durability.SKIP_WAL);
+ put.add(famAndQf[0], famAndQf[1], k);
+ table.put(put);
+ count++;
+ }
+ }
+ }
+ }
+ table.flushCommits();
+ table.close();
+ return count;
+ }
+
+ static int countCellSet(CellSetModel model) {
+ int count = 0;
+ Iterator<RowModel> rows = model.getRows().iterator();
+ while (rows.hasNext()) {
+ RowModel row = rows.next();
+ Iterator<CellModel> cells = row.getCells().iterator();
+ while (cells.hasNext()) {
+ cells.next();
+ count++;
+ }
+ }
+ return count;
+ }
+
+ private static int fullTableScan(ScannerModel model) throws IOException {
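+ // create a scanner, drain it in batches of 100 cells until the server
+ // answers 204, then delete the scanner; returns the total cell count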
+ model.setBatch(100);
+ Response response = client.put("/" + TABLE + "/scanner",
+ Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
+ assertEquals(201, response.getCode());
+ String scannerURI = response.getLocation();
+ assertNotNull(scannerURI);
+ int count = 0;
+ while (true) {
+ response = client.get(scannerURI, Constants.MIMETYPE_PROTOBUF);
+ assertTrue(response.getCode() == 200 || response.getCode() == 204);
+ if (response.getCode() == 200) {
+ assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
+ CellSetModel cellSet = new CellSetModel();
+ cellSet.getObjectFromMessage(response.getBody());
+ // reuse countCellSet() instead of duplicating the counting loops
+ count += countCellSet(cellSet);
+ } else {
+ break;
+ }
+ }
+ // delete the scanner
+ response = client.delete(scannerURI);
+ assertEquals(200, response.getCode());
+ return count;
+ }
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ conf = TEST_UTIL.getConfiguration();
+ TEST_UTIL.startMiniCluster();
+ REST_TEST_UTIL.startServletContainer(conf);
+ client = new Client(new Cluster().add("localhost",
+ REST_TEST_UTIL.getServletPort()));
+ context = JAXBContext.newInstance(
+ CellModel.class,
+ CellSetModel.class,
+ RowModel.class,
+ ScannerModel.class);
+ marshaller = context.createMarshaller();
+ unmarshaller = context.createUnmarshaller();
+ HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+ if (admin.tableExists(TABLE)) {
+ return;
+ }
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
+ htd.addFamily(new HColumnDescriptor(CFA));
+ htd.addFamily(new HColumnDescriptor(CFB));
+ admin.createTable(htd);
+ expectedRows1 = insertData(TEST_UTIL.getConfiguration(), TABLE, COLUMN_1, 1.0);
+ expectedRows2 = insertData(TEST_UTIL.getConfiguration(), TABLE, COLUMN_2, 0.5);
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ REST_TEST_UTIL.shutdownServletContainer();
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ @Test
+ public void testSimpleScannerXML() throws IOException, JAXBException {
+ final int BATCH_SIZE = 5;
+ // new scanner
+ ScannerModel model = new ScannerModel();
+ model.setBatch(BATCH_SIZE);
+ model.addColumn(Bytes.toBytes(COLUMN_1));
+ StringWriter writer = new StringWriter();
+ marshaller.marshal(model, writer);
+ byte[] body = Bytes.toBytes(writer.toString());
+
+ // test put operation is forbidden in read-only mode
+ conf.set("hbase.rest.readonly", "true");
+ Response response = client.put("/" + TABLE + "/scanner",
+ Constants.MIMETYPE_XML, body);
+ assertEquals(403, response.getCode());
+ String scannerURI = response.getLocation();
+ assertNull(scannerURI);
+
+ // recall previous put operation with read-only off
+ conf.set("hbase.rest.readonly", "false");
+ response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML,
+ body);
+ assertEquals(201, response.getCode());
+ scannerURI = response.getLocation();
+ assertNotNull(scannerURI);
+
+ // get a cell set
+ response = client.get(scannerURI, Constants.MIMETYPE_XML);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
+ CellSetModel cellSet = (CellSetModel)
+ unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
+ // confirm batch size conformance
+ assertEquals(BATCH_SIZE, countCellSet(cellSet));
+
+ // test delete scanner operation is forbidden in read-only mode
+ conf.set("hbase.rest.readonly", "true");
+ response = client.delete(scannerURI);
+ assertEquals(403, response.getCode());
+
+ // recall previous delete scanner operation with read-only off
+ conf.set("hbase.rest.readonly", "false");
+ response = client.delete(scannerURI);
+ assertEquals(200, response.getCode());
+ }
+
+ @Test
+ public void testSimpleScannerPB() throws IOException {
+ final int BATCH_SIZE = 10;
+ // new scanner
+ ScannerModel model = new ScannerModel();
+ model.setBatch(BATCH_SIZE);
+ model.addColumn(Bytes.toBytes(COLUMN_1));
+
+ // test put operation is forbidden in read-only mode
+ conf.set("hbase.rest.readonly", "true");
+ Response response = client.put("/" + TABLE + "/scanner",
+ Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
+ assertEquals(403, response.getCode());
+ String scannerURI = response.getLocation();
+ assertNull(scannerURI);
+
+ // recall previous put operation with read-only off
+ conf.set("hbase.rest.readonly", "false");
+ response = client.put("/" + TABLE + "/scanner",
+ Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
+ assertEquals(201, response.getCode());
+ scannerURI = response.getLocation();
+ assertNotNull(scannerURI);
+
+ // get a cell set
+ response = client.get(scannerURI, Constants.MIMETYPE_PROTOBUF);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
+ CellSetModel cellSet = new CellSetModel();
+ cellSet.getObjectFromMessage(response.getBody());
+ // confirm batch size conformance
+ assertEquals(BATCH_SIZE, countCellSet(cellSet));
+
+ // test delete scanner operation is forbidden in read-only mode
+ conf.set("hbase.rest.readonly", "true");
+ response = client.delete(scannerURI);
+ assertEquals(403, response.getCode());
+
+ // recall previous delete scanner operation with read-only off
+ conf.set("hbase.rest.readonly", "false");
+ response = client.delete(scannerURI);
+ assertEquals(200, response.getCode());
+ }
+
+ @Test
+ public void testSimpleScannerBinary() throws IOException {
+ // new scanner
+ ScannerModel model = new ScannerModel();
+ model.setBatch(1);
+ model.addColumn(Bytes.toBytes(COLUMN_1));
+
+ // test put operation is forbidden in read-only mode
+ conf.set("hbase.rest.readonly", "true");
+ Response response = client.put("/" + TABLE + "/scanner",
+ Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
+ assertEquals(403, response.getCode());
+ String scannerURI = response.getLocation();
+ assertNull(scannerURI);
+
+ // recall previous put operation with read-only off
+ conf.set("hbase.rest.readonly", "false");
+ response = client.put("/" + TABLE + "/scanner",
+ Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
+ assertEquals(201, response.getCode());
+ scannerURI = response.getLocation();
+ assertNotNull(scannerURI);
+
+ // get a cell
+ response = client.get(scannerURI, Constants.MIMETYPE_BINARY);
+ assertEquals(200, response.getCode());
+ assertEquals(Constants.MIMETYPE_BINARY, response.getHeader("content-type"));
+ // verify that data was returned
+ assertTrue(response.getBody().length > 0);
+ // verify that the expected X-headers are present
+ boolean foundRowHeader = false, foundColumnHeader = false,
+ foundTimestampHeader = false;
+ for (Header header: response.getHeaders()) {
+ if (header.getName().equals("X-Row")) {
+ foundRowHeader = true;
+ } else if (header.getName().equals("X-Column")) {
+ foundColumnHeader = true;
+ } else if (header.getName().equals("X-Timestamp")) {
+ foundTimestampHeader = true;
+ }
+ }
+ assertTrue(foundRowHeader);
+ assertTrue(foundColumnHeader);
+ assertTrue(foundTimestampHeader);
+
+ // test delete scanner operation is forbidden in read-only mode
+ conf.set("hbase.rest.readonly", "true");
+ response = client.delete(scannerURI);
+ assertEquals(403, response.getCode());
+
+ // recall previous delete scanner operation with read-only off
+ conf.set("hbase.rest.readonly", "false");
+ response = client.delete(scannerURI);
+ assertEquals(200, response.getCode());
+ }
+
+ @Test
+ public void testFullTableScan() throws IOException {
+ ScannerModel model = new ScannerModel();
+ model.addColumn(Bytes.toBytes(COLUMN_1));
+ assertEquals(expectedRows1, fullTableScan(model));
+
+ model = new ScannerModel();
+ model.addColumn(Bytes.toBytes(COLUMN_2));
+ assertEquals(expectedRows2, fullTableScan(model));
+ }
+
+ @Test
+ public void testTableDoesNotExist() throws IOException, JAXBException {
+ ScannerModel model = new ScannerModel();
+ StringWriter writer = new StringWriter();
+ marshaller.marshal(model, writer);
+ byte[] body = Bytes.toBytes(writer.toString());
+ Response response = client.put("/" + NONEXISTENT_TABLE +
+ "/scanner", Constants.MIMETYPE_XML, body);
+ assertEquals(404, response.getCode());
+ }
+
+}
+
[26/38] HBASE-12197 Move rest to it's on module
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableSchemaMessage.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableSchemaMessage.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableSchemaMessage.java
new file mode 100644
index 0000000..88f9cd3
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableSchemaMessage.java
@@ -0,0 +1,2125 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: TableSchemaMessage.proto
+
+package org.apache.hadoop.hbase.rest.protobuf.generated;
+
+public final class TableSchemaMessage {
+ private TableSchemaMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface TableSchemaOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional string name = 1;
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ boolean hasName();
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ java.lang.String getName();
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ com.google.protobuf.ByteString
+ getNameBytes();
+
+ // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute>
+ getAttrsList();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute getAttrs(int index);
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ int getAttrsCount();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.AttributeOrBuilder>
+ getAttrsOrBuilderList();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.AttributeOrBuilder getAttrsOrBuilder(
+ int index);
+
+ // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema>
+ getColumnsList();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema getColumns(int index);
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ int getColumnsCount();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchemaOrBuilder>
+ getColumnsOrBuilderList();
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchemaOrBuilder getColumnsOrBuilder(
+ int index);
+
+ // optional bool inMemory = 4;
+ /**
+ * <code>optional bool inMemory = 4;</code>
+ *
+ * <pre>
+ * optional helpful encodings of commonly used attributes
+ * </pre>
+ */
+ boolean hasInMemory();
+ /**
+ * <code>optional bool inMemory = 4;</code>
+ *
+ * <pre>
+ * optional helpful encodings of commonly used attributes
+ * </pre>
+ */
+ boolean getInMemory();
+
+ // optional bool readOnly = 5;
+ /**
+ * <code>optional bool readOnly = 5;</code>
+ */
+ boolean hasReadOnly();
+ /**
+ * <code>optional bool readOnly = 5;</code>
+ */
+ boolean getReadOnly();
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema}
+ */
+ public static final class TableSchema extends
+ com.google.protobuf.GeneratedMessage
+ implements TableSchemaOrBuilder {
+ // Use TableSchema.newBuilder() to construct.
+ private TableSchema(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private TableSchema(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final TableSchema defaultInstance;
+ public static TableSchema getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TableSchema getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private TableSchema(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ name_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute>();
+ mutable_bitField0_ |= 0x00000002;
+ }
+ attrs_.add(input.readMessage(org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.PARSER, extensionRegistry));
+ break;
+ }
+ case 26: {
+ if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ columns_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema>();
+ mutable_bitField0_ |= 0x00000004;
+ }
+ columns_.add(input.readMessage(org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.PARSER, extensionRegistry));
+ break;
+ }
+ case 32: {
+ bitField0_ |= 0x00000002;
+ inMemory_ = input.readBool();
+ break;
+ }
+ case 40: {
+ bitField0_ |= 0x00000004;
+ readOnly_ = input.readBool();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ attrs_ = java.util.Collections.unmodifiableList(attrs_);
+ }
+ if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ columns_ = java.util.Collections.unmodifiableList(columns_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.class, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<TableSchema> PARSER =
+ new com.google.protobuf.AbstractParser<TableSchema>() {
+ public TableSchema parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new TableSchema(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<TableSchema> getParserForType() {
+ return PARSER;
+ }
+
+ public interface AttributeOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string name = 1;
+ /**
+ * <code>required string name = 1;</code>
+ */
+ boolean hasName();
+ /**
+ * <code>required string name = 1;</code>
+ */
+ java.lang.String getName();
+ /**
+ * <code>required string name = 1;</code>
+ */
+ com.google.protobuf.ByteString
+ getNameBytes();
+
+ // required string value = 2;
+ /**
+ * <code>required string value = 2;</code>
+ */
+ boolean hasValue();
+ /**
+ * <code>required string value = 2;</code>
+ */
+ java.lang.String getValue();
+ /**
+ * <code>required string value = 2;</code>
+ */
+ com.google.protobuf.ByteString
+ getValueBytes();
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute}
+ */
+ public static final class Attribute extends
+ com.google.protobuf.GeneratedMessage
+ implements AttributeOrBuilder {
+ // Use Attribute.newBuilder() to construct.
+ private Attribute(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Attribute(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Attribute defaultInstance;
+ public static Attribute getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Attribute getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Attribute(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ name_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ value_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_Attribute_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_Attribute_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.class, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Attribute> PARSER =
+ new com.google.protobuf.AbstractParser<Attribute>() {
+ public Attribute parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Attribute(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Attribute> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private java.lang.Object name_;
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public java.lang.String getName() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ name_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required string value = 2;
+ public static final int VALUE_FIELD_NUMBER = 2;
+ private java.lang.Object value_;
+ /**
+ * <code>required string value = 2;</code>
+ */
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required string value = 2;</code>
+ */
+ public java.lang.String getValue() {
+ java.lang.Object ref = value_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ value_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>required string value = 2;</code>
+ */
+ public com.google.protobuf.ByteString
+ getValueBytes() {
+ java.lang.Object ref = value_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ value_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private void initFields() {
+ name_ = "";
+ value_ = "";
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasValue()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getValueBytes());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getValueBytes());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.AttributeOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_Attribute_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_Attribute_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.class, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ name_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ value_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_Attribute_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute build() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute buildPartial() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute result = new org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.name_ = name_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.value_ = value_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute) {
+ return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute other) {
+ if (other == org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ bitField0_ |= 0x00000001;
+ name_ = other.name_;
+ onChanged();
+ }
+ if (other.hasValue()) {
+ bitField0_ |= 0x00000002;
+ value_ = other.value_;
+ onChanged();
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasName()) {
+
+ return false;
+ }
+ if (!hasValue()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required string name = 1;
+ private java.lang.Object name_ = "";
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public java.lang.String getName() {
+ java.lang.Object ref = name_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ name_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public Builder setName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public Builder clearName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ name_ = getDefaultInstance().getName();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string name = 1;</code>
+ */
+ public Builder setNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required string value = 2;
+ private java.lang.Object value_ = "";
+ /**
+ * <code>required string value = 2;</code>
+ */
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required string value = 2;</code>
+ */
+ public java.lang.String getValue() {
+ java.lang.Object ref = value_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ value_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>required string value = 2;</code>
+ */
+ public com.google.protobuf.ByteString
+ getValueBytes() {
+ java.lang.Object ref = value_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ value_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>required string value = 2;</code>
+ */
+ public Builder setValue(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ value_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string value = 2;</code>
+ */
+ public Builder clearValue() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ value_ = getDefaultInstance().getValue();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string value = 2;</code>
+ */
+ public Builder setValueBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ value_ = value;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute)
+ }
+
+ static {
+ defaultInstance = new Attribute(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute)
+ }
+
+ private int bitField0_;
+ // optional string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private java.lang.Object name_;
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public java.lang.String getName() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ name_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
+ public static final int ATTRS_FIELD_NUMBER = 2;
+ private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> attrs_;
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> getAttrsList() {
+ return attrs_;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.AttributeOrBuilder>
+ getAttrsOrBuilderList() {
+ return attrs_;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ public int getAttrsCount() {
+ return attrs_.size();
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute getAttrs(int index) {
+ return attrs_.get(index);
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.AttributeOrBuilder getAttrsOrBuilder(
+ int index) {
+ return attrs_.get(index);
+ }
+
+ // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
+ public static final int COLUMNS_FIELD_NUMBER = 3;
+ private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema> columns_;
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema> getColumnsList() {
+ return columns_;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchemaOrBuilder>
+ getColumnsOrBuilderList() {
+ return columns_;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ public int getColumnsCount() {
+ return columns_.size();
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema getColumns(int index) {
+ return columns_.get(index);
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchemaOrBuilder getColumnsOrBuilder(
+ int index) {
+ return columns_.get(index);
+ }
+
+ // optional bool inMemory = 4;
+ public static final int INMEMORY_FIELD_NUMBER = 4;
+ private boolean inMemory_;
+ /**
+ * <code>optional bool inMemory = 4;</code>
+ *
+ * <pre>
+ * optional helpful encodings of commonly used attributes
+ * </pre>
+ */
+ public boolean hasInMemory() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional bool inMemory = 4;</code>
+ *
+ * <pre>
+ * optional helpful encodings of commonly used attributes
+ * </pre>
+ */
+ public boolean getInMemory() {
+ return inMemory_;
+ }
+
+ // optional bool readOnly = 5;
+ public static final int READONLY_FIELD_NUMBER = 5;
+ private boolean readOnly_;
+ /**
+ * <code>optional bool readOnly = 5;</code>
+ */
+ public boolean hasReadOnly() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional bool readOnly = 5;</code>
+ */
+ public boolean getReadOnly() {
+ return readOnly_;
+ }
+
+ private void initFields() {
+ name_ = "";
+ attrs_ = java.util.Collections.emptyList();
+ columns_ = java.util.Collections.emptyList();
+ inMemory_ = false;
+ readOnly_ = false;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ for (int i = 0; i < getAttrsCount(); i++) {
+ if (!getAttrs(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ for (int i = 0; i < getColumnsCount(); i++) {
+ if (!getColumns(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getNameBytes());
+ }
+ for (int i = 0; i < attrs_.size(); i++) {
+ output.writeMessage(2, attrs_.get(i));
+ }
+ for (int i = 0; i < columns_.size(); i++) {
+ output.writeMessage(3, columns_.get(i));
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBool(4, inMemory_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeBool(5, readOnly_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getNameBytes());
+ }
+ for (int i = 0; i < attrs_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, attrs_.get(i));
+ }
+ for (int i = 0; i < columns_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, columns_.get(i));
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(4, inMemory_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(5, readOnly_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchemaOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.class, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getAttrsFieldBuilder();
+ getColumnsFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ name_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (attrsBuilder_ == null) {
+ attrs_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ attrsBuilder_.clear();
+ }
+ if (columnsBuilder_ == null) {
+ columns_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ columnsBuilder_.clear();
+ }
+ inMemory_ = false;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ readOnly_ = false;
+ bitField0_ = (bitField0_ & ~0x00000010);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema build() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema buildPartial() {
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema result = new org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.name_ = name_;
+ if (attrsBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ attrs_ = java.util.Collections.unmodifiableList(attrs_);
+ bitField0_ = (bitField0_ & ~0x00000002);
+ }
+ result.attrs_ = attrs_;
+ } else {
+ result.attrs_ = attrsBuilder_.build();
+ }
+ if (columnsBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ columns_ = java.util.Collections.unmodifiableList(columns_);
+ bitField0_ = (bitField0_ & ~0x00000004);
+ }
+ result.columns_ = columns_;
+ } else {
+ result.columns_ = columnsBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.inMemory_ = inMemory_;
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.readOnly_ = readOnly_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema) {
+ return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema other) {
+ if (other == org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ bitField0_ |= 0x00000001;
+ name_ = other.name_;
+ onChanged();
+ }
+ if (attrsBuilder_ == null) {
+ if (!other.attrs_.isEmpty()) {
+ if (attrs_.isEmpty()) {
+ attrs_ = other.attrs_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ ensureAttrsIsMutable();
+ attrs_.addAll(other.attrs_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.attrs_.isEmpty()) {
+ if (attrsBuilder_.isEmpty()) {
+ attrsBuilder_.dispose();
+ attrsBuilder_ = null;
+ attrs_ = other.attrs_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ attrsBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getAttrsFieldBuilder() : null;
+ } else {
+ attrsBuilder_.addAllMessages(other.attrs_);
+ }
+ }
+ }
+ if (columnsBuilder_ == null) {
+ if (!other.columns_.isEmpty()) {
+ if (columns_.isEmpty()) {
+ columns_ = other.columns_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ ensureColumnsIsMutable();
+ columns_.addAll(other.columns_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.columns_.isEmpty()) {
+ if (columnsBuilder_.isEmpty()) {
+ columnsBuilder_.dispose();
+ columnsBuilder_ = null;
+ columns_ = other.columns_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ columnsBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getColumnsFieldBuilder() : null;
+ } else {
+ columnsBuilder_.addAllMessages(other.columns_);
+ }
+ }
+ }
+ if (other.hasInMemory()) {
+ setInMemory(other.getInMemory());
+ }
+ if (other.hasReadOnly()) {
+ setReadOnly(other.getReadOnly());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ for (int i = 0; i < getAttrsCount(); i++) {
+ if (!getAttrs(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ for (int i = 0; i < getColumnsCount(); i++) {
+ if (!getColumns(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // optional string name = 1;
+ private java.lang.Object name_ = "";
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public java.lang.String getName() {
+ java.lang.Object ref = name_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ name_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public Builder setName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public Builder clearName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ name_ = getDefaultInstance().getName();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public Builder setNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+
+ // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
+ private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> attrs_ =
+ java.util.Collections.emptyList();
+ private void ensureAttrsIsMutable() {
+ if (!((bitField0_ & 0x00000002) == 0x00000002)) {
+ attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute>(attrs_);
+ bitField0_ |= 0x00000002;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.AttributeOrBuilder> attrsBuilder_;
+
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> getAttrsList() {
+ if (attrsBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(attrs_);
+ } else {
+ return attrsBuilder_.getMessageList();
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ public int getAttrsCount() {
+ if (attrsBuilder_ == null) {
+ return attrs_.size();
+ } else {
+ return attrsBuilder_.getCount();
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute getAttrs(int index) {
+ if (attrsBuilder_ == null) {
+ return attrs_.get(index);
+ } else {
+ return attrsBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ public Builder setAttrs(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute value) {
+ if (attrsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAttrsIsMutable();
+ attrs_.set(index, value);
+ onChanged();
+ } else {
+ attrsBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ public Builder setAttrs(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder builderForValue) {
+ if (attrsBuilder_ == null) {
+ ensureAttrsIsMutable();
+ attrs_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ attrsBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ public Builder addAttrs(org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute value) {
+ if (attrsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAttrsIsMutable();
+ attrs_.add(value);
+ onChanged();
+ } else {
+ attrsBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ public Builder addAttrs(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute value) {
+ if (attrsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAttrsIsMutable();
+ attrs_.add(index, value);
+ onChanged();
+ } else {
+ attrsBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ public Builder addAttrs(
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder builderForValue) {
+ if (attrsBuilder_ == null) {
+ ensureAttrsIsMutable();
+ attrs_.add(builderForValue.build());
+ onChanged();
+ } else {
+ attrsBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ public Builder addAttrs(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder builderForValue) {
+ if (attrsBuilder_ == null) {
+ ensureAttrsIsMutable();
+ attrs_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ attrsBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ public Builder addAllAttrs(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> values) {
+ if (attrsBuilder_ == null) {
+ ensureAttrsIsMutable();
+ super.addAll(values, attrs_);
+ onChanged();
+ } else {
+ attrsBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ public Builder clearAttrs() {
+ if (attrsBuilder_ == null) {
+ attrs_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ onChanged();
+ } else {
+ attrsBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ public Builder removeAttrs(int index) {
+ if (attrsBuilder_ == null) {
+ ensureAttrsIsMutable();
+ attrs_.remove(index);
+ onChanged();
+ } else {
+ attrsBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder getAttrsBuilder(
+ int index) {
+ return getAttrsFieldBuilder().getBuilder(index);
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.AttributeOrBuilder getAttrsOrBuilder(
+ int index) {
+ if (attrsBuilder_ == null) {
+ return attrs_.get(index); } else {
+ return attrsBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.AttributeOrBuilder>
+ getAttrsOrBuilderList() {
+ if (attrsBuilder_ != null) {
+ return attrsBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(attrs_);
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder addAttrsBuilder() {
+ return getAttrsFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder addAttrsBuilder(
+ int index) {
+ return getAttrsFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder>
+ getAttrsBuilderList() {
+ return getAttrsFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.AttributeOrBuilder>
+ getAttrsFieldBuilder() {
+ if (attrsBuilder_ == null) {
+ attrsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.AttributeOrBuilder>(
+ attrs_,
+ ((bitField0_ & 0x00000002) == 0x00000002),
+ getParentForChildren(),
+ isClean());
+ attrs_ = null;
+ }
+ return attrsBuilder_;
+ }
+
+ // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
+ private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema> columns_ =
+ java.util.Collections.emptyList();
+ private void ensureColumnsIsMutable() {
+ if (!((bitField0_ & 0x00000004) == 0x00000004)) {
+ columns_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema>(columns_);
+ bitField0_ |= 0x00000004;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchemaOrBuilder> columnsBuilder_;
+
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema> getColumnsList() {
+ if (columnsBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(columns_);
+ } else {
+ return columnsBuilder_.getMessageList();
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ public int getColumnsCount() {
+ if (columnsBuilder_ == null) {
+ return columns_.size();
+ } else {
+ return columnsBuilder_.getCount();
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema getColumns(int index) {
+ if (columnsBuilder_ == null) {
+ return columns_.get(index);
+ } else {
+ return columnsBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ public Builder setColumns(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema value) {
+ if (columnsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureColumnsIsMutable();
+ columns_.set(index, value);
+ onChanged();
+ } else {
+ columnsBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ public Builder setColumns(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder builderForValue) {
+ if (columnsBuilder_ == null) {
+ ensureColumnsIsMutable();
+ columns_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ columnsBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ public Builder addColumns(org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema value) {
+ if (columnsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureColumnsIsMutable();
+ columns_.add(value);
+ onChanged();
+ } else {
+ columnsBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ public Builder addColumns(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema value) {
+ if (columnsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureColumnsIsMutable();
+ columns_.add(index, value);
+ onChanged();
+ } else {
+ columnsBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ public Builder addColumns(
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder builderForValue) {
+ if (columnsBuilder_ == null) {
+ ensureColumnsIsMutable();
+ columns_.add(builderForValue.build());
+ onChanged();
+ } else {
+ columnsBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ public Builder addColumns(
+ int index, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder builderForValue) {
+ if (columnsBuilder_ == null) {
+ ensureColumnsIsMutable();
+ columns_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ columnsBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ public Builder addAllColumns(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema> values) {
+ if (columnsBuilder_ == null) {
+ ensureColumnsIsMutable();
+ super.addAll(values, columns_);
+ onChanged();
+ } else {
+ columnsBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ public Builder clearColumns() {
+ if (columnsBuilder_ == null) {
+ columns_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ onChanged();
+ } else {
+ columnsBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ public Builder removeColumns(int index) {
+ if (columnsBuilder_ == null) {
+ ensureColumnsIsMutable();
+ columns_.remove(index);
+ onChanged();
+ } else {
+ columnsBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder getColumnsBuilder(
+ int index) {
+ return getColumnsFieldBuilder().getBuilder(index);
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchemaOrBuilder getColumnsOrBuilder(
+ int index) {
+ if (columnsBuilder_ == null) {
+ return columns_.get(index); } else {
+ return columnsBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchemaOrBuilder>
+ getColumnsOrBuilderList() {
+ if (columnsBuilder_ != null) {
+ return columnsBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(columns_);
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder addColumnsBuilder() {
+ return getColumnsFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ public org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder addColumnsBuilder(
+ int index) {
+ return getColumnsFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder>
+ getColumnsBuilderList() {
+ return getColumnsFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchemaOrBuilder>
+ getColumnsFieldBuilder() {
+ if (columnsBuilder_ == null) {
+ columnsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchemaOrBuilder>(
+ columns_,
+ ((bitField0_ & 0x00000004) == 0x00000004),
+ getParentForChildren(),
+ isClean());
+ columns_ = null;
+ }
+ return columnsBuilder_;
+ }
+
+ // optional bool inMemory = 4;
+ private boolean inMemory_ ;
+ /**
+ * <code>optional bool inMemory = 4;</code>
+ *
+ * <pre>
+ * optional helpful encodings of commonly used attributes
+ * </pre>
+ */
+ public boolean hasInMemory() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional bool inMemory = 4;</code>
+ *
+ * <pre>
+ * optional helpful encodings of commonly used attributes
+ * </pre>
+ */
+ public boolean getInMemory() {
+ return inMemory_;
+ }
+ /**
+ * <code>optional bool inMemory = 4;</code>
+ *
+ * <pre>
+ * optional helpful encodings of commonly used attributes
+ * </pre>
+ */
+ public Builder setInMemory(boolean value) {
+ bitField0_ |= 0x00000008;
+ inMemory_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional bool inMemory = 4;</code>
+ *
+ * <pre>
+ * optional helpful encodings of commonly used attributes
+ * </pre>
+ */
+ public Builder clearInMemory() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ inMemory_ = false;
+ onChanged();
+ return this;
+ }
+
+ // optional bool readOnly = 5;
+ private boolean readOnly_ ;
+ /**
+ * <code>optional bool readOnly = 5;</code>
+ */
+ public boolean hasReadOnly() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * <code>optional bool readOnly = 5;</code>
+ */
+ public boolean getReadOnly() {
+ return readOnly_;
+ }
+ /**
+ * <code>optional bool readOnly = 5;</code>
+ */
+ public Builder setReadOnly(boolean value) {
+ bitField0_ |= 0x00000010;
+ readOnly_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional bool readOnly = 5;</code>
+ */
+ public Builder clearReadOnly() {
+ bitField0_ = (bitField0_ & ~0x00000010);
+ readOnly_ = false;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema)
+ }
+
+ static {
+ defaultInstance = new TableSchema(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_Attribute_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_Attribute_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\030TableSchemaMessage.proto\022/org.apache.h" +
+ "adoop.hbase.rest.protobuf.generated\032\031Col" +
+ "umnSchemaMessage.proto\"\220\002\n\013TableSchema\022\014" +
+ "\n\004name\030\001 \001(\t\022U\n\005attrs\030\002 \003(\0132F.org.apache" +
+ ".hadoop.hbase.rest.protobuf.generated.Ta" +
+ "bleSchema.Attribute\022N\n\007columns\030\003 \003(\0132=.o" +
+ "rg.apache.hadoop.hbase.rest.protobuf.gen" +
+ "erated.ColumnSchema\022\020\n\010inMemory\030\004 \001(\010\022\020\n" +
+ "\010readOnly\030\005 \001(\010\032(\n\tAttribute\022\014\n\004name\030\001 \002" +
+ "(\t\022\r\n\005value\030\002 \002(\t"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_descriptor,
+ new java.lang.String[] { "Name", "Attrs", "Columns", "InMemory", "ReadOnly", });
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_Attribute_descriptor =
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_descriptor.getNestedTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_Attribute_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_Attribute_descriptor,
+ new java.lang.String[] { "Name", "Value", });
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.getDescriptor(),
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
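
The file above is ordinary protobuf 2.x generated code, so the TableSchema message moved into the new REST module is driven through the usual builder API. A minimal round-trip sketch follows; the class, field, and method names come straight from the generated file, while the table name and attribute values are made up for illustration:

    import org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema;

    public class TableSchemaExample {
      public static void main(String[] args) throws Exception {
        // Attribute declares both fields as 'required', so build() throws
        // UninitializedMessageException unless name and value are both set.
        TableSchema.Attribute attr = TableSchema.Attribute.newBuilder()
            .setName("COMPRESSION")    // illustrative attribute name
            .setValue("GZ")            // illustrative attribute value
            .build();

        TableSchema schema = TableSchema.newBuilder()
            .setName("example_table")  // optional string name = 1
            .addAttrs(attr)            // repeated Attribute attrs = 2
            .setInMemory(false)        // optional bool inMemory = 4
            .setReadOnly(true)         // optional bool readOnly = 5
            .build();

        // Serialize and parse back through the generated PARSER-backed helpers.
        byte[] wire = schema.toByteArray();
        TableSchema copy = TableSchema.parseFrom(wire);
        System.out.println(copy.getName() + " attrs=" + copy.getAttrsCount()
            + " readOnly=" + copy.getReadOnly());
      }
    }

Column families (field 3) are attached the same way via addColumns() with ColumnSchemaMessage.ColumnSchema instances. Note that inMemory and readOnly are, per the generated javadoc, just "helpful encodings of commonly used attributes"; arbitrary table attributes travel through the generic attrs list.
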
[15/38] HBASE-12197 Move rest to it's on module
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java
deleted file mode 100644
index 0938803..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.model;
-
-import java.io.IOException;
-import java.io.Serializable;
-
-import javax.servlet.ServletContext;
-import javax.xml.bind.annotation.XmlAttribute;
-import javax.xml.bind.annotation.XmlRootElement;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
-import org.apache.hadoop.hbase.rest.RESTServlet;
-import org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version;
-
-import com.sun.jersey.spi.container.servlet.ServletContainer;
-
-/**
- * A representation of the collection of versions of the REST gateway software
- * components.
- * <ul>
- * <li>restVersion: REST gateway revision</li>
- * <li>jvmVersion: the JVM vendor and version information</li>
- * <li>osVersion: the OS type, version, and hardware architecture</li>
- * <li>serverVersion: the name and version of the servlet container</li>
- * <li>jerseyVersion: the version of the embedded Jersey framework</li>
- * </ul>
- */
-@XmlRootElement(name="Version")
-@InterfaceAudience.Private
-public class VersionModel implements Serializable, ProtobufMessageHandler {
-
- private static final long serialVersionUID = 1L;
-
- private String restVersion;
- private String jvmVersion;
- private String osVersion;
- private String serverVersion;
- private String jerseyVersion;
-
- /**
- * Default constructor. Do not use.
- */
- public VersionModel() {}
-
- /**
- * Constructor
- * @param context the servlet context
- */
- public VersionModel(ServletContext context) {
- restVersion = RESTServlet.VERSION_STRING;
- jvmVersion = System.getProperty("java.vm.vendor") + ' ' +
- System.getProperty("java.version") + '-' +
- System.getProperty("java.vm.version");
- osVersion = System.getProperty("os.name") + ' ' +
- System.getProperty("os.version") + ' ' +
- System.getProperty("os.arch");
- serverVersion = context.getServerInfo();
- jerseyVersion = ServletContainer.class.getPackage()
- .getImplementationVersion();
- }
-
- /**
- * @return the REST gateway version
- */
- @XmlAttribute(name="REST")
- public String getRESTVersion() {
- return restVersion;
- }
-
- /**
- * @return the JVM vendor and version
- */
- @XmlAttribute(name="JVM")
- public String getJVMVersion() {
- return jvmVersion;
- }
-
- /**
- * @return the OS name, version, and hardware architecture
- */
- @XmlAttribute(name="OS")
- public String getOSVersion() {
- return osVersion;
- }
-
- /**
- * @return the servlet container version
- */
- @XmlAttribute(name="Server")
- public String getServerVersion() {
- return serverVersion;
- }
-
- /**
- * @return the version of the embedded Jersey framework
- */
- @XmlAttribute(name="Jersey")
- public String getJerseyVersion() {
- return jerseyVersion;
- }
-
- /**
- * @param version the REST gateway version string
- */
- public void setRESTVersion(String version) {
- this.restVersion = version;
- }
-
- /**
- * @param version the OS version string
- */
- public void setOSVersion(String version) {
- this.osVersion = version;
- }
-
- /**
- * @param version the JVM version string
- */
- public void setJVMVersion(String version) {
- this.jvmVersion = version;
- }
-
- /**
- * @param version the servlet container version string
- */
- public void setServerVersion(String version) {
- this.serverVersion = version;
- }
-
- /**
- * @param version the Jersey framework version string
- */
- public void setJerseyVersion(String version) {
- this.jerseyVersion = version;
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
- StringBuilder sb = new StringBuilder();
- sb.append("rest ");
- sb.append(restVersion);
- sb.append(" [JVM: ");
- sb.append(jvmVersion);
- sb.append("] [OS: ");
- sb.append(osVersion);
- sb.append("] [Server: ");
- sb.append(serverVersion);
- sb.append("] [Jersey: ");
- sb.append(jerseyVersion);
- sb.append("]\n");
- return sb.toString();
- }
-
- @Override
- public byte[] createProtobufOutput() {
- Version.Builder builder = Version.newBuilder();
- builder.setRestVersion(restVersion);
- builder.setJvmVersion(jvmVersion);
- builder.setOsVersion(osVersion);
- builder.setServerVersion(serverVersion);
- builder.setJerseyVersion(jerseyVersion);
- return builder.build().toByteArray();
- }
-
- @Override
- public ProtobufMessageHandler getObjectFromMessage(byte[] message)
- throws IOException {
- Version.Builder builder = Version.newBuilder();
- builder.mergeFrom(message);
- if (builder.hasRestVersion()) {
- restVersion = builder.getRestVersion();
- }
- if (builder.hasJvmVersion()) {
- jvmVersion = builder.getJvmVersion();
- }
- if (builder.hasOsVersion()) {
- osVersion = builder.getOsVersion();
- }
- if (builder.hasServerVersion()) {
- serverVersion = builder.getServerVersion();
- }
- if (builder.hasJerseyVersion()) {
- jerseyVersion = builder.getJerseyVersion();
- }
- return this;
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/package.html
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/package.html b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/package.html
deleted file mode 100644
index c21e129..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/package.html
+++ /dev/null
@@ -1,1660 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
-<html>
-
-<!--
-
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<head />
-<body bgcolor="white">
-<h1>HBase REST</h1>
-This package provides a RESTful Web service front end for HBase.
-<p>
-
-<h2>Table Of Contents</h2>
-<ol>
-<li><a href="#deployment">Deployment</a></li>
- <ol type="a">
- <li><a href="#deployment_daemon">Daemon</a></li>
- <li><a href="#deployment_servlet">Servlet</a></li>
- </ol>
-<li><a href="#rest">Representational State Transfer</a></li>
-<li><a href="#identifiers">Resource Identifiers</a></li>
-<li><a href="#operations">Operations</a></li>
- <ol type="a">
- <li><a href="#operation_version">Query Software Version</a></li>
- <li><a href="#operation_storage_cluster_version">Query Storage Cluster Version</a></li>
- <li><a href="#operation_storage_cluster_status">Query Storage Cluster Status</a></li>
- <li><a href="#operation_query_tables">Query Table List</a></li>
- <li><a href="#operation_query_schema">Query Table Schema</a></li>
- <li><a href="#operation_create_schema">Create Table Or Update Table Schema</a></li>
- <li><a href="#operation_table_metadata">Query Table Metadata</a></li>
- <li><a href="#operation_delete_table">Delete Table</a></li>
- <li><a href="#operation_cell_query_single">Cell Query (Single Value)</a></li>
- <li><a href="#operation_cell_query_multiple">Cell or Row Query (Multiple Values)</a></li>
- <li><a href="#operation_cell_store_single">Cell Store (Single)</a></li>
- <li><a href="#operation_cell_store_multiple">Cell Store (Multiple)</a></li>
- <li><a href="#operation_delete">Row, Column, or Cell Delete</a></li>
- <li><a href="#operation_scanner_create">Scanner Creation</a></li>
- <li><a href="#operation_scanner_next">Scanner Get Next</a></li>
- <li><a href="#operation_scanner_delete">Scanner Deletion</a></li>
- <li><a href="#operation_stateless_scanner">Stateless scanner</a></li>
- </ol>
- <li><a href="#xmlschema">XML Schema</a></li>
- <li><a href="#pbufschema">Protobufs Schema</a></li>
-</ol>
-
-<p>
-<a name="deployment">
-<h2>Deployment</h2>
-</a>
-<p>
-
-<p>
-<a name="deployment_daemon">
-<h3>Daemon</h3>
-</a>
-<p>
-HBase REST can run as a daemon which starts an embedded Jetty servlet container
-and deploys the servlet into it.
-<p>
-<ol>
-<li>Start the embedded Jetty servlet container:
- <ul>
- <li>In the foreground:
- <blockquote>
- <tt>
- % ./bin/hbase rest start -p <<i>port</i>>
- </tt>
- </blockquote>
- <p>
- where <<i>port</i>> is optional, and is the port the connector should
- listen on. (Default is 8080.)
- </p>
- </li>
- </ul>
-</li>
-</ol>
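-<p>
-The gateway can also be run as a background daemon. A typical invocation
-(a sketch assuming the standard <tt>bin/hbase-daemon.sh</tt> wrapper script
-is present in your installation):
-<p>
-<blockquote>
-<tt>
-% ./bin/hbase-daemon.sh start rest -p <<i>port</i>><br>
-% ./bin/hbase-daemon.sh stop rest<br>
-</tt>
-</blockquote>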
-
-<p>
-<a name="rest">
-<h2>Representational State Transfer</h2>
-</a>
-<p>
-
-The terms "representational state transfer" and "REST" were introduced in 2000
-in the
-<a href="http://www.ics.uci.edu/~fielding/pubs/dissertation/rest_arch_style.htm">
-doctoral dissertation of Roy Fielding</a>, one of the principal authors of the
-Hypertext Transfer Protocol (HTTP) specification.
-<p>
-A GET to an identifier requests a copy of the information in the supplied
-content type.
-<p>
-A PUT to an identifier replaces the information. The supplied content type
-determines how it is to be interpreted.
-<p>
-POST adds information.
-<p>
-DELETE eliminates information.
-<p>
-<center>
-<table width="90%">
-<tr><td><b>Database Operations</b></td>
- <td><b>REST/HTTP Equivalents</b></td>
- </tr>
-<tr><td colspan="2"> </td></tr>
-<tr><td>CREATE</td><td>PUT</td></tr>
-<tr><td>READ</td><td>GET</td></tr>
-<tr><td>UPDATE</td><td>POST (update) or PUT (replace)</td></tr>
-<tr><td>DELETE</td><td>DELETE</td></tr>
-</table>
-</center>
-
-<p>
-<a name="identifiers">
-<h2>Resource Identifiers</h2>
-</a>
-<p>
-<a href="http://www.rfc-editor.org/rfc/rfc3968.txt">RFC 3968</a> defines URL
-syntax:
-<p>
-<pre>
-scheme://user:pass@example.net:8080/path/to/file;type=foo?name=val#frag
-\_____/ \_______/\___________/\__/\______/\____/\______/\________/\___/
- | | | | | | | | |
- scheme userinfo hostname port path filename param query fragment
- \________________________/
- authority
-</pre>
-<p>
-HBase REST exposes HBase tables, rows, cells, and metadata as URL specified
-resources.
-<p>
-<b>NOTE:</b> The characters <tt>/</tt>, <tt>:</tt>, and <tt>,</tt> are reserved
-within row keys, column names, and column qualifiers. Clients must escape
-them, for example by using percent-encoding (www-url-encoding). For
-example, the key:
-<p>
-<pre>
- http://www.google.com/
-</pre>
-<p>
-should first be encoded as:
-<p>
-<pre>
- http%3A%2F%2Fwww.google.com%2F
-</pre>
-<p>
-to produce a path like:
-<pre>
- /SomeTable/http%3A%2F%2Fwww.google.com%2F/someColumn:qualifier
-</pre>
-<p>
-<h3>Addressing for cell or row query (GET)</h3>
-<p>
-<pre>
- path := '/' <table>
- '/' <row>
- ( '/' ( <column> ( ':' <qualifier> )?
- ( ',' <column> ( ':' <qualifier> )? )+ )?
- ( '/' ( <start-timestamp> ',' )? <end-timestamp> )? )?
- query := ( '?' 'v' '=' <num-versions> )?
-</pre>
-<p>
-
-<h3>Addressing for single value store (PUT)</h3>
-<p>
-Address with table, row, column (and optional qualifier), and optional timestamp.
-<p>
-<pre>
- path := '/' <table> '/' <row> '/' <column> ( ':' <qualifier> )?
- ( '/' <timestamp> )?
-</pre>
-<p>
-
-<h3>Addressing for multiple (batched) value store (PUT)</h3>
-<p>
-<pre>
- path := '/' <table> '/' <false-row-key>
-</pre>
-<p>
-
-<h3>Addressing for row, column, or cell DELETE</h3>
-<p>
-<pre>
- path := '/' <table>
- '/' <row>
- ( '/' <column> ( ':' <qualifier> )?
- ( '/' <timestamp> )? )?
-</pre>
-<p>
-
-<h3>Addressing for table creation or schema update (PUT or POST), schema query
-(GET), or delete (DELETE)</h3>
-<p>
-<pre>
- path := '/' <table> / 'schema'
-</pre>
-<p>
-
-<h3>Addressing for scanner creation (POST)</h3>
-<p>
-<pre>
- path := '/' <table> '/' 'scanner'
-</pre>
-<p>
-
-<h3>Addressing for scanner next item (GET)</h3>
-<p>
-<pre>
- path := '/' <table> '/' 'scanner' '/' <scanner-id>
-</pre>
-<p>
-
-<h3>Addressing for scanner deletion (DELETE)</h3>
-<p>
-<pre>
- path := '/' <table> '/' 'scanner' '/' <scanner-id>
-</pre>
-<p>
-
-<p>
-<a name="operations">
-<h2>Operations</h2>
-</a>
-<p>
-
-<a name="operation_version">
-<h3>Query Software Version</h3>
-</a>
-<p>
-<pre>
-GET /version
-</pre>
-<p>
-Returns the software version.
-Set Accept header to <tt>text/plain</tt> for plain text output.
-Set Accept header to <tt>text/xml</tt> for XML reply.
-Set Accept header to <tt>application/json</tt> for JSON reply.
-Set Accept header to <tt>application/x-protobuf</tt> for protobufs.
-<p>
-If not successful, returns appropriate HTTP error status code.
-If successful, returns the software version.
-<p>
-Examples:
-<p>
-<blockquote>
-<tt>
-% curl http://localhost:8000/version<br>
-<br>
-HTTP/1.1 200 OK<br>
-Content-Length: 149<br>
-Cache-Control: no-cache<br>
-Content-Type: text/plain<br>
-<br>
-Stargate 0.0.1 [JVM: Sun Microsystems Inc. 1.6.0_13-11.3-b02] [OS: Linux 2.6.<br>
-18-128.1.6.el5.centos.plusxen amd64] [Jetty: 6.1.14] [Jersey: 1.1.0-ea]<br>
-<br>
-% curl -H "Accept: text/xml" http://localhost:8000/version<br>
-<br>
-HTTP/1.1 200 OK<br>
-Cache-Control: no-cache<br>
-Content-Type: text/xml<br>
-Content-Length: 212<br>
-<br>
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?><br>
-<Version Stargate="0.0.1" OS="Linux 2.6.18-128.1.6.el5.centos.plusxen amd64"<br>
- JVM="Sun Microsystems Inc. 1.6.0_13-11.3-b02" Jetty="6.1.14" Jersey="1.1.0-e<br>
-a"/><br>
-<br>
-% curl -H "Accept: application/json" http://localhost:8000/version<br>
-<br>
-HTTP/1.1 200 OK<br>
-Cache-Control: no-cache<br>
-Content-Type: application/json<br>
-Transfer-Encoding: chunked<br>
-<br>
-{"@Stargate":"0.0.1","@OS":"Linux 2.6.18-128.1.6.el5.centos.plusxen amd64","@<br>
-JVM":"Sun Microsystems Inc. 1.6.0_13-11.3-b02","@Jetty":"6.1.14","@Jersey":"1<br>
-.1.0-ea"}<br>
-<br>
-% curl -H "Accept: application/x-protobuf" http://localhost:8000/version<br>
-<br>
-HTTP/1.1 200 OK<br>
-Content-Length: 113<br>
-Cache-Control: no-cache<br>
-Content-Type: application/x-protobuf<br>
-<br>
-000000 0a 05 30 2e 30 2e 31 12 27 53 75 6e 20 4d 69 63<br>
-000010 72 6f 73 79 73 74 65 6d 73 20 49 6e 63 2e 20 31<br>
-000020 2e 36 2e 30 5f 31 33 2d 31 31 2e 33 2d 62 30 32<br>
-000030 1a 2d 4c 69 6e 75 78 20 32 2e 36 2e 31 38 2d 31<br>
-000040 32 38 2e 31 2e 36 2e 65 6c 35 2e 63 65 6e 74 6f<br>
-000050 73 2e 70 6c 75 73 78 65 6e 20 61 6d 64 36 34 22<br>
-000060 06 36 2e 31 2e 31 34 2a 08 31 2e 31 2e 30 2d 65<br>
-000070 61<br>
-</tt>
-</blockquote>
-<p>
-
-<a name="operation_storage_cluster_version">
-<h3>Query Storage Cluster Version</h3>
-</a>
-<p>
-<pre>
-GET /version/cluster
-</pre>
-<p>
-Returns version information regarding the HBase cluster backing the Stargate instance.
-<p>
-Examples:
-<p>
-<blockquote>
-<tt>
-% curl http://localhost:8000/version/cluster<br>
-<br>
-HTTP/1.1 200 OK<br>
-Content-Length: 6<br>
-Cache-Control: no-cache<br>
-Content-Type: text/plain<br>
-<br>
-0.20.0<br>
-<br>
-% curl -H "Accept: text/xml" http://localhost:8000/version/cluster<br>
-<br>
-HTTP/1.1 200 OK<br>
-Cache-Control: no-cache<br>
-Content-Type: text/xml<br>
-Content-Length: 94<br>
-<br>
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?><br>
-<ClusterVersion>0.20.0</ClusterVersion><br>
-<br>
-% curl -H "Accept: application/json" http://localhost:8000/version/cluster<br>
-<br>
-HTTP/1.1 200 OK<br>
-Cache-Control: no-cache<br>
-Content-Type: application/json<br>
-Transfer-Encoding: chunked<br>
-<br>
-"0.20.0"<br>
-</tt>
-</blockquote>
-<p>
-
-<a name="operation_storage_cluster_status">
-<h3>Query Storage Cluster Status</h3>
-</a>
-<p>
-<pre>
-GET /status/cluster
-</pre>
-<p>
-Returns detailed status on the HBase cluster backing the Stargate instance.
-<p>
-Examples:
-<p>
-<blockquote>
-<tt>
-% curl http://localhost:8000/status/cluster<br>
-</tt>
-<pre>
-HTTP/1.1 200 OK
-Content-Length: 839
-Cache-Control: no-cache
-Content-Type: text/plain
-
-1 live servers, 0 dead servers, 13.0000 average load
-
-1 live servers
- test:37154 1244960965781
- requests=1, regions=13
-
- urls,http|www.legacy.com|80|site=Legacy|aamsz=300x250||position=1|prod
- =1,1244851990859
- urls,http|weather.boston.com|80|LYNX.js,1244851990859
- hbase:meta,,1
- content,601292a839b95e50200d8f8767859864,1244869158156
- content,9d7f3aeb2a5c1e2b45d690a91de3f23c,1244879698031
- content,7f6d48830ef51d635e9a5b672e79a083,1244879698031
- content,3ef16d776603bf9b9e775c9ceb64860f,1244869158156
- urls,,1244851989250
- urls,http|groups.google.com|80|groups|img|card_left.gif,1244851989250
- content,deafed2f90f718d72caaf87bd6c27d04,1244870320343
- content,bcf91ecf78ea72a33faccfb8e6b5d900,1244870320343
- -ROOT-,,0
- content,,1244851999187
-</pre>
-<tt>
-% curl -H "Accept: text/xml" http://localhost:8000/status/cluster<br>
-<br>
-HTTP/1.1 200 OK<br>
-Cache-Control: no-cache<br>
-Content-Type: text/xml<br>
-Content-Length: 1301<br>
-<br>
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?><br>
-<ClusterStatus requests="1" regions="13" averageLoad="13.0"><DeadNodes/><LiveN<br>
-odes><Node startCode="1244960965781" requests="1" name="test:37154"><Region na<br>
-me="dXJscyxodHRwfHd3dy5sZWdhY3kuY29tfDgwfHNpdGU9TGVnYWN5fGFhbXN6PTMwMHgyNTB8YX<br>
-JlYT1DSlDQaElDQUdPVFJJQlVORS4yMXx6b25lPUhvbWV8cG9zaXRpb249MXxwcm9kPTEsMTI0NDg1<br>
-MTk5MDg1OQ=="/><Region name="dXJscyxodHRwfHdlYXRoZXIuYm9zdG9uLmNvbXw4MHxMWU5YL<br>
-mpzLDEyNDQ4NTE5OTA4NTk="/><Region name="Lk1FVEEuLCwx"/><Region name="Y29udGVud<br>
-Cw2MDEyOTJhODM5Yjk1ZTUwMjAwZDhmODc2Nzg1OTg2NCwxMjQ0ODY5MTU4MTU2"/><Region name<br>
-="Y29udGVudCw5ZDdmM2FlYjJhNWMxZTJiNDVkNjkwYTkxZGUzZjIzYywxMjQ0ODc5Njk4MDMx"/><<br>
-Region name="Y29udGVudCw3ZjZkNDg4MzBlZjUxZDYzNWU5YTViNjcyZTc5YTA4MywxMjQ0ODc5N<br>
-jk4MDMx"/><Region name="Y29udGVudCwzZWYxNmQ3NzY2MDNiZjliOWU3NzVjOWNlYjY0ODYwZi<br>
-wxMjQ0ODY5MTU4MTU2"/><Region name="dXJscywsMTI0NDg1MTk4OTI1MA=="/><Region name<br>
-="dXJscyxodHRwfGdyb3Vwcy5nb29nbGUuY29tfDgwfGdyb3Vwc3xpbWd8Y2FyZF9sZWZ0LmdpZiwx<br>
-MjQ0ODUxOTg5MjUw"/><Region name="Y29udGVudCxkZWFmZWQyZjkwZjcxOGQ3MmNhYWY4N2JkN<br>
-mMyN2QwNCwxMjQ0ODcwMzIwMzQz"/><Region name="Y29udGVudCxiY2Y5MWVjZjc4ZWE3MmEzM2<br>
-ZhY2NmYjhlNmI1ZDkwMCwxMjQ0ODcwMzIwMzQz"/><Region name="LVJPT1QtLCww"/><Region<br>
-name="Y29udGVudCwsMTI0NDg1MTk5OTE4Nw=="/></Node></LiveNodes></ClusterStatus><br>
-<br>
-% curl -H "Accept: application/json" http://localhost:8000/status/cluster<br>
-<br>
-HTTP/1.1 200 OK<br>
-Cache-Control: no-cache<br>
-Content-Type: application/json<br>
-Transfer-Encoding: chunked<br>
-<br>
-{"@requests":"1","@regions":"13","@averageLoad":"13.0","DeadNodes":[],"LiveNod<br>
-es":{"Node":{"@startCode":"1244960965781","@requests":"1","@name":"test:37154"<br>
-,"Region":[{"@name":"dXJscyxodHRwfHd3dLmpzy5sZWdhY3kuY29tfDgwfHNpdGU9TGVnYWN5f<br>
-GFhbXN6PTMwMHgyNTB8YXJlYT1DSElDQUdPVFJJQlVORS4yMXx6b25lPUhvbWV8cG9zaXRpb249MXx<br>
-wcm9kPTEsMTI0NDg1MTk5MDg1OQ=="},{"@name":"dXJscyxodHRwfHdlYXRoZXIuYm9zdG9uLmNv<br>
-bXw4MHxMWU5YLmpzLDEyNDQ4NTE5OTA4NTk="},{"@name":"Lk1FVEEuLCwx"},{"@name":"Y29u<br>
-dGVudCw2MDEyOTJhODM5Yjk1ZTUwMjAwZDhmODc2Nzg1OTg2NCwxMjQ0ODY5MTU4MTU2"},{"@name<br>
-":"Y29udGVudCw5ZDdmM2FlYjJhNWMxZTJiNDVkNjkwYTkxZGUzZjIzYywxMjQ0ODc5Njk4MDMx"},<br>
-{"@name":"Y29udGVudCw3ZjZkNDg4MzBlZjUxZDYzNWU5YTViNjcyZTc5YTA4MywxMjQ0ODc5Njk4<br>
-MDMx"},{"@name":"Y29udGVudCwzZWYxNmQ3NzY2MDNiZjliOWU3NzVjOWNlYjY0ODYwZiwxMjQ0O<br>
-DY5MTU4MTU2"},{"@name":"dXJscywsMTI0NDg1MTk4OTI1MA=="},{"@name":"dXJscyxodHRwf<br>
-Gdyb3Vwcy5nb29nbGUuY29tfDgwfGdyb3Vwc3xpbWd8Y2FyZF9sZWZ0LmdpZiwxMjQ0ODUxOTg5MjU<br>
-w"},{"@name":"Y29udGVudCxkZWFmZWQyZjkwZjcxOGQ3MmNhYWY4N2JkNmMyN2QwNCwxMjQ0ODcw<br>
-MzIwMzQz"},{"@name":"Y29udGVudCxiY2Y5MWVjZjc4ZWE3MmEzM2ZhY2NmYjhlNmI1ZDkwMCwxM<br>
-jQ0ODcwMzIwMzQz"},{"@name":"LVJPT1QtLCww"},{"@name":"Y29udGVudCwsMTI0NDg1MTk5O<br>
-TE4Nw=="}]}}}<br>
-</tt>
-</blockquote>
-<p>
-
-<a name="operation_query_tables">
-<h3>Query Table List</h3>
-</a>
-<p>
-<pre>
-GET /
-</pre>
-<p>
-Retrieves the list of available tables.
-Set Accept header to <tt>text/plain</tt> for plain text output.
-Set Accept header to <tt>text/xml</tt> for XML reply.
-Set Accept header to <tt>application/json</tt> for JSON reply.
-Set Accept header to <tt>application/x-protobuf</tt> for protobufs.
-If not successful, returns appropriate HTTP error status code.
-If successful, returns the table list in the requested encoding.
-<p>
-Examples:
-<p>
-<blockquote>
-<tt>
-% curl http://localhost:8000/<br>
-<br>
-HTTP/1.1 200 OK<br>
-Content-Length: 13<br>
-Cache-Control: no-cache<br>
-Content-Type: text/plain<br>
-<br>
-content<br>
-urls<br>
-<br>
-% curl -H "Accept: text/xml" http://localhost:8000/<br>
-<br>
-HTTP/1.1 200 OK<br>
-Cache-Control: no-cache<br>
-Content-Type: text/xml<br>
-Content-Length: 121<br>
-<br>
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?><br>
-<TableList><table name="content"/><table name="urls"/></TableList><br>
-<br>
-% curl -H "Accept: application/json" http://localhost:8000/<br>
-<br>
-HTTP/1.1 200 OK<br>
-Cache-Control: no-cache<br>
-Content-Type: application/json<br>
-Transfer-Encoding: chunked<br>
-<br>
-{"table":[{"name":"content"},{"name":"urls"}]}<br>
-<br>
-% curl -H "Accept: application/x-protobuf" http://localhost:8000/<br>
-<br>
-HTTP/1.1 200 OK<br>
-Content-Length: 15<br>
-Cache-Control: no-cache<br>
-Content-Type: application/x-protobuf<br>
-<br>
-000000 0a 07 63 6f 6e 74 65 6e 74 0a 04 75 72 6c 73<br>
-</tt>
-</blockquote>
-<p>
-
-<a name="operation_query_schema">
-<h3>Query Table Schema</h3>
-</a>
-<p>
-<pre>
-GET /<table>/schema
-</pre>
-<p>
-Retrieves table schema.
-Set Accept header to <tt>text/plain</tt> for plain text output.
-Set Accept header to <tt>text/xml</tt> for XML reply.
-Set Accept header to <tt>application/json</tt> for JSON reply.
-Set Accept header to <tt>application/x-protobuf</tt> for protobufs.
-If not successful, returns appropriate HTTP error status code.
-If successful, returns the table schema in the requested encoding.
-<p>
-Examples:
-<p>
-<blockquote>
-<tt>
-% curl http://localhost:8000/content/schema<br>
-<br>
-HTTP/1.1 200 OK<br>
-Content-Length: 639<br>
-Cache-Control: no-cache<br>
-Content-Type: text/plain<br>
-<br>
-{ NAME => 'content', IS_META => 'false', IS_ROOT => 'false', COLUMNS => [ { NA<br>
-ME => 'content', BLOCKSIZE => '65536', BLOOMFILTER => 'false', BLOCKCACHE => <br>
-'false', COMPRESSION => 'GZ', LENGTH => '2147483647', VERSIONS => '1', TTL =><br>
-'-1', IN_MEMORY => 'false' }, { NAME => 'info', BLOCKSIZE => '65536', BLOOMFI<br>
-LTER => 'false', BLOCKCACHE => 'false', COMPRESSION => 'NONE', LENGTH => '214<br>
-7483647', VERSIONS => '1', TTL => '-1', IN_MEMORY => 'false' }, { NAME => 'ur<br>
-l', BLOCKSIZE => '65536', BLOOMFILTER => 'false', BLOCKCACHE => 'false', COMP<br>
-RESSION => 'NONE', LENGTH => '2147483647', VERSIONS => '1', TTL => '-1', IN_<br>
-MEMORY => 'false' } ] }<br>
-<br>
-% curl -H "Accept: text/xml" http://localhost:8000/content/schema<br>
-<br>
-HTTP/1.1 200 OK<br>
-Cache-Control: no-cache<br>
-Content-Type: text/xml<br>
-Content-Length: 618<br>
-<br>
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?><br>
-<TableSchema name="content" IS_META="false" IS_ROOT="false"><ColumnSchema nam<br>
-e="content" BLOCKSIZE="65536" BLOOMFILTER="false" BLOCKCACHE="false" COMPRESS<br>
-ION="GZ" LENGTH="2147483647" VERSIONS="1" TTL="-1" IN_MEMORY="false"/><Column<br>
-Schema name="info" BLOCKSIZE="65536" BLOOMFILTER="false" BLOCKCACHE="false" C<br>
-OMPRESSION="NONE" LENGTH="2147483647" VERSIONS="1" TTL="-1" IN_MEMORY="false"<br>
-/><ColumnSchema name="url" BLOCKSIZE="65536" BLOOMFILTER="false"BLOCKCACHE="f<br>
-alse" COMPRESSION="NONE" LENGTH="2147483647" VERSIONS="1" TTL="-1" IN_MEMORY=<br>
-"false"/></TableSchema><br>
-<br>
-% curl -H "Accept: application/json" http://localhost:8000/content/schema<br>
-<br>
-HTTP/1.1 200 OK<br>
-Cache-Control: no-cache<br>
-Content-Type: application/json<br>
-Transfer-Encoding: chunked<br>
-<br>
-{"@name":"content","@IS_META":"false","@IS_ROOT":"false","ColumnSchema":[{"@n<br>
-ame":"content","@BLOCKSIZE":"65536","@BLOOMFILTER":"false","@BLOCKCACHE":"fal<br>
-se","@COMPRESSION":"GZ","@LENGTH":"2147483647","@VERSIONS":"1","@TTL":"-1","@<br>
-IN_MEMORY":"false"},{"@name":"info","@BLOCKSIZE":"65536","@BLOOMFILTER":"fals<br>
-e","@BLOCKCACHE":"false","@COMPRESSION":"NONE","@LENGTH":"2147483647","@VERSI<br>
-ONS":"1","@TTL":"-1","@IN_MEMORY":"false"},{"@name":"url","@BLOCKSIZE":"65536<br>
-","@BLOOMFILTER":"false","@BLOCKCACHE":"false","@COMPRESSION":"NONE","@LENGTH<br>
-":"2147483647","@VERSIONS":"1","@TTL":"-1","@IN_MEMORY":"false"}]}<br>
-<br>
-% curl -H "Accept: application/x-protobuf" http://localhost:8000/content/schema<br>
-<br>
-HTTP/1.1 200 OK<br>
-Content-Length: 563<br>
-Cache-Control: no-cache<br>
-Content-Type: application/x-protobuf<br>
-<br>
-000000 0a 07 63 6f 6e 74 65 6e 74 12 10 0a 07 49 53 5f<br>
-000010 4d 45 54 41 12 05 66 61 6c 73 65 12 10 0a 07 49<br>
-000020 53 5f 52 4f 4f 54 12 05 66 61 6c 73 65 1a a7 01<br>
-000030 12 12 0a 09 42 4c 4f 43 4b 53 49 5a 45 12 05 36<br>
-[...]<br>
-000230 4f 4e 45<br>
-</tt>
-</blockquote>
-<p>
-
-<a name="operation_create_schema">
-<h3>Create Table Or Update Table Schema</h3>
-</a>
-<p>
-<pre>
-PUT /<table>/schema
-
-POST /<table>/schema
-</pre>
-<p>
-Uploads a table schema.
-PUT or POST creates the table as necessary.
-PUT fully replaces the schema.
-POST modifies the schema (adds or modifies a column family).
-Supply the full table schema for PUT or a well-formed schema fragment for POST
-in the desired encoding.
-Set Content-Type header to <tt>text/xml</tt> if the desired encoding is XML.
-Set Content-Type header to <tt>application/json</tt> if the desired encoding
-is JSON.
-Set Content-Type header to <tt>application/x-protobuf</tt> if the desired
-encoding is protobufs.
-If not successful, returns appropriate HTTP error status code.
-If successful, returns HTTP 200 status.
-<p>
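-For example, a hypothetical table named <tt>test</tt> with a single column
-family <tt>cf</tt> could be created with a minimal XML schema document like
-the following (the table and column family names are illustrative only; the
-document shape matches the TableSchema/ColumnSchema elements shown in the
-schema query examples above):
-<p>
-<blockquote>
-<tt>
-% curl -H "Content-Type: text/xml" -X PUT \<br>
-    -d '<TableSchema name="test"><ColumnSchema name="cf"/></TableSchema>' \<br>
-    http://localhost:8000/test/schema<br>
-<br>
-HTTP/1.1 200 OK<br>
-Content-Length: 0<br>
-</tt>
-</blockquote>
-<p>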
-
-<a name="operation_table_metadata">
-<h3>Query Table Metadata</h3>
-</a>
-<p>
-<pre>
-GET /<table>/regions
-</pre>
-<p>
-Retrieves table region metadata.
-Set Accept header to <tt>text/plain</tt> for plain text output.
-Set Accept header to <tt>text/xml</tt> for XML reply.
-Set Accept header to <tt>application/json</tt> for JSON reply.
-Set Accept header to <tt>application/x-protobuf</tt> for protobufs.
-If not successful, returns appropriate HTTP error status code.
-If successful, returns the table region metadata in the requested encoding.
-<p>
-Examples:
-<p>
-<blockquote>
-<tt>
-% curl -H "Accept: text/xml" http://localhost:8000/content/regions<br>
-<br>
-HTTP/1.1 200 OK<br>
-Cache-Control: no-cache<br>
-Content-Type: text/xml<br>
-Content-Length: 1555<br>
-<br>
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?><br>
-<TableInfo name="content"><Region location="test:51025" endKey="M2VmMTZkNzc2Nj<br>
-AzYmY5YjllNzc1YzljZWI2NDg2MGY=" startKey="" id="1244851999187" name="content,,<br>
-1244851999187"/><Region location="test:51025" endKey="NjAxMjkyYTgzOWI5NWU1MDIw<br>
-MGQ4Zjg3Njc4NTk4NjQ=" startKey="M2VmMTZkNzc2NjAzYmY5YjllNzc1YzljZWI2NDg2MGY=" <br>
-id="1244869158156" name="content,3ef16d776603bf9b9e775c9ceb64860f,124486915815<br>
-6"/><Region location="test:51025" endKey="N2Y2ZDQ4ODMwZWY1MWQ2MzVlOWE1YjY3MmU3<br>
-OWEwODM=" startKey="NjAxMjkyYTgzOWI5NWU1MDIwMGQ4Zjg3Njc4NTk4NjQ=" id="12448691<br>
-58156" name="content,601292a839b95e50200d8f8767859864,1244869158156"/><Region<br>
-location="test:51025" endKey="OWQ3ZjNhZWIyYTVjMWUyYjQ1ZDY5MGE5MWRlM2YyM2M=" st<br>
-artKey="N2Y2ZDQ4ODMwZWY1MWQ2MzVlOWE1YjY3MmU3OWEwODM=" id="1244879698031" name=<br>
-"content,7f6d48830ef51d635e9a5b672e79a083,1244879698031"/><Region location="te<br>
-st:51025" endKey="YmNmOTFlY2Y3OGVhNzJhMzNmYWNjZmI4ZTZiNWQ5MDA=" startKey="OWQ3<br>
-ZjNhZWIyYTVjMWUyYjQ1ZDY5MGE5MWRlM2YyM2M=" id="1244879698031" name="content,9d7<br>
-f3aeb2a5c1e2b45d690a91de3f23c,1244879698031"/><Region location="test:51025" en<br>
-dKey="ZGVhZmVkMmY5MGY3MThkNzJjYWFmODdiZDZjMjdkMDQ=" startKey="YmNmOTFlY2Y3OGVh<br>
-NzJhMzNmYWNjZmI4ZTZiNWQ5MDA=" id="1244870320343" name="content,bcf91ecf78ea72a<br>
-33faccfb8e6b5d900,1244870320343"/><Region location="test:51025" endKey="" star<br>
-tKey="ZGVhZmVkMmY5MGY3MThkNzJjYWFmODdiZDZjMjdkMDQ=" id="1244870320343" name="c<br>
-ontent,deafed2f90f718d72caaf87bd6c27d04,1244870320343"/></TableInfo><br>
-<br>
-% curl -H "Accept: application/json" http://localhost:8000/content/regions<br>
-<br>
-HTTP/1.1 200 OK<br>
-Cache-Control: no-cache<br>
-Content-Type: application/json<br>
-Transfer-Encoding: chunked<br>
-<br>
-{"@name":"content","Region":[{"@location":"test:51025","@endKey":"M2VmMTZkNzc2<br>
-NjAzYmY5YjllNzc1YzljZWI2NDg2MGY=","@startKey":"","@id":"1244851999187","@name"<br>
-:"content,,1244851999187"},{"@location":"test:51025","@endKey":"NjAxMjkyYTgzOW<br>
-I5NWU1MDIwMGQ4Zjg3Njc4NTk4NjQ=","@startKey":"M2VmMTZkNzc2NjAzYmY5YjllNzc1YzljZ<br>
-WI2NDg2MGY=","@id":"1244869158156","@name":"content,3ef16d776603bf9b9e775c9ceb<br>
-64860f,1244869158156"},{"@location":"test:51025","@endKey":"N2Y2ZDQ4ODMwZWY1MW<br>
-Q2MzVlOWE1YjY3MmU3OWEwODM=","@startKey":"NjAxMjkyYTgzOWI5NWU1MDIwMGQ4Zjg3Njc4N<br>
-Tk4NjQ=","@id":"1244869158156","@name":"content,601292a839b95e50200d8f87678598<br>
-64,1244869158156"},{"@location":"test:51025","@endKey":"OWQ3ZjNhZWIyYTVjMWUyYj<br>
-Q1ZDY5MGE5MWRlM2YyM2M=","@startKey":"N2Y2ZDQ4ODMwZWY1MWQ2MzVlOWE1YjY3MmU3OWEwO<br>
-DM=","@id":"1244879698031","@name":"content,7f6d48830ef51d635e9a5b672e79a083,1<br>
-244879698031"},{"@location":"test:51025","@endKey":"YmNmOTFlY2Y3OGVhNzJhMzNmYW<br>
-NjZmI4ZTZiNWQ5MDA=","@startKey":"OWQ3ZjNhZWIyYTVjMWUyYjQ1ZDY5MGE5MWRlM2YyM2M="<br>
-,"@id":"1244879698031","@name":"content,9d7f3aeb2a5c1e2b45d690a91de3f23c,12448<br>
-79698031"},{"@location":"test:51025","@endKey":"ZGVhZmVkMmY5MGY3MThkNzJjYWFmOD<br>
-diZDZjMjdkMDQ=","@startKey":"YmNmOTFlY2Y3OGVhNzJhMzNmYWNjZmI4ZTZiNWQ5MDA=","@i<br>
-d":"1244870320343","@name":"content,bcf91ecf78ea72a33faccfb8e6b5d900,124487032<br>
-0343"},{"@location":"test:51025","@endKey":"","@startKey":"ZGVhZmVkMmY5MGY3MTh<br>
-kNzJjYWFmODdiZDZjMjdkMDQ=","@id":"1244870320343","@name":"content,deafed2f90f7<br>
-18d72caaf87bd6c27d04,1244870320343"}]}<br>
-<br>
-% curl -H "Accept: application/x-protobuf" http://localhost:8000/content/regions<br>
-<br>
-HTTP/1.1 200 OK<br>
-Content-Length: 961<br>
-Cache-Control: no-cache<br>
-Content-Type: application/x-protobuf<br>
-<br>
-000000 0a 07 63 6f 6e 74 65 6e 74 12 53 0a 16 63 6f 6e<br>
-000010 74 65 6e 74 2c 2c 31 32 34 34 38 35 31 39 39 39<br>
-000020 31 38 37 12 00 1a 20 33 65 66 31 36 64 37 37 36<br>
-000030 36 30 33 62 66 39 62 39 65 37 37 35 63 39 63 65<br>
-[...]<br>
-0003c0 35<br>
-</tt>
-</blockquote>
-<p>
-
-<a name="operation_delete_table">
-<h3>Delete Table</h3>
-</a>
-<p>
-<pre>
-DELETE /<table>/schema
-</pre>
-<p>
-Deletes a table.
-If not successful, returns appropriate HTTP error status code.
-If successful, returns HTTP 200 status.
-<p>
-NOTE: <tt>DELETE /<table></tt> will not work; use
-<tt>DELETE /<table>/schema</tt> instead.
-<p>
-Examples:
-<p>
-<blockquote>
-<tt>
-% telnet localhost 8000<br>
-DELETE http://localhost:8000/test/schema HTTP/1.0<br>
-<br>
-HTTP/1.1 200 OK<br>
-Content-Length: 0<br>
-</tt>
-</blockquote>
-<p>
-
-<a name="operation_cell_query_single">
-<h3>Cell Query (Single Value)</h3>
-</a>
-<p>
-<pre>
-GET /<table>/<row>/
- <column> ( : <qualifier> )?
- ( / <timestamp> )?
-</pre>
-<p>
-Retrieves one cell, with optional specification of timestamp.
-Set Accept header to <tt>text/xml</tt> for XML reply.
-Set Accept header to <tt>application/x-protobuf</tt> for protobufs.
-Set Accept header to <tt>application/octet-stream</tt> for binary.
-If not successful, returns appropriate HTTP error status code.
-If successful, returns HTTP 200 status and cell data in the response body in
-the requested encoding. If the encoding is binary, returns row, column, and
-timestamp in X headers: <tt>X-Row</tt>, <tt>X-Column</tt>, and
-<tt>X-Timestamp</tt>, respectively. Depending on the precision of the resource
-specification, some of the X-headers may be elided as redundant.
-<p>
-Examples:
-<p>
-<blockquote>
-<tt>
-% curl -H "Accept: text/xml" http://localhost:8000/content/00012614f7d43df6418523445a6787d6/content:raw<br>
-<br>
-HTTP/1.1 200 OK<br>
-Cache-Control: max-age=14400<br>
-Content-Type: text/xml<br>
-Content-Length: 521<br>
-<br>
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?><br>
-<CellSet><Row key="MDAwMTI2MTRmN2Q0M2RmNjQxODUyMzQ0NWE2Nzg3ZDY="><Cell timesta<br>
-mp="1244880122250" column="Y29udGVudDpyYXc=">PCFET0NUWVBFIEhUTUwgUFVCTElDICItL<br>
-y9JRVRGLy9EVEQgSFRNTCAyLjAvL0VOIj4KPGh0bWw+PGhlYWQ+Cjx0aXRsZT4zMDEgTW92ZWQgUGV<br>
-ybWFuZW50bHk8L3RpdGxlPgo8L2hlYWQ+PGJvZHk+CjxoMT5Nb3ZlZCBQZXJtYW5lbnRseTwvaDE+C<br>
-jxwPlRoZSBkb2N1bWVudCBoYXMgbW92ZWQgPGEgaHJlZj0iaHR0cDovL3R3aXR0ZXIuY29tL2R1bmN<br>
-hbnJpbGV5Ij5oZXJlPC9hPi48L3A+CjwvYm9keT48L2h0bWw+Cg==</Cell></Row></CellSet><br>
-<br>
-% curl -H "Accept: application/json" http://localhost:8000/content/00012614f7d43df6418523445a6787d6/content:raw<br>
-<br>
-HTTP/1.1 200 OK<br>
-Cache-Control: max-age=14400<br>
-Content-Type: application/json<br>
-Transfer-Encoding: chunked<br>
-<br>
-{"Row":{"@key":"MDAwMTI2MTRmN2Q0M2RmNjQxODUyMzQ0NWE2Nzg3ZDY=","Cell":{"@timest<br>
-amp":"1244880122250","@column":"Y29udGVudDpyYXc=","$":"PCFET0NUWVBFIEhUTUwgUFV<br>
-CTElDICItLy9JRVRGLy9EVEQgSFRNTCAyLjAvL0VOIj4KPGh0bWw+PGhlYWQ+Cjx0aXRsZT4zMDEgT<br>
-W92ZWQgUGVybWFuZW50bHk8L3RpdGxlPgo8L2hlYWQ+PGJvZHk+CjxoMT5Nb3ZlZCBQZXJtYW5lbnR<br>
-seTwvaDE+CjxwPlRoZSBkb2N1bWVudCBoYXMgbW92ZWQgPGEgaHJlZj0iaHR0cDovL3R3aXR0ZXIuY<br>
-29tL2R1bmNhbnJpbGV5Ij5oZXJlPC9hPi48L3A+CjwvYm9keT48L2h0bWw+Cg=="}}}<br>
-<br>
-% curl -H "Accept: application/x-protobuf" http://localhost:8000/content/00012614f7d43df6418523445a6787d6/content:raw<br>
-<br>
-HTTP/1.1 200 OK<br>
-Content-Length: 301<br>
-Cache-Control: max-age=14400<br>
-Content-Type: application/x-protobuf<br>
-<br>
-000000 0a aa 02 0a 20 30 30 30 31 32 36 31 34 66 37 64<br>
-000010 34 33 64 66 36 34 31 38 35 32 33 34 34 35 61 36<br>
-000020 37 38 37 64 36 12 85 02 12 0b 63 6f 6e 74 65 6e<br>
-000030 74 3a 72 61 77 18 8a e3 8c c5 9d 24 22 ee 01 3c<br>
-[...]<br>
-000120 62 6f 64 79 3e 3c 2f 68 74 6d 6c 3e 0a<br>
-<br>
-% curl -H "Accept: application/octet-stream" http://localhost:8000/content/00012614f7d43df6418523445a6787d6/content:raw<br>
-<br>
-HTTP/1.1 200 OK<br>
-Content-Length: 238<br>
-Cache-Control: max-age=14400<br>
-X-Timestamp: 1244880122250<br>
-Content-Type: application/octet-stream<br>
-<br>
-[...]<br>
-</tt>
-</blockquote>
-<p>
-
-<a name="operation_cell_query_multiple">
-<h3>Cell or Row Query (Multiple Values)</h3>
-</a>
-<p>
-<pre>
-GET /<table>/<row>
- ( / ( <column> ( : <qualifier> )?
- ( , <column> ( : <qualifier> )? )+ )?
- ( / ( <start-timestamp> ',' )? <end-timestamp> )? )?
- ( ?v= <num-versions> )?
-</pre>
-<p>
-Retrieves one or more cells from a full row, or one or more specified columns
-in the row, with optional filtering via timestamp, and an optional restriction
-on the maximum number of versions to return.
-Set Accept header to <tt>text/xml</tt> for XML reply.
-Set Accept header to <tt>application/json</tt> for JSON reply.
-Set Accept header to <tt>application/x-protobuf</tt> for protobufs.
-Set Accept header to <tt>application/octet-stream</tt> for binary.
-If not successful, returns appropriate HTTP error status code.
-If successful, returns row results in the requested encoding.
-<p>
-NOTE: If binary encoding is requested, only one cell can be returned, the
-first to match the resource specification. The row, column, and timestamp
-associated with the cell will be transmitted in X headers: <tt>X-Row</tt>,
-<tt>X-Column</tt>, and <tt>X-Timestamp</tt>, respectively. Depending on the
-precision of the resource specification, some of the X-headers may be elided
-as redundant.
-<p>
-<b>Suffix Globbing</b>
-<p>
-Multiple value queries of a row can optionally append a suffix glob on the row
-key. This is a restricted form of scanner which returns all values in all
-rows whose keys begin with the supplied prefix, for example:
-<p>
-<pre>
- org.someorg.*
- -> org.someorg.blog
- -> org.someorg.home
- -> org.someorg.www
-</pre>
-<p>
-Examples:
-<p>
-<blockquote>
-<tt>
-% curl -H "Accept: text/xml" http://localhost:8000/urls/https|ad.doubleclick.net|*<br>
-<br>
-HTTP/1.1 200 OK<br>
-Cache-Control: max-age=14400<br>
-Content-Type: text/xml<br>
-Transfer-Encoding: chunked<br>
-<br>
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?><br>
-<CellSet><Row key="aHR0cHx3d3cudGVsZWdyYXBoLmNvLnVrfDgwfG5ld3N8d29ybGRuZXdzfG5<br>
-vcnRoYW1lcmljYXx1c2F8NTQ5MTI4NHxBcm5vbGQtU2Nod2FyemVuZWdnZXItdW52ZWlscy1wYXBlc<br>
-mxlc3MtY2xhc3Nyb29tcy1wbGFuLmh0bWw="><Cell timestamp="1244701257843" column="a<br>
-W5mbzpjcmF3bGVyLTEyNDQ3MDEyNTc4NDM=">eyJpcCI6IjIwOC41MS4xMzcuOSIsIm1pbWV0eXBlI<br>
-joidGV4dC9odG1sO2NoYXJzZXQ9SVNPLT<br>
-[...]<br>
-</Cell><Cell timestamp="1244701513390" column="aW5mbzp1cmw=">aHR0cDovL3d3dy50Z<br>
-WxlZ3JhcGguY28udWs6ODAvdGVsZWdyYXBoL3RlbXBsYXRlL3ZlcjEtMC90ZW1wbGF0ZXMvZnJhZ21<br>
-lbnRzL2NvbW1vbi90bWdsQnJhbmRDU1MuanNw</Cell></Row></CellSet><br>
-<br>
-% curl -H "Accept: text/xml" http://localhost:8000/content/00012614f7d43df6418523445a6787d6<br>
-<br>
-HTTP/1.1 200 OK<br>
-Cache-Control: max-age=14400<br>
-Content-Type: text/xml<br>
-Content-Length: 1177<br>
-<br>
-<CellSet><Row key="MDAwMTI2MTRmN2Q0M2RmNjQxODUyMzQ0NWE2Nzg3ZDY="><Cell timesta<br>
-mp="1244880122250" column="Y29udGVudDpyYXc=">PCFET0NUWVBFIEhUTUwgUFVCTElDICItL<br>
-y9JRVRGLy9EVEQgSFRNTCAyLjAvL0VOIj4KPGh0bWw+PGhlYWQ+Cjx0aXRsZT4zMDEgTW92ZWQgUGV<br>
-ybWFuZW50bHk8L3RpdGxlPgo8L2hlYWQ+PGJvZHk+CjxoMT5Nb3ZlZCBQZXJtYW5lbnRseTwvaDE+C<br>
-jxwPlRoZSBkb2N1bWVudCBoYXMgbW92ZWQgPGEgaHJlZj0iaHR0cDovL3R3aXR0ZXIuY29tL2R1bmN<br>
-hbnJpbGV5Ij5oZXJlPC9hPi48L3A+CjwvYm9keT48L2h0bWw+Cg==</Cell><Cell timestamp="1<br>
-244880122250" column="aW5mbzpjcmF3bGVyLWh0dHB8d3d3LnR3aXR0ZXIuY29tfDgwfGR1bmNh<br>
-bnJpbGV5LTEyNDQ4ODAxMjIyNTA=">eyJpcCI6IjE2OC4xNDMuMTYyLjY4IiwibWltZXR5cGUiOiJ0<br>
-ZXh0L2h0bWw7IGNoYXJzZXQ9aXNvLTg4NTktMSIsInZpYSI6Imh0dHA6Ly93d3cuaW5xdWlzaXRyLm<br>
-NvbTo4MC8yNTkyNy90b3NoMC1hbmQtdGhlLWRlbWktbW9vcmUtbnNmdy1waWMvIn0=</Cell><Cell<br>
-timestamp="1244880122250" column="aW5mbzpsZW5ndGg=">MjM4</Cell><Cell timestamp<br>
-="1244880122250" column="aW5mbzptaW1ldHlwZQ==">dGV4dC9odG1sOyBjaGFyc2V0PWlzby0<br>
-4ODU5LTE=</Cell><Cell timestamp="1244880122250" column="dXJsOmh0dHB8d3d3LnR3aX<br>
-R0ZXIuY29tfDgwfGR1bmNhbnJpbGV5">aHR0cDovL3d3dy50d2l0dGVyLmNvbTo4MC9kdW5jYW5yaW<br>
-xleQ==</Cell></Row></CellSet><br>
-<br>
-% curl -H "Accept: application/json" http://localhost:8000/content/00012614f7d43df6418523445a6787d6<br>
-<br>
-HTTP/1.1 200 OK<br>
-Cache-Control: max-age=14400<br>
-Content-Type: application/json<br>
-Transfer-Encoding: chunked<br>
-<br>
-{"Row":{"@key":"MDAwMTI2MTRmN2Q0M2RmNjQxODUyMzQ0NWE2Nzg3ZDY=","Cell":[{"@times<br>
-tamp":"1244880122250","@column":"Y29udGVudDpyYXc=","$":"PCFET0NUWVBFIEhUTUwgUF<br>
-VCTElDICItLy9JRVRGLy9EVEQgSFRNTCAyLjAvL0VOIj4KPGh0bWw+PGhlYWQ+Cjx0aXRsZT4zMDEg<br>
-TW92ZWQgUGVybWFuZW50bHk8L3RpdGxlPgo8L2hlYWQ+PGJvZHk+CjxoMT5Nb3ZlZCBQZXJtYW5lbn<br>
-RseTwvaDE+CjxwPlRoZSBkb2N1bWVudCBoYXMgbW92ZWQgPGEgaHJlZj0iaHR0cDovL3R3aXR0ZXIu<br>
-Y29tL2R1bmNhbnJpbGV5Ij5oZXJlPC9hPi48L3A+CjwvYm9keT48L2h0bWw+Cg=="},{"@timestam<br>
-p":"1244880122250","@column":"aW5mbzpjcmF3bGVyLWh0dHB8d3d3LnR3aXR0ZXIuY29tfDgw<br>
-fGR1bmNhbnJpbGV5LTEyNDQ4ODAxMjIyNTA=","$":"eyJpcCI6IjE2OC4xNDMuMTYyLjY4IiwibWl<br>
-tZXR5cGUiOiJ0ZXh0L2h0bWw7IGNoYXJzZXQ9aXNvLTg4NTktMSIsInZpYSI6Imh0dHA6Ly93d3cua<br>
-W5xdWlzaXRyLmNvbTo4MC8yNTkyNy90b3NoMC1hbmQtdGhlLWRlbWktbW9vcmUtbnNmdy1waWMvIn0<br>
-="},{"@timestamp":"1244880122250","@column":"aW5mbzpsZW5ndGg=","$":"MjM4"},{"@<br>
-timestamp":"1244880122250","@column":"aW5mbzptaW1ldHlwZQ==","$":"dGV4dC9odG1sO<br>
-yBjaGFyc2V0PWlzby04ODU5LTE="},{"@timestamp":"1244880122250","@column":"dXJsOmh<br>
-0dHB8d3d3LnR3aXR0ZXIuY29tfDgwfGR1bmNhbnJpbGV5","$":"aHR0cDovL3d3dy50d2l0dGVyLm<br>
-NvbTo4MC9kdW5jYW5yaWxleQ=="}]}}<br>
-</tt>
-<p>
-NOTE: The cell value is given in JSON encoding as the value associated with the key "$".
-<p>
-<tt>
-% curl -H "Accept: application/x-protobuf" http://localhost:8000/content/00012614f7d43df6418523445a6787d6<br>
-<br>
-HTTP/1.1 200 OK<br>
-Content-Length: 692<br>
-Cache-Control: max-age=14400<br>
-Content-Type: application/x-protobuf<br>
-<br>
-000000 0a b1 05 0a 20 30 30 30 31 32 36 31 34 66 37 64<br>
-000010 34 33 64 66 36 34 31 38 35 32 33 34 34 35 61 36<br>
-000020 37 38 37 64 36 12 85 02 12 0b 63 6f 6e 74 65 6e<br>
-000030 74 3a 72 61 77 18 8a e3 8c c5 9d 24 22 ee 01 3c<br>
-[...]<br>
-0002b0 69 6c 65 79<br>
-</tt>
-</blockquote>
-<p>
-
-<a name="operation_cell_store_single">
-<h3>Cell Store (Single)</h3>
-</a>
-<p>
-<pre>
-PUT /<table>/<row>/<column>( : <qualifier> )? ( / <timestamp> )?
-
-POST /<table>/<row>/<column>( : <qualifier> )? ( / <timestamp> )?
-</pre>
-<p>
-Stores cell data into the specified location.
-If not successful, returns appropriate HTTP error status code.
-If successful, returns HTTP 200 status.
-Set Content-Type header to <tt>text/xml</tt> for XML encoding.
-Set Content-Type header to <tt>application/x-protobuf</tt> for protobufs encoding.
-Set Content-Type header to <tt>application/octet-stream</tt> for binary encoding.
-When using binary encoding, optionally, set X-Timestamp header to the desired
-timestamp.
-<p>
-PUT and POST operations are equivalent here: Specified addresses without
-existing data will create new values. Specified addresses with existing data
-will create new versions, overwriting an existing version if all of { row,
-column:qualifier, timestamp } match that of the existing value.
-<p>
-See "Cell Query (Single Value)" section for encoding examples.
-<p>
-Examples:
-<p>
-<blockquote>
-<tt>
-% curl -H "Content-Type: text/xml" --data '[...]' http://localhost:8000/test/testrow/test:testcolumn<br>
-<br>
-HTTP/1.1 200 OK<br>
-Content-Length: 0<br>
-</tt>
-</blockquote>
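-<p>
-As a concrete sketch of the XML encoding (the table, row, column, and value
-here are hypothetical), the request body is a <tt>CellSet</tt> document of
-the same form returned by a cell query; the row key decodes to "testrow",
-the column to "test:testcolumn", and the value to "testvalue":
-<p>
-<blockquote>
-<tt>
-% curl -H "Content-Type: text/xml" -X PUT \<br>
-    -d '<CellSet><Row key="dGVzdHJvdw=="><Cell column="dGVzdDp0ZXN0Y29sdW1u">dGVzdHZhbHVl</Cell></Row></CellSet>' \<br>
-    http://localhost:8000/test/testrow/test:testcolumn<br>
-<br>
-HTTP/1.1 200 OK<br>
-Content-Length: 0<br>
-</tt>
-</blockquote>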
-<p>
-
-<a name="operation_cell_store_multiple">
-<h3>Cell Store (Multiple)</h3>
-</a>
-<p>
-<pre>
-PUT /<table>/<false-row-key>
-
-POST /<table>/<false-row-key>
-</pre>
-<p>
-Use a false row key. Row, column, and timestamp values in supplied cells
-override the specifications of the same on the path, allowing for posting of
-multiple values to a table in batch. If not successful, returns appropriate
-HTTP error status code. If successful, returns HTTP 200 status.
-Set Content-Type to <tt>text/xml</tt> for XML encoding.
-Set Content-Type header to <tt>application/x-protobuf</tt> for protobufs encoding.
-Supply commit data in the PUT or POST body.
-<p>
-PUT and POST operations are equivalent here: Specified addresses without
-existing data will create new values. Specified addresses with existing data
-will create new versions, overwriting an existing version if all of { row,
-column:qualifier, timestamp } match that of the existing value.
-<p>
-See "Cell or Row Query (Multiple Values)" for encoding examples.
-<p>
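-For instance (a sketch with hypothetical row keys, column, and values), two
-cells in different rows can be committed in a single POST by embedding
-multiple Row elements, each with its own base64 encoded key; the false row
-key in the path is ignored:
-<p>
-<blockquote>
-<tt>
-% curl -H "Content-Type: text/xml" -X POST \<br>
-    -d '<CellSet><Row key="cm93MQ=="><Cell column="dGVzdDox">dmFsdWUx</Cell></Row><Row key="cm93Mg=="><Cell column="dGVzdDox">dmFsdWUy</Cell></Row></CellSet>' \<br>
-    http://localhost:8000/test/fakerow<br>
-<br>
-HTTP/1.1 200 OK<br>
-Content-Length: 0<br>
-</tt>
-</blockquote>
-<p>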
-
-<a name="operation_delete">
-<h3>Row, Column, or Cell Delete</h3>
-</a>
-<p>
-<pre>
-DELETE /<table>/<row>
- ( / ( <column> ( : <qualifier> )?
- ( / <timestamp> )? )?
-</pre>
-<p>
-Deletes an entire row, an entire column family, or specific cell(s), depending
-on how specific the data address is. If not successful, returns appropriate
-HTTP error status code. If successful, returns HTTP 200 status.
-<p>
-NOTE: <tt>DELETE /<table></tt> will not work.
-Use <tt>DELETE /<table>/schema</tt> instead.
-<p>
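-Examples (the table, row, and column names here are hypothetical):
-<p>
-<blockquote>
-<tt>
-% curl -X DELETE http://localhost:8000/test/testrow/test:testcolumn<br>
-<br>
-HTTP/1.1 200 OK<br>
-Content-Length: 0<br>
-</tt>
-</blockquote>
-<p>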
-
-<a name="operation_scanner_create">
-<h3>Scanner Creation</h3>
-</a>
-<p>
-<pre>
-PUT /<table>/scanner
-
-POST /<table>/scanner
-</pre>
-<p>
-Allocates a new table scanner.
-If not successful, returns appropriate HTTP error status code.
-If successful, returns HTTP 201 status (created) and the URI which should be
-used to address the scanner, e.g.
-<p>
-<blockquote><tt>/<table>/scanner/112876541342014107c0fa92</tt></blockquote>
-<p>
-Set Content-Type to <tt>text/xml</tt> if supplying an XML scanner specification.
-Set Content-Type to <tt>application/x-protobuf</tt> if supplying a protobufs
-encoded specification.
-<p>
-Examples:
-<p>
-<blockquote>
-<tt>
-% curl -H "Content-Type: text/xml" -d '<Scanner batch="1"/>' http://localhost:8000/content/scanner<br>
-<br>
-HTTP/1.1 201 Created<br>
-Location: http://localhost:8000/content/scanner/12447063229213b1937<br>
-Content-Length: 0<br>
-</tt>
-</blockquote>
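-<p>
-A richer scanner specification can constrain the scan. As a sketch (the
-attribute and element names here extend the <tt>Scanner</tt> element used
-above and are an assumption; the base64 encoded row keys and column are
-hypothetical, decoding to "row1", "row2", and "test:1"):
-<p>
-<blockquote>
-<tt>
-% curl -H "Content-Type: text/xml" \<br>
-    -d '<Scanner batch="10" startRow="cm93MQ==" endRow="cm93Mg=="><column>dGVzdDox</column></Scanner>' \<br>
-    http://localhost:8000/content/scanner<br>
-<br>
-HTTP/1.1 201 Created<br>
-Location: http://localhost:8000/content/scanner/12447063229213b1937<br>
-Content-Length: 0<br>
-</tt>
-</blockquote>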
-<p>
-
-<a name="operation_scanner_next">
-<h3>Scanner Get Next</h3>
-</a>
-<p>
-<pre>
-GET /<table>/scanner/<scanner-id>
-</pre>
-<p>
-Returns the values of the next cells found by the scanner, up to the configured batch amount.
-Set Accept header to <tt>text/xml</tt> for XML encoding.
-Set Accept header to <tt>application/x-protobuf</tt> for protobufs encoding.
-Set Accept header to <tt>application/octet-stream</tt> for binary encoding.
-If not successful, returns appropriate HTTP error status code.
-If result is successful but the scanner is exhausted, returns HTTP 204 status (no content).
-Otherwise, returns HTTP 200 status and row and cell data in the response body.
-See examples from the "Cell or Row Query (Multiple Values)" section.
-<p>
-NOTE: The binary encoding option returns only one cell regardless of the
-batching parameter supplied during scanner creation. The row, column, and
-timestamp associated with the cell are transmitted as X-headers:
-<tt>X-Row</tt>, <tt>X-Column</tt>, and <tt>X-Timestamp</tt> respectively.
-<p>
-Examples:
-<p>
-<blockquote>
-<tt>
-% curl -H "Content-Type: text/xml" http://localhost:8000/content/scanner/12447063229213b1937<br>
-<br>
-HTTP/1.1 200 OK<br>
-Cache-Control: no-cache<br>
-Content-Type: text/xml<br>
-Content-Length: 589<br>
-<br>
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?><br>
-<CellSet><Row key="MDAyMDFjMTAwNjk4ZGNkYjU5MDQxNTVkZGQ3OGRlZTk="><Cell timesta<br>
-mp="1244701281234" column="Y29udGVudDpyYXc=">PCFET0NUWVBFIEhUTUwgUFVCTElDICItL<br>
-y9JRVRGLy9EVEQgSFRNTCAyLjAvL0VOIj4KPGh0bWw+PGhlYWQ+Cjx0aXRsZT40MDQgTm90IEZvdW5<br>
-kPC90aXRsZT4KPC9oZWFkPjxib2R5Pgo8aDE+Tm90IEZvdW5kPC9oMT4KPHA+VGhlIHJlcXVlc3RlZ<br>
-CBVUkwgL3JvYm90cy50eHQgd2FzIG5vdCBmb3VuZCBvbiB0aGlzIHNlcnZlci48L3A+Cjxocj4KPGF<br>
-kZHJlc3M+QXBhY2hlLzIuMi4zIChSZWQgSGF0KSBTZXJ2ZXIgYXQgd3gubWduZXR3b3JrLmNvbSBQb<br>
-3J0IDgwPC9hZGRyZXNzPgo8L2JvZHk+PC9odG1sPgo=</Cell></Row></CellSet><br>
-<br>
-% curl -H "Content-Type: application/json" http://localhost:8000/content/scanner/12447063229213b1937<br>
-<br>
-HTTP/1.1 200 OK<br>
-Cache-Control: no-cache<br>
-Content-Type: application/json<br>
-Transfer-Encoding: chunked<br>
-<br>
-{"Row":{"@key":"MDAyMDFjMTAwNjk4ZGNkYjU5MDQxNTVkZGQ3OGRlZTk=","Cell":{"@timest<br>
-amp":"1244701281234","@column":"aW5mbzpjcmF3bGVyLWh0dHB8d3gubWduZXR3b3JrLmNvbX<br>
-w4MHxyb2JvdHMudHh0LTEyNDQ3MDEyODEyMzQ=","$":"eyJpcCI6IjE5OS4xOTMuMTAuMTAxIiwib<br>
-WltZXR5cGUiOiJ0ZXh0L2h0bWw7IGNoYXJzZXQ9aXNvLTg4NTktMSIsInZpYSI6Imh0dHA6Ly93eC5<br>
-tZ25ldHdvcmsuY29tOjgwL2pzL2N1cnJlbnRzaGFuZGxlci5qcyJ9"}}}<br>
-<br>
-% curl -H "Content-Type: application/x-protobuf" http://localhost:8000/content/scanner/12447063229213b1937<br>
-<br>
-HTTP/1.1 200 OK<br>
-Content-Length: 63<br>
-Cache-Control: no-cache<br>
-Content-Type: application/x-protobuf<br>
-<br>
-000000 0a 3d 0a 20 30 30 32 30 31 63 31 30 30 36 39 38<br>
-000010 64 63 64 62 35 39 30 34 31 35 35 64 64 64 37 38<br>
-000020 64 65 65 39 12 19 12 0b 69 6e 66 6f 3a 6c 65 6e<br>
-000030 67 74 68 18 d2 97 e9 ef 9c 24 22 03 32 39 30<br>
-<br>
-% curl -H "Content-Type: application/octet-stream" http://localhost:8000/content/scanner/12447063229213b1937<br>
-<br>
-HTTP/1.1 200 OK<br>
-Content-Length: 37<br>
-Cache-Control: no-cache<br>
-X-Column: dXJsOmh0dHB8d3gubWduZXR3b3JrLmNvbXw4MHxyb2JvdHMudHh0<br>
-X-Row: MDAyMDFjMTAwNjk4ZGNkYjU5MDQxNTVkZGQ3OGRlZTk=<br>
-X-Timestamp: 1244701281234<br>
-Content-Type: application/octet-stream<br>
-<br>
-000000 68 74 74 70 3a 2f 2f 77 78 2e 6d 67 6e 65 74 77<br>
-000010 6f 72 6b 2e 63 6f 6d 3a 38 30 2f 72 6f 62 6f 74<br>
-000020 73 2e 74 78 74<br>
-</tt>
-</blockquote>
-<p>
-
-<a name="operation_scanner_delete">
-<h3>Scanner Deletion</h3>
-</a>
-<p>
-<pre>
-DELETE /<table>/scanner/<scanner-id>
-</pre>
-<p>
-Deletes resources associated with the scanner. This is an optional action.
-Scanners will expire after some globally configurable interval has elapsed
-with no activity on the scanner. If not successful, returns appropriate HTTP
-error status code. If successful, returns HTTP status 200.
-<p>
-Examples:
-<p>
-<blockquote>
-<tt>
-% telnet localhost 8000<br>
-DELETE http://localhost:8000/content/scanner/12447063229213b1937 HTTP/1.0<br>
-<br>
-HTTP/1.1 200 OK<br>
-Content-Length: 0<br>
-</tt>
-</blockquote>
-<p>
-
-<a name="operation_stateless_scanner">
- <h3>Stateless Scanner</h3>
-</a>
-<pre>
- GET /<table>/<optional_row_prefix>*?<scan_parameters>
-</pre>
-<p align="justify">
- The current scanner API expects clients to restart scans if there is a REST server failure in the
- midst. The stateless does not store any state related to scan operation and all the parameters
- are specified as query parameters.
-<p>
-<p>
- The following scan parameters are supported:
- <ol>
- <li>startrow - The start row for the scan.</li>
- <li>endrow - The end row for the scan.</li>
- <li>columns - The columns to scan.</li>
- <li>starttime, endtime - To only retrieve columns within a specific range of version timestamps,
- both start and end time must be specified.</li>
- <li>maxversions - To limit the number of versions of each column to be returned.</li>
- <li>batchsize - To limit the maximum number of values returned for each call to next().</li>
- <li>limit - The number of rows to return in the scan operation.</li>
- </ol>
-<p>
-<p>
- More on the start row, end row, and limit parameters:
- <ol>
- <li>If start row, end row, and limit are not specified, the whole table will be scanned.</li>
- <li>If start row and limit (say N) are specified, the scan operation will return N rows
- starting from the specified start row.</li>
- <li>If only the limit parameter (say N) is specified, the scan operation will return N rows
- from the start of the table.</li>
- <li>If limit (say N) and end row are specified, the scan operation will return up to N rows
- from the start of the table to the end row. If the end row is reached after only M rows,
- where M < N, then M rows will be returned to the user.</li>
- <li>If start row, end row, and limit (say N) are specified and N is less than the number of
- rows between start row and end row, then N rows starting from the start row will be
- returned to the user. If N is greater than that number of rows (say M), then M rows
- will be returned to the user.</li>
- </ol>
-<p>
-<p><b>Examples</b><p>
-<p>
-<blockquote>
-<pre>
-Let's say we have a table named "ExampleScanner". In the HBase shell:
->> scan 'ExampleScanner'
-
-ROW COLUMN+CELL
-testrow1 column=a:1, timestamp=1389900769772, value=testvalue-a1
-testrow1 column=b:1, timestamp=1389900780536, value=testvalue-b1
-testrow2 column=a:1, timestamp=1389900823877, value=testvalue-a2
-testrow2 column=b:1, timestamp=1389900818233, value=testvalue-b2
-testrow3 column=a:1, timestamp=1389900847336, value=testvalue-a3
-testrow3 column=b:1, timestamp=1389900856845, value=testvalue-b3
-</pre>
-<ul>
-<li>
-<pre>
-<b>Scanning the entire table in json</b>
-
-curl -H "Accept: application/json" https://localhost:8080/ExampleScanner/*
-</pre>
-<p>
-<tt>
-{"Row":[{"key":"dGVzdHJvdzE=","Cell":[{"column":"YTox","timestamp":1389900769772,<br>
-"$":"dGVzdHZhbHVlLWEx"},{"column":"Yjox","timestamp":1389900780536,"$":"dGVzdHZhbHVlLWIx"}]},<br>
-{"key":"dGVzdHJvdzI=","Cell":[{"column":"YTox","timestamp":1389900823877,"$":"dGVzdHZhbHVlLWEy"}<br>
-{"column":"Yjox","timestamp":1389900818233,"$":"dGVzdHZhbHVlLWIy"}]},{"key":"dGVzdHJvdzM=",<br>
-"Cell":[{"column":"YTox","timestamp":1389900847336,"$":"dGVzdHZhbHVlLWEz"},{"column":"Yjox",<br>
-"timestamp":1389900856845,"$":"dGVzdHZhbHVlLWIz"}]}]}<br>
-</tt>
-<p>
-</li>
-<li>
-<pre>
-<b>Scanning the entire table in XML</b>
-
-curl -H "Content-Type: text/xml" https://localhost:8080/ExampleScanner/*
-</pre>
-<p>
-<tt>
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?><CellSet><<br>
-Row key="dGVzdHJvdzE="><Cell column="YTox" timestamp="1389900769772">dGVzdHZhbHVlLWEx<<br>
-/Cell><Cell column="Yjox"timestamp="1389900780536">dGVzdHZhbHVlLWIx</Cell><<br>
-/Row><Row key="dGVzdHJvdzI="><Cell column="YTox" timestamp="1389900823877"><br>
-dGVzdHZhbHVlLWEy</Cell><Cell column="Yjox"timestamp="1389900818233">dGVzdHZhbHVlLWIy<<br>
-/Cell></Row><Row key="dGVzdHJvdzM="><Cell column="YTox" timestamp="1389900847336<br>
-">dGVzdHZhbHVlLWEz</Cell><Cell column="Yjox"timestamp="1389900856845"><br>
-dGVzdHZhbHVlLWIz</Cell></Row></CellSet><br>
-</tt>
-<p>
-</li>
-<li>
-<pre>
-<b>Scanning the entire table in binary</b>
-
-curl -H "Accept: application/protobuf" https://localhost:8080/ExampleScanner/*
-
-^@Ì
-B
-^Htestrow1^R^Z^R^Ca:1^XìÓªä¹("^Ltestvalue-a1^R^Z^R^Cb:1^Xø§«ä¹("^Ltestvalue-b1
-B
-^Htestrow2^R^Z^R^Ca:1^XÅúä¹("^Ltestvalue-a2^R^Z^R^Cb:1^X¹Îä¹("^Ltestvalue-b2
-B
-^Htestrow3^R^Z^R^Ca:1^X豯ä¹("^Ltestvalue-a3^R^Z^R^Cb:1^X<8d>ü¯ä¹("^Ltestvalue-b3
-</pre>
-</li>
-<li>
-<pre>
-<b>Scanning the first row of table</b>
-
-curl -H "Content-Type: text/xml" https://localhost:8080/ExampleScanner/*?limit=1
-</pre>
-<p>
-<tt>
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?><CellSet><<br>
-Row key="dGVzdHJvdzE="><Cell column="YTox" timestamp="1389900769772"><br>
-dGVzdHZhbHVlLWEx</Cell><Cell column="Yjox"timestamp="1389900780536"><br>
-dGVzdHZhbHVlLWIx</Cell></Row></CellSet><br>
-</tt>
-<p>
-</li>
-<li>
-<pre>
-<b>Scanning a given column of table</b>
-
-curl -H "Content-Type: text/xml" https://localhost:8080/ExampleScanner/*?columns=a:1
-</pre>
-<p>
-<tt>
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?><CellSet><<br>
-Row key="dGVzdHJvdzE="><Cell column="YTox" timestamp="1389900769772">dGVzdHZhbHVlLWEx<<br>
-/Cell></Row><Row key="dGVzdHJvdzI="><Cell column="YTox" timestamp=<br>
-"1389900823877">dGVzdHZhbHVlLWEy</Cell></Row><Row key="dGVzdHJvdzM="><<br>
-Cell column="YTox" timestamp="1389900847336">dGVzdHZhbHVlLWEz</Cell><<br>
-/Row></CellSet><br>
-</tt>
-<p>
-</li>
-<li>
-<pre>
-<b>Scanning more than one column of table</b>
-
-curl -H "Content-Type: text/xml" https://localhost:8080/ExampleScanner/*?columns=a:1,b:1
-</pre>
-<p>
-<tt>
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?><CellSet><<br>
-Row key="dGVzdHJvdzE="><Cell column="YTox" timestamp="1389900769772"><br>
-dGVzdHZhbHVlLWEx</Cell><Cell column="Yjox"timestamp="1389900780536"><br>
-dGVzdHZhbHVlLWIx</Cell></Row><Row key="dGVzdHJvdzI="><<br>
-Cell column="YTox" timestamp="1389900823877">dGVzdHZhbHVlLWEy</Cell><<br>
-Cell column="Yjox"timestamp="1389900818233">dGVzdHZhbHVlLWIy</Cell><<br>
-/Row><Row key="dGVzdHJvdzM="><Cell column="YTox" timestamp="1389900847336"><br>
-dGVzdHZhbHVlLWEz</Cell><Cell column="Yjox"timestamp="1389900856845"><br>
-dGVzdHZhbHVlLWIz</Cell></Row></CellSet><br>
-</tt>
-<p>
-</li>
-<li>
-<pre>
-<b>Scanning table with start row and limit</b>
-
-curl -H "Content-Type: text/xml" https://localhost:8080/ExampleScanner/*?startrow=testrow1&limit=2
-</pre>
-<p>
-<tt>
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?><CellSet><<br>
-Row key="dGVzdHJvdzE="><Cell column="YTox" timestamp="1389900769772">dGVzdHZhbHVlLWEx<<br>
-/Cell><Cell column="Yjox"timestamp="1389900780536">dGVzdHZhbHVlLWIx</Cell><<br>
-/Row><Row key="dGVzdHJvdzI="><Cell column="YTox" timestamp="1389900823877"><br>
-dGVzdHZhbHVlLWEy</Cell><Cell column="Yjox"<br>
-timestamp="1389900818233">dGVzdHZhbHVlLWIy</Cell></Row></CellSet><br>
-</tt>
-<p>
-</li>
-<li>
-<pre>
-<b>Scanning with start and end time</b>
-
-curl -H "Content-Type: text/xml" https://localhost:8080/ExampleScanner/*?starttime=1389900769772&endtime=1389900800000
-</pre>
-<p>
-<tt>
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?><CellSet><<br>
-Row key="dGVzdHJvdzE="><Cell column="YTox" timestamp="1389900769772">dGVzdHZhbHVlLWEx<<br>
-/Cell><Cell column="Yjox"timestamp="1389900780536">dGVzdHZhbHVlLWIx</Cell><<br>
-/Row></CellSet><br>
-</tt>
-<p>
-</li>
-<li>
-<pre>
-<b>Scanning with row prefix</b>
-
-curl -H "Content-Type: text/xml" https://localhost:8080/ExampleScanner/test*
-</pre>
-<p>
-<tt>
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?><CellSet><<br>
-Row key="dGVzdHJvdzE="><Cell column="YTox" timestamp="1389900769772">dGVzdHZhbHVlLWEx<<br>
-/Cell><Cell column="Yjox"timestamp="1389900780536">dGVzdHZhbHVlLWIx</Cell><<br>
-/Row><Row key="dGVzdHJvdzI="><Cell column="YTox" timestamp="1389900823877"><br>
-dGVzdHZhbHVlLWEy</Cell><Cell column="Yjox" timestamp="1389900818233"><br>
-dGVzdHZhbHVlLWIy</Cell></Row><Row key="dGVzdHJvdzM="><<br>
-Cell column="YTox" timestamp="1389900847336">dGVzdHZhbHVlLWEz</Cell><<br>
-Cell column="Yjox"timestamp="1389900856845">dGVzdHZhbHVlLWIz</Cell><<br>
-/Row></CellSet><br>
-</tt>
-<p>
-</li>
-</ul>
-</blockquote>
-</p>
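-
-<p>
-The one-shot scans above can be issued from application code with any plain
-HTTP client; no HBase client library is required. The following is a minimal
-Java sketch using java.net.HttpURLConnection from the standard library. The
-host, port, table name (ExampleScanner) and limit parameter simply mirror the
-curl examples above; adjust them for your deployment.
-<p>
-<pre>
-import java.io.BufferedReader;
-import java.io.InputStreamReader;
-import java.net.HttpURLConnection;
-import java.net.URL;
-
-public class ScannerGetExample {
-  public static void main(String[] args) throws Exception {
-    // One-shot scan of the first two rows of ExampleScanner, returned as XML.
-    URL url = new URL("http://localhost:8080/ExampleScanner/*?limit=2");
-    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-    // As in the curl examples, the Accept header selects the representation.
-    conn.setRequestProperty("Accept", "text/xml");
-    if (conn.getResponseCode() == 200) {
-      BufferedReader in = new BufferedReader(
-          new InputStreamReader(conn.getInputStream(), "UTF-8"));
-      String line;
-      while ((line = in.readLine()) != null) {
-        System.out.println(line);
-      }
-      in.close();
-    }
-    conn.disconnect();
-  }
-}
-</pre>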
-
-<p>
-<a name="xmlschema">
-<h2>XML Schema</h2>
-</a>
-<p>
-<pre>
-<schema targetNamespace="StargateSchema" elementFormDefault="qualified"
-xmlns="http://www.w3.org/2001/XMLSchema" xmlns:tns="StargateSchema">
-
- <element name="CellSet" type="tns:CellSet"></element>
-
- <complexType name="CellSet">
- <sequence>
- <element name="row" type="tns:Row" maxOccurs="unbounded" minOccurs="1"></element>
- </sequence>
- </complexType>
-
- <complexType name="Row">
- <sequence>
- <element name="key" type="base64Binary"></element>
- <element name="cell" type="tns:Cell" maxOccurs="unbounded" minOccurs="1"></element>
- </sequence>
- </complexType>
-
- <complexType name="Cell">
- <sequence>
- <element name="value" maxOccurs="1" minOccurs="1"><simpleType><restriction base="base64Binary"></restriction></simpleType></element>
- </sequence>
- <attribute name="column" type="base64Binary" />
- <attribute name="timestamp" type="int" />
- </complexType>
-
- <element name="Version" type="tns:Version"></element>
-
- <complexType name="Version">
- <attribute name="Stargate" type="string"></attribute>
- <attribute name="JVM" type="string"></attribute>
- <attribute name="OS" type="string"></attribute>
- <attribute name="Server" type="string"></attribute>
- <attribute name="Jersey" type="string"></attribute>
- </complexType>
-
- <element name="TableList" type="tns:TableList"></element>
-
- <complexType name="TableList">
- <sequence>
- <element name="table" type="tns:Table" maxOccurs="unbounded" minOccurs="1"></element>
- </sequence>
- </complexType>
-
- <complexType name="Table">
- <sequence>
- <element name="name" type="string"></element>
- </sequence>
- </complexType>
-
- <element name="TableInfo" type="tns:TableInfo"></element>
-
- <complexType name="TableInfo">
- <sequence>
- <element name="region" type="tns:TableRegion" maxOccurs="unbounded" minOccurs="1"></element>
- </sequence>
- <attribute name="name" type="string"></attribute>
- </complexType>
-
- <complexType name="TableRegion">
- <attribute name="name" type="string"></attribute>
- <attribute name="id" type="int"></attribute>
- <attribute name="startKey" type="base64Binary"></attribute>
- <attribute name="endKey" type="base64Binary"></attribute>
- <attribute name="location" type="string"></attribute>
- </complexType>
-
- <element name="TableSchema" type="tns:TableSchema"></element>
-
- <complexType name="TableSchema">
- <sequence>
- <element name="column" type="tns:ColumnSchema" maxOccurs="unbounded" minOccurs="1"></element>
- </sequence>
- <attribute name="name" type="string"></attribute>
- <anyAttribute></anyAttribute>
- </complexType>
-
- <complexType name="ColumnSchema">
- <attribute name="name" type="string"></attribute>
- <anyAttribute></anyAttribute>
- </complexType>
-
- <element name="Scanner" type="tns:Scanner"></element>
-
- <complexType name="Scanner">
- <attribute name="startRow" type="base64Binary"></attribute>
- <attribute name="endRow" type="base64Binary"></attribute>
- <attribute name="columns" type="base64Binary"></attribute>
- <attribute name="batch" type="int"></attribute>
- <attribute name="startTime" type="int"></attribute>
- <attribute name="endTime" type="int"></attribute>
- </complexType>
-
- <element name="StorageClusterVersion"
- type="tns:StorageClusterVersion">
- </element>
-
- <complexType name="StorageClusterVersion">
- <attribute name="version" type="string"></attribute>
- </complexType>
-
- <element name="StorageClusterStatus"
- type="tns:StorageClusterStatus">
- </element>
-
- <complexType name="StorageClusterStatus">
- <sequence>
- <element name="liveNode" type="tns:Node"
- maxOccurs="unbounded" minOccurs="0">
- </element>
- <element name="deadNode" type="string" maxOccurs="unbounded"
- minOccurs="0">
- </element>
- </sequence>
- <attribute name="regions" type="int"></attribute>
- <attribute name="requests" type="int"></attribute>
- <attribute name="averageLoad" type="float"></attribute>
- </complexType>
-
- <complexType name="Node">
- <sequence>
- <element name="region" type="tns:Region" maxOccurs="unbounded" minOccurs="0"></element>
- </sequence>
- <attribute name="name" type="string"></attribute>
- <attribute name="startCode" type="int"></attribute>
- <attribute name="requests" type="int"></attribute>
- <attribute name="heapSizeMB" type="int"></attribute>
- <attribute name="maxHeapSizeMB" type="int"></attribute>
- </complexType>
-
- <complexType name="Region">
- <attribute name="name" type="base64Binary"></attribute>
- <attribute name="stores" type="int"></attribute>
- <attribute name="storefiles" type="int"></attribute>
- <attribute name="storefileSizeMB" type="int"></attribute>
- <attribute name="memstoreSizeMB" type="int"></attribute>
- <attribute name="storefileIndexSizeMB" type="int"></attribute>
- </complexType>
-</schema>
-</pre>
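-
-<p>
-The schema declares row keys, column names and cell values as base64Binary,
-which is why the scanner responses above carry encoded values such as
-"dGVzdHJvdzE=" ("testrow1"). The following minimal sketch decodes a CellSet
-document using only standard JDK APIs; the inlined document is a single row
-taken from the examples above.
-<p>
-<pre>
-import java.io.ByteArrayInputStream;
-
-import javax.xml.bind.DatatypeConverter;
-import javax.xml.parsers.DocumentBuilderFactory;
-
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-import org.w3c.dom.NodeList;
-
-public class CellSetReader {
-  public static void main(String[] args) throws Exception {
-    String xml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?><CellSet>"
-        + "<Row key=\"dGVzdHJvdzE=\">"
-        + "<Cell column=\"YTox\" timestamp=\"1389900769772\">dGVzdHZhbHVlLWEx</Cell>"
-        + "</Row></CellSet>";
-    Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder()
-        .parse(new ByteArrayInputStream(xml.getBytes("UTF-8")));
-    NodeList rows = doc.getElementsByTagName("Row");
-    for (int i = 0; i < rows.getLength(); i++) {
-      Element row = (Element) rows.item(i);
-      // Keys, columns and values are all base64Binary per the schema.
-      String key = new String(
-          DatatypeConverter.parseBase64Binary(row.getAttribute("key")), "UTF-8");
-      NodeList cells = row.getElementsByTagName("Cell");
-      for (int j = 0; j < cells.getLength(); j++) {
-        Element cell = (Element) cells.item(j);
-        String column = new String(
-            DatatypeConverter.parseBase64Binary(cell.getAttribute("column")), "UTF-8");
-        String value = new String(
-            DatatypeConverter.parseBase64Binary(cell.getTextContent()), "UTF-8");
-        // Prints testrow1/a:1=testvalue-a1 for the inlined document.
-        System.out.println(key + "/" + column + "=" + value);
-      }
-    }
-  }
-}
-</pre>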
-
-<p>
-<a name="pbufschema">
-<h2>Protobufs Schema</h2>
-</a>
-<p>
-<pre>
-message Version {
- optional string stargateVersion = 1;
- optional string jvmVersion = 2;
- optional string osVersion = 3;
- optional string serverVersion = 4;
- optional string jerseyVersion = 5;
-}
-
-message StorageClusterStatus {
- message Region {
- required bytes name = 1;
- optional int32 stores = 2;
- optional int32 storefiles = 3;
- optional int32 storefileSizeMB = 4;
- optional int32 memstoreSizeMB = 5;
- optional int32 storefileIndexSizeMB = 6;
- }
- message Node {
- required string name = 1; // name:port
- optional int64 startCode = 2;
- optional int32 requests = 3;
- optional int32 heapSizeMB = 4;
- optional int32 maxHeapSizeMB = 5;
- repeated Region regions = 6;
- }
- // node status
- repeated Node liveNodes = 1;
- repeated string deadNodes = 2;
- // summary statistics
- optional int32 regions = 3;
- optional int32 requests = 4;
- optional double averageLoad = 5;
-}
-
-message TableList {
- repeated string name = 1;
-}
-
-message TableInfo {
- required string name = 1;
- message Region {
- required string name = 1;
- optional bytes startKey = 2;
- optional bytes endKey = 3;
- optional int64 id = 4;
- optional string location = 5;
- }
- repeated Region regions = 2;
-}
-
-message TableSchema {
- optional string name = 1;
- message Attribute {
- required string name = 1;
- required string value = 2;
- }
- repeated Attribute attrs = 2;
- repeated ColumnSchema columns = 3;
- // optional helpful encodings of commonly used attributes
- optional bool inMemory = 4;
- optional bool readOnly = 5;
-}
-
-message ColumnSchema {
- optional string name = 1;
- message Attribute {
- required string name = 1;
- required string value = 2;
- }
- repeated Attribute attrs = 2;
- // optional helpful encodings of commonly used attributes
- optional int32 ttl = 3;
- optional int32 maxVersions = 4;
- optional string compression = 5;
-}
-
-message Cell {
- optional bytes row = 1; // unused if Cell is in a CellSet
- optional bytes column = 2;
- optional int64 timestamp = 3;
- optional bytes data = 4;
-}
-
-message CellSet {
- message Row {
- required bytes key = 1;
- repeated Cell values = 2;
- }
- repeated Row rows = 1;
-}
-
-message Scanner {
- optional bytes startRow = 1;
- optional bytes endRow = 2;
- repeated bytes columns = 3;
- optional int32 batch = 4;
- optional int64 startTime = 5;
- optional int64 endTime = 6;
-}
-</pre>
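-
-<p>
-Given Java classes generated from the schema above with protoc (the package
-name example.generated below is illustrative only; substitute whatever your
-build produces), a binary scanner response such as the one shown in the
-protobuf curl example can be decoded through the standard generated API:
-<p>
-<pre>
-import java.io.InputStream;
-import java.net.HttpURLConnection;
-import java.net.URL;
-
-// Hypothetical protoc output for the CellSet message above.
-import example.generated.CellSetMessage.CellSet;
-
-public class ProtobufScanExample {
-  public static void main(String[] args) throws Exception {
-    URL url = new URL("http://localhost:8080/ExampleScanner/*");
-    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-    conn.setRequestProperty("Accept", "application/protobuf");
-    InputStream in = conn.getInputStream();
-    try {
-      // parseFrom() is the entry point protoc generates for every message.
-      CellSet cellSet = CellSet.parseFrom(in);
-      for (CellSet.Row row : cellSet.getRowsList()) {
-        System.out.println(row.getKey().toStringUtf8()
-            + ": " + row.getValuesCount() + " cell(s)");
-      }
-    } finally {
-      in.close();
-      conn.disconnect();
-    }
-  }
-}
-</pre>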
-
-</body>
-</html>