Posted to commits@kudu.apache.org by al...@apache.org on 2022/06/16 15:22:19 UTC

[kudu] branch master updated (9091f31cf -> 6909ee4f8)

This is an automated email from the ASF dual-hosted git repository.

alexey pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git


    from 9091f31cf KUDU-2671 refactor PartitionSchema::CreatePartitions()
     new 295b4903b KUDU-2671 more robust convention on specifying range bounds
     new 6909ee4f8 KUDU-2671 update partition schema in catalog when adding range

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 src/kudu/client/client.cc                          |  14 +-
 .../integration-tests/table_locations-itest.cc     |  72 +++----
 src/kudu/master/catalog_manager.cc                 |  69 ++++---
 src/kudu/master/catalog_manager.h                  |  24 ++-
 src/kudu/master/master-test.cc                     | 221 ++++++++++++++++++---
 5 files changed, 282 insertions(+), 118 deletions(-)


[kudu] 02/02: KUDU-2671 update partition schema in catalog when adding range

Posted by al...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

alexey pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git

commit 6909ee4f800da192b72e59680916e5004527b6db
Author: Alexey Serbin <al...@apache.org>
AuthorDate: Mon Jun 13 15:10:13 2022 -0700

    KUDU-2671 update partition schema in catalog when adding range
    
    When adding a range with a custom hash schema to a table, the
    partition schema information stored in the system catalog must be
    updated accordingly.  That step was missing in one of the previous
    patches, and this patch addresses the issue.
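    
    In essence, when applying an ADD_RANGE_PARTITION step that carries a
    custom hash schema, the catalog manager now appends the new range and
    its hash schema to the table's PartitionSchemaPB before committing the
    updated metadata.  A minimal sketch of that update (variable names
    such as 'lock' and the decoded bound rows are illustrative; the exact
    code is in the catalog_manager.cc diff below):
    
        auto* p = lock->mutable_data()->pb.mutable_partition_schema();
        auto* range = p->add_custom_hash_schema_ranges();
        // Record the bounds of the new range...
        RowOperationsPBEncoder encoder(range->mutable_range_bounds());
        encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, lower_bound_row);
        encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, upper_bound_row);
        // ...and its hash schema, one hash dimension at a time.
        for (const auto& hash_dimension : hash_schema) {
          auto* dim = range->add_hash_schema();
          dim->set_num_buckets(hash_dimension.num_buckets);
          dim->set_seed(hash_dimension.seed);
          for (const auto& column_id : hash_dimension.column_ids) {
            dim->add_columns()->set_id(column_id);
          }
        }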
    
    This patch also adds a test scenario to spot regressions, if any.  The
    scenario was failing before the CatalogManager update introduced in
    this patch.  I also addressed nits pointed out by the TidyBot.
    
    This is a follow-up to 250eb90bc0e1f4f472f44de8a23ce213595d5ee7.
    
    Change-Id: I869458fb8bcb06801b54f2b4869e7826322563e0
    Reviewed-on: http://gerrit.cloudera.org:8080/18615
    Tested-by: Kudu Jenkins
    Reviewed-by: Mahesh Reddy <mr...@cloudera.com>
    Reviewed-by: Attila Bukor <ab...@apache.org>
---
 src/kudu/master/catalog_manager.cc |  48 +++++++++----
 src/kudu/master/catalog_manager.h  |  24 ++++---
 src/kudu/master/master-test.cc     | 137 ++++++++++++++++++++++++++++++++++++-
 3 files changed, 181 insertions(+), 28 deletions(-)

diff --git a/src/kudu/master/catalog_manager.cc b/src/kudu/master/catalog_manager.cc
index 726499d56..d23d01e64 100644
--- a/src/kudu/master/catalog_manager.cc
+++ b/src/kudu/master/catalog_manager.cc
@@ -2518,10 +2518,11 @@ Status CatalogManager::DeleteTable(const DeleteTableRequestPB& req,
   return Status::OK();
 }
 
-Status CatalogManager::ApplyAlterSchemaSteps(const SysTablesEntryPB& current_pb,
-                                             vector<AlterTableRequestPB::Step> steps,
-                                             Schema* new_schema,
-                                             ColumnId* next_col_id) {
+Status CatalogManager::ApplyAlterSchemaSteps(
+    const SysTablesEntryPB& current_pb,
+    const vector<AlterTableRequestPB::Step>& steps,
+    Schema* new_schema,
+    ColumnId* next_col_id) {
   const SchemaPB& current_schema_pb = current_pb.schema();
   Schema cur_schema;
   RETURN_NOT_OK(SchemaFromPB(current_schema_pb, &cur_schema));
@@ -2601,20 +2602,20 @@ Status CatalogManager::ApplyAlterSchemaSteps(const SysTablesEntryPB& current_pb,
 }
 
 Status CatalogManager::ApplyAlterPartitioningSteps(
-    const TableMetadataLock& l,
     const scoped_refptr<TableInfo>& table,
     const Schema& client_schema,
-    vector<AlterTableRequestPB::Step> steps,
+    const vector<AlterTableRequestPB::Step>& steps,
+    TableMetadataLock* l,
     vector<scoped_refptr<TabletInfo>>* tablets_to_add,
     vector<scoped_refptr<TabletInfo>>* tablets_to_drop) {
 
   // Get the table's schema as it's known to the catalog manager.
   Schema schema;
-  RETURN_NOT_OK(SchemaFromPB(l.data().pb.schema(), &schema));
+  RETURN_NOT_OK(SchemaFromPB(l->data().pb.schema(), &schema));
   // Build current PartitionSchema for the table.
   PartitionSchema partition_schema;
   RETURN_NOT_OK(PartitionSchema::FromPB(
-      l.data().pb.partition_schema(), schema, &partition_schema));
+      l->data().pb.partition_schema(), schema, &partition_schema));
   TableInfo::TabletInfoMap existing_tablets = table->tablet_map();
   TableInfo::TabletInfoMap new_tablets;
   auto abort_mutations = MakeScopedCleanup([&new_tablets]() {
@@ -2627,11 +2628,11 @@ Status CatalogManager::ApplyAlterPartitioningSteps(
   for (const auto& step : steps) {
     CHECK(step.type() == AlterTableRequestPB::ADD_RANGE_PARTITION ||
           step.type() == AlterTableRequestPB::DROP_RANGE_PARTITION);
-    const auto& range_bouds =
+    const auto& range_bounds =
         step.type() == AlterTableRequestPB::ADD_RANGE_PARTITION
         ? step.add_range_partition().range_bounds()
         : step.drop_range_partition().range_bounds();
-    RowOperationsPBDecoder decoder(&range_bouds, &client_schema, &schema, nullptr);
+    RowOperationsPBDecoder decoder(&range_bounds, &client_schema, &schema, nullptr);
     vector<DecodedRowOperation> ops;
     RETURN_NOT_OK(decoder.DecodeOperations<DecoderMode::SPLIT_ROWS>(&ops));
 
@@ -2675,6 +2676,23 @@ Status CatalogManager::ApplyAlterPartitioningSteps(
       }
       RETURN_NOT_OK(partition_schema.CreatePartitionsForRange(
           range_bound, hash_schema, schema, &partitions));
+
+      // Add information on the new range with custom hash schema into the
+      // PartitionSchema for the table stored in the system catalog.
+      auto* p = l->mutable_data()->pb.mutable_partition_schema();
+      auto* range = p->add_custom_hash_schema_ranges();
+      RowOperationsPBEncoder encoder(range->mutable_range_bounds());
+      encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, *ops[0].split_row);
+      encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, *ops[1].split_row);
+      for (const auto& hash_dimension : hash_schema) {
+        auto* hash_dimension_pb = range->add_hash_schema();
+        hash_dimension_pb->set_num_buckets(hash_dimension.num_buckets);
+        hash_dimension_pb->set_seed(hash_dimension.seed);
+        for (const auto& column_id : hash_dimension.column_ids) {
+          auto* column = hash_dimension_pb->add_columns();
+          column->set_id(column_id);
+        }
+      }
     } else {
       RETURN_NOT_OK(partition_schema.CreatePartitions(
           {}, { range_bound }, schema, &partitions));
@@ -3202,11 +3220,11 @@ Status CatalogManager::AlterTable(const AlterTableRequestPB& req,
     TRACE("Apply alter partitioning");
     Schema client_schema;
     RETURN_NOT_OK(SetupError(SchemaFromPB(req.schema(), &client_schema),
-          resp, MasterErrorPB::UNKNOWN_ERROR));
-    RETURN_NOT_OK(SetupError(
-          ApplyAlterPartitioningSteps(l, table, client_schema, alter_partitioning_steps,
-            &tablets_to_add, &tablets_to_drop),
-          resp, MasterErrorPB::UNKNOWN_ERROR));
+        resp, MasterErrorPB::UNKNOWN_ERROR));
+    RETURN_NOT_OK(SetupError(ApplyAlterPartitioningSteps(
+        table, client_schema, alter_partitioning_steps, &l,
+        &tablets_to_add, &tablets_to_drop),
+                             resp, MasterErrorPB::UNKNOWN_ERROR));
   }
 
   // 8. Alter table's replication factor.
diff --git a/src/kudu/master/catalog_manager.h b/src/kudu/master/catalog_manager.h
index b23b5b727..f073f3347 100644
--- a/src/kudu/master/catalog_manager.h
+++ b/src/kudu/master/catalog_manager.h
@@ -1070,17 +1070,19 @@ class CatalogManager : public tserver::TabletReplicaLookupIf {
   // container must not be empty.
   Status DeleteTskEntries(const std::set<std::string>& entry_ids);
 
-  Status ApplyAlterSchemaSteps(const SysTablesEntryPB& current_pb,
-                               std::vector<AlterTableRequestPB::Step> steps,
-                               Schema* new_schema,
-                               ColumnId* next_col_id);
-
-  Status ApplyAlterPartitioningSteps(const TableMetadataLock& l,
-                                     const scoped_refptr<TableInfo>& table,
-                                     const Schema& client_schema,
-                                     std::vector<AlterTableRequestPB::Step> steps,
-                                     std::vector<scoped_refptr<TabletInfo>>* tablets_to_add,
-                                     std::vector<scoped_refptr<TabletInfo>>* tablets_to_drop);
+  Status ApplyAlterSchemaSteps(
+      const SysTablesEntryPB& current_pb,
+      const std::vector<AlterTableRequestPB::Step>& steps,
+      Schema* new_schema,
+      ColumnId* next_col_id);
+
+  Status ApplyAlterPartitioningSteps(
+      const scoped_refptr<TableInfo>& table,
+      const Schema& client_schema,
+      const std::vector<AlterTableRequestPB::Step>& steps,
+      TableMetadataLock* l,
+      std::vector<scoped_refptr<TabletInfo>>* tablets_to_add,
+      std::vector<scoped_refptr<TabletInfo>>* tablets_to_drop);
 
   // Task that takes care of the tablet assignments/creations.
   // Loops through the "not created" tablets and sends a CreateTablet() request.
diff --git a/src/kudu/master/master-test.cc b/src/kudu/master/master-test.cc
index 98ab68973..7196f96bc 100644
--- a/src/kudu/master/master-test.cc
+++ b/src/kudu/master/master-test.cc
@@ -43,6 +43,7 @@
 
 #include "kudu/common/common.pb.h"
 #include "kudu/common/partial_row.h"
+#include "kudu/common/partition.h"
 #include "kudu/common/row_operations.h"
 #include "kudu/common/row_operations.pb.h"
 #include "kudu/common/schema.h"
@@ -982,7 +983,7 @@ TEST_P(AlterTableWithRangeSpecificHashSchema, TestAlterTableWithDifferentHashDim
   ASSERT_EQ(2, tables.front()->num_tablets());
 
   // Submit the alter table request
-  proxy_->AlterTable(req, &resp, &controller);
+  ASSERT_OK(proxy_->AlterTable(req, &resp, &controller));
   if (has_different_dimensions_count) {
     ASSERT_TRUE(resp.has_error());
     ASSERT_STR_CONTAINS(resp.error().status().DebugString(),
@@ -997,10 +998,142 @@ TEST_P(AlterTableWithRangeSpecificHashSchema, TestAlterTableWithDifferentHashDim
     ASSERT_EQ(5, tables.front()->num_tablets());
   }
 }
-
 INSTANTIATE_TEST_SUITE_P(AlterTableWithCustomHashSchema,
                          AlterTableWithRangeSpecificHashSchema, ::testing::Bool());
 
+TEST_F(MasterTest, AlterTableAddRangeWithSpecificHashSchema) {
+  constexpr const char* const kTableName = "alter_table_custom_hash_schema";
+  constexpr const char* const kCol0 = "c_int32";
+  constexpr const char* const kCol1 = "c_int64";
+  const Schema kTableSchema({ColumnSchema(kCol0, INT32),
+                             ColumnSchema(kCol1, INT64)}, 1);
+  FLAGS_enable_per_range_hash_schemas = true;
+  FLAGS_default_num_replicas = 1;
+
+  // Create a table with one range partition based on the table-wide hash schema.
+  CreateTableResponsePB create_table_resp;
+  {
+    KuduPartialRow lower(&kTableSchema);
+    ASSERT_OK(lower.SetInt32(kCol0, 0));
+    KuduPartialRow upper(&kTableSchema);
+    ASSERT_OK(upper.SetInt32(kCol0, 100));
+    ASSERT_OK(CreateTable(
+        kTableName, kTableSchema, none, none, none, {}, {{lower, upper}},
+        {}, {{{kCol0}, 2, 0}}, &create_table_resp));
+  }
+
+  const auto& table_id = create_table_resp.table_id();
+  const HashSchema custom_hash_schema{{{kCol0}, 5, 1}};
+
+  // Alter the table, adding a new range with custom hash schema.
+  {
+    AlterTableRequestPB req;
+    AlterTableResponsePB resp;
+    req.mutable_table()->set_table_name(kTableName);
+    req.mutable_table()->set_table_id(table_id);
+
+    // Add the required information on the table's schema:
+    // key and non-null columns must be present in the request.
+    {
+      ColumnSchemaPB* col0 = req.mutable_schema()->add_columns();
+      col0->set_name(kCol0);
+      col0->set_type(INT32);
+      col0->set_is_key(true);
+
+      ColumnSchemaPB* col1 = req.mutable_schema()->add_columns();
+      col1->set_name(kCol1);
+      col1->set_type(INT64);
+    }
+
+    AlterTableRequestPB::Step* step = req.add_alter_schema_steps();
+    step->set_type(AlterTableRequestPB::ADD_RANGE_PARTITION);
+    KuduPartialRow lower(&kTableSchema);
+    ASSERT_OK(lower.SetInt32(kCol0, 100));
+    KuduPartialRow upper(&kTableSchema);
+    ASSERT_OK(upper.SetInt32(kCol0, 200));
+    RowOperationsPBEncoder enc(
+        step->mutable_add_range_partition()->mutable_range_bounds());
+    enc.Add(RowOperationsPB::RANGE_LOWER_BOUND, lower);
+    enc.Add(RowOperationsPB::RANGE_UPPER_BOUND, upper);
+    for (const auto& hash_dimension : custom_hash_schema) {
+      auto* hash_dimension_pb =
+          step->mutable_add_range_partition()->add_custom_hash_schema();
+      for (const auto& col_name : hash_dimension.columns) {
+        hash_dimension_pb->add_columns()->set_name(col_name);
+      }
+      hash_dimension_pb->set_num_buckets(hash_dimension.num_buckets);
+      hash_dimension_pb->set_seed(hash_dimension.seed);
+    }
+
+    // Check the number of tablets in the table before ALTER TABLE.
+    {
+      CatalogManager::ScopedLeaderSharedLock l(master_->catalog_manager());
+      std::vector<scoped_refptr<TableInfo>> tables;
+      master_->catalog_manager()->GetAllTables(&tables);
+      ASSERT_EQ(1, tables.size());
+      // 2 tablets (because of 2 hash buckets) for already existing range.
+      ASSERT_EQ(2, tables.front()->num_tablets());
+    }
+
+    RpcController ctl;
+    ASSERT_OK(proxy_->AlterTable(req, &resp, &ctl));
+    ASSERT_FALSE(resp.has_error())
+        << StatusFromPB(resp.error().status()).ToString();
+
+    // Check the number of tablets in the table after ALTER TABLE.
+    {
+      CatalogManager::ScopedLeaderSharedLock l(master_->catalog_manager());
+      std::vector<scoped_refptr<TableInfo>> tables;
+      master_->catalog_manager()->GetAllTables(&tables);
+      ASSERT_EQ(1, tables.size());
+      // Extra 5 tablets (because of 5 hash buckets) for newly added range.
+      ASSERT_EQ(7, tables.front()->num_tablets());
+    }
+  }
+
+  // Now verify the table's schema: fetch the information on the altered
+  // table and make sure the schema contains information on the newly added
+  // range partition with the custom hash schema.
+  {
+    GetTableSchemaRequestPB req;
+    req.mutable_table()->set_table_name(kTableName);
+
+    RpcController ctl;
+    GetTableSchemaResponsePB resp;
+    ASSERT_OK(proxy_->GetTableSchema(req, &resp, &ctl));
+    ASSERT_FALSE(resp.has_error())
+        << StatusFromPB(resp.error().status()).ToString();
+
+    Schema received_schema;
+    ASSERT_TRUE(resp.has_schema());
+    ASSERT_OK(SchemaFromPB(resp.schema(), &received_schema));
+    ASSERT_TRUE(kTableSchema == received_schema) << Substitute(
+        "$0 not equal to $1", kTableSchema.ToString(), received_schema.ToString());
+
+    ASSERT_TRUE(resp.has_table_id());
+    ASSERT_EQ(table_id, resp.table_id());
+    ASSERT_TRUE(resp.has_table_name());
+    ASSERT_EQ(kTableName, resp.table_name());
+
+    ASSERT_TRUE(resp.has_partition_schema());
+    PartitionSchema ps;
+    ASSERT_OK(PartitionSchema::FromPB(
+        resp.partition_schema(), received_schema, &ps));
+    ASSERT_TRUE(ps.HasCustomHashSchemas());
+
+    const auto& table_wide_hash_schema = ps.hash_schema();
+    ASSERT_EQ(1, table_wide_hash_schema.size());
+    ASSERT_EQ(2, table_wide_hash_schema.front().num_buckets);
+
+    const auto& ranges_with_hash_schemas = ps.ranges_with_hash_schemas();
+    ASSERT_EQ(1, ranges_with_hash_schemas.size());
+    const auto& custom_hash_schema = ranges_with_hash_schemas.front().hash_schema;
+    ASSERT_EQ(1, custom_hash_schema.size());
+    ASSERT_EQ(5, custom_hash_schema.front().num_buckets);
+    ASSERT_EQ(1, custom_hash_schema.front().seed);
+  }
+}
+
 TEST_F(MasterTest, TestCreateTableCheckRangeInvariants) {
   constexpr const char* const kTableName = "testtb";
   const Schema kTableSchema({ ColumnSchema("key", INT32), ColumnSchema("val", INT32) }, 1);


[kudu] 01/02: KUDU-2671 more robust convention on specifying range bounds

Posted by al...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

alexey pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git

commit 295b4903bc69fabb3cb36f618022d465c91954c7
Author: Alexey Serbin <al...@apache.org>
AuthorDate: Fri Jun 3 14:29:58 2022 -0700

    KUDU-2671 more robust convention on specifying range bounds
    
    This patch updates the code of the catalog manager to adhere to a more
    robust convention on specifying the information for the range partition
    boundaries when creating a table with custom hash schemas per range.
    
    Prior to this patch, the catalog manager required both the
    CreateTableRequestPB::split_rows_range_bounds and the
    CreateTableRequestPB::partition_schema::custom_hash_schema_ranges fields
    to have the same number of elements, assuming the ranges in the former
    corresponded exactly to the ranges in the latter, with the latter also
    carrying the hash schema for each range.  In addition to duplicating
    the information unnecessarily, that approach was also a bit brittle
    from the standpoint of keeping good API practices.
    
    This patch updates the code to use a new convention: if there is at
    least one range partition with a custom hash schema in a CreateTable
    RPC, all the information on range boundaries and hash schemas must be
    supplied via a single field,
    CreateTableRequestPB::partition_schema::custom_hash_schema_ranges
    (see the sketch after the list below).  That's better than the
    previous convention because:
      * it's more robust, as explained above
      * it naturally follows the restriction that split rows are not
        allowed together with range partitions having custom hash schemas
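    
    A minimal sketch of a request under the new convention, modeled on
    the updated CreateTable() test helper in master-test.cc below (the
    column name, bound rows, and bucket/seed values are illustrative):
    
        CreateTableRequestPB req;
        req.set_name("t");
        RETURN_NOT_OK(SchemaToPB(schema, req.mutable_schema()));
        // All ranges go through custom_hash_schema_ranges; the
        // split_rows_range_bounds field stays empty.
        auto* ps_pb = req.mutable_partition_schema();
        auto* range = ps_pb->add_custom_hash_schema_ranges();
        RowOperationsPBEncoder encoder(range->mutable_range_bounds());
        encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, lower);  // KuduPartialRow
        encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, upper);  // KuduPartialRow
        auto* hash_dimension_pb = range->add_hash_schema();
        hash_dimension_pb->add_columns()->set_name("key");
        hash_dimension_pb->set_num_buckets(5);
        hash_dimension_pb->set_seed(1);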
    
    Also, I updated already existing tests and added extra test scenarios
    to cover the updated functionality.
    
    Change-Id: I14073e72178e6bb85bae719ad377c5bb05f8dd55
    Reviewed-on: http://gerrit.cloudera.org:8080/18590
    Tested-by: Alexey Serbin <al...@apache.org>
    Reviewed-by: Mahesh Reddy <mr...@cloudera.com>
    Reviewed-by: Attila Bukor <ab...@apache.org>
---
 src/kudu/client/client.cc                          | 14 ++--
 .../integration-tests/table_locations-itest.cc     | 72 +++++++++----------
 src/kudu/master/catalog_manager.cc                 | 21 +++---
 src/kudu/master/master-test.cc                     | 84 ++++++++++++++--------
 4 files changed, 101 insertions(+), 90 deletions(-)

diff --git a/src/kudu/client/client.cc b/src/kudu/client/client.cc
index 61349527a..7d11315f4 100644
--- a/src/kudu/client/client.cc
+++ b/src/kudu/client/client.cc
@@ -971,24 +971,26 @@ Status KuduTableCreator::Create() {
       return Status::InvalidArgument("range bounds must not be null");
     }
 
-    RowOperationsPB_Type lower_bound_type =
+    const RowOperationsPB_Type lower_bound_type =
         range->lower_bound_type_ == KuduTableCreator::INCLUSIVE_BOUND
         ? RowOperationsPB::RANGE_LOWER_BOUND
         : RowOperationsPB::EXCLUSIVE_RANGE_LOWER_BOUND;
 
-    RowOperationsPB_Type upper_bound_type =
+    const RowOperationsPB_Type upper_bound_type =
         range->upper_bound_type_ == KuduTableCreator::EXCLUSIVE_BOUND
         ? RowOperationsPB::RANGE_UPPER_BOUND
         : RowOperationsPB::INCLUSIVE_RANGE_UPPER_BOUND;
 
-    splits_encoder.Add(lower_bound_type, *range->lower_bound_);
-    splits_encoder.Add(upper_bound_type, *range->upper_bound_);
-
-    if (has_range_with_custom_hash_schema) {
+    if (!has_range_with_custom_hash_schema) {
+      splits_encoder.Add(lower_bound_type, *range->lower_bound_);
+      splits_encoder.Add(upper_bound_type, *range->upper_bound_);
+    } else {
       auto* range_pb = partition_schema->add_custom_hash_schema_ranges();
       RowOperationsPBEncoder encoder(range_pb->mutable_range_bounds());
       encoder.Add(lower_bound_type, *range->lower_bound_);
       encoder.Add(upper_bound_type, *range->upper_bound_);
+      // Now, after adding the information on the range bounds, add the
+      // information on the hash schema for the range.
       if (range->is_table_wide_hash_schema_) {
         // With the presence of a range with custom hash schema when the
         // table-wide hash schema is used for this particular range, also add an
diff --git a/src/kudu/integration-tests/table_locations-itest.cc b/src/kudu/integration-tests/table_locations-itest.cc
index a274cdeb7..babf660cf 100644
--- a/src/kudu/integration-tests/table_locations-itest.cc
+++ b/src/kudu/integration-tests/table_locations-itest.cc
@@ -162,12 +162,18 @@ class TableLocationsTest : public KuduTest {
   };
   typedef vector<HashDimension> HashSchema;
 
+  struct RangeWithHashSchema {
+    KuduPartialRow lower;
+    KuduPartialRow upper;
+    HashSchema hash_schema;
+  };
+
   Status CreateTable(
       const string& table_name,
       const Schema& schema,
       const vector<KuduPartialRow>& split_rows = {},
       const vector<pair<KuduPartialRow, KuduPartialRow>>& bounds = {},
-      const vector<HashSchema>& range_hash_schemas = {},
+      const vector<RangeWithHashSchema>& ranges_with_hash_schemas = {},
       const HashSchema& table_hash_schema = {});
 
   void CreateTable(const string& table_name, int num_splits);
@@ -185,14 +191,8 @@ Status TableLocationsTest::CreateTable(
     const Schema& schema,
     const vector<KuduPartialRow>& split_rows,
     const vector<pair<KuduPartialRow, KuduPartialRow>>& bounds,
-    const vector<HashSchema>& range_hash_schemas,
+    const vector<RangeWithHashSchema>& ranges_with_hash_schemas,
     const HashSchema& table_hash_schema) {
-
-  if (!range_hash_schemas.empty() && range_hash_schemas.size() != bounds.size()) {
-    return Status::InvalidArgument(
-        "'bounds' and 'range_hash_schemas' must be of the same size");
-  }
-
   CreateTableRequestPB req;
   req.set_name(table_name);
   RETURN_NOT_OK(SchemaToPB(schema, req.mutable_schema()));
@@ -206,14 +206,12 @@ Status TableLocationsTest::CreateTable(
   }
 
   auto* ps_pb = req.mutable_partition_schema();
-  for (size_t i = 0; i < range_hash_schemas.size(); ++i) {
-    const auto& bound = bounds[i];
-    const auto& hash_schema = range_hash_schemas[i];
+  for (const auto& range_and_hash_schema : ranges_with_hash_schemas) {
     auto* range = ps_pb->add_custom_hash_schema_ranges();
     RowOperationsPBEncoder encoder(range->mutable_range_bounds());
-    encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, bound.first);
-    encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, bound.second);
-    for (const auto& hash_dimension : hash_schema) {
+    encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, range_and_hash_schema.lower);
+    encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, range_and_hash_schema.upper);
+    for (const auto& hash_dimension : range_and_hash_schema.hash_schema) {
       auto* hash_dimension_pb = range->add_hash_schema();
       for (const string& col_name : hash_dimension.columns) {
         hash_dimension_pb->add_columns()->set_name(col_name);
@@ -492,23 +490,18 @@ TEST_F(TableLocationsTest, RangeSpecificHashingSameDimensions) {
   ASSERT_OK(bounds[4].first.SetStringNoCopy(0, "g"));
   ASSERT_OK(bounds[4].second.SetStringNoCopy(0, "h"));
 
-  vector<HashSchema> range_hash_schemas;
-  HashSchema hash_schema_5 = { { { "str_0", "int_1" }, 5, 0 } };
-  range_hash_schemas.emplace_back(hash_schema_5);
-  HashSchema hash_schema_4 = { { { "str_0" }, 4, 1 } };
-  range_hash_schemas.emplace_back(hash_schema_4);
-  HashSchema hash_schema_3 = { { { "int_1" }, 3, 2 } };
-  range_hash_schemas.emplace_back(hash_schema_3);
-  HashSchema hash_schema_6 = { { { "str_2", "str_0" }, 6, 3 } };
-  range_hash_schemas.emplace_back(hash_schema_6);
-
-  // Use 2 bucket hash schema as the table-wide one.
-  HashSchema table_hash_schema_2 = { { { "str_2" }, 2, 4 } };
-  // Apply table-wide hash schema applied to this range.
-  range_hash_schemas.emplace_back(table_hash_schema_2);
-
-  ASSERT_OK(CreateTable(
-      table_name, schema, {}, bounds, range_hash_schemas, table_hash_schema_2));
+  const HashSchema table_wide_hash_schema{ { { "str_2" }, 2, 4 } };
+
+  const vector<RangeWithHashSchema> ranges_with_hash_schemas{
+    { bounds[0].first, bounds[0].second, { { { "str_0", "int_1" }, 5, 0 } } },
+    { bounds[1].first, bounds[1].second, { { { "str_0" }, 4, 1 } } },
+    { bounds[2].first, bounds[2].second, { { { "int_1" }, 3, 2 } } },
+    { bounds[3].first, bounds[3].second, { { { "str_2", "str_0" }, 6, 3 } } },
+    { bounds[4].first, bounds[4].second, table_wide_hash_schema },
+  };
+
+  ASSERT_OK(CreateTable(table_name, schema, {}, {},
+                        ranges_with_hash_schemas, table_wide_hash_schema));
   NO_FATALS(CheckMasterTableCreation(table_name, 20));
 
   // The default setting for GetTableLocationsRequestPB::max_returned_locations
@@ -573,17 +566,16 @@ TEST_F(TableLocationsTest, RangeSpecificHashingVaryingDimensions) {
   ASSERT_OK(bounds[1].first.SetStringNoCopy(0, "c"));
   ASSERT_OK(bounds[1].second.SetStringNoCopy(0, "d"));
 
-  vector<HashSchema> range_hash_schemas;
-  HashSchema hash_schema_3_by_2 = { { { "key" }, 3, 0 }, { { "val" }, 2, 1 } };
-  range_hash_schemas.emplace_back(hash_schema_3_by_2);
+  const HashSchema table_wide_hash_schema{ { { "val" }, 4, 2 } };
 
-  // Use 4 bucket hash schema as the table-wide one.
-  HashSchema table_hash_schema_4 = { { { "val" }, 4, 2 } };
-  // Apply table-wide hash schema applied to this range.
-  range_hash_schemas.emplace_back(table_hash_schema_4);
+  const vector<RangeWithHashSchema> ranges_with_hash_schemas{
+    { bounds[0].first, bounds[0].second,
+          { { { "key" }, 3, 0 }, { { "val" }, 2, 1 } } },
+    { bounds[1].first, bounds[1].second, table_wide_hash_schema },
+  };
 
-  const auto s = CreateTable(
-      table_name, schema, {}, bounds, range_hash_schemas, table_hash_schema_4);
+  const auto s = CreateTable(table_name, schema, {}, {},
+                             ranges_with_hash_schemas, table_wide_hash_schema);
   ASSERT_TRUE(s.IsNotSupported()) << s.ToString();
   ASSERT_STR_CONTAINS(s.ToString(),
       "varying number of hash dimensions per range is not yet supported");
diff --git a/src/kudu/master/catalog_manager.cc b/src/kudu/master/catalog_manager.cc
index 374587be6..726499d56 100644
--- a/src/kudu/master/catalog_manager.cc
+++ b/src/kudu/master/catalog_manager.cc
@@ -1874,7 +1874,7 @@ Status CatalogManager::CreateTable(const CreateTableRequestPB* orig_req,
       PartitionSchema::FromPB(req.partition_schema(), schema, &partition_schema),
       resp, MasterErrorPB::INVALID_SCHEMA));
 
-  // Decode split rows.
+  // Decode split rows and range bounds.
   vector<KuduPartialRow> split_rows;
   vector<pair<KuduPartialRow, KuduPartialRow>> range_bounds;
 
@@ -1883,7 +1883,7 @@ Status CatalogManager::CreateTable(const CreateTableRequestPB* orig_req,
   vector<DecodedRowOperation> ops;
   RETURN_NOT_OK(decoder.DecodeOperations<DecoderMode::SPLIT_ROWS>(&ops));
 
-  for (int i = 0; i < ops.size(); i++) {
+  for (size_t i = 0; i < ops.size(); ++i) {
     const DecodedRowOperation& op = ops[i];
     switch (op.type) {
       case RowOperationsPB::SPLIT_ROW: {
@@ -1920,21 +1920,15 @@ Status CatalogManager::CreateTable(const CreateTableRequestPB* orig_req,
   vector<Partition> partitions;
   if (const auto& ps = req.partition_schema();
       FLAGS_enable_per_range_hash_schemas && !ps.custom_hash_schema_ranges().empty()) {
-    // TODO(aserbin): in addition, should switch to specifying range information
-    //                only via 'PartitionSchemaPB::custom_hash_schema_ranges' or
-    //                'CreateTableRequestPB::split_rows_range_bounds', don't mix
     if (!split_rows.empty()) {
       return Status::InvalidArgument(
-          "both split rows and custom hash schema ranges cannot be "
+          "both split rows and custom hash schema ranges must not be "
           "populated at the same time");
     }
-    if (const auto ranges_with_hash_schemas_size =
-            partition_schema.ranges_with_hash_schemas().size();
-        range_bounds.size() != ranges_with_hash_schemas_size) {
+    if (!range_bounds.empty()) {
       return Status::InvalidArgument(
-          Substitute("$0 vs $1: per range hash schemas and range bounds "
-                     "must have the same size",
-                     ranges_with_hash_schemas_size, range_bounds.size()));
+          "both range bounds and custom hash schema ranges must not be "
+          "populated at the same time");
     }
     // Create partitions based on specified ranges with custom hash schemas.
     RETURN_NOT_OK(partition_schema.CreatePartitions(schema, &partitions));
@@ -1949,7 +1943,8 @@ Status CatalogManager::CreateTable(const CreateTableRequestPB* orig_req,
   // of hash dimensions as all the partitions with custom hash schemas.
   //
   // TODO(aserbin): remove the restriction once the rest of the code is ready
-  //                to handle range partitions with arbitrary hash schemas
+  //                to handle range partitions with arbitrary number of hash
+  //                dimensions in hash schemas
   CHECK(!partitions.empty());
   const auto hash_dimensions_num = partition_schema.hash_schema().size();
   for (const auto& p : partitions) {
diff --git a/src/kudu/master/master-test.cc b/src/kudu/master/master-test.cc
index d91dbb653..98ab68973 100644
--- a/src/kudu/master/master-test.cc
+++ b/src/kudu/master/master-test.cc
@@ -183,6 +183,12 @@ class MasterTest : public KuduTest {
   };
   typedef vector<HashDimension> HashSchema;
 
+  struct RangeWithHashSchema {
+    KuduPartialRow lower;
+    KuduPartialRow upper;
+    HashSchema hash_schema;
+  };
+
   void DoListTables(const ListTablesRequestPB& req, ListTablesResponsePB* resp);
   void DoListAllTables(ListTablesResponsePB* resp);
 
@@ -196,7 +202,7 @@ class MasterTest : public KuduTest {
                      const Schema& schema,
                      const vector<KuduPartialRow>& split_rows,
                      const vector<pair<KuduPartialRow, KuduPartialRow>>& bounds = {},
-                     const vector<HashSchema>& range_hash_schemas = {});
+                     const vector<RangeWithHashSchema>& ranges_with_hash_schemas = {});
 
 
   Status CreateTable(const string& name,
@@ -206,7 +212,7 @@ class MasterTest : public KuduTest {
                      const optional<string>& comment,
                      const vector<KuduPartialRow>& split_rows,
                      const vector<pair<KuduPartialRow, KuduPartialRow>>& bounds,
-                     const vector<HashSchema>& range_hash_schemas,
+                     const vector<RangeWithHashSchema>& ranges_with_hash_schemas,
                      const HashSchema& table_wide_hash_schema,
                      CreateTableResponsePB* resp);
 
@@ -237,10 +243,10 @@ Status MasterTest::CreateTable(
     const Schema& schema,
     const vector<KuduPartialRow>& split_rows,
     const vector<pair<KuduPartialRow, KuduPartialRow>>& bounds,
-    const vector<HashSchema>& range_hash_schemas) {
+    const vector<RangeWithHashSchema>& ranges_with_hash_schemas) {
   CreateTableResponsePB resp;
-  return CreateTable(
-        name, schema, none, none, none, split_rows, bounds, range_hash_schemas, {}, &resp);
+  return CreateTable(name, schema, none, none, none, split_rows, bounds,
+                     ranges_with_hash_schemas, {}, &resp);
 }
 
 Status MasterTest::CreateTable(
@@ -251,15 +257,9 @@ Status MasterTest::CreateTable(
     const optional<string>& comment,
     const vector<KuduPartialRow>& split_rows,
     const vector<pair<KuduPartialRow, KuduPartialRow>>& bounds,
-    const vector<HashSchema>& range_hash_schemas,
+    const vector<RangeWithHashSchema>& ranges_with_hash_schemas,
     const HashSchema& table_wide_hash_schema,
     CreateTableResponsePB* resp) {
-
-  if (!range_hash_schemas.empty() && range_hash_schemas.size() != bounds.size()) {
-    return Status::InvalidArgument(
-        "'bounds' and 'range_hash_schemas' must be of the same size");
-  }
-
   CreateTableRequestPB req;
   req.set_name(name);
   if (type) {
@@ -276,7 +276,6 @@ Status MasterTest::CreateTable(
   }
 
   auto* ps_pb = req.mutable_partition_schema();
-
   for (const auto& hash_dimension : table_wide_hash_schema) {
     auto* hash_schema = ps_pb->add_hash_schema();
     for (const string& col_name : hash_dimension.columns) {
@@ -286,14 +285,12 @@ Status MasterTest::CreateTable(
     hash_schema->set_seed(hash_dimension.seed);
   }
 
-  for (size_t i = 0; i < range_hash_schemas.size(); ++i) {
+  for (const auto& range_and_hs : ranges_with_hash_schemas) {
     auto* range = ps_pb->add_custom_hash_schema_ranges();
     RowOperationsPBEncoder encoder(range->mutable_range_bounds());
-    const auto& bound = bounds[i];
-    encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, bound.first);
-    encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, bound.second);
-    const auto& range_hash_schema = range_hash_schemas[i];
-    for (const auto& hash_dimension : range_hash_schema) {
+    encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, range_and_hs.lower);
+    encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, range_and_hs.upper);
+    for (const auto& hash_dimension : range_and_hs.hash_schema) {
       auto* hash_dimension_pb = range->add_hash_schema();
       for (const string& col_name : hash_dimension.columns) {
         hash_dimension_pb->add_columns()->set_name(col_name);
@@ -1031,26 +1028,48 @@ TEST_F(MasterTest, TestCreateTableCheckRangeInvariants) {
                         "least one range partition column");
   }
 
-  // No split rows and range specific hashing concurrently.
+  // In case of custom hash schemas per range, don't supply the information
+  // on split rows.
   {
     google::FlagSaver flag_saver;
-    FLAGS_enable_per_range_hash_schemas = true; // enable for testing.
+    FLAGS_enable_per_range_hash_schemas = true;
     KuduPartialRow split1(&kTableSchema);
     ASSERT_OK(split1.SetInt32("key", 1));
     KuduPartialRow a_lower(&kTableSchema);
     KuduPartialRow a_upper(&kTableSchema);
     ASSERT_OK(a_lower.SetInt32("key", 0));
     ASSERT_OK(a_upper.SetInt32("key", 100));
-    vector<HashSchema> range_hash_schemas = {{}};
-    Status s = CreateTable(kTableName,
-                           kTableSchema,
-                           { split1 },
-                           { { a_lower, a_upper } },
-                           range_hash_schemas);
+    const auto s = CreateTable(kTableName,
+                               kTableSchema,
+                               {split1},
+                               {},
+                               {{ a_lower, a_upper, {}}});
     ASSERT_TRUE(s.IsInvalidArgument()) << s.ToString();
     ASSERT_STR_CONTAINS(s.ToString(),
                         "both split rows and custom hash schema ranges "
-                        "cannot be populated at the same time");
+                        "must not be populated at the same time");
+  }
+
+  // In case of custom hash schemas per range, don't supply the information
+  // on the range partition boundaries via both the
+  // CreateTableRequestPB::split_rows_range_bounds and the
+  // CreateTableRequestPB::partition_schema::custom_hash_schema_ranges fields.
+  {
+    google::FlagSaver flag_saver;
+    FLAGS_enable_per_range_hash_schemas = true;
+    KuduPartialRow a_lower(&kTableSchema);
+    KuduPartialRow a_upper(&kTableSchema);
+    ASSERT_OK(a_lower.SetInt32("key", 0));
+    ASSERT_OK(a_upper.SetInt32("key", 100));
+    const auto s = CreateTable(kTableName,
+                               kTableSchema,
+                               vector<KuduPartialRow>(),
+                               {{ a_lower, a_upper }},
+                               {{ a_lower, a_upper, {}}});
+    ASSERT_TRUE(s.IsInvalidArgument()) << s.ToString();
+    ASSERT_STR_CONTAINS(s.ToString(),
+                        "both range bounds and custom hash schema ranges "
+                        "must not be populated at the same time");
   }
 
   // No non-range columns.
@@ -1295,9 +1315,11 @@ TEST_F(MasterTest, NonPrimaryKeyColumnsForPerRangeCustomHashSchema) {
   KuduPartialRow upper(&kTableSchema);
   ASSERT_OK(lower.SetInt32("key", 0));
   ASSERT_OK(upper.SetInt32("key", 100));
-  vector<HashSchema> range_hash_schemas{{{{"int32_val"}, 2, 0}}};
-  const auto s = CreateTable(
-      kTableName, kTableSchema, {}, { { lower, upper } }, range_hash_schemas);
+  const auto s = CreateTable(kTableName,
+                             kTableSchema,
+                             {},
+                             {},
+                             {{ lower, upper, {{{"int32_val"}, 2, 0}}}});
   ASSERT_TRUE(s.IsInvalidArgument()) << s.ToString();
   ASSERT_STR_CONTAINS(s.ToString(),
                       "must specify only primary key columns for "