Posted to commits@kudu.apache.org by al...@apache.org on 2021/08/31 01:24:08 UTC

[kudu] branch master updated: KUDU-2671 update range partitioning with custom hash schema

This is an automated email from the ASF dual-hosted git repository.

alexey pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git


The following commit(s) were added to refs/heads/master by this push:
     new 1e8376f  KUDU-2671 update range partitioning with custom hash schema
1e8376f is described below

commit 1e8376f4397a24b981216e88d8d4deb8ab154a1d
Author: Alexey Serbin <al...@apache.org>
AuthorDate: Thu Aug 26 17:50:39 2021 -0700

    KUDU-2671 update range partitioning with custom hash schema
    
    This patch updates the already existing, but not yet released (so
    backward compatibility is not a concern), protobuf data structures used
    to create Kudu tables with custom hash partitioning per range.  With
    this patch, there is no longer any need to maintain two separate,
    same-sized arrays: one of ranges and one of their hash schemas.
    
    I also renamed the 'hash_bucket_schemas' field to 'hash_schema' in
    the PartitionSchemaPB data structure.
    
    Change-Id: I37aae56a33170894f30d6cd73a5698d6cbb7a697
    Reviewed-on: http://gerrit.cloudera.org:8080/17779
    Reviewed-by: Andrew Wong <aw...@cloudera.com>
    Tested-by: Kudu Jenkins
    Reviewed-by: Mahesh Reddy <mr...@cloudera.com>
---
 .../org/apache/kudu/client/AsyncKuduClient.java    |   2 +-
 .../org/apache/kudu/client/CreateTableOptions.java |   2 +-
 .../org/apache/kudu/client/ProtobufHelper.java     |   4 +-
 src/kudu/client/client.cc                          |  36 +++---
 src/kudu/client/flex_partitioning_client-test.cc   |  23 +++-
 src/kudu/client/table_creator-internal.h           |   4 +-
 src/kudu/common/common.proto                       |  32 +++--
 src/kudu/common/partition-test.cc                  | 130 +++++++++++----------
 src/kudu/common/partition.cc                       |  75 ++++++------
 src/kudu/common/partition.h                        |   4 +-
 src/kudu/common/partition_pruner-test.cc           |  24 ++--
 src/kudu/common/scan_spec-test.cc                  |   8 +-
 .../integration-tests/table_locations-itest.cc     |  77 ++++++------
 src/kudu/master/catalog_manager.cc                 |   7 +-
 src/kudu/master/master-test.cc                     |  54 +++------
 15 files changed, 254 insertions(+), 228 deletions(-)
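
For readers skimming the diffstat, the gist of the restructuring is easiest to
see side by side. Below is a minimal sketch using the generated C++ protobuf
API (the column name "key" is illustrative, and the encoding of range bounds
is elided here; see the tests further down for complete usage):

    // Before: two parallel repeated fields that had to be kept the same size.
    PartitionSchemaPB pb_old;
    RowOperationsPBEncoder encoder(pb_old.add_range_bounds());  // element i ...
    auto* schemas_pb = pb_old.add_range_hash_schemas();         // ... paired with element i
    auto* hash_component = schemas_pb->add_hash_schemas();
    hash_component->add_columns()->set_name("key");
    hash_component->set_num_buckets(4);

    // After: one repeated field bundles a range's bounds with its hash schema.
    PartitionSchemaPB pb_new;
    auto* range = pb_new.add_custom_hash_schema_ranges();
    RowOperationsPBEncoder enc(range->mutable_range_bounds());
    auto* hash_dimension = range->add_hash_schema();
    hash_dimension->add_columns()->set_name("key");
    hash_dimension->set_num_buckets(4);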

diff --git a/java/kudu-client/src/main/java/org/apache/kudu/client/AsyncKuduClient.java b/java/kudu-client/src/main/java/org/apache/kudu/client/AsyncKuduClient.java
index bb71e86..c0b2870 100644
--- a/java/kudu-client/src/main/java/org/apache/kudu/client/AsyncKuduClient.java
+++ b/java/kudu-client/src/main/java/org/apache/kudu/client/AsyncKuduClient.java
@@ -633,7 +633,7 @@ public class AsyncKuduClient implements AutoCloseable {
       throw new IllegalArgumentException("CreateTableOptions may not be null");
     }
     if (!builder.getBuilder().getPartitionSchema().hasRangeSchema() &&
-        builder.getBuilder().getPartitionSchema().getHashBucketSchemasCount() == 0) {
+        builder.getBuilder().getPartitionSchema().getHashSchemaCount() == 0) {
       throw new IllegalArgumentException("Table partitioning must be specified using " +
                                          "setRangePartitionColumns or addHashPartitions");
 
diff --git a/java/kudu-client/src/main/java/org/apache/kudu/client/CreateTableOptions.java b/java/kudu-client/src/main/java/org/apache/kudu/client/CreateTableOptions.java
index 549d10b..6ac83dd 100644
--- a/java/kudu-client/src/main/java/org/apache/kudu/client/CreateTableOptions.java
+++ b/java/kudu-client/src/main/java/org/apache/kudu/client/CreateTableOptions.java
@@ -77,7 +77,7 @@ public class CreateTableOptions {
    */
   public CreateTableOptions addHashPartitions(List<String> columns, int buckets, int seed) {
     Common.PartitionSchemaPB.HashBucketSchemaPB.Builder hashBucket =
-        pb.getPartitionSchemaBuilder().addHashBucketSchemasBuilder();
+        pb.getPartitionSchemaBuilder().addHashSchemaBuilder();
     for (String column : columns) {
       hashBucket.addColumnsBuilder().setName(column);
     }
diff --git a/java/kudu-client/src/main/java/org/apache/kudu/client/ProtobufHelper.java b/java/kudu-client/src/main/java/org/apache/kudu/client/ProtobufHelper.java
index ae5ec8f..4381fd8 100644
--- a/java/kudu-client/src/main/java/org/apache/kudu/client/ProtobufHelper.java
+++ b/java/kudu-client/src/main/java/org/apache/kudu/client/ProtobufHelper.java
@@ -211,7 +211,7 @@ public class ProtobufHelper {
     ImmutableList.Builder<PartitionSchema.HashBucketSchema> hashSchemas = ImmutableList.builder();
 
     for (Common.PartitionSchemaPB.HashBucketSchemaPB hashBucketSchemaPB
-        : pb.getHashBucketSchemasList()) {
+        : pb.getHashSchemaList()) {
       List<Integer> hashColumnIds = pbToIds(hashBucketSchemaPB.getColumnsList());
 
       PartitionSchema.HashBucketSchema hashSchema =
@@ -235,7 +235,7 @@ public class ProtobufHelper {
                 .addAllColumns(idsToPb(hashBucketSchema.getColumnIds()))
                 .setNumBuckets(hashBucketSchema.getNumBuckets())
                 .setSeed(hashBucketSchema.getSeed());
-      builder.addHashBucketSchemas(hbsBuilder.build());
+      builder.addHashSchema(hbsBuilder.build());
     }
 
     Common.PartitionSchemaPB.RangeSchemaPB rangeSchemaPB =
diff --git a/src/kudu/client/client.cc b/src/kudu/client/client.cc
index 897cc6e..a6c12d0 100644
--- a/src/kudu/client/client.cc
+++ b/src/kudu/client/client.cc
@@ -832,12 +832,12 @@ KuduTableCreator& KuduTableCreator::add_hash_partitions(const vector<string>& co
 KuduTableCreator& KuduTableCreator::add_hash_partitions(const vector<string>& columns,
                                                         int32_t num_buckets,
                                                         int32_t seed) {
-  auto* bucket_schema = data_->partition_schema_.add_hash_bucket_schemas();
+  auto* hash_dimension = data_->partition_schema_.add_hash_schema();
   for (const string& col_name : columns) {
-    bucket_schema->add_columns()->set_name(col_name);
+    hash_dimension->add_columns()->set_name(col_name);
   }
-  bucket_schema->set_num_buckets(num_buckets);
-  bucket_schema->set_seed(seed);
+  hash_dimension->set_num_buckets(num_buckets);
+  hash_dimension->set_seed(seed);
   return *this;
 }
 
@@ -924,7 +924,7 @@ Status KuduTableCreator::Create() {
     return Status::InvalidArgument("Missing schema");
   }
   if (!data_->partition_schema_.has_range_schema() &&
-      data_->partition_schema_.hash_bucket_schemas().empty()) {
+      data_->partition_schema_.hash_schema().empty()) {
     return Status::InvalidArgument(
         "Table partitioning must be specified using "
         "add_hash_partitions or set_range_partition_columns");
@@ -1002,24 +1002,26 @@ Status KuduTableCreator::Create() {
     splits_encoder.Add(upper_bound_type, *range->upper_bound_);
 
     if (has_range_with_custom_hash_schema) {
-      // In case of per-range custom hash bucket schemas, add range bounds
-      // into PartitionSchemaPB::range_bounds as well.
-      RowOperationsPBEncoder encoder(partition_schema->add_range_bounds());
+      auto* range_pb = partition_schema->add_custom_hash_schema_ranges();
+      RowOperationsPBEncoder encoder(range_pb->mutable_range_bounds());
       encoder.Add(lower_bound_type, *range->lower_bound_);
       encoder.Add(upper_bound_type, *range->upper_bound_);
-      // Populate corresponding element in 'range_hash_schemas' if there is at
-      // least one range with custom hash schema.
-      auto* schemas_pb = partition_schema->add_range_hash_schemas();
       if (range->hash_schema_.empty()) {
-        schemas_pb->mutable_hash_schemas()->CopyFrom(
-            data_->partition_schema_.hash_bucket_schemas());
+        // When at least one range has a custom hash schema, a range that
+        // uses the table-wide hash schema must also get an element in
+        // PartitionSchemaPB::custom_hash_schema_ranges to satisfy the
+        // convention used by the backend.
+        range_pb->mutable_hash_schema()->CopyFrom(
+            data_->partition_schema_.hash_schema());
       } else {
+        // For a range with a custom hash schema, add the corresponding
+        // element into PartitionSchemaPB::custom_hash_schema_ranges.
         for (const auto& hash_dimension : range->hash_schema_) {
-          auto* pb = schemas_pb->add_hash_schemas();
-          pb->set_seed(hash_dimension.seed);
-          pb->set_num_buckets(hash_dimension.num_buckets);
+          auto* hash_dimension_pb = range_pb->add_hash_schema();
+          hash_dimension_pb->set_seed(hash_dimension.seed);
+          hash_dimension_pb->set_num_buckets(hash_dimension.num_buckets);
           for (const auto& column_name : hash_dimension.column_names) {
-            pb->add_columns()->set_name(column_name);
+            hash_dimension_pb->add_columns()->set_name(column_name);
           }
         }
       }
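
For context, here is roughly what the public client API exercised by this code
path looks like (a sketch: table_name() and schema() are the standard
KuduTableCreator setters and are assumed here, as is a KuduClient* 'client';
the construction of the KuduRangePartition 'p' carrying the custom hash
schema is omitted):

    unique_ptr<KuduTableCreator> table_creator(client->NewTableCreator());
    table_creator->table_name("t")
        .schema(&kudu_schema)
        .set_range_partition_columns({ "key" })
        .add_hash_partitions({ "key" }, /*num_buckets=*/2);  // table-wide hash schema
    table_creator->add_custom_range_partition(p.release());  // range with custom hash schema
    RETURN_NOT_OK(table_creator->Create());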
diff --git a/src/kudu/client/flex_partitioning_client-test.cc b/src/kudu/client/flex_partitioning_client-test.cc
index f7725d0..a2ab310 100644
--- a/src/kudu/client/flex_partitioning_client-test.cc
+++ b/src/kudu/client/flex_partitioning_client-test.cc
@@ -333,6 +333,7 @@ TEST_F(FlexPartitioningCreateTableTest, DefaultAndCustomHashBuckets) {
   // 111-222  x:{key}     x:{key}     x:{key}         -
   // 222-333  x:{key}     x:{key}     x:{key}     x:{key}
   // 333-444  x:{key}     x:{key}     -               -
+  // 444-555  x:{key}     x:{key}     -               -
   constexpr const char* const kTableName = "DefaultAndCustomHashBuckets";
 
   unique_ptr<KuduTableCreator> table_creator(client_->NewTableCreator());
@@ -375,17 +376,27 @@ TEST_F(FlexPartitioningCreateTableTest, DefaultAndCustomHashBuckets) {
     table_creator->add_custom_range_partition(p.release());
   }
 
+  // Add a range partition with the table-wide hash schema: not calling
+  // KuduRangePartition::add_hash_partition() means the range uses the
+  // table-wide schema.
+  //
+  // TODO(aserbin): update this once empty range schema means no hash bucketing
+  {
+    auto p = CreateRangePartition(444, 555);
+    table_creator->add_custom_range_partition(p.release());
+  }
+
   ASSERT_OK(table_creator->Create());
-  NO_FATALS(CheckTabletCount(kTableName, 11));
+  NO_FATALS(CheckTabletCount(kTableName, 13));
 
   // Make sure it's possible to insert rows into the table for all the
   // existing partitions: first check the range with the table-wide hash
   // schema, then check the ranges with custom hash schemas.
   // TODO(aserbin): uncomment CheckTableRowsNum() once partition pruning works
-  ASSERT_OK(InsertTestRows(kTableName, -111, 111));
-  NO_FATALS(CheckLiveRowCount(kTableName, 222));
+  ASSERT_OK(InsertTestRows(kTableName, -111, 0));
+  NO_FATALS(CheckLiveRowCount(kTableName, 111));
   //NO_FATALS(CheckTableRowsNum(kTableName, 222));
-  ASSERT_OK(InsertTestRows(kTableName, 111, 444));
+  ASSERT_OK(InsertTestRows(kTableName, 111, 555));
   NO_FATALS(CheckLiveRowCount(kTableName, 555));
   //NO_FATALS(CheckTableRowsNum(kTableName, 555));
 
@@ -395,7 +406,7 @@ TEST_F(FlexPartitioningCreateTableTest, DefaultAndCustomHashBuckets) {
     vector<KuduError*> errors;
     ElementDeleter drop(&errors);
     auto s = InsertTestRows(
-        kTableName, 444, 445, KuduSession::AUTO_FLUSH_SYNC, &errors);
+        kTableName, 555, 556, KuduSession::AUTO_FLUSH_SYNC, &errors);
     ASSERT_TRUE(s.IsIOError()) << s.ToString();
     ASSERT_STR_CONTAINS(s.ToString(), "failed to flush data");
     ASSERT_EQ(1, errors.size());
@@ -411,7 +422,7 @@ TEST_F(FlexPartitioningCreateTableTest, DefaultAndCustomHashBuckets) {
     vector<KuduError*> errors;
     ElementDeleter drop(&errors);
     auto s = InsertTestRows(
-        kTableName, 445, 445 + kNumRows, KuduSession::MANUAL_FLUSH, &errors);
+        kTableName, 556, 556 + kNumRows, KuduSession::MANUAL_FLUSH, &errors);
     ASSERT_TRUE(s.IsIOError()) << s.ToString();
     ASSERT_STR_CONTAINS(s.ToString(), "failed to flush data");
     ASSERT_EQ(kNumRows, errors.size());
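
(The updated tablet count in the assertions above follows from the per-range
hash schemas in the comment table at the top of the test, assuming the
-111..111 range uses the table-wide two-bucket schema, which is outside the
quoted context: 2 + 3 + 4 + 2 = 11 tablets before this patch, and the new
444-555 range with the table-wide schema adds two more, hence 13.)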
diff --git a/src/kudu/client/table_creator-internal.h b/src/kudu/client/table_creator-internal.h
index 4cfe477..23a5c25 100644
--- a/src/kudu/client/table_creator-internal.h
+++ b/src/kudu/client/table_creator-internal.h
@@ -24,6 +24,7 @@
 #include <vector>
 
 #include <boost/optional/optional.hpp>
+#include <glog/logging.h>
 
 #include "kudu/client/client.h"
 #include "kudu/common/common.pb.h"
@@ -39,11 +40,12 @@ class KuduSchema;
 
 struct HashDimension {
   HashDimension(std::vector<std::string> column_names,
-                uint32_t num_buckets,
+                int32_t num_buckets,
                 uint32_t seed)
       : column_names(std::move(column_names)),
         num_buckets(num_buckets),
         seed(seed) {
+    DCHECK_GE(num_buckets, 2);
   }
 
   const std::vector<std::string> column_names;
diff --git a/src/kudu/common/common.proto b/src/kudu/common/common.proto
index 2b51a7a..0c08353 100644
--- a/src/kudu/common/common.proto
+++ b/src/kudu/common/common.proto
@@ -346,24 +346,34 @@ message PartitionSchemaPB {
     optional uint32 seed = 3;
 
     // The hash algorithm to use for calculating the hash bucket.
+    // NOTE: this is not used yet -- don't expect setting it to have any effect
     optional HashAlgorithm hash_algorithm = 4;
   }
 
-  message PerRangeHashBucketSchemasPB {
-    repeated HashBucketSchemaPB hash_schemas = 1;
+  // This data structure represents a range partition with a custom hash schema.
+  message RangeWithHashSchemaPB {
+    // Row operations containing the lower and upper range bound for the range.
+    optional RowOperationsPB range_bounds = 1;
+    // Hash schema for the range.
+    repeated HashBucketSchemaPB hash_schema = 2;
   }
 
-  repeated HashBucketSchemaPB hash_bucket_schemas = 1;
+  // Table-wide hash schema. The hash schema for a particular range may be
+  // overridden by the corresponding element in 'custom_hash_schema_ranges'.
+  repeated HashBucketSchemaPB hash_schema = 1;
+
+  // Range schema to partition the key space into ranges.
   optional RangeSchemaPB range_schema = 2;
 
-  // Each index of 'range_bounds' represents the upper and lower bounds of
-  // ranges whose hash bucket schemas were specified. Its corresponding index
-  // of 'range_hash_schemas' represents that range's hash schema. An empty
-  // field of 'range_hash_schemas' indicates that the table wide hash schema
-  // specified in 'hash_bucket_schemas' is used. Both of these fields must have
-  // the same size.
-  repeated PerRangeHashBucketSchemasPB range_hash_schemas = 3;
-  repeated RowOperationsPB range_bounds = 4;
+  // Two fields ('range_hash_schemas' and 'range_bounds') were deprecated
+  // in favor of using 'custom_hash_schema_ranges'.
+  reserved 3;
+  reserved 4;
+
+  // If the 'custom_hash_schema_ranges' field is empty, the table-wide hash
+  // schema specified by the 'hash_schema' field is used for all the ranges
+  // of the table. Otherwise, particular ranges have their hash schema
+  // as specified by corresponding elements in 'custom_hash_schema_ranges'.
+  repeated RangeWithHashSchemaPB custom_hash_schema_ranges = 5;
 }
 
 // The serialized format of a Kudu table partition.
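
Put together, a minimal sketch of the new message in use (C++; 'lower' and
'upper' are KuduPartialRow range bounds built as in the tests below, and the
column names are illustrative):

    PartitionSchemaPB pb;
    // Table-wide hash schema: two buckets on column "a".
    auto* table_wide = pb.add_hash_schema();
    table_wide->add_columns()->set_name("a");
    table_wide->set_num_buckets(2);
    // One range overriding it with a custom hash schema: three buckets on "b".
    auto* range = pb.add_custom_hash_schema_ranges();
    RowOperationsPBEncoder encoder(range->mutable_range_bounds());
    encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, lower);
    encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, upper);
    auto* hash_dimension = range->add_hash_schema();
    hash_dimension->add_columns()->set_name("b");
    hash_dimension->set_num_buckets(3);
    // A range added here with an empty 'hash_schema' means no hash bucketing
    // for that range (see partition-test.cc below).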
diff --git a/src/kudu/common/partition-test.cc b/src/kudu/common/partition-test.cc
index a9719de..abe779e 100644
--- a/src/kudu/common/partition-test.cc
+++ b/src/kudu/common/partition-test.cc
@@ -56,12 +56,12 @@ void AddHashDimension(PartitionSchemaPB* partition_schema_pb,
                       const vector<string>& columns,
                       int32_t num_buckets,
                       uint32_t seed) {
-  auto* hash_bucket_schema = partition_schema_pb->add_hash_bucket_schemas();
+  auto* hash_dimension = partition_schema_pb->add_hash_schema();
   for (const string& column : columns) {
-    hash_bucket_schema->add_columns()->set_name(column);
+    hash_dimension->add_columns()->set_name(column);
   }
-  hash_bucket_schema->set_num_buckets(num_buckets);
-  hash_bucket_schema->set_seed(seed);
+  hash_dimension->set_num_buckets(num_buckets);
+  hash_dimension->set_seed(seed);
 }
 
 void SetRangePartitionComponent(PartitionSchemaPB* partition_schema_pb,
@@ -1392,7 +1392,8 @@ TEST_F(PartitionTest, TestPartitionSchemaPB) {
 
   // [(a0, _, c0), (a0, _, c1))
   {
-    RowOperationsPBEncoder encoder(pb.add_range_bounds());
+    auto* range = pb.add_custom_hash_schema_ranges();
+    RowOperationsPBEncoder encoder(range->mutable_range_bounds());
     KuduPartialRow lower(&schema);
     KuduPartialRow upper(&schema);
     ASSERT_OK(lower.SetStringCopy("a", "a0"));
@@ -1402,15 +1403,15 @@ TEST_F(PartitionTest, TestPartitionSchemaPB) {
     encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, lower);
     encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, upper);
 
-    auto* range_hash_component = pb.add_range_hash_schemas();
-    auto* hash_component = range_hash_component->add_hash_schemas();
-    hash_component->add_columns()->set_name("a");
-    hash_component->set_num_buckets(4);
+    auto* hash_dimension = range->add_hash_schema();
+    hash_dimension->add_columns()->set_name("a");
+    hash_dimension->set_num_buckets(4);
   }
 
   // [(a1, _, c2), (a1, _, c3))
   {
-    RowOperationsPBEncoder encoder(pb.add_range_bounds());
+    auto* range = pb.add_custom_hash_schema_ranges();
+    RowOperationsPBEncoder encoder(range->mutable_range_bounds());
     KuduPartialRow lower(&schema);
     KuduPartialRow upper(&schema);
     ASSERT_OK(lower.SetStringCopy("a", "a1"));
@@ -1420,18 +1421,22 @@ TEST_F(PartitionTest, TestPartitionSchemaPB) {
     encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, lower);
     encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, upper);
 
-    auto* range_hash_component = pb.add_range_hash_schemas();
-    auto* hash_component_1 = range_hash_component->add_hash_schemas();
-    hash_component_1->add_columns()->set_name("a");
-    hash_component_1->set_num_buckets(2);
-    auto* hash_component_2 = range_hash_component->add_hash_schemas();
-    hash_component_2->add_columns()->set_name("b");
-    hash_component_2->set_num_buckets(3);
+    {
+      auto* hash_dimension = range->add_hash_schema();
+      hash_dimension->add_columns()->set_name("a");
+      hash_dimension->set_num_buckets(2);
+    }
+    {
+      auto* hash_dimension = range->add_hash_schema();
+      hash_dimension->add_columns()->set_name("b");
+      hash_dimension->set_num_buckets(3);
+    }
   }
 
   // [(a2, _, c4), (a2, _, c5))
   {
-    RowOperationsPBEncoder encoder(pb.add_range_bounds());
+    auto* range = pb.add_custom_hash_schema_ranges();
+    RowOperationsPBEncoder encoder(range->mutable_range_bounds());
     KuduPartialRow lower(&schema);
     KuduPartialRow upper(&schema);
     ASSERT_OK(lower.SetStringCopy("a", "a2"));
@@ -1441,8 +1446,8 @@ TEST_F(PartitionTest, TestPartitionSchemaPB) {
     encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, lower);
     encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, upper);
 
-    // empty field implies use of table wide hash schema
-    pb.add_range_hash_schemas();
+    // Empty 'hash_schema' field overrides the table-wide hash schema,
+    // meaning 'no hash bucketing for the range'.
   }
 
   PartitionSchema partition_schema;
@@ -1491,11 +1496,11 @@ TEST_F(PartitionTest, TestMalformedPartitionSchemaPB) {
                   ColumnSchema("c", STRING) },
                 { ColumnId(0), ColumnId(1), ColumnId(2) }, 3);
 
-  PartitionSchemaPB pb;
-
   // Testing that only a pair of range bounds is allowed.
   {
-    RowOperationsPBEncoder encoder(pb.add_range_bounds());
+    PartitionSchemaPB pb;
+    auto* range = pb.add_custom_hash_schema_ranges();
+    RowOperationsPBEncoder encoder(range->mutable_range_bounds());
     KuduPartialRow lower(&schema);
     KuduPartialRow upper(&schema);
     KuduPartialRow extra(&schema);
@@ -1505,45 +1510,50 @@ TEST_F(PartitionTest, TestMalformedPartitionSchemaPB) {
     encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, lower);
     encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, upper);
     encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, extra);
-  }
 
-  PartitionSchema partition_schema;
-  Status s = PartitionSchema::FromPB(pb, schema, &partition_schema);
-  ASSERT_EQ("Invalid argument: 3 ops were provided; "
-            "only two ops are expected for this pair of range bounds",
-            s.ToString());
+    PartitionSchema partition_schema;
+    auto s = PartitionSchema::FromPB(pb, schema, &partition_schema);
+    ASSERT_EQ("Invalid argument: 3 ops were provided; "
+              "only two ops are expected for this pair of range bounds",
+              s.ToString());
+  }
 
-  pb.Clear();
-  // Testing that no split rows are allowed.
+  // Testing that no split rows are allowed along with ranges with custom
+  // hash schema.
   {
-    RowOperationsPBEncoder encoder(pb.add_range_bounds());
+    PartitionSchemaPB pb;
+    auto* range = pb.add_custom_hash_schema_ranges();
+    RowOperationsPBEncoder encoder(range->mutable_range_bounds());
     KuduPartialRow split(&schema);
     KuduPartialRow upper(&schema);
     ASSERT_OK(split.SetStringCopy("a", "a0"));
     ASSERT_OK(upper.SetStringCopy("a", "a1"));
     encoder.Add(RowOperationsPB::SPLIT_ROW, split);
     encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, upper);
-  }
 
-  Status s1 = PartitionSchema::FromPB(pb, schema, &partition_schema);
-  ASSERT_EQ("Invalid argument: Illegal row operation type in request: 4",
-            s1.ToString());
+    PartitionSchema partition_schema;
+    auto s = PartitionSchema::FromPB(pb, schema, &partition_schema);
+    ASSERT_EQ("Invalid argument: Illegal row operation type in request: 4",
+              s.ToString());
+  }
 
-  pb.Clear();
   // Testing that 2nd bound is either RANGE_UPPER_BOUND or INCLUSIVE_RANGE_UPPER_BOUND.
   {
-    RowOperationsPBEncoder encoder(pb.add_range_bounds());
+    PartitionSchemaPB pb;
+    auto* range = pb.add_custom_hash_schema_ranges();
+    RowOperationsPBEncoder encoder(range->mutable_range_bounds());
     KuduPartialRow lower(&schema);
     KuduPartialRow upper(&schema);
     ASSERT_OK(lower.SetStringCopy("a", "a0"));
     ASSERT_OK(upper.SetStringCopy("a", "a1"));
     encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, lower);
     encoder.Add(RowOperationsPB::SPLIT_ROW, upper);
-  }
 
-  Status s2 = PartitionSchema::FromPB(pb, schema, &partition_schema);
-  ASSERT_EQ("Invalid argument: missing upper range bound in request",
-            s2.ToString());
+    PartitionSchema partition_schema;
+    auto s = PartitionSchema::FromPB(pb, schema, &partition_schema);
+    ASSERT_EQ("Invalid argument: missing upper range bound in request",
+              s.ToString());
+  }
 }
 
 TEST_F(PartitionTest, TestOverloadedEqualsOperator) {
@@ -1585,13 +1595,14 @@ TEST_F(PartitionTest, TestOverloadedEqualsOperator) {
   ASSERT_NE(partition_schema, partition_schema_1);
 
   // Resets table wide hash schemas so both will be equal again.
-  schema_builder_1.clear_hash_bucket_schemas();
+  schema_builder_1.clear_hash_schema();
   AddHashDimension(&schema_builder_1, { "a" }, 2, 0);
 
   // Different sizes of field 'ranges_with_hash_schemas_'
   // [(a, _, _), (b, _, _))
   {
-    RowOperationsPBEncoder encoder(schema_builder_1.add_range_bounds());
+    auto* range = schema_builder_1.add_custom_hash_schema_ranges();
+    RowOperationsPBEncoder encoder(range->mutable_range_bounds());
     KuduPartialRow lower(&schema);
     KuduPartialRow upper(&schema);
     ASSERT_OK(lower.SetStringCopy("a", "a"));
@@ -1599,10 +1610,9 @@ TEST_F(PartitionTest, TestOverloadedEqualsOperator) {
     encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, lower);
     encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, upper);
 
-    auto* range_hash_component = schema_builder_1.add_range_hash_schemas();
-    auto* hash_component = range_hash_component->add_hash_schemas();
-    hash_component->add_columns()->set_name("a");
-    hash_component->set_num_buckets(4);
+    auto* hash_dimension = range->add_hash_schema();
+    hash_dimension->add_columns()->set_name("a");
+    hash_dimension->set_num_buckets(4);
   }
 
   ASSERT_OK(PartitionSchema::FromPB(schema_builder_1, schema, &partition_schema_1));
@@ -1611,7 +1621,8 @@ TEST_F(PartitionTest, TestOverloadedEqualsOperator) {
   // Different custom hash bucket schema but same range bounds.
   // [(a, _, _), (b, _, _))
   {
-    RowOperationsPBEncoder encoder(schema_builder.add_range_bounds());
+    auto* range = schema_builder.add_custom_hash_schema_ranges();
+    RowOperationsPBEncoder encoder(range->mutable_range_bounds());
     KuduPartialRow lower(&schema);
     KuduPartialRow upper(&schema);
     ASSERT_OK(lower.SetStringCopy("a", "a"));
@@ -1619,23 +1630,21 @@ TEST_F(PartitionTest, TestOverloadedEqualsOperator) {
     encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, lower);
     encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, upper);
 
-    auto* range_hash_component = schema_builder.add_range_hash_schemas();
-    auto* hash_component = range_hash_component->add_hash_schemas();
-    hash_component->add_columns()->set_name("a");
-    hash_component->set_num_buckets(2);
+    auto* hash_dimension = range->add_hash_schema();
+    hash_dimension->add_columns()->set_name("a");
+    hash_dimension->set_num_buckets(2);
   }
 
   ASSERT_OK(PartitionSchema::FromPB(schema_builder, schema, &partition_schema));
   ASSERT_NE(partition_schema, partition_schema_1);
 
-  // Clears custom hash schemas and range bounds field.
-  schema_builder.clear_range_hash_schemas();
-  schema_builder.clear_range_bounds();
+  schema_builder.clear_custom_hash_schema_ranges();
 
   // Different range bounds but same custom hash bucket schema.
   // [(a, _, _), (c, _, _))
   {
-    RowOperationsPBEncoder encoder(schema_builder.add_range_bounds());
+    auto* range = schema_builder.add_custom_hash_schema_ranges();
+    RowOperationsPBEncoder encoder(range->mutable_range_bounds());
     KuduPartialRow lower(&schema);
     KuduPartialRow upper(&schema);
     ASSERT_OK(lower.SetStringCopy("a", "a"));
@@ -1643,10 +1652,9 @@ TEST_F(PartitionTest, TestOverloadedEqualsOperator) {
     encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, lower);
     encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, upper);
 
-    auto* range_hash_component = schema_builder.add_range_hash_schemas();
-    auto* hash_component = range_hash_component->add_hash_schemas();
-    hash_component->add_columns()->set_name("a");
-    hash_component->set_num_buckets(4);
+    auto* hash_dimension = range->add_hash_schema();
+    hash_dimension->add_columns()->set_name("a");
+    hash_dimension->set_num_buckets(4);
   }
 
   ASSERT_OK(PartitionSchema::FromPB(schema_builder, schema, &partition_schema));
diff --git a/src/kudu/common/partition.cc b/src/kudu/common/partition.cc
index 6b949e6..c01fbcf 100644
--- a/src/kudu/common/partition.cc
+++ b/src/kudu/common/partition.cc
@@ -165,12 +165,12 @@ void SetColumnIdentifiers(const vector<ColumnId>& column_ids,
 
 Status PartitionSchema::ExtractHashSchemaFromPB(
     const Schema& schema,
-    const RepeatedPtrField<PartitionSchemaPB_HashBucketSchemaPB>& hash_buckets_pb,
+    const RepeatedPtrField<PartitionSchemaPB_HashBucketSchemaPB>& hash_schema_pb,
     HashSchema* hash_schema) {
-  for (const auto& hash_bucket_pb : hash_buckets_pb) {
+  for (const auto& hash_dimension_pb : hash_schema_pb) {
     HashDimension hash_dimension;
     RETURN_NOT_OK(ExtractColumnIds(
-        hash_bucket_pb.columns(), schema, &hash_dimension.column_ids));
+        hash_dimension_pb.columns(), schema, &hash_dimension.column_ids));
 
     // Hashing is column-order dependent, so sort the column_ids to ensure that
     // hash components with the same columns hash consistently. This is
@@ -178,8 +178,8 @@ Status PartitionSchema::ExtractHashSchemaFromPB(
     // table creation; after that the columns should remain in sorted order.
     std::sort(hash_dimension.column_ids.begin(), hash_dimension.column_ids.end());
 
-    hash_dimension.seed = hash_bucket_pb.seed();
-    hash_dimension.num_buckets = hash_bucket_pb.num_buckets();
+    hash_dimension.seed = hash_dimension_pb.seed();
+    hash_dimension.num_buckets = hash_dimension_pb.num_buckets();
     hash_schema->push_back(std::move(hash_dimension));
   }
   return Status::OK();
@@ -190,18 +190,18 @@ Status PartitionSchema::FromPB(const PartitionSchemaPB& pb,
                                PartitionSchema* partition_schema) {
   partition_schema->Clear();
   RETURN_NOT_OK(ExtractHashSchemaFromPB(
-      schema, pb.hash_bucket_schemas(), &partition_schema->hash_schema_));
-  vector<HashSchema> range_hash_schemas;
-  range_hash_schemas.resize(pb.range_hash_schemas_size());
-  for (int i = 0; i < pb.range_hash_schemas_size(); i++) {
-    RETURN_NOT_OK(ExtractHashSchemaFromPB(schema,
-                                          pb.range_hash_schemas(i).hash_schemas(),
-                                          &range_hash_schemas[i]));
-  }
+      schema, pb.hash_schema(), &partition_schema->hash_schema_));
 
+  const auto custom_ranges_num = pb.custom_hash_schema_ranges_size();
+  vector<HashSchema> range_hash_schemas;
+  range_hash_schemas.resize(custom_ranges_num);
   vector<pair<KuduPartialRow, KuduPartialRow>> range_bounds;
-  for (int i = 0; i < pb.range_bounds_size(); i++) {
-    RowOperationsPBDecoder decoder(&pb.range_bounds(i), &schema, &schema, nullptr);
+  for (int i = 0; i < custom_ranges_num; i++) {
+    const auto& range = pb.custom_hash_schema_ranges(i);
+    RETURN_NOT_OK(ExtractHashSchemaFromPB(
+        schema, range.hash_schema(), &range_hash_schemas[i]));
+
+    RowOperationsPBDecoder decoder(&range.range_bounds(), &schema, &schema, nullptr);
     vector<DecodedRowOperation> ops;
     RETURN_NOT_OK(decoder.DecodeOperations<DecoderMode::SPLIT_ROWS>(&ops));
     if (ops.size() != 2) {
@@ -274,22 +274,24 @@ Status PartitionSchema::FromPB(const PartitionSchemaPB& pb,
 
 Status PartitionSchema::ToPB(const Schema& schema, PartitionSchemaPB* pb) const {
   pb->Clear();
-  pb->mutable_hash_bucket_schemas()->Reserve(hash_schema_.size());
+  pb->mutable_hash_schema()->Reserve(hash_schema_.size());
   for (const auto& hash_dimension : hash_schema_) {
-    auto* hash_schema_pb = pb->add_hash_bucket_schemas();
+    auto* hash_dimension_pb = pb->add_hash_schema();
     SetColumnIdentifiers(hash_dimension.column_ids,
-                         hash_schema_pb->mutable_columns());
-    hash_schema_pb->set_num_buckets(hash_dimension.num_buckets);
-    hash_schema_pb->set_seed(hash_dimension.seed);
+                         hash_dimension_pb->mutable_columns());
+    hash_dimension_pb->set_num_buckets(hash_dimension.num_buckets);
+    hash_dimension_pb->set_seed(hash_dimension.seed);
   }
 
   if (!ranges_with_hash_schemas_.empty()) {
-    pb->mutable_range_hash_schemas()->Reserve(ranges_with_hash_schemas_.size());
-    pb->mutable_range_bounds()->Reserve(ranges_with_hash_schemas_.size());
+    pb->mutable_custom_hash_schema_ranges()->Reserve(
+        ranges_with_hash_schemas_.size());
     Arena arena(256);
     for (const auto& range_hash_schema : ranges_with_hash_schemas_) {
-      RowOperationsPBEncoder encoder(pb->add_range_bounds());
+      auto* range_pb = pb->add_custom_hash_schema_ranges();
+
       arena.Reset();
+      RowOperationsPBEncoder encoder(range_pb->mutable_range_bounds());
       KuduPartialRow lower(&schema);
       KuduPartialRow upper(&schema);
       Slice s_lower = Slice(range_hash_schema.lower);
@@ -299,13 +301,14 @@ Status PartitionSchema::ToPB(const Schema& schema, PartitionSchemaPB* pb) const
       encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, lower);
       encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, upper);
 
-      auto* range_hash_schema_pb = pb->add_range_hash_schemas();
+      range_pb->mutable_hash_schema()->Reserve(
+          range_hash_schema.hash_schema.size());
       for (const auto& hash_dimension : range_hash_schema.hash_schema) {
-        auto* hash_schema_pb = range_hash_schema_pb->add_hash_schemas();
+        auto* hash_dimension_pb = range_pb->add_hash_schema();
         SetColumnIdentifiers(hash_dimension.column_ids,
-                             hash_schema_pb->mutable_columns());
-        hash_schema_pb->set_num_buckets(hash_dimension.num_buckets);
-        hash_schema_pb->set_seed(hash_dimension.seed);
+                             hash_dimension_pb->mutable_columns());
+        hash_dimension_pb->set_num_buckets(hash_dimension.num_buckets);
+        hash_dimension_pb->set_seed(hash_dimension.seed);
       }
     }
   }
@@ -512,21 +515,21 @@ Status PartitionSchema::SplitRangeBounds(
 Status PartitionSchema::CreatePartitions(
     const vector<KuduPartialRow>& split_rows,
     const vector<pair<KuduPartialRow, KuduPartialRow>>& range_bounds,
-    const vector<HashSchema>& ranges_hash_schemas,
+    const vector<HashSchema>& range_hash_schemas,
     const Schema& schema,
     vector<Partition>* partitions) const {
   const auto& hash_encoder = GetKeyEncoder<string>(GetTypeInfo(UINT32));
 
-  if (!ranges_hash_schemas.empty()) {
+  if (!range_hash_schemas.empty()) {
     if (!split_rows.empty()) {
       return Status::InvalidArgument("Both 'split_rows' and 'range_hash_schemas' cannot be "
                                      "populated at the same time.");
     }
-    if (range_bounds.size() != ranges_hash_schemas.size()) {
+    if (range_bounds.size() != range_hash_schemas.size()) {
       return Status::InvalidArgument(
           Substitute("$0 vs $1: per range hash schemas and range bounds "
                      "must have the same size",
-                     ranges_hash_schemas.size(), range_bounds.size()));
+                     range_hash_schemas.size(), range_bounds.size()));
     }
   }
 
@@ -547,7 +550,7 @@ Status PartitionSchema::CreatePartitions(
 
   RangesWithHashSchemas bounds_with_hash_schemas;
   vector<string> splits;
-  RETURN_NOT_OK(EncodeRangeBounds(range_bounds, ranges_hash_schemas, schema,
+  RETURN_NOT_OK(EncodeRangeBounds(range_bounds, range_hash_schemas, schema,
                                   &bounds_with_hash_schemas));
   RETURN_NOT_OK(EncodeRangeSplits(split_rows, schema, &splits));
   RETURN_NOT_OK(SplitRangeBounds(schema, splits, &bounds_with_hash_schemas));
@@ -565,7 +568,7 @@ Status PartitionSchema::CreatePartitions(
   DCHECK(base_hash_partitions.size() > 1 ||
          base_hash_partitions.front().hash_buckets().empty());
 
-  if (ranges_hash_schemas.empty()) {
+  if (range_hash_schemas.empty()) {
     // Create a partition per range bound and hash bucket combination.
     vector<Partition> new_partitions;
     for (const Partition& base_partition : base_hash_partitions) {
@@ -579,7 +582,7 @@ Status PartitionSchema::CreatePartitions(
     *partitions = std::move(new_partitions);
   } else {
     // The number of ranges should match the size of range_hash_schemas.
-    DCHECK_EQ(ranges_hash_schemas.size(), bounds_with_hash_schemas.size());
+    DCHECK_EQ(range_hash_schemas.size(), bounds_with_hash_schemas.size());
     // No split rows should be defined if range_hash_schemas is populated.
     DCHECK(split_rows.empty());
     vector<Partition> result_partitions;
@@ -653,7 +656,7 @@ Status PartitionSchema::CreatePartitions(
       for (int i = static_cast<int>(partition.hash_buckets().size()) - 1; i >= 0; i--) {
         partition.partition_key_end_.erase(kEncodedBucketSize * i);
         int32_t hash_bucket = partition.hash_buckets()[i] + 1;
-        if (ranges_hash_schemas.empty() || partition_idx_to_hash_schema_idx[j] == -1) {
+        if (range_hash_schemas.empty() || partition_idx_to_hash_schema_idx[j] == -1) {
           hash_dimension = &hash_schema_[i];
         } else {
           const auto& hash_schemas_idx = partition_idx_to_hash_schema_idx[j];
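
The round trip exercised by the conversions above, as a short sketch ('schema'
is the kudu::Schema the partition schema was built against):

    PartitionSchema partition_schema;
    RETURN_NOT_OK(PartitionSchema::FromPB(pb, schema, &partition_schema));
    PartitionSchemaPB pb_out;
    RETURN_NOT_OK(partition_schema.ToPB(schema, &pb_out));
    // pb_out now carries the table-wide 'hash_schema' plus one
    // 'custom_hash_schema_ranges' element per range with a custom hash schema.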
diff --git a/src/kudu/common/partition.h b/src/kudu/common/partition.h
index 85b7d62..590fcf9 100644
--- a/src/kudu/common/partition.h
+++ b/src/kudu/common/partition.h
@@ -213,7 +213,7 @@ class PartitionSchema {
   static Status ExtractHashSchemaFromPB(
       const Schema& schema,
       const google::protobuf::RepeatedPtrField<PartitionSchemaPB_HashBucketSchemaPB>&
-          hash_buckets_pb,
+          hash_schema_pb,
       HashSchema* hash_schema);
 
   // Deserializes a protobuf message into a partition schema.
@@ -246,7 +246,7 @@ class PartitionSchema {
   Status CreatePartitions(
       const std::vector<KuduPartialRow>& split_rows,
       const std::vector<std::pair<KuduPartialRow, KuduPartialRow>>& range_bounds,
-      const std::vector<HashSchema>& ranges_hash_schemas,
+      const std::vector<HashSchema>& range_hash_schemas,
       const Schema& schema,
       std::vector<Partition>* partitions) const WARN_UNUSED_RESULT;
 
diff --git a/src/kudu/common/partition_pruner-test.cc b/src/kudu/common/partition_pruner-test.cc
index 7ba9cc1..95bb56a 100644
--- a/src/kudu/common/partition_pruner-test.cc
+++ b/src/kudu/common/partition_pruner-test.cc
@@ -122,12 +122,12 @@ void PartitionPrunerTest::CreatePartitionSchemaPB(
     range_schema->add_columns()->set_name(range_column);
   }
   for (const auto& hash_dimension : table_hash_schema) {
-    auto* hash_schema_component = partition_schema_pb->add_hash_bucket_schemas();
-    for (const auto& hash_schema_columns : get<0>(hash_dimension)) {
-      hash_schema_component->add_columns()->set_name(hash_schema_columns);
+    auto* hash_dimension_pb = partition_schema_pb->add_hash_schema();
+    for (const auto& hash_schema_column : get<0>(hash_dimension)) {
+      hash_dimension_pb->add_columns()->set_name(hash_schema_column);
     }
-    hash_schema_component->set_num_buckets(get<1>(hash_dimension));
-    hash_schema_component->set_seed(get<2>(hash_dimension));
+    hash_dimension_pb->set_num_buckets(get<1>(hash_dimension));
+    hash_dimension_pb->set_seed(get<2>(hash_dimension));
   }
 }
 
@@ -141,7 +141,8 @@ void PartitionPrunerTest::AddRangePartitionWithSchema(
     vector<pair<KuduPartialRow, KuduPartialRow>>* bounds,
     vector<PartitionSchema::HashSchema>* range_hash_schemas,
     PartitionSchemaPB* pb) {
-  RowOperationsPBEncoder encoder(pb->add_range_bounds());
+  auto* range = pb->add_custom_hash_schema_ranges();
+  RowOperationsPBEncoder encoder(range->mutable_range_bounds());
   KuduPartialRow lower(&schema);
   KuduPartialRow upper(&schema);
   for (const auto& bound : lower_string_cols) {
@@ -158,22 +159,21 @@ void PartitionPrunerTest::AddRangePartitionWithSchema(
   }
   encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, lower);
   encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, upper);
-  auto* range_hash_component = pb->add_range_hash_schemas();
   PartitionSchema::HashSchema hash_schema;
   for (const auto& hash_bucket_info : hash_buckets_info) {
-    auto* hash_component_pb = range_hash_component->add_hash_schemas();
+    auto* hash_dimension_pb = range->add_hash_schema();
     PartitionSchema::HashDimension hash_dimension;
     for (const auto& hash_schema_columns : get<0>(hash_bucket_info)) {
-      hash_component_pb->add_columns()->set_name(hash_schema_columns);
+      hash_dimension_pb->add_columns()->set_name(hash_schema_columns);
       hash_dimension.column_ids.emplace_back(schema.find_column(hash_schema_columns));
     }
-    hash_component_pb->set_num_buckets(get<1>(hash_bucket_info));
+    hash_dimension_pb->set_num_buckets(get<1>(hash_bucket_info));
     hash_dimension.num_buckets = get<1>(hash_bucket_info);
-    hash_component_pb->set_seed(get<2>(hash_bucket_info));
+    hash_dimension_pb->set_seed(get<2>(hash_bucket_info));
     hash_dimension.seed = get<2>(hash_bucket_info);
     hash_schema.emplace_back(hash_dimension);
   }
-  range_hash_schemas->emplace_back(hash_schema);
+  range_hash_schemas->emplace_back(std::move(hash_schema));
   bounds->emplace_back(lower, upper);
 }
 
diff --git a/src/kudu/common/scan_spec-test.cc b/src/kudu/common/scan_spec-test.cc
index f43151e..a43cdfe 100644
--- a/src/kudu/common/scan_spec-test.cc
+++ b/src/kudu/common/scan_spec-test.cc
@@ -60,11 +60,11 @@ void GeneratePartitionSchema(const Schema& schema,
                              PartitionSchema* partition_schema) {
   PartitionSchemaPB partition_schema_pb;
   for (const auto& col_names_and_num_buckets : hash_partitions) {
-    auto* hash_pb = partition_schema_pb.add_hash_bucket_schemas();
-    hash_pb->set_num_buckets(col_names_and_num_buckets.second);
-    hash_pb->set_seed(0);
+    auto* hash_dimension_pb = partition_schema_pb.add_hash_schema();
+    hash_dimension_pb->set_num_buckets(col_names_and_num_buckets.second);
+    hash_dimension_pb->set_seed(0);
     for (const auto& col_name : col_names_and_num_buckets.first) {
-      auto* column_pb = hash_pb->add_columns();
+      auto* column_pb = hash_dimension_pb->add_columns();
       int col_idx = schema.find_column(col_name);
       column_pb->set_id(col_idx);
       column_pb->set_name(col_name);
diff --git a/src/kudu/integration-tests/table_locations-itest.cc b/src/kudu/integration-tests/table_locations-itest.cc
index 0fa4745..f1cafb9 100644
--- a/src/kudu/integration-tests/table_locations-itest.cc
+++ b/src/kudu/integration-tests/table_locations-itest.cc
@@ -161,13 +161,13 @@ class TableLocationsTest : public KuduTest {
   };
   typedef vector<HashDimension> HashSchema;
 
-  Status CreateTable(const string& table_name,
-                     const Schema& schema,
-                     const vector<KuduPartialRow>& split_rows,
-                     const vector<pair<KuduPartialRow, KuduPartialRow>>& bounds,
-                     const vector<HashSchema>& ranges_hash_schemas,
-                     const HashSchema& table_hash_schema);
-
+  Status CreateTable(
+      const string& table_name,
+      const Schema& schema,
+      const vector<KuduPartialRow>& split_rows = {},
+      const vector<pair<KuduPartialRow, KuduPartialRow>>& bounds = {},
+      const vector<HashSchema>& range_hash_schemas = {},
+      const HashSchema& table_hash_schema = {});
 
   void CreateTable(const string& table_name, int num_splits);
 
@@ -182,10 +182,15 @@ class TableLocationsTest : public KuduTest {
 Status TableLocationsTest::CreateTable(
     const string& table_name,
     const Schema& schema,
-    const vector<KuduPartialRow>& split_rows = {},
-    const vector<pair<KuduPartialRow, KuduPartialRow>>& bounds = {},
-    const vector<HashSchema>& ranges_hash_schemas = {},
-    const HashSchema& table_hash_schema = {}) {
+    const vector<KuduPartialRow>& split_rows,
+    const vector<pair<KuduPartialRow, KuduPartialRow>>& bounds,
+    const vector<HashSchema>& range_hash_schemas,
+    const HashSchema& table_hash_schema) {
+
+  if (!range_hash_schemas.empty() && range_hash_schemas.size() != bounds.size()) {
+    return Status::InvalidArgument(
+        "'bounds' and 'range_hash_schemas' must be of the same size");
+  }
 
   CreateTableRequestPB req;
   req.set_name(table_name);
@@ -194,32 +199,32 @@ Status TableLocationsTest::CreateTable(
   for (const KuduPartialRow& row : split_rows) {
     splits_encoder.Add(RowOperationsPB::SPLIT_ROW, row);
   }
-  auto* partition_schema_pb = req.mutable_partition_schema();
   for (const auto& bound : bounds) {
     splits_encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, bound.first);
     splits_encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, bound.second);
-    if (!ranges_hash_schemas.empty()) {
-      RowOperationsPBEncoder encoder(partition_schema_pb->add_range_bounds());
-      encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, bound.first);
-      encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, bound.second);
-    }
   }
 
-  for (const auto& hash_schema : ranges_hash_schemas) {
-    auto* range_hash_schemas_pb = partition_schema_pb->add_range_hash_schemas();
+  auto* ps_pb = req.mutable_partition_schema();
+  for (size_t i = 0; i < range_hash_schemas.size(); ++i) {
+    const auto& bound = bounds[i];
+    const auto& hash_schema = range_hash_schemas[i];
+    auto* range = ps_pb->add_custom_hash_schema_ranges();
+    RowOperationsPBEncoder encoder(range->mutable_range_bounds());
+    encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, bound.first);
+    encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, bound.second);
     for (const auto& hash_dimension : hash_schema) {
-      auto* hash_schema_pb = range_hash_schemas_pb->add_hash_schemas();
+      auto* hash_dimension_pb = range->add_hash_schema();
       for (const string& col_name : hash_dimension.columns) {
-        hash_schema_pb->add_columns()->set_name(col_name);
+        hash_dimension_pb->add_columns()->set_name(col_name);
       }
-      hash_schema_pb->set_num_buckets(hash_dimension.num_buckets);
-      hash_schema_pb->set_seed(hash_dimension.seed);
+      hash_dimension_pb->set_num_buckets(hash_dimension.num_buckets);
+      hash_dimension_pb->set_seed(hash_dimension.seed);
     }
   }
 
   if (!table_hash_schema.empty()) {
     for (const auto& hash_dimension : table_hash_schema) {
-      auto* hash_schema_pb = partition_schema_pb->add_hash_bucket_schemas();
+      auto* hash_schema_pb = ps_pb->add_hash_schema();
       for (const string& col_name : hash_dimension.columns) {
         hash_schema_pb->add_columns()->set_name(col_name);
       }
@@ -236,6 +241,18 @@ Status TableLocationsTest::CreateTable(
   return proxy_->CreateTable(req, &resp, &controller);
 }
 
+void TableLocationsTest::CreateTable(const string& table_name, int num_splits) {
+  Schema schema({ ColumnSchema("key", INT32) }, 1);
+  KuduPartialRow row(&schema);
+  vector<KuduPartialRow> splits(num_splits, row);
+  for (int i = 0; i < num_splits; i++) {
+    ASSERT_OK(splits[i].SetInt32(0, i*1000));
+  }
+
+  ASSERT_OK(CreateTable(table_name, schema, splits));
+  NO_FATALS(CheckMasterTableCreation(table_name, num_splits + 1));
+}
+
 void TableLocationsTest::CheckMasterTableCreation(const string &table_name,
                                                   int tablet_locations_size) {
   GetTableLocationsRequestPB req;
@@ -263,18 +280,6 @@ void TableLocationsTest::CheckMasterTableCreation(const string &table_name,
   }
 }
 
-void TableLocationsTest::CreateTable(const string& table_name, int num_splits) {
-  Schema schema({ ColumnSchema("key", INT32) }, 1);
-  KuduPartialRow row(&schema);
-  vector<KuduPartialRow> splits(num_splits, row);
-  for (int i = 0; i < num_splits; i++) {
-    ASSERT_OK(splits[i].SetInt32(0, i*1000));
-  }
-
-  ASSERT_OK(CreateTable(table_name, schema, splits));
-  NO_FATALS(CheckMasterTableCreation(table_name, num_splits + 1));
-}
-
 // Test the tablet server location is properly set in the master GetTableLocations RPC.
 class TableLocationsWithTSLocationTest : public TableLocationsTest {
  public:
diff --git a/src/kudu/master/catalog_manager.cc b/src/kudu/master/catalog_manager.cc
index f306284..b0d9b0d 100644
--- a/src/kudu/master/catalog_manager.cc
+++ b/src/kudu/master/catalog_manager.cc
@@ -1865,6 +1865,9 @@ Status CatalogManager::CreateTable(const CreateTableRequestPB* orig_req,
     }
   }
 
+  // TODO(aserbin): make sure range boundaries in
+  //                req.partition_schema().custom_hash_schema_ranges()
+  //                correspond to range_bounds?
   vector<PartitionSchema::HashSchema> range_hash_schemas;
   if (FLAGS_enable_per_range_hash_schemas) {
     // TODO(aserbin): the signature of CreatePartitions() requires the
@@ -1874,10 +1877,10 @@ Status CatalogManager::CreateTable(const CreateTableRequestPB* orig_req,
     //                CatalogManager::ApplyAlterPartitioningSteps() involving
     //                CreatePartitions() should be updated correspondingly.
     const auto& ps = req.partition_schema();
-    for (int i = 0; i < ps.range_hash_schemas_size(); i++) {
+    for (int i = 0; i < ps.custom_hash_schema_ranges_size(); i++) {
       PartitionSchema::HashSchema hash_schema;
       RETURN_NOT_OK(PartitionSchema::ExtractHashSchemaFromPB(
-          schema, ps.range_hash_schemas(i).hash_schemas(), &hash_schema));
+          schema, ps.custom_hash_schema_ranges(i).hash_schema(), &hash_schema));
       range_hash_schemas.emplace_back(std::move(hash_schema));
     }
   }
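
Downstream, the per-range hash schemas extracted in the loop above are passed
to PartitionSchema::CreatePartitions() together with the decoded range bounds
(a sketch; per the checks in partition.cc, 'split_rows' must be empty whenever
'range_hash_schemas' is non-empty):

    vector<Partition> partitions;
    RETURN_NOT_OK(partition_schema.CreatePartitions(
        /*split_rows=*/{}, range_bounds, range_hash_schemas, schema, &partitions));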
diff --git a/src/kudu/master/master-test.cc b/src/kudu/master/master-test.cc
index 4a622ad..37493fa 100644
--- a/src/kudu/master/master-test.cc
+++ b/src/kudu/master/master-test.cc
@@ -242,6 +242,12 @@ Status MasterTest::CreateTable(
     const vector<KuduPartialRow>& split_rows,
     const vector<pair<KuduPartialRow, KuduPartialRow>>& bounds,
     const vector<HashSchema>& range_hash_schemas) {
+
+  if (!range_hash_schemas.empty() && range_hash_schemas.size() != bounds.size()) {
+    return Status::InvalidArgument(
+        "'bounds' and 'range_hash_schemas' must be of the same size");
+  }
+
   CreateTableRequestPB req;
   req.set_name(name);
   if (type) {
@@ -252,26 +258,26 @@ Status MasterTest::CreateTable(
   for (const KuduPartialRow& row : split_rows) {
     splits_encoder.Add(RowOperationsPB::SPLIT_ROW, row);
   }
-  auto* partition_schema_pb = req.mutable_partition_schema();
   for (const pair<KuduPartialRow, KuduPartialRow>& bound : bounds) {
     splits_encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, bound.first);
     splits_encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, bound.second);
-    if (!range_hash_schemas.empty()) {
-      RowOperationsPBEncoder encoder(partition_schema_pb->add_range_bounds());
-      encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, bound.first);
-      encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, bound.second);
-    }
   }
 
-  for (const auto& range_hash_schema : range_hash_schemas) {
-    auto* hash_schemas_pb = partition_schema_pb->add_range_hash_schemas();
+  auto* ps_pb = req.mutable_partition_schema();
+  for (size_t i = 0; i < range_hash_schemas.size(); ++i) {
+    auto* range = ps_pb->add_custom_hash_schema_ranges();
+    RowOperationsPBEncoder encoder(range->mutable_range_bounds());
+    const auto& bound = bounds[i];
+    encoder.Add(RowOperationsPB::RANGE_LOWER_BOUND, bound.first);
+    encoder.Add(RowOperationsPB::RANGE_UPPER_BOUND, bound.second);
+    const auto& range_hash_schema = range_hash_schemas[i];
     for (const auto& hash_dimension : range_hash_schema) {
-      auto* hash_bucket_schema_pb = hash_schemas_pb->add_hash_schemas();
+      auto* hash_dimension_pb = range->add_hash_schema();
       for (const string& col_name : hash_dimension.columns) {
-        hash_bucket_schema_pb->add_columns()->set_name(col_name);
+        hash_dimension_pb->add_columns()->set_name(col_name);
       }
-      hash_bucket_schema_pb->set_num_buckets(hash_dimension.num_buckets);
-      hash_bucket_schema_pb->set_seed(hash_dimension.seed);
+      hash_dimension_pb->set_num_buckets(hash_dimension.num_buckets);
+      hash_dimension_pb->set_seed(hash_dimension.seed);
     }
   }
 
@@ -930,30 +936,6 @@ TEST_F(MasterTest, TestCreateTableCheckRangeInvariants) {
                         "populated at the same time.");
   }
 
-  // The number of range bounds must match the size of user defined hash schemas.
-  {
-    google::FlagSaver flag_saver;
-    FLAGS_enable_per_range_hash_schemas = true; // enable for testing.
-    KuduPartialRow a_lower(&kTableSchema);
-    KuduPartialRow a_upper(&kTableSchema);
-    ASSERT_OK(a_lower.SetInt32("key", 0));
-    ASSERT_OK(a_upper.SetInt32("key", 100));
-    KuduPartialRow b_lower(&kTableSchema);
-    KuduPartialRow b_upper(&kTableSchema);
-    ASSERT_OK(b_lower.SetInt32("key", 100));
-    ASSERT_OK(b_upper.SetInt32("key", 200));
-    vector<HashSchema> range_hash_schemas = { { { {"key"}, 4, 0 } } };
-    Status s = CreateTable(kTableName,
-                           kTableSchema,
-                           {},
-                           { { a_lower, a_upper }, { b_lower, b_upper }, },
-                           range_hash_schemas);
-    ASSERT_TRUE(s.IsInvalidArgument()) << s.ToString();
-    ASSERT_STR_CONTAINS(s.ToString(),
-                        "1 vs 2: per range hash schemas and range bounds "
-                        "must have the same size");
-  }
-
   // No non-range columns.
   {
     KuduPartialRow split(&kTableSchema);