You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@kudu.apache.org by to...@apache.org on 2017/03/30 19:40:15 UTC

[1/5] kudu git commit: docs: Allow make_site.sh to skip javadoc generation

Repository: kudu
Updated Branches:
  refs/heads/master 824b0f46d -> 08b8ec9e1


docs: Allow make_site.sh to skip javadoc generation

This patch adds the ability to skip javadoc generation when running
make_site.sh. It also adds a --force flag, which will overwrite the
destination output directory if it exists.

Change-Id: Ibd9bb4cb04c8491e26e784fa07d7229791830aab
Reviewed-on: http://gerrit.cloudera.org:8080/6505
Tested-by: Kudu Jenkins
Reviewed-by: Will Berkeley <wd...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/kudu/repo
Commit: http://git-wip-us.apache.org/repos/asf/kudu/commit/023629af
Tree: http://git-wip-us.apache.org/repos/asf/kudu/tree/023629af
Diff: http://git-wip-us.apache.org/repos/asf/kudu/diff/023629af

Branch: refs/heads/master
Commit: 023629af2788f473f18bd2a6ee38152c8bf437f2
Parents: 824b0f4
Author: Mike Percy <mp...@apache.org>
Authored: Tue Mar 28 18:43:35 2017 -0700
Committer: Will Berkeley <wd...@gmail.com>
Committed: Wed Mar 29 15:35:56 2017 +0000

----------------------------------------------------------------------
 docs/support/scripts/make_site.sh | 50 ++++++++++++++++++++++------------
 1 file changed, 33 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/kudu/blob/023629af/docs/support/scripts/make_site.sh
----------------------------------------------------------------------
diff --git a/docs/support/scripts/make_site.sh b/docs/support/scripts/make_site.sh
index ec24b54..38a4cbd 100755
--- a/docs/support/scripts/make_site.sh
+++ b/docs/support/scripts/make_site.sh
@@ -28,10 +28,14 @@ BUILD_ROOT="$SOURCE_ROOT/build/$BUILD_TYPE"
 SITE_OUTPUT_DIR="$BUILD_ROOT/site"
 
 OPT_DOXYGEN=1 # By default, build doxygen docs.
+OPT_JAVADOC=1 # By default, build javadocs.
+OPT_FORCE='' # By default, don't overwrite the destination directory.
 
 usage() {
-  echo "Usage: $0 [--no-doxygen]"
+  echo "Usage: $0 [--no-doxygen] [--no-javadoc] [--force]"
   echo "Specify --no-doxygen to skip generation of the C++ client API docs"
+  echo "Specify --no-javadoc to skip generation of the Java API docs"
+  echo "Specify --force to overwrite the destination directory, if it exists"
   exit 1
 }
 
@@ -39,6 +43,8 @@ if [ $# -gt 0 ]; then
   for arg in $*; do
     case $arg in
       "--no-doxygen")  OPT_DOXYGEN='' ;;
+      "--no-javadoc")  OPT_JAVADOC='' ;;
+      "--force")       OPT_FORCE=1 ;;
       "--help")        usage ;;
       "-h")            usage ;;
       *)               echo "$0: Unknown command-line option: $arg"; usage ;;
@@ -75,6 +81,9 @@ fi
 make -j$(getconf _NPROCESSORS_ONLN) $MAKE_TARGETS
 
 # Check out the gh-pages repo into $SITE_OUTPUT_DIR
+if [ -d "$SITE_OUTPUT_DIR" -a -n "$OPT_FORCE" ]; then
+  rm -rf "$SITE_OUTPUT_DIR"
+fi
 git clone -q $(git config --get remote.apache.url) --reference $SOURCE_ROOT -b gh-pages --depth 1 "$SITE_OUTPUT_DIR"
 
 # Build the docs using the styles from the Jekyll site
@@ -87,30 +96,36 @@ else
   exit 1
 fi
 
-cd "$SOURCE_ROOT/java"
-mvn clean install -DskipTests
-if mvn clean javadoc:aggregate | tee /dev/stdout | fgrep -q "Javadoc Warnings"; then
-  echo "There are Javadoc warnings. Please fix them."
-  exit 1
-fi
-
-if [ -f "$SOURCE_ROOT/java/target/site/apidocs/index.html" ]; then
-  echo "Successfully built Javadocs."
-else
-  echo "Javadocs failed to build."
-  exit 1
+if [ -n "$OPT_JAVADOC" ]; then
+  JAVADOC_SUBDIR="apidocs"
+  cd "$SOURCE_ROOT/java"
+  mvn clean install -DskipTests
+  if mvn clean javadoc:aggregate | tee /dev/stdout | fgrep -q "Javadoc Warnings"; then
+    echo "There are Javadoc warnings. Please fix them."
+    exit 1
+  fi
+
+  if [ -f "$SOURCE_ROOT/java/target/site/$JAVADOC_SUBDIR/index.html" ]; then
+    echo "Successfully built Javadocs."
+  else
+    echo "Javadocs failed to build."
+    exit 1
+  fi
+
+  rm -Rf "$SITE_OUTPUT_DIR/$JAVADOC_SUBDIR"
+  cp -au "$SOURCE_ROOT/java/target/site/$JAVADOC_SUBDIR" "$SITE_OUTPUT_DIR/"
 fi
 
-rm -Rf "$SITE_OUTPUT_DIR/apidocs"
-cp -au "$SOURCE_ROOT/java/target/site/apidocs" "$SITE_OUTPUT_DIR/"
-
 if [ -n "$OPT_DOXYGEN" ]; then
   CPP_CLIENT_API_SUBDIR="cpp-client-api"
   rm -Rf "$SITE_OUTPUT_DIR/$CPP_CLIENT_API_SUBDIR"
   cp -a "$BUILD_ROOT/docs/doxygen/client_api/html" "$SITE_OUTPUT_DIR/$CPP_CLIENT_API_SUBDIR"
 fi
 
-SITE_SUBDIRS="docs apidocs"
+SITE_SUBDIRS="docs"
+if [ -n "$OPT_JAVADOC" ]; then
+  SITE_SUBDIRS="$SITE_SUBDIRS $JAVADOC_SUBDIR"
+fi
 if [ -n "$OPT_DOXYGEN" ]; then
   SITE_SUBDIRS="$SITE_SUBDIRS $CPP_CLIENT_API_SUBDIR"
 fi
@@ -121,3 +136,4 @@ zip -rq "$SITE_ARCHIVE" $SITE_SUBDIRS
 
 echo "Generated web site at $SITE_OUTPUT_DIR"
 echo "Docs zip generated at $SITE_ARCHIVE"
+echo "To view live site locally, run: (cd $SITE_OUTPUT_DIR && ./site_tool jekyll serve)"


[2/5] kudu git commit: design docs: Fix bad markdown formatting in tablet.md

Posted by to...@apache.org.
design docs: Fix bad markdown formatting in tablet.md

Level 1 headings are only allowed to have a line of equals signs under
the text of the heading, not before.

Change-Id: I336b7a5814da30812e5dd77289027ac296fcd2da
Reviewed-on: http://gerrit.cloudera.org:8080/6500
Tested-by: Kudu Jenkins
Reviewed-by: Will Berkeley <wd...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/kudu/repo
Commit: http://git-wip-us.apache.org/repos/asf/kudu/commit/6e74cefd
Tree: http://git-wip-us.apache.org/repos/asf/kudu/tree/6e74cefd
Diff: http://git-wip-us.apache.org/repos/asf/kudu/diff/6e74cefd

Branch: refs/heads/master
Commit: 6e74cefda7ce8e325ae4275ae3a7f2eb801a862f
Parents: 023629a
Author: Mike Percy <mp...@apache.org>
Authored: Tue Mar 28 12:24:58 2017 -0700
Committer: Mike Percy <mp...@apache.org>
Committed: Wed Mar 29 21:26:35 2017 +0000

----------------------------------------------------------------------
 docs/design-docs/tablet.md | 13 -------------
 1 file changed, 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/kudu/blob/6e74cefd/docs/design-docs/tablet.md
----------------------------------------------------------------------
diff --git a/docs/design-docs/tablet.md b/docs/design-docs/tablet.md
index 18a4f2f..b021d10 100644
--- a/docs/design-docs/tablet.md
+++ b/docs/design-docs/tablet.md
@@ -23,7 +23,6 @@ are disjoint, ie the set of rows for different RowSets do not
 intersect, so any given key is present in at most one RowSet. While
 RowSets are disjoint, their key spaces may overlap.
 
-============================================================
 Handling Insertions
 ============================================================
 
@@ -42,7 +41,6 @@ of a special header, followed by the packed format of the row data (more detail
 Since the MemRowSet is fully in-memory, it will eventually fill up and "Flush" to disk --
 this process is described in detail later in this document.
 
-============================================================
 MVCC Overview
 ============================================================
 
@@ -84,7 +82,6 @@ a retention period beyond which old transaction records may be GCed (thus preven
 reads from earlier than that point in history).
 (NOTE: history GC not currently implemented)
 
-============================================================
 MVCC Mutations in MemRowSet
 ============================================================
 
@@ -182,7 +179,6 @@ However, we consider the above inefficiencies tolerable given the following assu
 If it turns out that the above inefficiencies impact real applications, various optimizations
 can be applied in the future to reduce the overhead.
 
-============================================================
 MemRowSet Flushes
 ============================================================
 
@@ -218,7 +214,6 @@ NOTE: other systems such as C-Store call the MemRowSet the
 "write optimized store" (WOS), and the on-disk files the "read-optimized store"
 (ROS).
 
-============================================================
 Historical MVCC in DiskRowSets
 ============================================================
 
@@ -295,7 +290,6 @@ the range of transactions for which UNDO records are present. If the scanner's M
 snapshot indicates that all of these transactions are already committed, then the set
 of deltas may be short circuited, and the query can proceed with no MVCC overhead.
 
-============================================================
 Handling mutations against on-disk files
 ============================================================
 
@@ -341,7 +335,6 @@ This allows for fast updates of small columns without the overhead of reading
 or re-writing larger columns (an advantage compared to the MVCC techniques used
 by systems such as C-Store and PostgreSQL).
 
-============================================================
 Summary of delta file processing
 ============================================================
 
@@ -361,7 +354,6 @@ REDO records: data which needs to be processed in order to bring rows up to date
 
 UNDO records and REDO records are stored in the same file format, called a DeltaFile.
 
-============================================================
 Delta Compactions
 ============================================================
 
@@ -408,7 +400,6 @@ UNDO logs have been removed, there is no remaining record of when any row or
 cell was inserted or updated. If users need this functionality, they should
 keep their own "inserted_on" timestamp column, as they would in a traditional RDBMS.
 
-============================================================
 Types of Delta Compaction
 ============================================================
 
@@ -477,7 +468,6 @@ compaction file can be introduced into the RowSet by atomically swapping it with
 the compaction inputs. After the swap is complete, the pre-compaction files may
 be removed.
 
-============================================================
 Merging compactions
 ============================================================
 
@@ -528,7 +518,6 @@ in a Merging Compaction. This makes the handling of concurrent mutations a somew
 intricate dance. This process is described in more detail in 'compaction.txt' in this
 directory.
 
-============================================================
 Overall picture
 ============================================================
 
@@ -585,7 +574,6 @@ DiskRowSet 2:
 +-----------------+ /
 ```
 
-============================================================
 Comparison to BigTable approach
 ============================================================
 
@@ -690,7 +678,6 @@ not another dimension in the row key. Instead, Kudu provides native composite ro
 which can be useful for time series.
 
 
-============================================================
 Comparing the MVCC implementation to other databases
 ============================================================
 


[4/5] kudu git commit: interval_tree: improve an O(n) loop to O(lg n)

Posted by to...@apache.org.
interval_tree: improve an O(n) loop to O(lg n)

In the interval tree implementation, we were scanning forward through a
sorted list to find the first element for which a comparison predicate
returned false. Since we know that the list is sorted, we can use
std::partition_point instead, giving a logarithmic search rather than a linear one.

Before:
  Performance counter stats for 'build/latest/bin/rowset_tree-test --gtest_repeat=10 --gtest_filter=*Perf*':

       4583.429978      task-clock (msec)         #    0.999 CPUs utilized
               133      context-switches          #    0.029 K/sec
                 0      cpu-migrations            #    0.000 K/sec
               693      page-faults               #    0.151 K/sec
    14,480,814,146      cycles                    #    3.159 GHz
   <not supported>      stalled-cycles-frontend
   <not supported>      stalled-cycles-backend
    36,542,995,670      instructions              #    2.52  insns per cycle
     6,044,686,478      branches                  # 1318.813 M/sec
        63,527,970      branch-misses             #    1.05% of all branches

       4.585735270 seconds time elapsed

After:
  Performance counter stats for 'build/latest/bin/rowset_tree-test --gtest_repeat=10 --gtest_filter=*Perf*':

       4044.192209      task-clock (msec)         #    1.000 CPUs utilized
                 5      context-switches          #    0.001 K/sec
                 0      cpu-migrations            #    0.000 K/sec
               509      page-faults               #    0.126 K/sec
    13,329,589,168      cycles                    #    3.296 GHz
   <not supported>      stalled-cycles-frontend
   <not supported>      stalled-cycles-backend
    30,484,446,619      instructions              #    2.29  insns per cycle
     5,094,736,604      branches                  # 1259.766 M/sec
        86,320,621      branch-misses             #    1.69% of all branches

       4.044156458 seconds time elapsed

Overall speeds up around 10%.

Change-Id: Ib8c9463fb901b7bf75d32b00cb528e96c119101e
Reviewed-on: http://gerrit.cloudera.org:8080/6496
Tested-by: Kudu Jenkins
Reviewed-by: David Ribeiro Alves <dr...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/kudu/repo
Commit: http://git-wip-us.apache.org/repos/asf/kudu/commit/f8e88fa0
Tree: http://git-wip-us.apache.org/repos/asf/kudu/tree/f8e88fa0
Diff: http://git-wip-us.apache.org/repos/asf/kudu/diff/f8e88fa0

Branch: refs/heads/master
Commit: f8e88fa0be3bb64c5f05e755e3db55f3ac7cb998
Parents: 4be39fe
Author: Todd Lipcon <to...@apache.org>
Authored: Mon Mar 27 18:58:30 2017 -0700
Committer: Todd Lipcon <to...@apache.org>
Committed: Thu Mar 30 01:20:43 2017 +0000

----------------------------------------------------------------------
 src/kudu/util/interval_tree-inl.h | 64 ++++++++++++++++++----------------
 1 file changed, 33 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/kudu/blob/f8e88fa0/src/kudu/util/interval_tree-inl.h
----------------------------------------------------------------------
diff --git a/src/kudu/util/interval_tree-inl.h b/src/kudu/util/interval_tree-inl.h
index 7e88e42..ec65390 100644
--- a/src/kudu/util/interval_tree-inl.h
+++ b/src/kudu/util/interval_tree-inl.h
@@ -20,6 +20,8 @@
 #include <algorithm>
 #include <vector>
 
+#include "kudu/util/interval_tree.h"
+
 namespace kudu {
 
 template<class Traits>
@@ -223,13 +225,12 @@ void ITNode<Traits>::FindContainingPoint(const point_type &query,
 
     // Any intervals which start before the query point and overlap the split point
     // must therefore contain the query point.
-    for (const interval_type &interval : overlapping_by_asc_left_) {
-      if (Traits::compare(Traits::get_left(interval), query) <= 0) {
-        results->push_back(interval);
-      } else {
-        break;
-      }
-    }
+    auto p = std::partition_point(
+        overlapping_by_asc_left_.cbegin(), overlapping_by_asc_left_.cend(),
+        [&](const interval_type& interval) {
+          return Traits::compare(Traits::get_left(interval), query) <= 0;
+        });
+    results->insert(results->end(), overlapping_by_asc_left_.cbegin(), p);
   } else if (cmp > 0) {
     // None of the intervals in left_ may intersect this.
     if (right_ != NULL) {
@@ -238,13 +239,12 @@ void ITNode<Traits>::FindContainingPoint(const point_type &query,
 
     // Any intervals which end after the query point and overlap the split point
     // must therefore contain the query point.
-    for (const interval_type &interval : overlapping_by_desc_right_) {
-      if (Traits::compare(Traits::get_right(interval), query) >= 0) {
-        results->push_back(interval);
-      } else {
-        break;
-      }
-    }
+    auto p = std::partition_point(
+        overlapping_by_desc_right_.cbegin(), overlapping_by_desc_right_.cend(),
+        [&](const interval_type& interval) {
+          return Traits::compare(Traits::get_right(interval), query) >= 0;
+        });
+    results->insert(results->end(), overlapping_by_desc_right_.cbegin(), p);
   } else {
     DCHECK_EQ(cmp, 0);
     // The query is exactly our split point -- in this case we've already got
@@ -265,30 +265,32 @@ void ITNode<Traits>::FindIntersectingInterval(const interval_type &query,
     }
 
     // Any intervals whose left edge is <= the query interval's right edge
-    // intersect the query interval.
-    for (const interval_type &interval : overlapping_by_asc_left_) {
-      if (Traits::compare(Traits::get_left(interval),Traits::get_right(query)) <= 0) {
-        results->push_back(interval);
-      } else {
-        break;
-      }
-    }
+    // intersect the query interval. 'std::partition_point' returns the first
+    // such interval which does not meet that criterion, so we insert all
+    // up to that point.
+    auto first_greater = std::partition_point(
+        overlapping_by_asc_left_.cbegin(), overlapping_by_asc_left_.cend(),
+        [&](const interval_type& interval) {
+          return Traits::compare(Traits::get_left(interval), Traits::get_right(query)) <= 0;
+        });
+    results->insert(results->end(), overlapping_by_asc_left_.cbegin(), first_greater);
   } else if (Traits::compare(Traits::get_left(query), split_point_) > 0) {
     // The interval is fully right of the split point. So, it may not overlap
-    // with any in 'left_'
+    // with any in 'left_'.
     if (right_ != NULL) {
       right_->FindIntersectingInterval(query, results);
     }
 
     // Any intervals whose right edge is >= the query interval's left edge
-    // intersect the query interval.
-    for (const interval_type &interval : overlapping_by_desc_right_) {
-      if (Traits::compare(Traits::get_right(interval), Traits::get_left(query)) >= 0) {
-        results->push_back(interval);
-      } else {
-        break;
-      }
-    }
+    // intersect the query interval. 'std::partition_point' returns the first
+    // such interval which does not meet that criterion, so we insert all
+    // up to that point.
+    auto first_lesser = std::partition_point(
+        overlapping_by_desc_right_.cbegin(), overlapping_by_desc_right_.cend(),
+        [&](const interval_type& interval) {
+          return Traits::compare(Traits::get_right(interval), Traits::get_left(query)) >= 0;
+        });
+    results->insert(results->end(), overlapping_by_desc_right_.cbegin(), first_lesser);
   } else {
     // The query interval contains the split point. Therefore all other intervals
     // which also contain the split point are intersecting.


[3/5] kudu git commit: KUDU-1890 Allow renaming of primary key column

Posted by to...@apache.org.
KUDU-1890 Allow renaming of primary key column

Removed the checks preventing renaming of primary key
columns, added test cases and updated documents.

Testing: Ran the alter_table-randomized-test.cc test in a loop 100 times
successfully; the test includes modifications that rename both primary key
and non-primary key columns.

Addressed code review comments

Change-Id: I28a8c52bdb9ac5a3661f9a07c737f7252466d307
Reviewed-on: http://gerrit.cloudera.org:8080/6078
Reviewed-by: Todd Lipcon <to...@apache.org>
Tested-by: Todd Lipcon <to...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/kudu/repo
Commit: http://git-wip-us.apache.org/repos/asf/kudu/commit/4be39fee
Tree: http://git-wip-us.apache.org/repos/asf/kudu/tree/4be39fee
Diff: http://git-wip-us.apache.org/repos/asf/kudu/diff/4be39fee

Branch: refs/heads/master
Commit: 4be39feef4907550c16f9d2b34430d098b9f1b21
Parents: 6e74cef
Author: rmettu <ra...@rms.com>
Authored: Mon Feb 20 00:04:56 2017 -0500
Committer: Todd Lipcon <to...@apache.org>
Committed: Wed Mar 29 21:52:20 2017 +0000

----------------------------------------------------------------------
 docs/developing.adoc                            |  4 +-
 docs/known_issues.adoc                          |  6 +--
 docs/kudu_impala_integration.adoc               |  4 +-
 docs/schema_design.adoc                         |  1 +
 .../org/apache/kudu/client/TestAlterTable.java  | 51 ++++++++++++++++++++
 src/kudu/client/client-test.cc                  | 32 ++++++++----
 src/kudu/client/table_alterer-internal.cc       |  2 +-
 src/kudu/common/schema-test.cc                  | 46 ++++++++++++++++++
 src/kudu/common/schema.h                        | 34 ++++++++++---
 .../alter_table-randomized-test.cc              | 37 +++++++++-----
 src/kudu/integration-tests/alter_table-test.cc  | 37 ++++++++++++++
 src/kudu/master/catalog_manager.cc              |  5 --
 src/kudu/tablet/tablet.cc                       |  8 +--
 13 files changed, 219 insertions(+), 48 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/kudu/blob/4be39fee/docs/developing.adoc
----------------------------------------------------------------------
diff --git a/docs/developing.adoc b/docs/developing.adoc
index 7cb8055..ba9561b 100644
--- a/docs/developing.adoc
+++ b/docs/developing.adoc
@@ -162,8 +162,8 @@ kuduContext.deleteTable("unwanted_table")
 - Kudu tables with a name containing upper case or non-ascii characters must be
   assigned an alternate name when registered as a temporary table.
 - Kudu tables with a column name containing upper case or non-ascii characters
-  may not be used with SparkSQL. Non-primary key columns may be renamed in Kudu
-  to work around this issue.
+  may not be used with SparkSQL. Columns may be renamed in Kudu to work around 
+  this issue.
 - `NULL`, `NOT NULL`, `<>`, `OR`, `LIKE`, and `IN` predicates are not pushed to
   Kudu, and instead will be evaluated by the Spark task.
 - Kudu does not support all types supported by Spark SQL, such as `Date`,

http://git-wip-us.apache.org/repos/asf/kudu/blob/4be39fee/docs/known_issues.adoc
----------------------------------------------------------------------
diff --git a/docs/known_issues.adoc b/docs/known_issues.adoc
index 607b4e8..ce73a51 100644
--- a/docs/known_issues.adoc
+++ b/docs/known_issues.adoc
@@ -33,10 +33,8 @@
 
 * The columns which make up the primary key must be listed first in the schema.
 
-* Columns that are part of the primary key cannot be renamed.
-  The primary key may not be changed after the table is created.
-  You must drop and recreate a table to select a new primary key
-  or rename key columns.
+* The primary key may not be changed after the table is created.
+  You must drop and recreate a table to select a new primary key.
 
 * The primary key of a row may not be modified using the `UPDATE` functionality.
   To modify a row's primary key, the row must be deleted and re-inserted with

http://git-wip-us.apache.org/repos/asf/kudu/blob/4be39fee/docs/kudu_impala_integration.adoc
----------------------------------------------------------------------
diff --git a/docs/kudu_impala_integration.adoc b/docs/kudu_impala_integration.adoc
index 9383124..4faca85 100755
--- a/docs/kudu_impala_integration.adoc
+++ b/docs/kudu_impala_integration.adoc
@@ -731,8 +731,8 @@ The examples above have only explored a fraction of what you can do with Impala
 - Kudu tables with a name containing upper case or non-ascii characters must be
   assigned an alternate name when used as an external table in Impala.
 - Kudu tables with a column name containing upper case or non-ascii characters
-  may not be used as an external table in Impala. Non-primary key columns may be
-  renamed in Kudu to work around this issue.
+  may not be used as an external table in Impala. Columns may be renamed in Kudu 
+  to work around this issue.
 - When creating a Kudu table, the `CREATE TABLE` statement must include the
   primary key columns before other columns, in primary key order.
 - Kudu tables containing `UNIXTIME_MICROS`-typed columns may not be used as an

http://git-wip-us.apache.org/repos/asf/kudu/blob/4be39fee/docs/schema_design.adoc
----------------------------------------------------------------------
diff --git a/docs/schema_design.adoc b/docs/schema_design.adoc
index 14ddd29..cec85fd 100644
--- a/docs/schema_design.adoc
+++ b/docs/schema_design.adoc
@@ -429,6 +429,7 @@ partitioning, which logically adds another dimension of partitioning.
 You can alter a table's schema in the following ways:
 
 - Rename the table
+- Rename primary key columns
 - Rename, add, or drop non-primary key columns
 - Add and drop range partitions
 

http://git-wip-us.apache.org/repos/asf/kudu/blob/4be39fee/java/kudu-client/src/test/java/org/apache/kudu/client/TestAlterTable.java
----------------------------------------------------------------------
diff --git a/java/kudu-client/src/test/java/org/apache/kudu/client/TestAlterTable.java b/java/kudu-client/src/test/java/org/apache/kudu/client/TestAlterTable.java
index 3b8b5a2..3b7b9da 100644
--- a/java/kudu-client/src/test/java/org/apache/kudu/client/TestAlterTable.java
+++ b/java/kudu-client/src/test/java/org/apache/kudu/client/TestAlterTable.java
@@ -27,6 +27,7 @@ import java.util.Collections;
 import java.util.List;
 
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
 import org.junit.Before;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -148,6 +149,56 @@ public class TestAlterTable extends BaseKuduTest {
   }
 
   @Test
+  public void testRenameKeyColumn() throws Exception {
+    KuduTable table = createTable(ImmutableList.<Pair<Integer,Integer>>of());
+    insertRows(table, 0, 100);
+    assertEquals(100, countRowsInTable(table));
+
+    syncClient.alterTable(tableName, new AlterTableOptions()
+            .renameColumn("c0", "c0Key"));
+    boolean done = syncClient.isAlterTableDone(tableName);
+    assertTrue(done);
+
+    // scanning with the old schema
+    try {
+      KuduScanner scanner = syncClient.newScannerBuilder(table)
+              .setProjectedColumnNames(Lists.newArrayList("c0", "c1")).build();
+      while (scanner.hasMoreRows()) {
+        scanner.nextRows();
+      }
+    } catch (KuduException e) {
+      assertTrue(e.getStatus().isInvalidArgument());
+      assertTrue(e.getStatus().getMessage().contains(
+              "Some columns are not present in the current schema: c0"));
+    }
+
+    // Reopen table for the new schema.
+    table = syncClient.openTable(tableName);
+    assertEquals("c0Key", table.getSchema().getPrimaryKeyColumns().get(0).getName());
+    assertEquals(2, table.getSchema().getColumnCount());
+
+    // Add a row
+    KuduSession session = syncClient.newSession();
+    Insert insert = table.newInsert();
+    PartialRow row = insert.getRow();
+    row.addInt("c0Key", 101);
+    row.addInt("c1", 101);
+    session.apply(insert);
+    session.flush();
+    RowError[] rowErrors = session.getPendingErrors().getRowErrors();
+    assertEquals(String.format("row errors: %s", Arrays.toString(rowErrors)), 0, rowErrors.length);
+
+    KuduScanner scanner = syncClient.newScannerBuilder(table)
+            .setProjectedColumnNames(Lists.newArrayList("c0Key", "c1")).build();
+    while (scanner.hasMoreRows()) {
+      RowResultIterator it = scanner.nextRows();
+      assertTrue(it.hasNext());
+      RowResult rr = it.next();
+      assertEquals(rr.getInt(0), rr.getInt(1));
+    }
+  }
+
+  @Test
   public void testAlterRangePartitioning() throws Exception {
     KuduTable table = createTable(ImmutableList.<Pair<Integer,Integer>>of());
     Schema schema = table.getSchema();

http://git-wip-us.apache.org/repos/asf/kudu/blob/4be39fee/src/kudu/client/client-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/client/client-test.cc b/src/kudu/client/client-test.cc
index 702c13d..7c2a03f 100644
--- a/src/kudu/client/client-test.cc
+++ b/src/kudu/client/client-test.cc
@@ -3167,15 +3167,6 @@ TEST_F(ClientTest, TestBasicAlterOperations) {
     ASSERT_STR_CONTAINS(s.ToString(), "cannot remove a key column");
   }
 
-  // test that renaming a key should throws an error
-  {
-    gscoped_ptr<KuduTableAlterer> table_alterer(client_->NewTableAlterer(kTableName));
-    table_alterer->AlterColumn("key")->RenameTo("key2");
-    Status s = table_alterer->Alter();
-    ASSERT_TRUE(s.IsInvalidArgument());
-    ASSERT_STR_CONTAINS(s.ToString(), "cannot rename a key column");
-  }
-
   // test that renaming to an already-existing name throws an error
   {
     gscoped_ptr<KuduTableAlterer> table_alterer(client_->NewTableAlterer(kTableName));
@@ -3243,6 +3234,7 @@ TEST_F(ClientTest, TestBasicAlterOperations) {
     ASSERT_EQ(1, tablet_peer->tablet()->metadata()->schema_version());
   }
 
+  // test that adds a new column of type string
   {
     gscoped_ptr<KuduTableAlterer> table_alterer(client_->NewTableAlterer(kTableName));
     table_alterer->AddColumn("new_string_val")->Type(KuduColumnSchema::STRING)
@@ -3251,13 +3243,33 @@ TEST_F(ClientTest, TestBasicAlterOperations) {
     ASSERT_EQ(2, tablet_peer->tablet()->metadata()->schema_version());
   }
 
+  // test renaming primary key column
+  {
+    gscoped_ptr<KuduTableAlterer> table_alterer(client_->NewTableAlterer(kTableName));
+    table_alterer->AlterColumn("key")->RenameTo("key2");
+    Status s = table_alterer->Alter();
+    ASSERT_FALSE(s.IsInvalidArgument());
+    ASSERT_EQ(3, tablet_peer->tablet()->metadata()->schema_version());
+  }
+
+  // test that changing the data type of a primary key column throws an error
+  {
+    gscoped_ptr<KuduTableAlterer> table_alterer(client_->NewTableAlterer(kTableName));
+    table_alterer->AlterColumn("key2")->Type(KuduColumnSchema::INT64)->NotNull()->PrimaryKey();
+    Status s = table_alterer->Alter();
+    ASSERT_TRUE(s.IsNotSupported()) << s.ToString();
+    ASSERT_STR_CONTAINS(s.ToString(),
+                        "Not implemented: cannot alter attributes for column: key2");
+  }
+
+  // test that changes table name
   {
     const char *kRenamedTableName = "RenamedTable";
     gscoped_ptr<KuduTableAlterer> table_alterer(client_->NewTableAlterer(kTableName));
     ASSERT_OK(table_alterer
               ->RenameTo(kRenamedTableName)
               ->Alter());
-    ASSERT_EQ(3, tablet_peer->tablet()->metadata()->schema_version());
+    ASSERT_EQ(4, tablet_peer->tablet()->metadata()->schema_version());
     ASSERT_EQ(kRenamedTableName, tablet_peer->tablet()->metadata()->table_name());
 
     CatalogManager *catalog_manager = cluster_->mini_master()->master()->catalog_manager();

http://git-wip-us.apache.org/repos/asf/kudu/blob/4be39fee/src/kudu/client/table_alterer-internal.cc
----------------------------------------------------------------------
diff --git a/src/kudu/client/table_alterer-internal.cc b/src/kudu/client/table_alterer-internal.cc
index 3ea2a16..b3ae30a 100644
--- a/src/kudu/client/table_alterer-internal.cc
+++ b/src/kudu/client/table_alterer-internal.cc
@@ -97,7 +97,7 @@ Status KuduTableAlterer::Data::ToRequest(AlterTableRequestPB* req) {
             s.spec->data_->has_default ||
             s.spec->data_->default_val ||
             s.spec->data_->remove_default) {
-          return Status::NotSupported("cannot support AlterColumn of this type",
+          return Status::NotSupported("cannot alter attributes for column",
                                       s.spec->data_->name);
         }
         // We only support rename column

http://git-wip-us.apache.org/repos/asf/kudu/blob/4be39fee/src/kudu/common/schema-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/common/schema-test.cc b/src/kudu/common/schema-test.cc
index ed8f24e..85c96da 100644
--- a/src/kudu/common/schema-test.cc
+++ b/src/kudu/common/schema-test.cc
@@ -101,6 +101,52 @@ TEST_F(TestSchema, TestSwap) {
   ASSERT_EQ(2, schema2.num_key_columns());
 }
 
+TEST_F(TestSchema, TestColumnSchemaEquals) {
+  Slice default_str("read-write default");
+  ColumnSchema col1("key", STRING);
+  ColumnSchema col2("key1", STRING);
+  ColumnSchema col3("key", STRING, true);
+  ColumnSchema col4("key", STRING, true, &default_str, &default_str);
+
+  ASSERT_TRUE(col1.Equals(col1));
+  ASSERT_FALSE(col1.Equals(col2, ColumnSchema::COMPARE_NAME));
+  ASSERT_TRUE(col1.Equals(col2, ColumnSchema::COMPARE_TYPE));
+  ASSERT_TRUE(col1.Equals(col3, ColumnSchema::COMPARE_NAME));
+  ASSERT_FALSE(col1.Equals(col3, ColumnSchema::COMPARE_TYPE));
+  ASSERT_TRUE(col1.Equals(col3, ColumnSchema::COMPARE_DEFAULTS));
+  ASSERT_FALSE(col3.Equals(col4, ColumnSchema::COMPARE_DEFAULTS));
+  ASSERT_TRUE(col4.Equals(col4, ColumnSchema::COMPARE_DEFAULTS));
+}
+
+TEST_F(TestSchema, TestSchemaEquals) {
+  Schema schema1({ ColumnSchema("col1", STRING),
+                   ColumnSchema("col2", STRING),
+                   ColumnSchema("col3", UINT32) },
+                 2);
+  Schema schema2({ ColumnSchema("newCol1", STRING),
+                   ColumnSchema("newCol2", STRING),
+                   ColumnSchema("newCol3", UINT32) },
+                 2);
+  Schema schema3({ ColumnSchema("col1", STRING),
+                   ColumnSchema("col2", UINT32),
+                   ColumnSchema("col3", UINT32, true) },
+                 2);
+  Schema schema4({ ColumnSchema("col1", STRING),
+                   ColumnSchema("col2", UINT32),
+                   ColumnSchema("col3", UINT32, false) },
+                 2);
+  ASSERT_FALSE(schema1.Equals(schema2));
+  ASSERT_TRUE(schema1.KeyEquals(schema1));
+  ASSERT_TRUE(schema1.KeyEquals(schema2, ColumnSchema::COMPARE_TYPE));
+  ASSERT_FALSE(schema1.KeyEquals(schema2, ColumnSchema::COMPARE_NAME));
+  ASSERT_TRUE(schema1.KeyTypeEquals(schema2));
+  ASSERT_FALSE(schema2.KeyTypeEquals(schema3));
+  ASSERT_FALSE(schema3.Equals(schema4));
+  ASSERT_TRUE(schema4.Equals(schema4));
+  ASSERT_TRUE(schema3.KeyEquals(schema4,
+              ColumnSchema::COMPARE_NAME | ColumnSchema::COMPARE_TYPE));
+}
+
 TEST_F(TestSchema, TestReset) {
   Schema schema;
   ASSERT_FALSE(schema.initialized());

http://git-wip-us.apache.org/repos/asf/kudu/blob/4be39fee/src/kudu/common/schema.h
----------------------------------------------------------------------
diff --git a/src/kudu/common/schema.h b/src/kudu/common/schema.h
index b8656e9..453e470 100644
--- a/src/kudu/common/schema.h
+++ b/src/kudu/common/schema.h
@@ -211,14 +211,27 @@ class ColumnSchema {
            type_info()->type() == other.type_info()->type();
   }
 
-  bool Equals(const ColumnSchema &other, bool check_defaults) const {
-    if (!EqualsType(other) || this->name_ != other.name_)
+  // compare types in Equals function
+  enum {
+    COMPARE_NAME = 1 << 0,
+    COMPARE_TYPE = 1 << 1,
+    COMPARE_DEFAULTS = 1 << 2,
+
+    COMPARE_ALL = COMPARE_NAME | COMPARE_TYPE | COMPARE_DEFAULTS
+  };
+
+  bool Equals(const ColumnSchema &other,
+              int flags = COMPARE_ALL) const {
+    if ((flags & COMPARE_NAME) && this->name_ != other.name_)
+      return false;
+
+    if ((flags & COMPARE_TYPE) && !EqualsType(other))
       return false;
 
     // For Key comparison checking the defaults doesn't make sense,
     // since we don't support them, for server vs user schema this comparison
     // will always fail, since the user does not specify the defaults.
-    if (check_defaults) {
+    if (flags & COMPARE_DEFAULTS) {
       if (read_default_ == NULL && other.read_default_ != NULL)
         return false;
 
@@ -627,9 +640,8 @@ class Schema {
     if (this->num_key_columns_ != other.num_key_columns_) return false;
     if (this->cols_.size() != other.cols_.size()) return false;
 
-    const bool have_column_ids = other.has_column_ids() && has_column_ids();
     for (size_t i = 0; i < other.cols_.size(); i++) {
-      if (!this->cols_[i].Equals(other.cols_[i], have_column_ids)) return false;
+      if (!this->cols_[i].Equals(other.cols_[i])) return false;
     }
 
     return true;
@@ -637,14 +649,22 @@ class Schema {
 
   // Return true if the key projection schemas have exactly the same set of
   // columns and respective types.
-  bool KeyEquals(const Schema& other) const {
+  bool KeyEquals(const Schema& other,
+                 int flags
+                    = ColumnSchema::COMPARE_NAME | ColumnSchema::COMPARE_TYPE) const {
     if (this->num_key_columns_ != other.num_key_columns_) return false;
     for (size_t i = 0; i < this->num_key_columns_; i++) {
-      if (!this->cols_[i].Equals(other.cols_[i], false)) return false;
+      if (!this->cols_[i].Equals(other.cols_[i], flags)) return false;
     }
     return true;
   }
 
+  // Return true if the key projection schemas have exactly the same set of
+  // columns and respective types except name field.
+  bool KeyTypeEquals(const Schema& other) const {
+    return KeyEquals(other, ColumnSchema::COMPARE_TYPE);
+  }
+
   // Return a non-OK status if the project is not compatible with the current schema
   // - User columns non present in the tablet are considered errors
   // - Matching columns with different types, at the moment, are considered errors

http://git-wip-us.apache.org/repos/asf/kudu/blob/4be39fee/src/kudu/integration-tests/alter_table-randomized-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/integration-tests/alter_table-randomized-test.cc b/src/kudu/integration-tests/alter_table-randomized-test.cc
index 14a592c..5387e9c 100644
--- a/src/kudu/integration-tests/alter_table-randomized-test.cc
+++ b/src/kudu/integration-tests/alter_table-randomized-test.cc
@@ -186,7 +186,7 @@ struct TableState {
     }
 
     row->clear();
-    row->push_back(make_pair("key", key));
+    row->push_back(make_pair(col_names_[0], key));
     for (int i = 1; i < col_names_.size(); i++) {
       int32_t val;
       if (col_nullable_[i] && seed % 2 == 1) {
@@ -199,7 +199,7 @@ struct TableState {
   }
 
   bool Insert(const vector<pair<string, int32_t>>& data) {
-    DCHECK_EQ("key", data[0].first);
+    DCHECK_EQ(col_names_[0], data[0].first);
     int32_t key = data[0].second;
     if (ContainsKey(rows_, key)) return false;
 
@@ -210,7 +210,7 @@ struct TableState {
   }
 
   bool Update(const vector<pair<string, int32_t>>& data) {
-    DCHECK_EQ("key", data[0].first);
+    DCHECK_EQ(col_names_[0], data[0].first);
     int32_t key = data[0].second;
     if (!ContainsKey(rows_, key)) return false;
 
@@ -244,6 +244,10 @@ struct TableState {
   void RenameColumn(const string& existing_name, string new_name) {
     auto iter = std::find(col_names_.begin(), col_names_.end(), existing_name);
     CHECK(iter != col_names_.end());
+    int index = iter - col_names_.begin();
+    for (auto& e : rows_) {
+      e.second->cols[index].first = new_name;
+    }
     *iter = std::move(new_name);
   }
 
@@ -340,7 +344,7 @@ struct MirrorTable {
     if (ts_.rows_.empty()) return;
     int32_t row_key = ts_.GetRandomExistingRowKey();
     vector<pair<string, int32_t>> del;
-    del.push_back(make_pair("key", row_key));
+    del.push_back(make_pair(ts_.col_names_[0], row_key));
     CHECK_OK(DoRealOp(del, DELETE));
 
     ts_.Delete(row_key);
@@ -351,7 +355,7 @@ struct MirrorTable {
     int32_t row_key = ts_.GetRandomExistingRowKey();
 
     vector<pair<string, int32_t>> update;
-    update.push_back(make_pair("key", row_key));
+    update.push_back(make_pair(ts_.col_names_[0], row_key));
     for (int i = 1; i < num_columns(); i++) {
       int32_t val = rand * i;
       if (val == RowState::kNullValue) val++;
@@ -387,7 +391,7 @@ struct MirrorTable {
 
     int step_count = 1 + ts_.rand_.Uniform(10);
     for (int step = 0; step < step_count; step++) {
-      int r = ts_.rand_.Uniform(4);
+      int r = ts_.rand_.Uniform(6);
       if (r < 1 && num_columns() < kMaxColumns) {
         AddAColumn(table_alterer.get());
       } else if (r < 2 && num_columns() > 1) {
@@ -395,6 +399,10 @@ struct MirrorTable {
       } else if (num_range_partitions() == 0 ||
                  (r < 3 && num_range_partitions() < kMaxRangePartitions)) {
         AddARangePartition(schema, table_alterer.get());
+      } else if (r < 4 && num_columns() > 1) {
+        RenameAColumn(table_alterer.get());
+      } else if (r < 5 && num_columns() > 1) {
+        RenamePrimaryKeyColumn(table_alterer.get());
       } else {
         DropARangePartition(schema, table_alterer.get());
       }
@@ -440,7 +448,14 @@ struct MirrorTable {
     ts_.RenameColumn(original_name, std::move(new_name));
   }
 
-  void AddARangePartition(KuduSchema& schema, KuduTableAlterer* table_alterer) {
+  void RenamePrimaryKeyColumn(KuduTableAlterer* table_alterer) {
+    string new_name = ts_.GetRandomNewColumnName();
+    LOG(INFO) << "Renaming PrimaryKey column " << ts_.col_names_[0] << " to " << new_name;
+    table_alterer->AlterColumn(ts_.col_names_[0])->RenameTo(new_name);
+    ts_.RenameColumn(ts_.col_names_[0], std::move(new_name));
+  }
+
+  void AddARangePartition(const KuduSchema& schema, KuduTableAlterer* table_alterer) {
     auto bounds = ts_.AddRangePartition();
     LOG(INFO) << "Adding range partition: [" << bounds.first << ", " << bounds.second << ")"
               << " resulting partitions: ("
@@ -464,9 +479,9 @@ struct MirrorTable {
     }
 
     unique_ptr<KuduPartialRow> lower_bound(schema.NewRow());
-    CHECK_OK(lower_bound->SetInt32("key", lower_bound_value));
+    CHECK_OK(lower_bound->SetInt32(schema.Column(0).name(), lower_bound_value));
     unique_ptr<KuduPartialRow> upper_bound(schema.NewRow());
-    CHECK_OK(upper_bound->SetInt32("key", upper_bound_value));
+    CHECK_OK(upper_bound->SetInt32(schema.Column(0).name(), upper_bound_value));
 
     table_alterer->AddRangePartition(lower_bound.release(), upper_bound.release(),
                                      lower_bound_type, upper_bound_type);
@@ -481,9 +496,9 @@ struct MirrorTable {
                                            ", ", "], (") << ")";
 
     unique_ptr<KuduPartialRow> lower_bound(schema.NewRow());
-    CHECK_OK(lower_bound->SetInt32("key", bounds.first));
+    CHECK_OK(lower_bound->SetInt32(schema.Column(0).name(), bounds.first));
     unique_ptr<KuduPartialRow> upper_bound(schema.NewRow());
-    CHECK_OK(upper_bound->SetInt32("key", bounds.second));
+    CHECK_OK(upper_bound->SetInt32(schema.Column(0).name(), bounds.second));
 
     table_alterer->DropRangePartition(lower_bound.release(), upper_bound.release());
   }

http://git-wip-us.apache.org/repos/asf/kudu/blob/4be39fee/src/kudu/integration-tests/alter_table-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/integration-tests/alter_table-test.cc b/src/kudu/integration-tests/alter_table-test.cc
index 144c48a..8347e44 100644
--- a/src/kudu/integration-tests/alter_table-test.cc
+++ b/src/kudu/integration-tests/alter_table-test.cc
@@ -348,6 +348,43 @@ TEST_F(AlterTableTest, TestAddNullableColumnWithoutDefault) {
   EXPECT_EQ("(int32 c0=16777216, int32 c1=1, int32 new=NULL)", rows[1]);
 }
 
+// Rename a primary key column
+TEST_F(AlterTableTest, TestRenamePrimaryKeyColumn) {
+  InsertRows(0, 1);
+  ASSERT_OK(tablet_peer_->tablet()->Flush());
+
+  {
+    gscoped_ptr<KuduTableAlterer> table_alterer(client_->NewTableAlterer(kTableName));
+    table_alterer->AlterColumn("c0")->RenameTo("primaryKeyRenamed");
+    table_alterer->AlterColumn("c1")->RenameTo("secondColumn");
+    ASSERT_OK(table_alterer->Alter());
+  }
+
+  InsertRows(1, 1);
+
+  vector<string> rows;
+  ScanToStrings(&rows);
+  ASSERT_EQ(2, rows.size());
+  EXPECT_EQ("(int32 primaryKeyRenamed=0, int32 secondColumn=0)", rows[0]);
+  EXPECT_EQ("(int32 primaryKeyRenamed=16777216, int32 secondColumn=1)", rows[1]);
+
+  {
+    gscoped_ptr<KuduTableAlterer> table_alterer(client_->NewTableAlterer(kTableName));
+    table_alterer->AlterColumn("primaryKeyRenamed")->RenameTo("pk");
+    table_alterer->AlterColumn("secondColumn")->RenameTo("sc");
+    ASSERT_OK(table_alterer->Alter());
+  }
+
+  InsertRows(2, 1);
+
+  rows.clear();
+  ScanToStrings(&rows);
+  ASSERT_EQ(3, rows.size());
+  EXPECT_EQ("(int32 pk=0, int32 sc=0)", rows[0]);
+  EXPECT_EQ("(int32 pk=16777216, int32 sc=1)", rows[1]);
+  EXPECT_EQ("(int32 pk=33554432, int32 sc=2)", rows[2]);
+}
+
 // Verify that, if a tablet server is down when an alter command is issued,
 // it will eventually receive the command when it restarts.
 TEST_F(AlterTableTest, TestAlterOnTSRestart) {

http://git-wip-us.apache.org/repos/asf/kudu/blob/4be39fee/src/kudu/master/catalog_manager.cc
----------------------------------------------------------------------
diff --git a/src/kudu/master/catalog_manager.cc b/src/kudu/master/catalog_manager.cc
index daf974d..a9d136a 100644
--- a/src/kudu/master/catalog_manager.cc
+++ b/src/kudu/master/catalog_manager.cc
@@ -1592,11 +1592,6 @@ Status CatalogManager::ApplyAlterSchemaSteps(const SysTablesEntryPB& current_pb,
           return Status::InvalidArgument("RENAME_COLUMN missing column info");
         }
 
-        // TODO: In theory we can rename a key
-        if (cur_schema.is_key_column(step.rename_column().old_name())) {
-          return Status::InvalidArgument("cannot rename a key column");
-        }
-
         RETURN_NOT_OK(builder.RenameColumn(
                         step.rename_column().old_name(),
                         step.rename_column().new_name()));

http://git-wip-us.apache.org/repos/asf/kudu/blob/4be39fee/src/kudu/tablet/tablet.cc
----------------------------------------------------------------------
diff --git a/src/kudu/tablet/tablet.cc b/src/kudu/tablet/tablet.cc
index bc0a330..dcbb4ec 100644
--- a/src/kudu/tablet/tablet.cc
+++ b/src/kudu/tablet/tablet.cc
@@ -917,10 +917,6 @@ Status Tablet::FlushInternal(const RowSetsInCompaction& input,
 
 Status Tablet::CreatePreparedAlterSchema(AlterSchemaTransactionState *tx_state,
                                          const Schema* schema) {
-  if (!key_schema_.KeyEquals(*schema)) {
-    return Status::InvalidArgument("Schema keys cannot be altered",
-                                   schema->CreateKeyProjection().ToString());
-  }
 
   if (!schema->has_column_ids()) {
     // this probably means that the request is not from the Master
@@ -937,8 +933,8 @@ Status Tablet::CreatePreparedAlterSchema(AlterSchemaTransactionState *tx_state,
 }
 
 Status Tablet::AlterSchema(AlterSchemaTransactionState *tx_state) {
-  DCHECK(key_schema_.KeyEquals(*DCHECK_NOTNULL(tx_state->schema()))) <<
-    "Schema keys cannot be altered";
+  DCHECK(key_schema_.KeyTypeEquals(*DCHECK_NOTNULL(tx_state->schema()))) <<
+    "Schema keys cannot be altered(except name)";
 
   // Prevent any concurrent flushes. Otherwise, we run into issues where
   // we have an MRS in the rowset tree, and we can't alter its schema


[5/5] kudu git commit: [scanners] info message on scanner expiration is back

Posted by to...@apache.org.
[scanners] info message on scanner expiration is back

Despite the TODO, logging about expired scanners seems to be useful
for troubleshooting.

This is a follow-up to 5df142fa35bf04231ee92b622482a1be0b18b294.

Change-Id: If94558cfab9ae31c68f58c886becc968138c18e2
Reviewed-on: http://gerrit.cloudera.org:8080/6435
Tested-by: Kudu Jenkins
Reviewed-by: Todd Lipcon <to...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/kudu/repo
Commit: http://git-wip-us.apache.org/repos/asf/kudu/commit/08b8ec9e
Tree: http://git-wip-us.apache.org/repos/asf/kudu/tree/08b8ec9e
Diff: http://git-wip-us.apache.org/repos/asf/kudu/diff/08b8ec9e

Branch: refs/heads/master
Commit: 08b8ec9e1a7949bf569353eafdf029b6d0276473
Parents: f8e88fa
Author: Alexey Serbin <as...@cloudera.com>
Authored: Sun Mar 19 17:19:25 2017 -0700
Committer: Todd Lipcon <to...@apache.org>
Committed: Thu Mar 30 01:21:59 2017 +0000

----------------------------------------------------------------------
 src/kudu/tserver/scanners.cc | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/kudu/blob/08b8ec9e/src/kudu/tserver/scanners.cc
----------------------------------------------------------------------
diff --git a/src/kudu/tserver/scanners.cc b/src/kudu/tserver/scanners.cc
index 7d9a821..43d8144 100644
--- a/src/kudu/tserver/scanners.cc
+++ b/src/kudu/tserver/scanners.cc
@@ -170,12 +170,13 @@ void ScannerManager::RemoveExpiredScanners() {
       }
 
       // The scanner has expired because of inactivity.
-      VLOG(1) << Substitute("Expiring scanner id: $0, of tablet $1, "
-                            "after $2 ms of inactivity, which is > TTL ($3 ms).",
-                            it->first,
-                            scanner->tablet_id(),
-                            idle_time.ToMilliseconds(),
-                            scanner_ttl.ToMilliseconds());
+      LOG(INFO) << Substitute(
+          "Expiring scanner id: $0, of tablet $1, "
+          "after $2 ms of inactivity, which is > TTL ($3 ms).",
+          it->first,
+          scanner->tablet_id(),
+          idle_time.ToMilliseconds(),
+          scanner_ttl.ToMilliseconds());
       it = stripe->scanners_by_id_.erase(it);
       if (metrics_) {
         metrics_->scanners_expired->Increment();