Posted to commits@hive.apache.org by ma...@apache.org on 2019/03/06 11:45:05 UTC
[hive] branch master updated: HIVE-21314 : Hive Replication not retaining the owner in the replicated table. (Mahesh Kumar Behera, reviewed by Sankar Hariappan)
This is an automated email from the ASF dual-hosted git repository.
mahesh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new 0413fec HIVE-21314 : Hive Replication not retaining the owner in the replicated table. (Mahesh Kumar Behera, reviewed by Sankar Hariappan)
0413fec is described below
commit 0413fec095c9282fa97a097f1736880a0caebebe
Author: Mahesh Kumar Behera <mb...@hortonworks.com>
AuthorDate: Wed Mar 6 16:51:38 2019 +0530
HIVE-21314 : Hive Replication not retaining the owner in the replicated table. (Mahesh Kumar Behera, reviewed by Sankar Hariappan)
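For context, the scenario exercised by the new test, sketched here in rough HiveQL (the database names and dump path are illustrative, not taken from the patch):

    -- source cluster: change the table owner, then dump the database
    ALTER TABLE srcdb.tbl_own SET OWNER USER hive;
    REPL DUMP srcdb;

    -- target cluster: load the dump; with this fix the replicated table keeps
    -- owner "hive" instead of defaulting to the user running the load
    REPL LOAD replicadb FROM '/tmp/dump/dir';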
---
.../parse/TestReplicationWithTableMigrationEx.java | 67 ++++++++++++++++++++++
.../org/apache/hadoop/hive/ql/exec/DDLTask.java | 3 +
.../bootstrap/events/filesystem/FSTableEvent.java | 3 +
.../hive/ql/parse/ImportSemanticAnalyzer.java | 3 +-
.../hadoop/hive/ql/plan/CreateTableDesc.java | 13 +++++
.../apache/hadoop/hive/ql/plan/CreateViewDesc.java | 13 +++++
.../hadoop/hive/ql/plan/ImportTableDesc.java | 13 +++++
7 files changed, 114 insertions(+), 1 deletion(-)
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationWithTableMigrationEx.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationWithTableMigrationEx.java
index 4637da1..5f59a2a 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationWithTableMigrationEx.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationWithTableMigrationEx.java
@@ -348,4 +348,71 @@ public class TestReplicationWithTableMigrationEx {
replica.load(replicatedDbName, tuple.dumpLocation, withClause);
assertFalse(ReplUtils.isFirstIncPending(replica.getDatabase(replicatedDbName).getParameters()));
}
+
+ private void verifyUserName(String userName) throws Throwable {
+ assertTrue(userName.equalsIgnoreCase(primary.getTable(primaryDbName, "tbl_own").getOwner()));
+ assertTrue(userName.equalsIgnoreCase(replica.getTable(replicatedDbName, "tbl_own").getOwner()));
+ assertTrue(userName.equalsIgnoreCase(primary.getTable(primaryDbName, "tacid").getOwner()));
+ assertTrue(userName.equalsIgnoreCase(replica.getTable(replicatedDbName, "tacid").getOwner()));
+ assertTrue(userName.equalsIgnoreCase(primary.getTable(primaryDbName, "tacidpart").getOwner()));
+ assertTrue(userName.equalsIgnoreCase(replica.getTable(replicatedDbName, "tacidpart").getOwner()));
+ assertTrue(userName.equalsIgnoreCase(primary.getTable(primaryDbName, "tbl_part").getOwner()));
+ assertTrue(userName.equalsIgnoreCase(replica.getTable(replicatedDbName, "tbl_part").getOwner()));
+ assertTrue(userName.equalsIgnoreCase(primary.getTable(primaryDbName, "view_own").getOwner()));
+ assertTrue(userName.equalsIgnoreCase(replica.getTable(replicatedDbName, "view_own").getOwner()));
+ }
+
+ private void alterUserName(String userName) throws Throwable {
+ primary.run("use " + primaryDbName)
+ .run("alter table tbl_own set owner USER " + userName)
+ .run("alter table tacid set owner USER " + userName)
+ .run("alter table tacidpart set owner USER " + userName)
+ .run("alter table tbl_part set owner USER " + userName)
+ .run("alter table view_own set owner USER " + userName);
+ }
+
+ @Test
+ public void testOwnerPropagation() throws Throwable {
+ primary.run("use " + primaryDbName)
+ .run("create table tbl_own (fld int)")
+ .run("create table tacid (id int) clustered by(id) into 3 buckets stored as orc ")
+ .run("create table tacidpart (place string) partitioned by (country string) clustered by(place) "
+ + "into 3 buckets stored as orc ")
+ .run("create table tbl_part (fld int) partitioned by (country string)")
+ .run("insert into tbl_own values (1)")
+ .run("create view view_own as select * from tbl_own");
+
+ // test bootstrap
+ alterUserName("hive");
+ WarehouseInstance.Tuple tuple = primary.dump(primaryDbName, null);
+ replica.loadWithoutExplain(replicatedDbName, tuple.dumpLocation);
+ verifyUserName("hive");
+
+ // test incremental
+ alterUserName("hive1");
+ tuple = primary.dump(primaryDbName, tuple.lastReplicationId);
+ replica.loadWithoutExplain(replicatedDbName, tuple.dumpLocation);
+ verifyUserName("hive1");
+ }
+
+ @Test
+ public void testOwnerPropagationInc() throws Throwable {
+ WarehouseInstance.Tuple tuple = primary.dump(primaryDbName, null);
+ replica.loadWithoutExplain(replicatedDbName, tuple.dumpLocation);
+
+ primary.run("use " + primaryDbName)
+ .run("create table tbl_own (fld int)")
+ .run("create table tacid (id int) clustered by(id) into 3 buckets stored as orc ")
+ .run("create table tacidpart (place string) partitioned by (country string) clustered by(place) "
+ + "into 3 buckets stored as orc ")
+ .run("create table tbl_part (fld int) partitioned by (country string)")
+ .run("insert into tbl_own values (1)")
+ .run("create view view_own as select * from tbl_own");
+
+ // test incremental replication when the tables are created in the same load
+ alterUserName("hive");
+ tuple = primary.dump(primaryDbName, tuple.lastReplicationId);
+ replica.loadWithoutExplain(replicatedDbName, tuple.dumpLocation);
+ verifyUserName("hive");
+ }
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 76339f1..a39126f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -4699,6 +4699,9 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
oldview.setOutputFormatClass(crtView.getOutputFormat());
}
oldview.checkValidity(null);
+ if (crtView.getOwnerName() != null) {
+ oldview.setOwner(crtView.getOwnerName());
+ }
db.alterTable(crtView.getViewName(), oldview, false, null, true);
addIfAbsentByName(new WriteEntity(oldview, WriteEntity.WriteType.DDL_NO_LOCK));
} else {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java
index 99a8d5d..4b382f2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java
@@ -123,6 +123,9 @@ public class FSTableEvent implements TableEvent {
tableDesc.setExternal(true);
}
tableDesc.setReplicationSpec(replicationSpec());
+ if (table.getOwner() != null) {
+ tableDesc.setOwnerName(table.getOwner());
+ }
return tableDesc;
} catch (Exception e) {
throw new SemanticException(e);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index ed06352..b6b4f58 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -276,8 +276,8 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
// Create table associated with the import
// Executed if relevant, and used to contain all the other details about the table if not.
ImportTableDesc tblDesc;
+ org.apache.hadoop.hive.metastore.api.Table tblObj = rv.getTable();
try {
- org.apache.hadoop.hive.metastore.api.Table tblObj = rv.getTable();
// The table can be non acid in case of replication from a cluster with STRICT_MANAGED set to false.
if (!TxnUtils.isTransactionalTable(tblObj) && replicationSpec.isInReplicationScope() &&
x.getConf().getBoolVar(HiveConf.ConfVars.HIVE_STRICT_MANAGED_TABLES) &&
@@ -317,6 +317,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
}
inReplicationScope = true;
tblDesc.setReplWriteId(writeId);
+ tblDesc.setOwnerName(tblObj.getOwner());
}
if (isExternalSet) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
index c71ff6d..4514af1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
@@ -115,6 +115,7 @@ public class CreateTableDesc extends DDLDesc implements Serializable {
// This is not needed beyond compilation, so it is transient.
private transient FileSinkDesc writer;
private Long replWriteId; // to be used by repl task to get the txn and valid write id list
+ private String ownerName = null;
public CreateTableDesc() {
}
@@ -909,6 +910,10 @@ public class CreateTableDesc extends DDLDesc implements Serializable {
StatsSetupConst.FALSE);
}
}
+
+ if (ownerName != null) {
+ tbl.setOwner(ownerName);
+ }
return tbl;
}
@@ -939,4 +944,12 @@ public class CreateTableDesc extends DDLDesc implements Serializable {
public void setReplWriteId(Long replWriteId) {
this.replWriteId = replWriteId;
}
+
+ public String getOwnerName() {
+ return ownerName;
+ }
+
+ public void setOwnerName(String ownerName) {
+ this.ownerName = ownerName;
+ }
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
index 7130aba..ce85d40 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
@@ -70,6 +70,7 @@ public class CreateViewDesc extends DDLDesc implements Serializable {
private Map<String, String> serdeProps; // only used for materialized views
private Set<String> tablesUsed; // only used for materialized views
private ReplicationSpec replicationSpec = null;
+ private String ownerName = null;
/**
* For serialization only.
@@ -412,6 +413,10 @@ public class CreateViewDesc extends DDLDesc implements Serializable {
}
}
+ if (ownerName != null) {
+ tbl.setOwner(ownerName);
+ }
+
// Sets the column state for the create view statement (false since it is a creation).
// Similar to logic in CreateTableDesc.
StatsSetupConst.setStatsStateForCreateTable(tbl.getTTable().getParameters(), null,
@@ -419,4 +424,12 @@ public class CreateViewDesc extends DDLDesc implements Serializable {
return tbl;
}
+
+ public void setOwnerName(String ownerName) {
+ this.ownerName = ownerName;
+ }
+
+ public String getOwnerName() {
+ return this.ownerName;
+ }
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
index 5c30fca..017e1c7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
@@ -366,4 +366,17 @@ public class ImportTableDesc {
this.createTblDesc.setReplWriteId(replWriteId);
}
}
+
+ public void setOwnerName(String ownerName) {
+ switch (getDescType()) {
+ case TABLE:
+ createTblDesc.setOwnerName(ownerName);
+ break;
+ case VIEW:
+ createViewDesc.setOwnerName(ownerName);
+ break;
+ default:
+ throw new RuntimeException("Invalid table type : " + getDescType());
+ }
+ }
}