Posted to commits@hive.apache.org by vi...@apache.org on 2018/10/07 23:01:53 UTC

hive git commit: HIVE-20671 : Hive Streaming has a broken dependency on metastore-server (Alexander Kolbasov, reviewed by Vihang Karajgaonkar)

Repository: hive
Updated Branches:
  refs/heads/master 44ef91a67 -> f3d144854


HIVE-20671 : Hive Streaming has a broken dependency on metastore-server (Alexander Kolbasov, reviewed by Vihang Karajgaonkar)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f3d14485
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f3d14485
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f3d14485

Branch: refs/heads/master
Commit: f3d1448548ad3f15db6199df1b4331d0eeda7bb5
Parents: 44ef91a
Author: Alexander Kolbasov <ak...@cloudera.com>
Authored: Sun Oct 7 16:01:16 2018 -0700
Committer: Vihang Karajgaonkar <vi...@apache.org>
Committed: Sun Oct 7 16:01:28 2018 -0700

----------------------------------------------------------------------
 .../hive/metastore/LockComponentBuilder.java    | 126 +++++++++++++
 .../hive/metastore/LockRequestBuilder.java      | 185 +++++++++++++++++++
 .../hive/metastore/LockComponentBuilder.java    | 126 -------------
 .../hive/metastore/LockRequestBuilder.java      | 185 -------------------
 4 files changed, 311 insertions(+), 311 deletions(-)
----------------------------------------------------------------------
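
The two builder classes below move unchanged from metastore-server to metastore-common so that clients such as Hive Streaming can use them without pulling in the server module. A minimal, illustrative sketch of how the builders fit together for a streaming writer (the class name, table, user, agentInfo, and DataOperationType value are placeholders for illustration, not part of this change):

    import org.apache.hadoop.hive.metastore.LockComponentBuilder;
    import org.apache.hadoop.hive.metastore.LockRequestBuilder;
    import org.apache.hadoop.hive.metastore.api.DataOperationType;
    import org.apache.hadoop.hive.metastore.api.LockComponent;
    import org.apache.hadoop.hive.metastore.api.LockRequest;

    public class StreamingLockSketch {                         // hypothetical example class
      public static LockRequest buildRequest(long txnId) {
        // Table-level component: build() picks LockLevel.TABLE because only db and table are set.
        LockComponent comp = new LockComponentBuilder()
            .setDbName("default")                              // placeholder database
            .setTableName("web_logs")                          // placeholder table
            .setSemiShared()                                   // SHARED_WRITE
            .setOperationType(DataOperationType.INSERT)        // enum constant assumed from the api module
            .setIsTransactional(true)
            .build();

        // The request requires a user; build() throws without one.
        return new LockRequestBuilder("streaming-example")     // agentInfo string is illustrative
            .setUser("hive")
            .setTransactionId(txnId)
            .addLockComponent(comp)
            .build();
      }
    }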


http://git-wip-us.apache.org/repos/asf/hive/blob/f3d14485/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
new file mode 100644
index 0000000..c739d4d
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.hive.metastore.api.DataOperationType;
+import org.apache.hadoop.hive.metastore.api.LockComponent;
+import org.apache.hadoop.hive.metastore.api.LockLevel;
+import org.apache.hadoop.hive.metastore.api.LockType;
+
+/**
+ * A builder for {@link LockComponent}s
+ */
+public class LockComponentBuilder {
+  private LockComponent component;
+  private boolean tableNameSet;
+  private boolean partNameSet;
+
+  public LockComponentBuilder() {
+    component = new LockComponent();
+    tableNameSet = partNameSet = false;
+  }
+
+  /**
+   * Set the lock to be exclusive.
+   * @return reference to this builder
+   */
+  public LockComponentBuilder setExclusive() {
+    component.setType(LockType.EXCLUSIVE);
+    return this;
+  }
+
+  /**
+   * Set the lock to be semi-shared.
+   * @return reference to this builder
+   */
+  public LockComponentBuilder setSemiShared() {
+    component.setType(LockType.SHARED_WRITE);
+    return this;
+  }
+
+  /**
+   * Set the lock to be shared.
+   * @return reference to this builder
+   */
+  public LockComponentBuilder setShared() {
+    component.setType(LockType.SHARED_READ);
+    return this;
+  }
+
+  /**
+   * Set the database name.
+   * @param dbName database name
+   * @return reference to this builder
+   */
+  public LockComponentBuilder setDbName(String dbName) {
+    component.setDbname(dbName);
+    return this;
+  }
+  
+  public LockComponentBuilder setOperationType(DataOperationType dop) {
+    component.setOperationType(dop);
+    return this;
+  }
+
+  public LockComponentBuilder setIsTransactional(boolean t) {
+    component.setIsTransactional(t);
+    return this;
+  }
+  /**
+   * Set the table name.
+   * @param tableName table name
+   * @return reference to this builder
+   */
+  public LockComponentBuilder setTableName(String tableName) {
+    component.setTablename(tableName);
+    tableNameSet = true;
+    return this;
+  }
+
+  /**
+   * Set the partition name.
+   * @param partitionName partition name
+   * @return reference to this builder
+   */
+  public LockComponentBuilder setPartitionName(String partitionName) {
+    component.setPartitionname(partitionName);
+    partNameSet = true;
+    return this;
+  }
+  public LockComponentBuilder setIsDynamicPartitionWrite(boolean t) {
+    component.setIsDynamicPartitionWrite(t);
+    return this;
+  }
+
+ /**
+   * Get the constructed lock component.
+   * @return lock component.
+   */
+  public LockComponent build() {
+    LockLevel level = LockLevel.DB;
+    if (tableNameSet) level = LockLevel.TABLE;
+    if (partNameSet) level = LockLevel.PARTITION;
+    component.setLevel(level);
+    return component;
+  }
+
+  public LockComponent setLock(LockType type) {
+    component.setType(type);
+    return component;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/f3d14485/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockRequestBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockRequestBuilder.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockRequestBuilder.java
new file mode 100644
index 0000000..22902a9
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockRequestBuilder.java
@@ -0,0 +1,185 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.hive.metastore.api.LockComponent;
+import org.apache.hadoop.hive.metastore.api.LockRequest;
+import org.apache.hadoop.hive.metastore.api.LockType;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.Collection;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+/**
+ * Builder class to make constructing {@link LockRequest} easier.
+ */
+public class LockRequestBuilder {
+
+  private LockRequest req;
+  private LockTrie trie;
+  private boolean userSet;
+
+  /**
+   * @deprecated 
+   */
+  public LockRequestBuilder() {
+    this(null);
+  }
+  public LockRequestBuilder(String agentInfo) {
+    req = new LockRequest();
+    trie = new LockTrie();
+    userSet = false;
+    if(agentInfo != null) {
+      req.setAgentInfo(agentInfo);
+    }
+  }
+
+  /**
+   * Get the constructed LockRequest.
+   * @return lock request
+   */
+  public LockRequest build() {
+    if (!userSet) {
+      throw new RuntimeException("Cannot build a lock without giving a user");
+    }
+    trie.addLocksToRequest(req);
+    try {
+      req.setHostname(InetAddress.getLocalHost().getHostName());
+    } catch (UnknownHostException e) {
+      throw new RuntimeException("Unable to determine our local host!");
+    }
+    return req;
+  }
+
+  /**
+   * Set the transaction id.
+   * @param txnid transaction id
+   * @return reference to this builder
+   */
+  public LockRequestBuilder setTransactionId(long txnid) {
+    req.setTxnid(txnid);
+    return this;
+  }
+
+  public LockRequestBuilder setUser(String user) {
+    if (user == null) user = "unknown";
+    req.setUser(user);
+    userSet = true;
+    return this;
+  }
+
+  /**
+   * Add a lock component to the lock request
+   * @param component to add
+   * @return reference to this builder
+   */
+  public LockRequestBuilder addLockComponent(LockComponent component) {
+    trie.add(component);
+    return this;
+  }
+
+  /**
+   * Add a collection with lock components to the lock request
+   * @param components to add
+   * @return reference to this builder
+   */
+  public LockRequestBuilder addLockComponents(Collection<LockComponent> components) {
+    trie.addAll(components);
+    return this;
+  }
+
+  // For reasons that are completely incomprehensible to me the semantic
+  // analyzers often ask for multiple locks on the same entity (for example
+  // a shared_read and an exclusive lock).  The db locking system gets confused
+  // by this and deadlocks on it.  To resolve that, we'll make sure in the
+  // request that multiple locks are coalesced and promoted to the higher
+  // level of locking.  To do this we put all lock components in a trie based
+  // on dbname, tablename, partition name and handle the promotion as new
+  // requests come in.  This structure depends on the fact that null is a
+  // valid key in a LinkedHashMap.  So a database lock will map to (dbname, null,
+  // null).
+  private static class LockTrie {
+    Map<String, TableTrie> trie;
+
+    LockTrie() {
+      trie = new LinkedHashMap<>();
+    }
+
+    public void add(LockComponent comp) {
+      TableTrie tabs = trie.get(comp.getDbname());
+      if (tabs == null) {
+        tabs = new TableTrie();
+        trie.put(comp.getDbname(), tabs);
+      }
+      setTable(comp, tabs);
+    }
+
+    public void addAll(Collection<LockComponent> components) {
+      for(LockComponent component: components) {
+        add(component);
+      }
+    }
+
+    public void addLocksToRequest(LockRequest request) {
+      for (TableTrie tab : trie.values()) {
+        for (PartTrie part : tab.values()) {
+          for (LockComponent lock :  part.values()) {
+            request.addToComponent(lock);
+          }
+        }
+      }
+    }
+
+    private void setTable(LockComponent comp, TableTrie tabs) {
+      PartTrie parts = tabs.get(comp.getTablename());
+      if (parts == null) {
+        parts = new PartTrie();
+        tabs.put(comp.getTablename(), parts);
+      }
+      setPart(comp, parts);
+    }
+
+    private void setPart(LockComponent comp, PartTrie parts) {
+      LockComponent existing = parts.get(comp.getPartitionname());
+      if (existing == null) {
+        // No existing lock for this partition.
+        parts.put(comp.getPartitionname(), comp);
+      }  else if (existing.getType() != LockType.EXCLUSIVE  &&
+          (comp.getType() ==  LockType.EXCLUSIVE ||
+            comp.getType() ==  LockType.SHARED_WRITE)) {
+        // We only need to promote if comp.type is > existing.type.  For
+        // efficiency we check if existing is exclusive (in which case we
+        // need never promote) or if comp is exclusive or shared_write (in
+        // which case we can promote even though they may both be shared
+        // write).  If comp is shared_read there's never a need to promote.
+        parts.put(comp.getPartitionname(), comp);
+      }
+    }
+
+    private static class TableTrie extends LinkedHashMap<String, PartTrie> {
+    }
+
+    private static class PartTrie extends LinkedHashMap<String, LockComponent> {
+    }
+
+
+
+  }
+}
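
As the LockTrie comment above explains, components for the same (dbname, tablename, partitionname) key are coalesced and promoted to the stronger lock type when the request is built. A small sketch of that behavior (the class name, table, user, and the Thrift-generated getComponent() accessor on LockRequest are assumptions for illustration):

    import java.util.List;

    import org.apache.hadoop.hive.metastore.LockComponentBuilder;
    import org.apache.hadoop.hive.metastore.LockRequestBuilder;
    import org.apache.hadoop.hive.metastore.api.LockComponent;
    import org.apache.hadoop.hive.metastore.api.LockRequest;

    public class LockCoalesceSketch {                          // hypothetical example class
      public static void main(String[] args) {
        LockComponent read = new LockComponentBuilder()
            .setDbName("default").setTableName("t1").setShared().build();     // SHARED_READ
        LockComponent write = new LockComponentBuilder()
            .setDbName("default").setTableName("t1").setExclusive().build();  // EXCLUSIVE

        LockRequest req = new LockRequestBuilder("coalesce-example")
            .setUser("hive")
            .addLockComponent(read)
            .addLockComponent(write)   // same db/table/partition key: promoted, not duplicated
            .build();

        // Expect a single EXCLUSIVE component for default.t1 rather than two entries.
        List<LockComponent> comps = req.getComponent();        // Thrift-generated list accessor (assumed)
        System.out.println(comps.size() + " component(s), type = " + comps.get(0).getType());
      }
    }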

http://git-wip-us.apache.org/repos/asf/hive/blob/f3d14485/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
deleted file mode 100644
index c739d4d..0000000
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.hive.metastore.api.DataOperationType;
-import org.apache.hadoop.hive.metastore.api.LockComponent;
-import org.apache.hadoop.hive.metastore.api.LockLevel;
-import org.apache.hadoop.hive.metastore.api.LockType;
-
-/**
- * A builder for {@link LockComponent}s
- */
-public class LockComponentBuilder {
-  private LockComponent component;
-  private boolean tableNameSet;
-  private boolean partNameSet;
-
-  public LockComponentBuilder() {
-    component = new LockComponent();
-    tableNameSet = partNameSet = false;
-  }
-
-  /**
-   * Set the lock to be exclusive.
-   * @return reference to this builder
-   */
-  public LockComponentBuilder setExclusive() {
-    component.setType(LockType.EXCLUSIVE);
-    return this;
-  }
-
-  /**
-   * Set the lock to be semi-shared.
-   * @return reference to this builder
-   */
-  public LockComponentBuilder setSemiShared() {
-    component.setType(LockType.SHARED_WRITE);
-    return this;
-  }
-
-  /**
-   * Set the lock to be shared.
-   * @return reference to this builder
-   */
-  public LockComponentBuilder setShared() {
-    component.setType(LockType.SHARED_READ);
-    return this;
-  }
-
-  /**
-   * Set the database name.
-   * @param dbName database name
-   * @return reference to this builder
-   */
-  public LockComponentBuilder setDbName(String dbName) {
-    component.setDbname(dbName);
-    return this;
-  }
-  
-  public LockComponentBuilder setOperationType(DataOperationType dop) {
-    component.setOperationType(dop);
-    return this;
-  }
-
-  public LockComponentBuilder setIsTransactional(boolean t) {
-    component.setIsTransactional(t);
-    return this;
-  }
-  /**
-   * Set the table name.
-   * @param tableName table name
-   * @return reference to this builder
-   */
-  public LockComponentBuilder setTableName(String tableName) {
-    component.setTablename(tableName);
-    tableNameSet = true;
-    return this;
-  }
-
-  /**
-   * Set the partition name.
-   * @param partitionName partition name
-   * @return reference to this builder
-   */
-  public LockComponentBuilder setPartitionName(String partitionName) {
-    component.setPartitionname(partitionName);
-    partNameSet = true;
-    return this;
-  }
-  public LockComponentBuilder setIsDynamicPartitionWrite(boolean t) {
-    component.setIsDynamicPartitionWrite(t);
-    return this;
-  }
-
- /**
-   * Get the constructed lock component.
-   * @return lock component.
-   */
-  public LockComponent build() {
-    LockLevel level = LockLevel.DB;
-    if (tableNameSet) level = LockLevel.TABLE;
-    if (partNameSet) level = LockLevel.PARTITION;
-    component.setLevel(level);
-    return component;
-  }
-
-  public LockComponent setLock(LockType type) {
-    component.setType(type);
-    return component;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/f3d14485/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/LockRequestBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/LockRequestBuilder.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/LockRequestBuilder.java
deleted file mode 100644
index 22902a9..0000000
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/LockRequestBuilder.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.hive.metastore.api.LockComponent;
-import org.apache.hadoop.hive.metastore.api.LockRequest;
-import org.apache.hadoop.hive.metastore.api.LockType;
-
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.util.Collection;
-import java.util.LinkedHashMap;
-import java.util.Map;
-
-/**
- * Builder class to make constructing {@link LockRequest} easier.
- */
-public class LockRequestBuilder {
-
-  private LockRequest req;
-  private LockTrie trie;
-  private boolean userSet;
-
-  /**
-   * @deprecated 
-   */
-  public LockRequestBuilder() {
-    this(null);
-  }
-  public LockRequestBuilder(String agentInfo) {
-    req = new LockRequest();
-    trie = new LockTrie();
-    userSet = false;
-    if(agentInfo != null) {
-      req.setAgentInfo(agentInfo);
-    }
-  }
-
-  /**
-   * Get the constructed LockRequest.
-   * @return lock request
-   */
-  public LockRequest build() {
-    if (!userSet) {
-      throw new RuntimeException("Cannot build a lock without giving a user");
-    }
-    trie.addLocksToRequest(req);
-    try {
-      req.setHostname(InetAddress.getLocalHost().getHostName());
-    } catch (UnknownHostException e) {
-      throw new RuntimeException("Unable to determine our local host!");
-    }
-    return req;
-  }
-
-  /**
-   * Set the transaction id.
-   * @param txnid transaction id
-   * @return reference to this builder
-   */
-  public LockRequestBuilder setTransactionId(long txnid) {
-    req.setTxnid(txnid);
-    return this;
-  }
-
-  public LockRequestBuilder setUser(String user) {
-    if (user == null) user = "unknown";
-    req.setUser(user);
-    userSet = true;
-    return this;
-  }
-
-  /**
-   * Add a lock component to the lock request
-   * @param component to add
-   * @return reference to this builder
-   */
-  public LockRequestBuilder addLockComponent(LockComponent component) {
-    trie.add(component);
-    return this;
-  }
-
-  /**
-   * Add a collection with lock components to the lock request
-   * @param components to add
-   * @return reference to this builder
-   */
-  public LockRequestBuilder addLockComponents(Collection<LockComponent> components) {
-    trie.addAll(components);
-    return this;
-  }
-
-  // For reasons that are completely incomprehensible to me the semantic
-  // analyzers often ask for multiple locks on the same entity (for example
-  // a shared_read and an exclusive lock).  The db locking system gets confused
-  // by this and deadlocks on it.  To resolve that, we'll make sure in the
-  // request that multiple locks are coalesced and promoted to the higher
-  // level of locking.  To do this we put all lock components in a trie based
-  // on dbname, tablename, partition name and handle the promotion as new
-  // requests come in.  This structure depends on the fact that null is a
-  // valid key in a LinkedHashMap.  So a database lock will map to (dbname, null,
-  // null).
-  private static class LockTrie {
-    Map<String, TableTrie> trie;
-
-    LockTrie() {
-      trie = new LinkedHashMap<>();
-    }
-
-    public void add(LockComponent comp) {
-      TableTrie tabs = trie.get(comp.getDbname());
-      if (tabs == null) {
-        tabs = new TableTrie();
-        trie.put(comp.getDbname(), tabs);
-      }
-      setTable(comp, tabs);
-    }
-
-    public void addAll(Collection<LockComponent> components) {
-      for(LockComponent component: components) {
-        add(component);
-      }
-    }
-
-    public void addLocksToRequest(LockRequest request) {
-      for (TableTrie tab : trie.values()) {
-        for (PartTrie part : tab.values()) {
-          for (LockComponent lock :  part.values()) {
-            request.addToComponent(lock);
-          }
-        }
-      }
-    }
-
-    private void setTable(LockComponent comp, TableTrie tabs) {
-      PartTrie parts = tabs.get(comp.getTablename());
-      if (parts == null) {
-        parts = new PartTrie();
-        tabs.put(comp.getTablename(), parts);
-      }
-      setPart(comp, parts);
-    }
-
-    private void setPart(LockComponent comp, PartTrie parts) {
-      LockComponent existing = parts.get(comp.getPartitionname());
-      if (existing == null) {
-        // No existing lock for this partition.
-        parts.put(comp.getPartitionname(), comp);
-      }  else if (existing.getType() != LockType.EXCLUSIVE  &&
-          (comp.getType() ==  LockType.EXCLUSIVE ||
-            comp.getType() ==  LockType.SHARED_WRITE)) {
-        // We only need to promote if comp.type is > existing.type.  For
-        // efficiency we check if existing is exclusive (in which case we
-        // need never promote) or if comp is exclusive or shared_write (in
-        // which case we can promote even though they may both be shared
-        // write).  If comp is shared_read there's never a need to promote.
-        parts.put(comp.getPartitionname(), comp);
-      }
-    }
-
-    private static class TableTrie extends LinkedHashMap<String, PartTrie> {
-    }
-
-    private static class PartTrie extends LinkedHashMap<String, LockComponent> {
-    }
-
-
-
-  }
-}