Posted to commits@iceberg.apache.org by bl...@apache.org on 2019/07/14 19:09:44 UTC

[incubator-iceberg] branch master updated: Remove BaseMetastoreTables and HiveTables (#284)

This is an automated email from the ASF dual-hosted git repository.

blue pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-iceberg.git


The following commit(s) were added to refs/heads/master by this push:
     new b77b253  Remove BaseMetastoreTables and HiveTables (#284)
b77b253 is described below

commit b77b253ca8281bb271211525f29088f480426569
Author: Ratandeep Ratti <rr...@linkedin.com>
AuthorDate: Sun Jul 14 12:09:39 2019 -0700

    Remove BaseMetastoreTables and HiveTables (#284)
---
 .../org/apache/iceberg/BaseMetastoreTables.java    | 115 ---------------------
 .../java/org/apache/iceberg/hive/HiveTables.java   |  74 -------------
 2 files changed, 189 deletions(-)

diff --git a/core/src/main/java/org/apache/iceberg/BaseMetastoreTables.java b/core/src/main/java/org/apache/iceberg/BaseMetastoreTables.java
deleted file mode 100644
index 9026111..0000000
--- a/core/src/main/java/org/apache/iceberg/BaseMetastoreTables.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iceberg;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-import java.util.Map;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.iceberg.exceptions.AlreadyExistsException;
-import org.apache.iceberg.exceptions.NoSuchTableException;
-
-public abstract class BaseMetastoreTables implements Tables {
-  private final Configuration conf;
-
-  public BaseMetastoreTables(Configuration conf) {
-    this.conf = conf;
-  }
-
-  protected abstract BaseMetastoreTableOperations newTableOps(Configuration newConf,
-                                                              String database, String table);
-
-  public Table load(String database, String table) {
-    TableOperations ops = newTableOps(conf, database, table);
-    if (ops.current() == null) {
-      throw new NoSuchTableException("Table does not exist: " + database + "." + table);
-    }
-
-    return new BaseTable(ops, database + "." + table);
-  }
-
-  public Table create(Schema schema, String database, String table) {
-    return create(schema, PartitionSpec.unpartitioned(), database, table);
-  }
-
-  public Table create(Schema schema, PartitionSpec spec, String database, String table) {
-    return create(schema, spec, ImmutableMap.of(), database, table);
-  }
-
-  public Table create(Schema schema, PartitionSpec spec, Map<String, String> properties,
-                      String database, String table) {
-    TableOperations ops = newTableOps(conf, database, table);
-    if (ops.current() != null) {
-      throw new AlreadyExistsException("Table already exists: " + database + "." + table);
-    }
-
-    String location = defaultWarehouseLocation(conf, database, table);
-    TableMetadata metadata = TableMetadata.newTableMetadata(ops, schema, spec, location, properties);
-    ops.commit(null, metadata);
-
-    return new BaseTable(ops, database + "." + table);
-  }
-
-  public Transaction beginCreate(Schema schema, PartitionSpec spec, String database, String table) {
-    return beginCreate(schema, spec, ImmutableMap.of(), database, table);
-  }
-
-  public Transaction beginCreate(Schema schema, PartitionSpec spec, Map<String, String> properties,
-                                 String database, String table) {
-    TableOperations ops = newTableOps(conf, database, table);
-    if (ops.current() != null) {
-      throw new AlreadyExistsException("Table already exists: " + database + "." + table);
-    }
-
-    String location = defaultWarehouseLocation(conf, database, table);
-    TableMetadata metadata = TableMetadata.newTableMetadata(ops, schema, spec, location, properties);
-
-    return Transactions.createTableTransaction(ops, metadata);
-  }
-
-  public Transaction beginReplace(Schema schema, PartitionSpec spec,
-                                  String database, String table) {
-    return beginReplace(schema, spec, ImmutableMap.of(), database, table);
-  }
-
-  public Transaction beginReplace(Schema schema, PartitionSpec spec, Map<String, String> properties,
-                                  String database, String table) {
-    TableOperations ops = newTableOps(conf, database, table);
-    TableMetadata current = ops.current();
-
-    TableMetadata metadata;
-    if (current != null) {
-      metadata = current.buildReplacement(schema, spec, properties);
-      return Transactions.replaceTableTransaction(ops, metadata);
-    } else {
-      String location = defaultWarehouseLocation(conf, database, table);
-      metadata = TableMetadata.newTableMetadata(ops, schema, spec, location, properties);
-      return Transactions.createTableTransaction(ops, metadata);
-    }
-  }
-
-  protected String defaultWarehouseLocation(Configuration hadoopConf,
-                                            String database, String table) {
-    String warehouseLocation = hadoopConf.get("hive.metastore.warehouse.dir");
-    Preconditions.checkNotNull(warehouseLocation,
-        "Warehouse location is not set: hive.metastore.warehouse.dir=null");
-    return String.format("%s/%s.db/%s", warehouseLocation, database, table);
-  }
-}
diff --git a/hive/src/main/java/org/apache/iceberg/hive/HiveTables.java b/hive/src/main/java/org/apache/iceberg/hive/HiveTables.java
deleted file mode 100644
index 4177a93..0000000
--- a/hive/src/main/java/org/apache/iceberg/hive/HiveTables.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iceberg.hive;
-
-import com.google.common.base.Splitter;
-import java.io.Closeable;
-import java.util.List;
-import java.util.Map;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.iceberg.BaseMetastoreTableOperations;
-import org.apache.iceberg.BaseMetastoreTables;
-import org.apache.iceberg.PartitionSpec;
-import org.apache.iceberg.Schema;
-import org.apache.iceberg.Table;
-
-public class HiveTables extends BaseMetastoreTables implements Closeable {
-  private static final Splitter DOT = Splitter.on('.').limit(2);
-  private final HiveClientPool clients;
-
-  public HiveTables(Configuration conf) {
-    super(conf);
-    this.clients = new HiveClientPool(2, conf);
-  }
-
-  @Override
-  public Table create(Schema schema, String tableIdentifier) {
-    return create(schema, PartitionSpec.unpartitioned(), tableIdentifier);
-  }
-
-  @Override
-  public Table create(Schema schema, PartitionSpec spec, Map<String, String> properties, String tableIdentifier) {
-    List<String> parts = DOT.splitToList(tableIdentifier);
-    if (parts.size() == 2) {
-      return create(schema, spec, properties, parts.get(0), parts.get(1));
-    }
-    throw new UnsupportedOperationException("Could not parse table identifier: " + tableIdentifier);
-  }
-
-  @Override
-  public Table load(String tableIdentifier) {
-    List<String> parts = DOT.splitToList(tableIdentifier);
-    if (parts.size() == 2) {
-      return load(parts.get(0), parts.get(1));
-    }
-    throw new UnsupportedOperationException("Could not parse table identifier: " + tableIdentifier);
-  }
-
-  @Override
-  public BaseMetastoreTableOperations newTableOps(Configuration conf, String database, String table) {
-    return new HiveTableOperations(conf, clients, database, table);
-  }
-
-  @Override
-  public void close() {
-    clients.close();
-  }
-}
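
For readers migrating off the removed classes: the same load/create operations are exposed through Iceberg's catalog API. The sketch below is not part of this diff; it assumes the org.apache.iceberg.catalog.TableIdentifier and org.apache.iceberg.hive.HiveCatalog classes that live elsewhere in this repository, and the loadTable/createTable overloads on the Catalog interface as of this commit, so signatures should be verified against the version in use.

// Minimal sketch: table access through HiveCatalog instead of the removed
// HiveTables. HiveCatalog and TableIdentifier are assumed from the rest of
// the repository, not from this diff.
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.hive.HiveCatalog;
import org.apache.iceberg.types.Types;

public class HiveCatalogSketch {
  public static void main(String[] args) {
    // Configuration should carry the Hive metastore settings (e.g. hive-site.xml).
    Configuration conf = new Configuration();
    HiveCatalog catalog = new HiveCatalog(conf);

    // Load an existing table; was: new HiveTables(conf).load("db.existing_tbl")
    TableIdentifier existing = TableIdentifier.of("db", "existing_tbl");
    Table loaded = catalog.loadTable(existing);

    // Create a new table; was:
    // new HiveTables(conf).create(schema, PartitionSpec.unpartitioned(), "db.new_tbl")
    TableIdentifier fresh = TableIdentifier.of("db", "new_tbl");
    Schema schema = new Schema(
        Types.NestedField.required(1, "id", Types.LongType.get()));
    Table created = catalog.createTable(fresh, schema, PartitionSpec.unpartitioned());
  }
}

A notable difference from HiveTables is that identifiers are structured TableIdentifier values rather than "db.table" strings split on a dot, and the warehouse location defaulting is handled by the catalog rather than by the hive.metastore.warehouse.dir lookup shown in the removed BaseMetastoreTables.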