Posted to commits@hive.apache.org by na...@apache.org on 2010/09/16 19:09:43 UTC

svn commit: r997851 [9/12] - in /hadoop/hive/branches/branch-0.6: ./ eclipse-templates/ metastore/if/ metastore/src/gen-cpp/ metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ metastore/src/gen-php/ metastore/src/gen-php/hive_metastore/ m...

Modified: hadoop/hive/branches/branch-0.6/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hive/branches/branch-0.6/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java?rev=997851&r1=997850&r2=997851&view=diff
==============================================================================
--- hadoop/hive/branches/branch-0.6/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (original)
+++ hadoop/hive/branches/branch-0.6/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java Thu Sep 16 17:09:41 2010
@@ -60,6 +60,7 @@ public class MetaStoreUtils {
   protected static final Log LOG = LogFactory.getLog("hive.log");
 
   public static final String DEFAULT_DATABASE_NAME = "default";
+  public static final String DEFAULT_DATABASE_COMMENT = "Default Hive database";
 
   /**
    * printStackTrace
@@ -324,134 +325,6 @@ public class MetaStoreUtils {
     return "map<" + k + "," + v + ">";
   }
 
-  public static Table getTable(Configuration conf, Properties schema)
-      throws MetaException {
-    Table t = new Table();
-    t.setSd(new StorageDescriptor());
-    t
-        .setTableName(schema
-            .getProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_NAME));
-    t
-        .getSd()
-        .setLocation(
-            schema
-                .getProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_LOCATION));
-    t.getSd().setInputFormat(
-      schema.getProperty(
-        org.apache.hadoop.hive.metastore.api.Constants.FILE_INPUT_FORMAT,
-        org.apache.hadoop.mapred.SequenceFileInputFormat.class.getName()));
-    t.getSd().setOutputFormat(
-      schema.getProperty(
-        org.apache.hadoop.hive.metastore.api.Constants.FILE_OUTPUT_FORMAT,
-        org.apache.hadoop.mapred.SequenceFileOutputFormat.class.getName()));
-    t.setPartitionKeys(new ArrayList<FieldSchema>());
-    t.setDbName(MetaStoreUtils.DEFAULT_DATABASE_NAME);
-    String part_cols_str = schema
-        .getProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_PARTITION_COLUMNS);
-    t.setPartitionKeys(new ArrayList<FieldSchema>());
-    if (part_cols_str != null && (part_cols_str.trim().length() != 0)) {
-      String[] part_keys = part_cols_str.trim().split("/");
-      for (String key : part_keys) {
-        FieldSchema part = new FieldSchema();
-        part.setName(key);
-        part.setType(org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME); // default
-                                                                               // partition
-                                                                               // key
-        t.getPartitionKeys().add(part);
-      }
-    }
-    t.getSd()
-        .setNumBuckets(
-            Integer.parseInt(schema.getProperty(
-                org.apache.hadoop.hive.metastore.api.Constants.BUCKET_COUNT,
-                "-1")));
-    String bucketFieldName = schema
-        .getProperty(org.apache.hadoop.hive.metastore.api.Constants.BUCKET_FIELD_NAME);
-    t.getSd().setBucketCols(new ArrayList<String>(1));
-    if ((bucketFieldName != null) && (bucketFieldName.trim().length() != 0)) {
-      t.getSd().setBucketCols(new ArrayList<String>(1));
-      t.getSd().getBucketCols().add(bucketFieldName);
-    }
-
-    t.getSd().setSerdeInfo(new SerDeInfo());
-    t.getSd().getSerdeInfo().setParameters(new HashMap<String, String>());
-    t.getSd().getSerdeInfo().setName(t.getTableName());
-    t
-        .getSd()
-        .getSerdeInfo()
-        .setSerializationLib(
-            schema
-                .getProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB));
-    setSerdeParam(t.getSd().getSerdeInfo(), schema,
-        org.apache.hadoop.hive.serde.Constants.SERIALIZATION_CLASS);
-    setSerdeParam(t.getSd().getSerdeInfo(), schema,
-        org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT);
-    if (org.apache.commons.lang.StringUtils
-        .isNotBlank(schema
-            .getProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_CLASS))) {
-      setSerdeParam(t.getSd().getSerdeInfo(), schema,
-          org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_SERDE);
-    }
-    // needed for MetadataTypedColumnSetSerDe and LazySimpleSerDe
-    setSerdeParam(t.getSd().getSerdeInfo(), schema,
-        org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_COLUMNS);
-    // needed for LazySimpleSerDe
-    setSerdeParam(t.getSd().getSerdeInfo(), schema,
-        org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_COLUMN_TYPES);
-    // needed for DynamicSerDe
-    setSerdeParam(t.getSd().getSerdeInfo(), schema,
-        org.apache.hadoop.hive.serde.Constants.SERIALIZATION_DDL);
-
-    String colstr = schema
-        .getProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_COLUMNS);
-    List<FieldSchema> fields = new ArrayList<FieldSchema>();
-    if (colstr != null) {
-      String[] cols = colstr.split(",");
-      for (String colName : cols) {
-        FieldSchema col = new FieldSchema(colName,
-            org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME,
-            "'default'");
-        fields.add(col);
-      }
-    }
-
-    if (fields.size() == 0) {
-      // get the fields from serde
-      try {
-        fields = getFieldsFromDeserializer(t.getTableName(), getDeserializer(
-            conf, schema));
-      } catch (SerDeException e) {
-        LOG.error(StringUtils.stringifyException(e));
-        throw new MetaException("Invalid serde or schema. " + e.getMessage());
-      }
-    }
-    t.getSd().setCols(fields);
-
-    t.setOwner(schema.getProperty("creator"));
-
-    // remove all the used up parameters to find out the remaining parameters
-    schema.remove(Constants.META_TABLE_NAME);
-    schema.remove(Constants.META_TABLE_LOCATION);
-    schema.remove(Constants.FILE_INPUT_FORMAT);
-    schema.remove(Constants.FILE_OUTPUT_FORMAT);
-    schema.remove(Constants.META_TABLE_PARTITION_COLUMNS);
-    schema.remove(Constants.BUCKET_COUNT);
-    schema.remove(Constants.BUCKET_FIELD_NAME);
-    schema.remove(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_CLASS);
-    schema.remove(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT);
-    schema.remove(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB);
-    schema.remove(Constants.META_TABLE_SERDE);
-    schema.remove(Constants.META_TABLE_COLUMNS);
-    schema.remove(Constants.META_TABLE_COLUMN_TYPES);
-
-    // add the remaining unknown parameters to the table's parameters
-    t.setParameters(new HashMap<String, String>());
-    for (Entry<Object, Object> e : schema.entrySet()) {
-      t.getParameters().put(e.getKey().toString(), e.getValue().toString());
-    }
-
-    return t;
-  }
 
   public static void setSerdeParam(SerDeInfo sdi, Properties schema,
       String param) {

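For context on the API reshaped below: the old MetaStoreUtils.getTable(Configuration, Properties)
helper is removed, and databases are now created from a full Database bean rather than a bare name.
A minimal sketch of how the new DEFAULT_DATABASE_COMMENT constant might be used when bootstrapping
the default database (the "warehouse" and "store" handles are assumptions for illustration, not
part of this commit):

    // Hypothetical bootstrap of the default database with the new constant.
    // 'warehouse' (a Warehouse) and 'store' (a RawStore) are assumed handles.
    Database db = new Database();
    db.setName(MetaStoreUtils.DEFAULT_DATABASE_NAME);           // "default"
    db.setDescription(MetaStoreUtils.DEFAULT_DATABASE_COMMENT); // "Default Hive database"
    db.setLocationUri(warehouse.getDefaultDatabasePath(db.getName()).toString());
    store.createDatabase(db);
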
Modified: hadoop/hive/branches/branch-0.6/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
URL: http://svn.apache.org/viewvc/hadoop/hive/branches/branch-0.6/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java?rev=997851&r1=997850&r2=997851&view=diff
==============================================================================
--- hadoop/hive/branches/branch-0.6/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (original)
+++ hadoop/hive/branches/branch-0.6/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java Thu Sep 16 17:09:41 2010
@@ -284,126 +284,133 @@ public class ObjectStore implements RawS
     }
   }
 
-  public boolean createDatabase(Database db) {
-    boolean success = false;
+
+  public void createDatabase(Database db) throws InvalidObjectException, MetaException {
     boolean commited = false;
-    MDatabase mdb = new MDatabase(db.getName().toLowerCase(), db
-        .getDescription());
+    MDatabase mdb = new MDatabase();
+    mdb.setName(db.getName().toLowerCase());
+    mdb.setLocationUri(db.getLocationUri());
+    mdb.setDescription(db.getDescription());
     try {
       openTransaction();
       pm.makePersistent(mdb);
-      success = true;
       commited = commitTransaction();
     } finally {
       if (!commited) {
         rollbackTransaction();
       }
     }
-    return success;
-  }
-
-  public boolean createDatabase(String name) {
-    // TODO: get default path
-    Database db = new Database(name, "default_path");
-    return this.createDatabase(db);
   }
 
   @SuppressWarnings("nls")
   private MDatabase getMDatabase(String name) throws NoSuchObjectException {
-    MDatabase db = null;
+    MDatabase mdb = null;
     boolean commited = false;
     try {
       openTransaction();
-      name = name.toLowerCase();
+      name = name.toLowerCase().trim();
       Query query = pm.newQuery(MDatabase.class, "name == dbname");
       query.declareParameters("java.lang.String dbname");
       query.setUnique(true);
-      db = (MDatabase) query.execute(name.trim());
-      pm.retrieve(db);
+      mdb = (MDatabase) query.execute(name);
+      pm.retrieve(mdb);
       commited = commitTransaction();
     } finally {
       if (!commited) {
         rollbackTransaction();
       }
     }
-    if (db == null) {
+    if (mdb == null) {
       throw new NoSuchObjectException("There is no database named " + name);
     }
-    return db;
+    return mdb;
   }
 
   public Database getDatabase(String name) throws NoSuchObjectException {
-    MDatabase db = null;
+    MDatabase mdb = null;
     boolean commited = false;
     try {
       openTransaction();
-      db = getMDatabase(name);
+      mdb = getMDatabase(name);
       commited = commitTransaction();
     } finally {
       if (!commited) {
         rollbackTransaction();
       }
     }
-    return new Database(db.getName(), db.getDescription());
+    Database db = new Database();
+    db.setName(mdb.getName());
+    db.setDescription(mdb.getDescription());
+    db.setLocationUri(mdb.getLocationUri());
+    return db;
   }
 
-  public boolean dropDatabase(String dbname) {
-
+  public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException {
     boolean success = false;
-    boolean commited = false;
+    LOG.info("Dropping database " + dbname + " along with all tables");
+    dbname = dbname.toLowerCase();
     try {
       openTransaction();
 
       // first drop tables
-      dbname = dbname.toLowerCase();
-      LOG.info("Dropping database along with all tables " + dbname);
-      Query q1 = pm.newQuery(MTable.class, "database.name == dbName");
-      q1.declareParameters("java.lang.String dbName");
-      List<MTable> mtbls = (List<MTable>) q1.execute(dbname.trim());
-      pm.deletePersistentAll(mtbls);
+      for (String tableName : getAllTables(dbname)) {
+        dropTable(dbname, tableName);
+      }
 
       // then drop the database
-      Query query = pm.newQuery(MDatabase.class, "name == dbName");
-      query.declareParameters("java.lang.String dbName");
-      query.setUnique(true);
-      MDatabase db = (MDatabase) query.execute(dbname.trim());
+      MDatabase db = getMDatabase(dbname);
       pm.retrieve(db);
-
-      // StringIdentity id = new StringIdentity(MDatabase.class, dbname);
-      // MDatabase db = (MDatabase) pm.getObjectById(id);
       if (db != null) {
         pm.deletePersistent(db);
       }
-      commited = commitTransaction();
-      success = true;
-    } catch (JDOObjectNotFoundException e) {
-      LOG.debug("database not found " + dbname, e);
-      commited = commitTransaction();
+      success = commitTransaction();
     } finally {
-      if (!commited) {
+      if (!success) {
         rollbackTransaction();
       }
     }
     return success;
   }
 
-  public List<String> getDatabases() {
-    List dbs = null;
+  public List<String> getDatabases(String pattern) throws MetaException {
     boolean commited = false;
+    List<String> databases = null;
     try {
       openTransaction();
-      Query query = pm.newQuery(MDatabase.class);
-      query.setResult("name");
-      query.setResultClass(String.class);
-      query.setOrdering("name asc");
-      dbs = (List) query.execute();
+      // Take the pattern and split it on the | to get all the composing
+      // patterns
+      String[] subpatterns = pattern.trim().split("\\|");
+      String query = "select name from org.apache.hadoop.hive.metastore.model.MDatabase where (";
+      boolean first = true;
+      for (String subpattern : subpatterns) {
+        subpattern = "(?i)" + subpattern.replaceAll("\\*", ".*");
+        if (!first) {
+          query = query + " || ";
+        }
+        query = query + " name.matches(\"" + subpattern + "\")";
+        first = false;
+      }
+      query = query + ")";
+
+      Query q = pm.newQuery(query);
+      q.setResult("name");
+      q.setOrdering("name ascending");
+      Collection names = (Collection) q.execute();
+      databases = new ArrayList<String>();
+      for (Iterator i = names.iterator(); i.hasNext();) {
+        databases.add((String) i.next());
+      }
       commited = commitTransaction();
     } finally {
       if (!commited) {
         rollbackTransaction();
       }
     }
-    return dbs;
+    return databases;
+  }
+
+  public List<String> getAllDatabases() throws MetaException {
+    return getDatabases(".*");
   }
 
   private MType getMType(Type type) {
@@ -507,7 +514,7 @@ public class ObjectStore implements RawS
     }
   }
 
-  public boolean dropTable(String dbName, String tableName) {
+  public boolean dropTable(String dbName, String tableName) throws MetaException {
 
     boolean success = false;
     try {
@@ -550,11 +557,13 @@ public class ObjectStore implements RawS
     List<String> tbls = null;
     try {
       openTransaction();
-      dbName = dbName.toLowerCase();
+      dbName = dbName.toLowerCase().trim();
       // Take the pattern and split it on the | to get all the composing
       // patterns
       String[] subpatterns = pattern.trim().split("\\|");
-      String query = "select tableName from org.apache.hadoop.hive.metastore.model.MTable where database.name == dbName && (";
+      String query =
+        "select tableName from org.apache.hadoop.hive.metastore.model.MTable "
+        + "where database.name == dbName && (";
       boolean first = true;
       for (String subpattern : subpatterns) {
         subpattern = "(?i)" + subpattern.replaceAll("\\*", ".*");
@@ -569,7 +578,8 @@ public class ObjectStore implements RawS
       Query q = pm.newQuery(query);
       q.declareParameters("java.lang.String dbName");
       q.setResult("tableName");
-      Collection names = (Collection) q.execute(dbName.trim());
+      q.setOrdering("tableName ascending");
+      Collection names = (Collection) q.execute(dbName);
       tbls = new ArrayList<String>();
       for (Iterator i = names.iterator(); i.hasNext();) {
         tbls.add((String) i.next());
@@ -583,18 +593,21 @@ public class ObjectStore implements RawS
     return tbls;
   }
 
+  public List<String> getAllTables(String dbName) throws MetaException {
+    return getTables(dbName, ".*");
+  }
+
   private MTable getMTable(String db, String table) {
     MTable mtbl = null;
     boolean commited = false;
     try {
       openTransaction();
-      db = db.toLowerCase();
-      table = table.toLowerCase();
-      Query query = pm.newQuery(MTable.class,
-          "tableName == table && database.name == db");
+      db = db.toLowerCase().trim();
+      table = table.toLowerCase().trim();
+      Query query = pm.newQuery(MTable.class, "tableName == table && database.name == db");
       query.declareParameters("java.lang.String table, java.lang.String db");
       query.setUnique(true);
-      mtbl = (MTable) query.execute(table.trim(), db.trim());
+      mtbl = (MTable) query.execute(table, db);
       pm.retrieve(mtbl);
       commited = commitTransaction();
     } finally {
@@ -639,7 +652,7 @@ public class ObjectStore implements RawS
     } catch (NoSuchObjectException e) {
       LOG.error(StringUtils.stringifyException(e));
       throw new InvalidObjectException("Database " + tbl.getDbName()
-          + " doesn't exsit.");
+          + " doesn't exist.");
     }
 
     // If the table has property EXTERNAL set, update table type
@@ -788,8 +801,8 @@ public class ObjectStore implements RawS
     boolean commited = false;
     try {
       openTransaction();
-      dbName = dbName.toLowerCase();
-      tableName = tableName.toLowerCase();
+      dbName = dbName.toLowerCase().trim();
+      tableName = tableName.toLowerCase().trim();
       MTable mtbl = getMTable(dbName, tableName);
       if (mtbl == null) {
         commited = commitTransaction();
@@ -799,13 +812,11 @@ public class ObjectStore implements RawS
       // redundant
       String name = Warehouse.makePartName(convertToFieldSchemas(mtbl
           .getPartitionKeys()), part_vals);
-      Query query = pm
-          .newQuery(MPartition.class,
-              "table.tableName == t1 && table.database.name == t2 && partitionName == t3");
-      query
-          .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
+      Query query = pm.newQuery(MPartition.class,
+          "table.tableName == t1 && table.database.name == t2 && partitionName == t3");
+      query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
       query.setUnique(true);
-      mpart = (MPartition) query.execute(tableName.trim(), dbName.trim(), name);
+      mpart = (MPartition) query.execute(tableName, dbName, name);
       pm.retrieve(mpart);
       commited = commitTransaction();
     } finally {
@@ -886,14 +897,15 @@ public class ObjectStore implements RawS
     try {
       openTransaction();
       LOG.debug("Executing getPartitionNames");
-      dbName = dbName.toLowerCase();
-      tableName = tableName.toLowerCase();
-      Query q = pm
-          .newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition where table.database.name == t1 && table.tableName == t2 order by partitionName asc");
+      dbName = dbName.toLowerCase().trim();
+      tableName = tableName.toLowerCase().trim();
+      Query q = pm.newQuery(
+          "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition "
+          + "where table.database.name == t1 && table.tableName == t2 "
+          + "order by partitionName asc");
       q.declareParameters("java.lang.String t1, java.lang.String t2");
       q.setResult("partitionName");
-      Collection names = (Collection) q
-          .execute(dbName.trim(), tableName.trim());
+      Collection names = (Collection) q.execute(dbName, tableName);
       pns = new ArrayList<String>();
       for (Iterator i = names.iterator(); i.hasNext();) {
         pns.add((String) i.next());
@@ -915,13 +927,12 @@ public class ObjectStore implements RawS
     try {
       openTransaction();
       LOG.debug("Executing listMPartitions");
-      dbName = dbName.toLowerCase();
-      tableName = tableName.toLowerCase();
+      dbName = dbName.toLowerCase().trim();
+      tableName = tableName.toLowerCase().trim();
       Query query = pm.newQuery(MPartition.class,
           "table.tableName == t1 && table.database.name == t2");
       query.declareParameters("java.lang.String t1, java.lang.String t2");
-      mparts = (List<MPartition>) query
-          .execute(tableName.trim(), dbName.trim());
+      mparts = (List<MPartition>) query.execute(tableName, dbName);
       LOG.debug("Done executing query for listMPartitions");
       pm.retrieveAll(mparts);
       success = commitTransaction();

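The getDatabases(String pattern) implementation above follows the same convention as the existing
getTables(): "|" separates alternative patterns and "*" is rewritten to the regex ".*", matched
case-insensitively. A short usage sketch (the "store" handle and the database names are assumed
for illustration):

    // List databases through the new pattern API.
    List<String> all  = store.getAllDatabases();           // same as getDatabases(".*")
    List<String> some = store.getDatabases("test*|prod*"); // e.g. testdb1, testdb2, prod1
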
Modified: hadoop/hive/branches/branch-0.6/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
URL: http://svn.apache.org/viewvc/hadoop/hive/branches/branch-0.6/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java?rev=997851&r1=997850&r2=997851&view=diff
==============================================================================
--- hadoop/hive/branches/branch-0.6/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (original)
+++ hadoop/hive/branches/branch-0.6/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java Thu Sep 16 17:09:41 2010
@@ -55,16 +55,18 @@ public interface RawStore extends Config
    */
   public abstract void rollbackTransaction();
 
-  public abstract boolean createDatabase(Database db) throws MetaException;
-
-  public abstract boolean createDatabase(String name) throws MetaException;
+  public abstract void createDatabase(Database db)
+      throws InvalidObjectException, MetaException;
 
   public abstract Database getDatabase(String name)
       throws NoSuchObjectException;
 
-  public abstract boolean dropDatabase(String dbname);
+  public abstract boolean dropDatabase(String dbname)
+      throws NoSuchObjectException, MetaException;
+
+  public abstract List<String> getDatabases(String pattern) throws MetaException;
 
-  public abstract List<String> getDatabases() throws MetaException;
+  public abstract List<String> getAllDatabases() throws MetaException;
 
   public abstract boolean createType(Type type);
 
@@ -99,6 +101,8 @@ public interface RawStore extends Config
   public List<String> getTables(String dbName, String pattern)
       throws MetaException;
 
+  public List<String> getAllTables(String dbName) throws MetaException;
+    
   public abstract List<String> listPartitionNames(String db_name,
       String tbl_name, short max_parts) throws MetaException;
 

Modified: hadoop/hive/branches/branch-0.6/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
URL: http://svn.apache.org/viewvc/hadoop/hive/branches/branch-0.6/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java?rev=997851&r1=997850&r2=997851&view=diff
==============================================================================
--- hadoop/hive/branches/branch-0.6/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java (original)
+++ hadoop/hive/branches/branch-0.6/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java Thu Sep 16 17:09:41 2010
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hive.metastore;
 
+import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
+
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -47,8 +49,10 @@ import org.apache.hadoop.hive.metastore.
 public class Warehouse {
   private Path whRoot;
   private final Configuration conf;
-  String whRootString;
+  private final String whRootString;
 
+  private static final String DATABASE_WAREHOUSE_SUFFIX = ".db";
+    
   public static final Log LOG = LogFactory.getLog("hive.metastore.warehouse");
 
   public Warehouse(Configuration conf) throws MetaException {
@@ -117,10 +121,10 @@ public class Warehouse {
   }
 
   public Path getDefaultDatabasePath(String dbName) throws MetaException {
-    if (dbName.equalsIgnoreCase(MetaStoreUtils.DEFAULT_DATABASE_NAME)) {
+    if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
       return getWhRoot();
     }
-    return new Path(getWhRoot(), dbName.toLowerCase() + ".db");
+    return new Path(getWhRoot(), dbName.toLowerCase() + DATABASE_WAREHOUSE_SUFFIX);
   }
 
   public Path getDefaultTablePath(String dbName, String tableName)

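With the new DATABASE_WAREHOUSE_SUFFIX constant, every non-default database is placed under the
warehouse root in a lowercased "<dbname>.db" directory, while the default database keeps the root
itself. Illustrative expectations, assuming a warehouse root of /user/hive/warehouse:

    // Sketch only; the warehouse root below is an assumed example value.
    Warehouse wh = new Warehouse(conf);
    wh.getDefaultDatabasePath("default"); // -> /user/hive/warehouse
    wh.getDefaultDatabasePath("TestDB");  // -> /user/hive/warehouse/testdb.db
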
Modified: hadoop/hive/branches/branch-0.6/metastore/src/model/org/apache/hadoop/hive/metastore/model/MDatabase.java
URL: http://svn.apache.org/viewvc/hadoop/hive/branches/branch-0.6/metastore/src/model/org/apache/hadoop/hive/metastore/model/MDatabase.java?rev=997851&r1=997850&r2=997851&view=diff
==============================================================================
--- hadoop/hive/branches/branch-0.6/metastore/src/model/org/apache/hadoop/hive/metastore/model/MDatabase.java (original)
+++ hadoop/hive/branches/branch-0.6/metastore/src/model/org/apache/hadoop/hive/metastore/model/MDatabase.java Thu Sep 16 17:09:41 2010
@@ -27,6 +27,7 @@ package org.apache.hadoop.hive.metastore
  */
 public class MDatabase {
   private String name;
+  private String locationUri;
   private String description;
 
   /**
@@ -37,11 +38,13 @@ public class MDatabase {
   /**
    * To create a database object
    * @param name of the database
-   * @param location future use
+   * @param locationUri Location of the database in the warehouse
+   * @param description Comment describing the database
    */
-  public MDatabase(String name, String location) {
+  public MDatabase(String name, String locationUri, String description) {
     this.name = name;
-    this.description = location;
+    this.locationUri = locationUri;
+    this.description = description;
   }
 
   /**
@@ -59,6 +62,20 @@ public class MDatabase {
   }
 
   /**
+   * @return the location_uri
+   */
+  public String getLocationUri() {
+    return locationUri;
+  }
+
+  /**
+   * @param locationUri the locationUri to set
+   */
+  public void setLocationUri(String locationUri) {
+    this.locationUri = locationUri;
+  }
+
+  /**
    * @return the description
    */
   public String getDescription() {

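The widened constructor now carries the location and the comment separately; previously the
location string was stored in the description field. Illustrative use, with made-up values:

    // All argument values below are hypothetical examples.
    MDatabase mdb = new MDatabase(
        "sales",                                   // name
        "hdfs://namenode:8020/warehouse/sales.db", // locationUri
        "Sales reporting database");               // description
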
Modified: hadoop/hive/branches/branch-0.6/metastore/src/model/package.jdo
URL: http://svn.apache.org/viewvc/hadoop/hive/branches/branch-0.6/metastore/src/model/package.jdo?rev=997851&r1=997850&r2=997851&view=diff
==============================================================================
--- hadoop/hive/branches/branch-0.6/metastore/src/model/package.jdo (original)
+++ hadoop/hive/branches/branch-0.6/metastore/src/model/package.jdo Thu Sep 16 17:09:41 2010
@@ -8,14 +8,17 @@
         <column name="DB_ID"/>
       </datastore-identity>
       <field name="name">  
-        <column name="NAME" length="128" jdbc-type="VARCHAR"/>  
+        <column name="NAME" length="128" jdbc-type="VARCHAR"/>
         <index name="UniqueDatabase" unique="true"/>
       </field>
-      <field name="description">  
-        <column name="DESC" length="767" jdbc-type="VARCHAR"/>  
-      </field>  
+      <field name="description">
+        <column name="DESC" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+      </field>
+      <field name="locationUri">
+        <column name="DB_LOCATION_URI" length="4000" jdbc-type="VARCHAR" allows-null="false"/>
+      </field>
     </class>
-
+    
     <class name="MFieldSchema" embedded-only="true" table="TYPE_FIELDS" detachable="true">
       <field name="name">
         <column name="FNAME" length="128" jdbc-type="VARCHAR"/>

Added: hadoop/hive/branches/branch-0.6/metastore/src/test/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
URL: http://svn.apache.org/viewvc/hadoop/hive/branches/branch-0.6/metastore/src/test/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java?rev=997851&view=auto
==============================================================================
--- hadoop/hive/branches/branch-0.6/metastore/src/test/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java (added)
+++ hadoop/hive/branches/branch-0.6/metastore/src/test/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java Thu Sep 16 17:09:41 2010
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.util.StringUtils;
+
+public class TestEmbeddedHiveMetaStore extends TestHiveMetaStore {
+
+  @Override
+  protected void setUp() throws Exception {
+    super.setUp();
+
+    try {
+      client = new HiveMetaStoreClient(hiveConf, null);
+    } catch (Throwable e) {
+      System.err.println("Unable to open the metastore");
+      System.err.println(StringUtils.stringifyException(e));
+      throw new Exception(e);
+    }
+  }
+
+  @Override
+  protected void tearDown() throws Exception {
+    try {
+      super.tearDown();
+      client.close();
+    } catch (Throwable e) {
+      System.err.println("Unable to close metastore");
+      System.err.println(StringUtils.stringifyException(e));
+      throw new Exception(e);
+    }
+  }
+}

Modified: hadoop/hive/branches/branch-0.6/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
URL: http://svn.apache.org/viewvc/hadoop/hive/branches/branch-0.6/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java?rev=997851&r1=997850&r2=997851&view=diff
==============================================================================
--- hadoop/hive/branches/branch-0.6/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (original)
+++ hadoop/hive/branches/branch-0.6/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java Thu Sep 16 17:09:41 2010
@@ -48,40 +48,25 @@ import org.apache.hadoop.hive.serde.Cons
 import org.apache.hadoop.util.StringUtils;
 import org.apache.thrift.TException;
 
-public class TestHiveMetaStore extends TestCase {
-  private HiveMetaStoreClient client;
-  private HiveConf hiveConf;
+public abstract class TestHiveMetaStore extends TestCase {
+  protected static HiveMetaStoreClient client;
+  protected static HiveConf hiveConf;
+  protected static Warehouse warehouse;
+  protected static boolean isThriftClient = false;
+
+  private static final String TEST_DB1_NAME = "testdb1";
+  private static final String TEST_DB2_NAME = "testdb2";
 
   @Override
   protected void setUp() throws Exception {
-    super.setUp();
     hiveConf = new HiveConf(this.getClass());
+    warehouse = new Warehouse(hiveConf);
 
     // set some values to use for getting conf. vars
     hiveConf.set("hive.key1", "value1");
     hiveConf.set("hive.key2", "http://www.example.com");
     hiveConf.set("hive.key3", "");
     hiveConf.set("hive.key4", "0");
-
-    try {
-      client = new HiveMetaStoreClient(hiveConf, null);
-    } catch (Throwable e) {
-      System.err.println("Unable to open the metastore");
-      System.err.println(StringUtils.stringifyException(e));
-      throw new Exception(e);
-    }
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    try {
-      super.tearDown();
-      client.close();
-    } catch (Throwable e) {
-      System.err.println("Unable to close metastore");
-      System.err.println(StringUtils.stringifyException(e));
-      throw new Exception(e);
-    }
   }
 
   public void testNameMethods() {
@@ -118,11 +103,11 @@ public class TestHiveMetaStore extends T
    * @throws Exception
    */
   public void testPartition() throws Exception {
-    partitionTester(client, hiveConf, false);
+    partitionTester(client, hiveConf);
   }
 
-  public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf,
-      boolean isThriftClient) throws Exception {
+  public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf)
+    throws Exception {
     try {
       String dbName = "compdb";
       String tblName = "comptbl";
@@ -139,9 +124,10 @@ public class TestHiveMetaStore extends T
       vals3.add("15");
 
       client.dropTable(dbName, tblName);
-      client.dropDatabase(dbName);
-      boolean ret = client.createDatabase(dbName, "strange_loc");
-      assertTrue("Unable to create the databse " + dbName, ret);
+      silentDropDatabase(dbName);
+      Database db = new Database();
+      db.setName(dbName);
+      client.createDatabase(db);
 
       client.dropType(typeName);
       Type typ1 = new Type();
@@ -151,8 +137,7 @@ public class TestHiveMetaStore extends T
           new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
       typ1.getFields().add(
           new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
-      ret = client.createType(typ1);
-      assertTrue("Unable to create type " + typeName, ret);
+      client.createType(typ1);
 
       Table tbl = new Table();
       tbl.setDbName(dbName);
@@ -181,7 +166,7 @@ public class TestHiveMetaStore extends T
 
       client.createTable(tbl);
 
-      if(isThriftClient) {
+      if (isThriftClient) {
         // the createTable() above does not update the location in the 'tbl'
         // object when the client is a thrift client and the code below relies
         // on the location being present in the 'tbl' object - so get the table
@@ -303,12 +288,12 @@ public class TestHiveMetaStore extends T
       }
       assertTrue("Bad partition spec should have thrown an exception", exceptionThrown);
 
-      FileSystem fs = FileSystem.get(hiveConf);
       Path partPath = new Path(part2.getSd().getLocation());
+      FileSystem fs = FileSystem.get(partPath.toUri(), hiveConf);
+
 
       assertTrue(fs.exists(partPath));
-      ret = client.dropPartition(dbName, tblName, part.getValues(), true);
-      assertTrue(ret);
+      client.dropPartition(dbName, tblName, part.getValues(), true);
       assertFalse(fs.exists(partPath));
 
       // Test append_partition_by_name
@@ -326,12 +311,11 @@ public class TestHiveMetaStore extends T
       // add the partition again so that drop table with a partition can be
       // tested
       retp = client.add_partition(part);
-      assertNotNull("Unable to create partition " + part, ret);
+      assertNotNull("Unable to create partition " + part, retp);
 
       client.dropTable(dbName, tblName);
 
-      ret = client.dropType(typeName);
-      assertTrue("Unable to drop type " + typeName, ret);
+      client.dropType(typeName);
 
       // recreate table as external, drop partition and it should
       // still exist
@@ -343,8 +327,11 @@ public class TestHiveMetaStore extends T
       client.dropPartition(dbName, tblName, part.getValues(), true);
       assertTrue(fs.exists(partPath));
 
-      ret = client.dropDatabase(dbName);
-      assertTrue("Unable to create the databse " + dbName, ret);
+      for (String tableName : client.getTables(dbName, "*")) {
+        client.dropTable(dbName, tableName);
+      }
+
+      client.dropDatabase(dbName);
 
     } catch (Exception e) {
       System.err.println(StringUtils.stringifyException(e));
@@ -363,9 +350,11 @@ public class TestHiveMetaStore extends T
       vals.add("14");
 
       client.dropTable(dbName, tblName);
-      client.dropDatabase(dbName);
-      boolean ret = client.createDatabase(dbName, "strange_loc");
-      assertTrue("Unable to create the databse " + dbName, ret);
+      silentDropDatabase(dbName);
+      Database db = new Database();
+      db.setName(dbName);
+      db.setDescription("Alter Partition Test database");
+      client.createDatabase(db);
 
       ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
       cols.add(new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
@@ -398,6 +387,14 @@ public class TestHiveMetaStore extends T
 
       client.createTable(tbl);
 
+      if (isThriftClient) {
+        // the createTable() above does not update the location in the 'tbl'
+        // object when the client is a thrift client and the code below relies
+        // on the location being present in the 'tbl' object - so get the table
+        // from the metastore
+        tbl = client.getTable(dbName, tblName);
+      }
+
       Partition part = new Partition();
       part.setDbName(dbName);
       part.setTableName(tblName);
@@ -426,8 +423,7 @@ public class TestHiveMetaStore extends T
 
       client.dropTable(dbName, tblName);
 
-      ret = client.dropDatabase(dbName);
-      assertTrue("Unable to create the databse " + dbName, ret);
+      client.dropDatabase(dbName);
     } catch (Exception e) {
       System.err.println(StringUtils.stringifyException(e));
       System.err.println("testPartition() failed.");
@@ -438,40 +434,40 @@ public class TestHiveMetaStore extends T
   public void testDatabase() throws Throwable {
     try {
       // clear up any existing databases
-      client.dropDatabase("test1");
-      client.dropDatabase("test2");
+      silentDropDatabase(TEST_DB1_NAME);
+      silentDropDatabase(TEST_DB2_NAME);
 
-      boolean ret = client.createDatabase("test1", "strange_loc");
-      assertTrue("Unable to create the databse", ret);
+      Database db = new Database();
+      db.setName(TEST_DB1_NAME);
+      client.createDatabase(db);
 
-      Database db = client.getDatabase("test1");
+      db = client.getDatabase(TEST_DB1_NAME);
 
       assertEquals("name of returned db is different from that of inserted db",
-          "test1", db.getName());
-      assertEquals(
-          "location of the returned db is different from that of inserted db",
-          "strange_loc", db.getDescription());
-
-      boolean ret2 = client.createDatabase("test2", "another_strange_loc");
-      assertTrue("Unable to create the databse", ret2);
+          TEST_DB1_NAME, db.getName());
+      assertEquals("location of the returned db is different from that of inserted db",
+          warehouse.getDefaultDatabasePath(TEST_DB1_NAME).toString(), db.getLocationUri());
+
+      Database db2 = new Database();
+      db2.setName(TEST_DB2_NAME);
+      client.createDatabase(db2);
 
-      Database db2 = client.getDatabase("test2");
+      db2 = client.getDatabase(TEST_DB2_NAME);
 
       assertEquals("name of returned db is different from that of inserted db",
-          "test2", db2.getName());
-      assertEquals(
-          "location of the returned db is different from that of inserted db",
-          "another_strange_loc", db2.getDescription());
-
-      List<String> dbs = client.getDatabases();
-
-      assertTrue("first database is not test1", dbs.contains("test1"));
-      assertTrue("second database is not test2", dbs.contains("test2"));
-
-      ret = client.dropDatabase("test1");
-      assertTrue("couldn't delete first database", ret);
-      ret = client.dropDatabase("test2");
-      assertTrue("couldn't delete second database", ret);
+          TEST_DB2_NAME, db2.getName());
+      assertEquals("location of the returned db is different from that of inserted db",
+          warehouse.getDefaultDatabasePath(TEST_DB2_NAME).toString(), db2.getLocationUri());
+
+      List<String> dbs = client.getDatabases(".*");
+
+      assertTrue("first database is not " + TEST_DB1_NAME, dbs.contains(TEST_DB1_NAME));
+      assertTrue("second database is not " + TEST_DB2_NAME, dbs.contains(TEST_DB2_NAME));
+
+      client.dropDatabase(TEST_DB1_NAME);
+      client.dropDatabase(TEST_DB2_NAME);
+      silentDropDatabase(TEST_DB1_NAME);
+      silentDropDatabase(TEST_DB2_NAME);
     } catch (Throwable e) {
       System.err.println(StringUtils.stringifyException(e));
       System.err.println("testDatabase() failed.");
@@ -495,9 +491,13 @@ public class TestHiveMetaStore extends T
       ret = client.dropType(Constants.INT_TYPE_NAME);
       assertTrue("unable to drop type integer", ret);
 
-      Type typ1_3 = null;
-      typ1_3 = client.getType(Constants.INT_TYPE_NAME);
-      assertNull("unable to drop type integer", typ1_3);
+      boolean exceptionThrown = false;
+      try {
+        client.getType(Constants.INT_TYPE_NAME);
+      } catch (NoSuchObjectException e) {
+        exceptionThrown = true;
+      }
+      assertTrue("Expected NoSuchObjectException", exceptionThrown);
     } catch (Exception e) {
       System.err.println(StringUtils.stringifyException(e));
       System.err.println("testSimpleTypeApi() failed.");
@@ -554,9 +554,13 @@ public class TestHiveMetaStore extends T
       ret = client.dropType("Person");
       assertTrue("unable to drop type Person", ret);
 
-      Type typ1_3 = null;
-      typ1_3 = client.getType("Person");
-      assertNull("unable to drop type Person", typ1_3);
+      boolean exceptionThrown = false;
+      try {
+        client.getType("Person");
+      } catch (NoSuchObjectException e) {
+        exceptionThrown = true;
+      }
+      assertTrue("Expected NoSuchObjectException", exceptionThrown);
     } catch (Exception e) {
       System.err.println(StringUtils.stringifyException(e));
       System.err.println("testComplexTypeApi() failed.");
@@ -572,9 +576,11 @@ public class TestHiveMetaStore extends T
       String typeName = "Person";
 
       client.dropTable(dbName, tblName);
-      client.dropDatabase(dbName);
-      boolean ret = client.createDatabase(dbName, "strange_loc");
-      assertTrue("Unable to create the databse " + dbName, ret);
+      silentDropDatabase(dbName);
+
+      Database db = new Database();
+      db.setName(dbName);
+      client.createDatabase(db);
 
       client.dropType(typeName);
       Type typ1 = new Type();
@@ -584,8 +590,7 @@ public class TestHiveMetaStore extends T
           new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
       typ1.getFields().add(
           new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
-      ret = client.createType(typ1);
-      assertTrue("Unable to create type " + typeName, ret);
+      client.createType(typ1);
 
       Table tbl = new Table();
       tbl.setDbName(dbName);
@@ -610,6 +615,14 @@ public class TestHiveMetaStore extends T
 
       client.createTable(tbl);
 
+      if (isThriftClient) {
+        // the createTable() above does not update the location in the 'tbl'
+        // object when the client is a thrift client and the code below relies
+        // on the location being present in the 'tbl' object - so get the table
+        // from the metastore
+        tbl = client.getTable(dbName, tblName);
+      }
+
       Table tbl2 = client.getTable(dbName, tblName);
       assertNotNull(tbl2);
       assertEquals(tbl2.getDbName(), dbName);
@@ -647,6 +660,9 @@ public class TestHiveMetaStore extends T
       }
 
       client.createTable(tbl2);
+      if (isThriftClient) {
+        tbl2 = client.getTable(tbl2.getDbName(), tbl2.getTableName());
+      }
 
       Table tbl3 = client.getTable(dbName, tblName2);
       assertNotNull(tbl3);
@@ -683,17 +699,15 @@ public class TestHiveMetaStore extends T
           (tbl2.getPartitionKeys() == null)
               || (tbl2.getPartitionKeys().size() == 0));
 
-      FileSystem fs = FileSystem.get(hiveConf);
+      FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), hiveConf);
       client.dropTable(dbName, tblName);
       assertFalse(fs.exists(new Path(tbl.getSd().getLocation())));
 
       client.dropTable(dbName, tblName2);
       assertTrue(fs.exists(new Path(tbl2.getSd().getLocation())));
 
-      ret = client.dropType(typeName);
-      assertTrue("Unable to drop type " + typeName, ret);
-      ret = client.dropDatabase(dbName);
-      assertTrue("Unable to drop databse " + dbName, ret);
+      client.dropType(typeName);
+      client.dropDatabase(dbName);
     } catch (Exception e) {
       System.err.println(StringUtils.stringifyException(e));
       System.err.println("testSimpleTable() failed.");
@@ -702,15 +716,17 @@ public class TestHiveMetaStore extends T
   }
 
   public void testAlterTable() throws Exception {
-    try {
-      String dbName = "alterdb";
-      String invTblName = "alter-tbl";
-      String tblName = "altertbl";
+    String dbName = "alterdb";
+    String invTblName = "alter-tbl";
+    String tblName = "altertbl";
 
+    try {
       client.dropTable(dbName, tblName);
-      client.dropDatabase(dbName);
-      boolean ret = client.createDatabase(dbName, "strange_loc");
-      assertTrue("Unable to create the databse " + dbName, ret);
+      silentDropDatabase(dbName);
+
+      Database db = new Database();
+      db.setName(dbName);
+      client.createDatabase(db);
 
       ArrayList<FieldSchema> invCols = new ArrayList<FieldSchema>(2);
       invCols.add(new FieldSchema("n-ame", Constants.STRING_TYPE_NAME, ""));
@@ -752,6 +768,10 @@ public class TestHiveMetaStore extends T
       tbl.getSd().setCols(cols);
       client.createTable(tbl);
 
+      if (isThriftClient) {
+        tbl = client.getTable(tbl.getDbName(), tbl.getTableName());
+      }
+
       // now try to invalid alter table
       Table tbl2 = client.getTable(dbName, tblName);
       failed = false;
@@ -775,17 +795,22 @@ public class TestHiveMetaStore extends T
       assertEquals("Alter table didn't succeed. Num buckets is different ",
           tbl2.getSd().getNumBuckets(), tbl3.getSd().getNumBuckets());
       // check that data has moved
-      FileSystem fs = FileSystem.get(hiveConf);
+      FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), hiveConf);
       assertFalse("old table location still exists", fs.exists(new Path(tbl
           .getSd().getLocation())));
       assertTrue("data did not move to new location", fs.exists(new Path(tbl3
           .getSd().getLocation())));
-      assertEquals("alter table didn't move data correct location", tbl3
-          .getSd().getLocation(), tbl2.getSd().getLocation());
+
+      if (!isThriftClient) {
+        assertEquals("alter table didn't move data correct location", tbl3
+            .getSd().getLocation(), tbl2.getSd().getLocation());
+      }
     } catch (Exception e) {
       System.err.println(StringUtils.stringifyException(e));
       System.err.println("testSimpleTable() failed.");
       throw e;
+    } finally {
+      silentDropDatabase(dbName);
     }
   }
 
@@ -797,9 +822,10 @@ public class TestHiveMetaStore extends T
 
     try {
       client.dropTable(dbName, tblName);
-      client.dropDatabase(dbName);
-      boolean ret = client.createDatabase(dbName, "strange_loc");
-      assertTrue("Unable to create the databse " + dbName, ret);
+      silentDropDatabase(dbName);
+      Database db = new Database();
+      db.setName(dbName);
+      client.createDatabase(db);
 
       client.dropType(typeName);
       Type typ1 = new Type();
@@ -809,8 +835,7 @@ public class TestHiveMetaStore extends T
           new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
       typ1.getFields().add(
           new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
-      ret = client.createType(typ1);
-      assertTrue("Unable to create type " + typeName, ret);
+      client.createType(typ1);
 
       Table tbl = new Table();
       tbl.setDbName(dbName);
@@ -887,8 +912,7 @@ public class TestHiveMetaStore extends T
       client.dropTable(dbName, tblName);
       boolean ret = client.dropType(typeName);
       assertTrue("Unable to drop type " + typeName, ret);
-      ret = client.dropDatabase(dbName);
-      assertTrue("Unable to create the databse " + dbName, ret);
+      client.dropDatabase(dbName);
     }
   }
 
@@ -896,20 +920,21 @@ public class TestHiveMetaStore extends T
 
     String val = "value";
 
-    try {
-      assertEquals(client.getConfigValue("hive.key1", val), "value1");
-      assertEquals(client.getConfigValue("hive.key2", val),
-          "http://www.example.com");
-      assertEquals(client.getConfigValue("hive.key3", val), "");
-      assertEquals(client.getConfigValue("hive.key4", val), "0");
-      assertEquals(client.getConfigValue("hive.key5", val), val);
-      assertEquals(client.getConfigValue(null, val), val);
-    } catch (TException e) {
-      e.printStackTrace();
-      assert (false);
-    } catch (ConfigValSecurityException e) {
-      e.printStackTrace();
-      assert (false);
+    if (!isThriftClient) {
+      try {
+        assertEquals(client.getConfigValue("hive.key1", val), "value1");
+        assertEquals(client.getConfigValue("hive.key2", val), "http://www.example.com");
+        assertEquals(client.getConfigValue("hive.key3", val), "");
+        assertEquals(client.getConfigValue("hive.key4", val), "0");
+        assertEquals(client.getConfigValue("hive.key5", val), val);
+        assertEquals(client.getConfigValue(null, val), val);
+      } catch (TException e) {
+        e.printStackTrace();
+          assert (false);
+      } catch (ConfigValSecurityException e) {
+        e.printStackTrace();
+        assert (false);
+      }
     }
 
     boolean threwException = false;
@@ -932,4 +957,15 @@ public class TestHiveMetaStore extends T
     part.setCreateTime(part_get.getCreateTime());
     part.putToParameters(org.apache.hadoop.hive.metastore.api.Constants.DDL_TIME, Long.toString(part_get.getCreateTime()));
   }
+
+  private static void silentDropDatabase(String dbName) throws MetaException, TException {
+    try {
+      for (String tableName : client.getTables(dbName, "*")) {
+        client.dropTable(dbName, tableName);
+      }
+      client.dropDatabase(dbName);
+    } catch (NoSuchObjectException e) {
+    } catch (InvalidOperationException e) {
+    }
+  }
 }

Added: hadoop/hive/branches/branch-0.6/metastore/src/test/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java
URL: http://svn.apache.org/viewvc/hadoop/hive/branches/branch-0.6/metastore/src/test/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java?rev=997851&view=auto
==============================================================================
--- hadoop/hive/branches/branch-0.6/metastore/src/test/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java (added)
+++ hadoop/hive/branches/branch-0.6/metastore/src/test/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java Thu Sep 16 17:09:41 2010
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+
+
+public class TestRemoteHiveMetaStore extends TestHiveMetaStore {
+  private static final String METASTORE_PORT = "29083";
+  private static boolean isServerRunning = false;
+
+  private static class RunMS implements Runnable {
+
+      @Override
+      public void run() {
+        System.out.println("Running metastore!");
+        String [] args = new String [1];
+        args[0] = METASTORE_PORT;
+        HiveMetaStore.main(args);
+      }
+
+    }
+
+  @Override
+  protected void setUp() throws Exception {
+    super.setUp();
+    if(isServerRunning) {
+      return;
+    }
+    Thread t = new Thread(new RunMS());
+    t.start();
+
+    // Wait a little bit for the metastore to start. Should probably have
+    // a better way of detecting if the metastore has started?
+    Thread.sleep(5000);
+
+    // hive.metastore.local should be defined in HiveConf
+    hiveConf.set("hive.metastore.local", "false");
+    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + METASTORE_PORT);
+    hiveConf.setIntVar(HiveConf.ConfVars.METATORETHRIFTRETRIES, 3);
+
+    client = new HiveMetaStoreClient(hiveConf);
+    isThriftClient = true;
+
+    // Now you have the client - run necessary tests.
+    isServerRunning = true;
+  }
+
+}

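The fixed Thread.sleep(5000) in setUp() is flagged by its own comment as a stopgap. One possible
readiness check, sketched here as an assumption rather than anything in this commit, is to poll
the Thrift port until it accepts connections:

    // Hypothetical helper: poll the metastore port instead of sleeping.
    private static void waitForServer(int port, long timeoutMs) throws Exception {
      long deadline = System.currentTimeMillis() + timeoutMs;
      while (System.currentTimeMillis() < deadline) {
        try {
          new java.net.Socket("localhost", port).close();
          return; // the server is accepting connections
        } catch (java.io.IOException e) {
          Thread.sleep(200); // not up yet; retry
        }
      }
      throw new IllegalStateException("Metastore did not start in " + timeoutMs + " ms");
    }
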
Modified: hadoop/hive/branches/branch-0.6/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hadoop/hive/branches/branch-0.6/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=997851&r1=997850&r2=997851&view=diff
==============================================================================
--- hadoop/hive/branches/branch-0.6/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hadoop/hive/branches/branch-0.6/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Thu Sep 16 17:09:41 2010
@@ -52,9 +52,12 @@ import org.apache.hadoop.hive.conf.HiveC
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.ql.DriverContext;
 import org.apache.hadoop.hive.ql.QueryPlan;
@@ -71,18 +74,22 @@ import org.apache.hadoop.hive.ql.metadat
 import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc;
+import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
 import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc;
 import org.apache.hadoop.hive.ql.plan.CreateViewDesc;
 import org.apache.hadoop.hive.ql.plan.DDLWork;
 import org.apache.hadoop.hive.ql.plan.DescFunctionDesc;
 import org.apache.hadoop.hive.ql.plan.DescTableDesc;
+import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.DropTableDesc;
 import org.apache.hadoop.hive.ql.plan.MsckDesc;
+import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc;
 import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc;
 import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc;
 import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc;
 import org.apache.hadoop.hive.ql.plan.ShowTablesDesc;
+import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
 import org.apache.hadoop.hive.serde.Constants;
@@ -139,6 +146,21 @@ public class DDLTask extends Task<DDLWor
     try {
       db = Hive.get(conf);
 
+      CreateDatabaseDesc createDatabaseDesc = work.getCreateDatabaseDesc();
+      if (null != createDatabaseDesc) {
+        return createDatabase(db, createDatabaseDesc);
+      }
+
+      DropDatabaseDesc dropDatabaseDesc = work.getDropDatabaseDesc();
+      if (dropDatabaseDesc != null) {
+        return dropDatabase(db, dropDatabaseDesc);
+      }
+
+      SwitchDatabaseDesc switchDatabaseDesc = work.getSwitchDatabaseDesc();
+      if (switchDatabaseDesc != null) {
+        return switchDatabase(db, switchDatabaseDesc);
+      }
+      
       CreateTableDesc crtTbl = work.getCreateTblDesc();
       if (crtTbl != null) {
         return createTable(db, crtTbl);
@@ -195,6 +217,11 @@ public class DDLTask extends Task<DDLWor
         return describeFunction(descFunc);
       }
 
+      ShowDatabasesDesc showDatabases = work.getShowDatabasesDesc();
+      if (showDatabases != null) {
+        return showDatabases(db, showDatabases);
+      }
+
       ShowTablesDesc showTbls = work.getShowTblsDesc();
       if (showTbls != null) {
         return showTables(db, showTbls);
@@ -843,11 +870,10 @@ public class DDLTask extends Task<DDLWor
     List<String> repairOutput = new ArrayList<String>();
     try {
       HiveMetaStoreChecker checker = new HiveMetaStoreChecker(db);
-      checker.checkMetastore(MetaStoreUtils.DEFAULT_DATABASE_NAME, msckDesc
+      checker.checkMetastore(db.getCurrentDatabase(), msckDesc
           .getTableName(), msckDesc.getPartSpecs(), result);
       if (msckDesc.isRepairPartitions()) {
-        Table table = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME,
-            msckDesc.getTableName());
+        Table table = db.getTable(msckDesc.getTableName());
         for (CheckResult.PartitionResult part : result.getPartitionsNotInMs()) {
           try {
             db.createPartition(table, Warehouse.makeSpecFromName(part
@@ -959,18 +985,17 @@ public class DDLTask extends Task<DDLWor
     Table tbl = null;
     List<String> parts = null;
 
-    tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tabName);
+    tbl = db.getTable(tabName);
 
     if (!tbl.isPartitioned()) {
       console.printError("Table " + tabName + " is not a partitioned table");
       return 1;
     }
     if (showParts.getPartSpec() != null) {
-      parts = db.getPartitionNames(MetaStoreUtils.DEFAULT_DATABASE_NAME,
+      parts = db.getPartitionNames(db.getCurrentDatabase(),
           tbl.getTableName(), showParts.getPartSpec(), (short) -1);
     } else {
-      parts = db.getPartitionNames(MetaStoreUtils.DEFAULT_DATABASE_NAME, tbl
-          .getTableName(), (short) -1);
+      parts = db.getPartitionNames(db.getCurrentDatabase(), tbl.getTableName(), (short) -1);
     }
 
     // write the results in the file
@@ -1000,6 +1025,50 @@ public class DDLTask extends Task<DDLWor
   }
 
   /**
+   * Write a list of the available databases to a file.
+   *
+   * @param showDatabasesDesc
+   *          These are the databases we're interested in.
+   * @return Returns 0 when execution succeeds and above 0 if it fails.
+   * @throws HiveException
+   *           Throws this exception if an unexpected error occurs.
+   */
+  private int showDatabases(Hive db, ShowDatabasesDesc showDatabasesDesc) throws HiveException {
+    // get the databases for the desired pattern - populate the output stream
+    List<String> databases = null;
+    if (showDatabasesDesc.getPattern() != null) {
+      LOG.info("pattern: " + showDatabasesDesc.getPattern());
+      databases = db.getDatabasesByPattern(showDatabasesDesc.getPattern());
+    } else {
+      databases = db.getAllDatabases();
+    }
+    LOG.info("results : " + databases.size());
+
+    // write the results in the file
+    try {
+      Path resFile = new Path(showDatabasesDesc.getResFile());
+      FileSystem fs = resFile.getFileSystem(conf);
+      FSDataOutputStream outStream = fs.create(resFile);
+
+      for (String database : databases) {
+        // create a row per database name
+        outStream.writeBytes(database);
+        outStream.write(terminator);
+      }
+      outStream.close();
+    } catch (FileNotFoundException e) {
+      LOG.warn("show databases: " + stringifyException(e));
+      return 1;
+    } catch (IOException e) {
+      LOG.warn("show databases: " + stringifyException(e));
+      return 1;
+    } catch (Exception e) {
+      throw new HiveException(e.toString());
+    }
+    return 0;
+  }
+
+  /**
    * Write a list of the tables in the database to a file.
    *
    * @param db
@@ -1294,7 +1363,7 @@ public class DDLTask extends Task<DDLWor
         colPath.indexOf('.') == -1 ? colPath.length() : colPath.indexOf('.'));
 
     // describe the table - populate the output stream
-    Table tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName,
+    Table tbl = db.getTable(db.getCurrentDatabase(), tableName,
         false);
     Partition part = null;
     try {
@@ -1546,8 +1615,7 @@ public class DDLTask extends Task<DDLWor
    */
   private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException {
     // alter the table
-    Table tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, alterTbl
-        .getOldName());
+    Table tbl = db.getTable(alterTbl.getOldName());
 
     validateAlterTableType(tbl, alterTbl.getOp());
 
@@ -1767,8 +1835,7 @@ public class DDLTask extends Task<DDLWor
     // post-execution hook
     Table tbl = null;
     try {
-      tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, dropTbl
-          .getTableName());
+      tbl = db.getTable(dropTbl.getTableName());
     } catch (InvalidTableException e) {
       // drop table is idempotent
     }
@@ -1787,17 +1854,14 @@ public class DDLTask extends Task<DDLWor
 
     if (dropTbl.getPartSpecs() == null) {
       // drop the table
-      db
-          .dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, dropTbl
-          .getTableName());
+      db.dropTable(db.getCurrentDatabase(), dropTbl.getTableName());
       if (tbl != null) {
         work.getOutputs().add(new WriteEntity(tbl));
       }
     } else {
       // get all partitions of the table
-      List<String> partitionNames = db.getPartitionNames(
-          MetaStoreUtils.DEFAULT_DATABASE_NAME, dropTbl.getTableName(),
-          (short) -1);
+      List<String> partitionNames =
+        db.getPartitionNames(db.getCurrentDatabase(), dropTbl.getTableName(), (short) -1);
       Set<Map<String, String>> partitions = new HashSet<Map<String, String>>();
       for (int i = 0; i < partitionNames.size(); i++) {
         try {
@@ -1831,7 +1895,7 @@ public class DDLTask extends Task<DDLWor
       // drop all existing partitions from the list
       for (Partition partition : partsToDelete) {
         console.printInfo("Dropping the partition " + partition.getName());
-        db.dropPartition(MetaStoreUtils.DEFAULT_DATABASE_NAME, dropTbl
+        db.dropPartition(db.getCurrentDatabase(), dropTbl
             .getTableName(), partition.getValues(), true); // drop data for the
         // partition
         work.getOutputs().add(new WriteEntity(partition));
@@ -1856,6 +1920,56 @@ public class DDLTask extends Task<DDLWor
   }
 
   /**
+   * Create a Database.
+   * @param db the Hive context
+   * @param crtDb the database creation descriptor
+   * @return Always returns 0
+   * @throws HiveException
+   * @throws AlreadyExistsException
+   */
+  private int createDatabase(Hive db, CreateDatabaseDesc crtDb)
+      throws HiveException, AlreadyExistsException {
+    Database database = new Database();
+    database.setName(crtDb.getName());
+    database.setDescription(crtDb.getComment());
+    database.setLocationUri(crtDb.getLocationUri());
+
+    db.createDatabase(database, crtDb.getIfNotExists());
+    return 0;
+  }
+
+  /**
+   * Drop a Database.
+   * @param db the Hive context
+   * @param dropDb the database drop descriptor
+   * @return Always returns 0
+   * @throws HiveException
+   * @throws NoSuchObjectException
+   */
+  private int dropDatabase(Hive db, DropDatabaseDesc dropDb)
+      throws HiveException, NoSuchObjectException {
+    db.dropDatabase(dropDb.getDatabaseName(), true, dropDb.getIfExists());
+    return 0;
+  }
+
+  /**
+   * Switch to a different Database.
+   * @param db the Hive context
+   * @param switchDb the database switch descriptor
+   * @return Always returns 0
+   * @throws HiveException
+   */
+  private int switchDatabase(Hive db, SwitchDatabaseDesc switchDb)
+      throws HiveException {
+    String dbName = switchDb.getDatabaseName();
+    if (!db.databaseExists(dbName)) {
+      throw new HiveException("ERROR: The database " + dbName + " does not exist.");
+    }
+    db.setCurrentDatabase(dbName);
+    return 0;
+  }
+
+  /**
    * Create a new table.
    *
    * @param db
@@ -1868,7 +1982,7 @@ public class DDLTask extends Task<DDLWor
    */
   private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException {
     // create the table
-    Table tbl = new Table(crtTbl.getTableName());
+    Table tbl = new Table(db.getCurrentDatabase(), crtTbl.getTableName());
     if (crtTbl.getPartCols() != null) {
       tbl.setPartCols(crtTbl.getPartCols());
     }
@@ -2027,8 +2141,7 @@ public class DDLTask extends Task<DDLWor
    */
   private int createTableLike(Hive db, CreateTableLikeDesc crtTbl) throws HiveException {
     // Get the existing table
-    Table tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, crtTbl
-        .getLikeTableName());
+    Table tbl = db.getTable(crtTbl.getLikeTableName());
 
     tbl.setTableName(crtTbl.getTableName());
 
@@ -2062,7 +2175,7 @@ public class DDLTask extends Task<DDLWor
    *           Throws this exception if an unexpected error occurs.
    */
   private int createView(Hive db, CreateViewDesc crtView) throws HiveException {
-    Table tbl = new Table(crtView.getViewName());
+    Table tbl = new Table(db.getCurrentDatabase(), crtView.getViewName());
     tbl.setTableType(TableType.VIRTUAL_VIEW);
     tbl.setSerializationLib(null);
     tbl.clearSerDeInfo();

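The showDatabases() method above reuses the result-file convention of the
existing showTables() path: one name per row, each row ended by the task's
line terminator, written through the result path's FileSystem. A minimal
standalone sketch of that pattern follows; the output path, the sample
database names, and the plain '\n' terminator are illustrative stand-ins,
not values taken from this patch.

    import java.io.IOException;
    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ShowDatabasesSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Illustrative result-file location; DDLTask gets this from the descriptor.
        Path resFile = new Path("/tmp/show_databases.res");
        FileSystem fs = resFile.getFileSystem(conf);
        FSDataOutputStream outStream = fs.create(resFile);
        List<String> databases = Arrays.asList("default", "sales");
        for (String database : databases) {
          outStream.writeBytes(database); // one row per database name
          outStream.write('\n');          // DDLTask writes its own terminator byte
        }
        outStream.close();
      }
    }
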
Modified: hadoop/hive/branches/branch-0.6/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
URL: http://svn.apache.org/viewvc/hadoop/hive/branches/branch-0.6/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java?rev=997851&r1=997850&r2=997851&view=diff
==============================================================================
--- hadoop/hive/branches/branch-0.6/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java (original)
+++ hadoop/hive/branches/branch-0.6/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java Thu Sep 16 17:09:41 2010
@@ -32,7 +32,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.hooks.LineageInfo.DataContainer;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
@@ -131,7 +130,7 @@ public class MoveTask extends Task<MoveW
         }
         String mesg_detail = " from " + tbd.getSourceDir();
         console.printInfo(mesg.toString(), mesg_detail);
-        Table table = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tbd
+        Table table = db.getTable(db.getCurrentDatabase(), tbd
             .getTable().getTableName());
 
         if (work.getCheckFileFormat()) {

Modified: hadoop/hive/branches/branch-0.6/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hadoop/hive/branches/branch-0.6/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=997851&r1=997850&r2=997851&view=diff
==============================================================================
--- hadoop/hive/branches/branch-0.6/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hadoop/hive/branches/branch-0.6/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Thu Sep 16 17:09:41 2010
@@ -18,6 +18,16 @@
 
 package org.apache.hadoop.hive.ql.metadata;
 
+import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE;
+import static org.apache.hadoop.hive.serde.Constants.COLLECTION_DELIM;
+import static org.apache.hadoop.hive.serde.Constants.ESCAPE_CHAR;
+import static org.apache.hadoop.hive.serde.Constants.FIELD_DELIM;
+import static org.apache.hadoop.hive.serde.Constants.LINE_DELIM;
+import static org.apache.hadoop.hive.serde.Constants.MAPKEY_DELIM;
+import static org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT;
+import static org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.LinkedHashMap;
@@ -39,7 +49,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.Constants;
+import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -65,6 +75,7 @@ public class Hive {
 
   private HiveConf conf = null;
   private IMetaStoreClient metaStoreClient;
+  private String currentDatabase;
 
   private static ThreadLocal<Hive> hiveDB = new ThreadLocal() {
     @Override
@@ -165,6 +176,68 @@ public class Hive {
   }
 
   /**
+   * Create a database.
+   * @param db the database object to create
+   * @param ifNotExist if true, ignore AlreadyExistsException
+   * @throws AlreadyExistsException
+   * @throws HiveException
+   */
+  public void createDatabase(Database db, boolean ifNotExist)
+      throws AlreadyExistsException, HiveException {
+    try {
+      getMSC().createDatabase(db);
+    } catch (AlreadyExistsException e) {
+      if (!ifNotExist) {
+        throw e;
+      }
+    } catch (Exception e) {
+      throw new HiveException(e);
+    }
+  }
+
+  /**
+   * Create a Database. Raise an error if a database with the same name already exists.
+   * @param db the database object to create
+   * @throws AlreadyExistsException
+   * @throws HiveException
+   */
+  public void createDatabase(Database db) throws AlreadyExistsException, HiveException {
+    createDatabase(db, false);
+  }
+
+  /**
+   * Drop a database.
+   * @param name
+   * @throws NoSuchObjectException
+   * @throws HiveException
+   * @see org.apache.hadoop.hive.metastore.HiveMetaStoreClient#dropDatabase(java.lang.String)
+   */
+  public void dropDatabase(String name) throws HiveException, NoSuchObjectException {
+    dropDatabase(name, true, false);
+  }
+
+  /**
+   * Drop a database
+   * @param name database to drop
+   * @param deleteData if true, delete the data in the database as well
+   * @param ignoreUnknownDb if true, will ignore NoSuchObjectException
+   * @throws HiveException
+   * @throws NoSuchObjectException
+   */
+  public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb)
+      throws HiveException, NoSuchObjectException {
+    try {
+      getMSC().dropDatabase(name, deleteData, ignoreUnknownDb);
+    } catch (NoSuchObjectException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new HiveException(e);
+    }
+  }
+
+  /**
   * Creates the table metadata and the directory for the table data
    *
    * @param tableName
@@ -216,13 +289,12 @@ public class Hive {
       throw new HiveException("columns not specified for table " + tableName);
     }
 
-    Table tbl = new Table(tableName);
+    Table tbl = new Table(getCurrentDatabase(), tableName);
     tbl.setInputFormatClass(fileInputFormat.getName());
     tbl.setOutputFormatClass(fileOutputFormat.getName());
 
     for (String col : columns) {
-      FieldSchema field = new FieldSchema(col,
-          org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME, "default");
+      FieldSchema field = new FieldSchema(col, STRING_TYPE_NAME, "default");
       tbl.getCols().add(field);
     }
 
@@ -230,7 +302,7 @@ public class Hive {
       for (String partCol : partCols) {
         FieldSchema part = new FieldSchema();
         part.setName(partCol);
-        part.setType(org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME); // default
+        part.setType(STRING_TYPE_NAME); // default
                                                                                // partition
                                                                                // key
         tbl.getPartCols().add(part);
@@ -256,7 +328,7 @@ public class Hive {
   public void alterTable(String tblName, Table newTbl)
       throws InvalidOperationException, HiveException {
     try {
-      getMSC().alter_table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName,
+      getMSC().alter_table(getCurrentDatabase(), tblName,
           newTbl.getTTable());
     } catch (MetaException e) {
       throw new HiveException("Unable to alter table.", e);
@@ -279,7 +351,7 @@ public class Hive {
   public void alterPartition(String tblName, Partition newPart)
       throws InvalidOperationException, HiveException {
     try {
-      getMSC().alter_partition(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName,
+      getMSC().alter_partition(getCurrentDatabase(), tblName,
           newPart.getTPartition());
 
     } catch (MetaException e) {
@@ -311,6 +383,9 @@ public class Hive {
    */
   public void createTable(Table tbl, boolean ifNotExists) throws HiveException {
     try {
+      if (tbl.getDbName() == null || "".equals(tbl.getDbName().trim())) {
+        tbl.setDbName(getCurrentDatabase());
+      }
       if (tbl.getCols().size() == 0) {
         tbl.setFields(MetaStoreUtils.getFieldsFromDeserializer(tbl.getTableName(),
             tbl.getDeserializer()));
@@ -337,6 +412,26 @@ public class Hive {
    * @throws HiveException
    *           thrown if the drop fails
    */
+  public void dropTable(String tableName) throws HiveException {
+    dropTable(getCurrentDatabase(), tableName, true, true);
+  }
+
+  /**
+   * Drops the table along with the data in it. If the table doesn't exist
+   * then it is a no-op.
+   *
+   * @param dbName
+   *          database where the table lives
+   * @param tableName
+   *          table to drop
+   * @throws HiveException
+   *           thrown if the drop fails
+   */
   public void dropTable(String dbName, String tableName) throws HiveException {
     dropTable(dbName, tableName, true, true);
   }
@@ -370,7 +465,18 @@ public class Hive {
   }
 
   /**
-   * Returns metadata of the table.
+   * Returns metadata for the table named tableName in the current database.
+   * @param tableName the name of the table
+   * @return the table metadata
+   * @throws HiveException if there's an internal error or if the
+   * table doesn't exist
+   */
+  public Table getTable(final String tableName) throws HiveException {
+    return this.getTable(getCurrentDatabase(), tableName, true);
+  }
+
+  /**
+   * Returns metadata of the table.
    *
    * @param dbName
    *          the name of the database
@@ -380,12 +486,10 @@ public class Hive {
    * @exception HiveException
    *              if there's an internal error or if the table doesn't exist
    */
-  public Table getTable(final String dbName, final String tableName)
-      throws HiveException {
-
+  public Table getTable(final String dbName, final String tableName) throws HiveException {
     return this.getTable(dbName, tableName, true);
   }
-
+
   /**
    * Returns metadata of the table
    *
@@ -423,11 +527,11 @@ public class Hive {
     if (!TableType.VIRTUAL_VIEW.toString().equals(tTable.getTableType())) {
       // Fix the non-printable chars
       Map<String, String> parameters = tTable.getSd().getParameters();
-      String sf = parameters.get(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT);
+      String sf = parameters.get(SERIALIZATION_FORMAT);
       if (sf != null) {
         char[] b = sf.toCharArray();
         if ((b.length == 1) && (b[0] < 10)) { // ^A, ^B, ^C, ^D, \t
-          parameters.put(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT,
+          parameters.put(SERIALIZATION_FORMAT,
               Integer.toString(b[0]));
         }
       }
@@ -454,12 +558,27 @@ public class Hive {
     return table;
   }
 
+  /**
+   * Get all table names for the current database.
+   * @return List of table names
+   * @throws HiveException
+   */
   public List<String> getAllTables() throws HiveException {
-    return getTablesByPattern(".*");
+    return getAllTables(getCurrentDatabase());
   }
 
   /**
-   * returns all existing tables from default database which match the given
+   * Get all table names for the specified database.
+   * @param dbName the database to list
+   * @return List of table names
+   * @throws HiveException
+   */
+  public List<String> getAllTables(String dbName) throws HiveException {
+    return getTablesByPattern(dbName, ".*");
+  }
+
+  /**
+   * Returns all existing tables from the current database which match the given
    * pattern. The matching occurs as per Java regular expressions
    *
    * @param tablePattern
@@ -467,13 +586,28 @@ public class Hive {
    * @return list of table names
    * @throws HiveException
    */
-  public List<String> getTablesByPattern(String tablePattern)
-      throws HiveException {
-    return getTablesForDb(MetaStoreUtils.DEFAULT_DATABASE_NAME, tablePattern);
+  public List<String> getTablesByPattern(String tablePattern) throws HiveException {
+    return getTablesByPattern(getCurrentDatabase(), tablePattern);
+  }
+
+  /**
+   * Returns all existing tables from the specified database which match the given
+   * pattern. The matching occurs as per Java regular expressions.
+   * @param dbName the database to search
+   * @param tablePattern java re pattern
+   * @return list of table names
+   * @throws HiveException
+   */
+  public List<String> getTablesByPattern(String dbName, String tablePattern) throws HiveException {
+    try {
+      return getMSC().getTables(dbName, tablePattern);
+    } catch (Exception e) {
+      throw new HiveException(e);
+    }
   }
 
   /**
-   * returns all existing tables from the given database which match the given
+   * Returns all existing tables from the given database which match the given
    * pattern. The matching occurs as per Java regular expressions
    *
    * @param database
@@ -493,29 +627,55 @@ public class Hive {
   }
 
   /**
-   * @param name
-   * @param locationUri
-   * @return true or false
-   * @throws AlreadyExistsException
-   * @throws MetaException
-   * @throws TException
-   * @see org.apache.hadoop.hive.metastore.HiveMetaStoreClient#createDatabase(java.lang.String,
-   *      java.lang.String)
+   * Get all existing database names.
+   *
+   * @return List of database names.
+   * @throws HiveException
    */
-  protected boolean createDatabase(String name, String locationUri)
-      throws AlreadyExistsException, MetaException, TException {
-    return getMSC().createDatabase(name, locationUri);
+  public List<String> getAllDatabases() throws HiveException {
+    try {
+      return getMSC().getAllDatabases();
+    } catch (Exception e) {
+      throw new HiveException(e);
+    }
   }
 
   /**
-   * @param name
-   * @return true or false
-   * @throws MetaException
-   * @throws TException
-   * @see org.apache.hadoop.hive.metastore.HiveMetaStoreClient#dropDatabase(java.lang.String)
+   * Get all existing databases that match the given
+   * pattern. The matching occurs as per Java regular expressions
+   *
+   * @param databasePattern
+   *          java re pattern
+   * @return list of database names
+   * @throws HiveException
+   */
+  public List<String> getDatabasesByPattern(String databasePattern) throws HiveException {
+    try {
+      return getMSC().getDatabases(databasePattern);
+    } catch (Exception e) {
+      throw new HiveException(e);
+    }
+  }
+
+  /**
+   * Query metadata to see if a database with the given name already exists.
+   *
+   * @param dbName the database name to check
+   * @return true if a database with the given name already exists, false if it
+   *         does not exist.
+   * @throws HiveException
    */
-  protected boolean dropDatabase(String name) throws MetaException, TException {
-    return getMSC().dropDatabase(name);
+  public boolean databaseExists(String dbName) throws HiveException {
+    try {
+      return getMSC().getDatabase(dbName) != null;
+    } catch (NoSuchObjectException e) {
+      return false;
+    } catch (Exception e) {
+      throw new HiveException(e);
+    }
   }
 
   /**
@@ -539,7 +699,7 @@ public class Hive {
   public void loadPartition(Path loadPath, String tableName,
       Map<String, String> partSpec, boolean replace, Path tmpDirPath)
       throws HiveException {
-    Table tbl = getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
+    Table tbl = getTable(tableName);
     try {
       /**
        * Move files before creating the partition since down stream processes
@@ -662,7 +822,7 @@ public class Hive {
    */
   public void loadTable(Path loadPath, String tableName, boolean replace,
       Path tmpDirPath) throws HiveException {
-    Table tbl = getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
+    Table tbl = getTable(tableName);
 
     if (replace) {
       tbl.replaceFiles(loadPath, tmpDirPath);
@@ -911,6 +1071,25 @@ public class Hive {
     return qlPartitions;
   }
 
+  /**
+   * Get the name of the current database.
+   * @return the current database name, or DEFAULT_DATABASE_NAME if none has been set
+   */
+  public String getCurrentDatabase() {
+    if (null == currentDatabase) {
+      currentDatabase = DEFAULT_DATABASE_NAME;
+    }
+    return currentDatabase;
+  }
+
+  /**
+   * Set the name of the current database.
+   * @param currentDatabase the database against which unqualified names are resolved
+   */
+  public void setCurrentDatabase(String currentDatabase) {
+    this.currentDatabase = currentDatabase;
+  }
+
   static private void checkPaths(FileSystem fs, FileStatus[] srcs, Path destf,
       boolean replace) throws HiveException {
     try {
@@ -1075,7 +1254,7 @@ public class Hive {
             HiveStorageHandler storageHandler =
               HiveUtils.getStorageHandler(
                 conf,
-                tbl.getParameters().get(Constants.META_TABLE_STORAGE));
+                tbl.getParameters().get(META_TABLE_STORAGE));
             if (storageHandler == null) {
               return null;
             }

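Taken together, the changes above give Hive a small public database API:
createDatabase(Database, boolean), databaseExists(String),
setCurrentDatabase(String), getCurrentDatabase(), and
dropDatabase(String, boolean, boolean). A sketch of how client code might
drive it, using only those signatures; the "sandbox" name and its comment
are invented for illustration.

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.api.Database;
    import org.apache.hadoop.hive.ql.metadata.Hive;

    public class DatabaseApiSketch {
      public static void run(HiveConf conf) throws Exception {
        Hive db = Hive.get(conf);

        // Create a database; ifNotExist = true swallows AlreadyExistsException.
        Database database = new Database();
        database.setName("sandbox");                  // illustrative name
        database.setDescription("scratch database");  // illustrative comment
        db.createDatabase(database, true);

        // Make it the session's current database; unqualified table names
        // in getTable(), dropTable(), etc. now resolve against it.
        if (db.databaseExists("sandbox")) {
          db.setCurrentDatabase("sandbox");
        }

        // Drop it again: deleteData = true, ignoreUnknownDb = true.
        db.dropDatabase("sandbox", true, true);
      }
    }
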
Modified: hadoop/hive/branches/branch-0.6/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
URL: http://svn.apache.org/viewvc/hadoop/hive/branches/branch-0.6/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java?rev=997851&r1=997850&r2=997851&view=diff
==============================================================================
--- hadoop/hive/branches/branch-0.6/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java (original)
+++ hadoop/hive/branches/branch-0.6/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java Thu Sep 16 17:09:41 2010
@@ -58,7 +58,7 @@ import org.apache.hadoop.mapred.Sequence
 
 /**
  * A Hive Table: is a fundamental unit of data in Hive that shares a common schema/DDL.
- * 
+ *
  * Please note that the ql code should always go through methods of this class to access the
  * metadata, instead of directly accessing org.apache.hadoop.hive.metastore.api.Table.  This
  * helps to isolate the metastore code and the ql code.
@@ -79,7 +79,7 @@ public class Table implements Serializab
   private Class<? extends InputFormat> inputFormatClass;
   private URI uri;
   private HiveStorageHandler storageHandler;
-  
+
   /**
    * Used only for serialization.
    */
@@ -96,8 +96,8 @@ public class Table implements Serializab
     }
   }
 
-  public Table(String name) {
-    this(getEmptyTable(name));
+  public Table(String databaseName, String tableName) {
+    this(getEmptyTable(databaseName, tableName));
   }
 
   /**
@@ -108,18 +108,19 @@ public class Table implements Serializab
   public org.apache.hadoop.hive.metastore.api.Table getTTable() {
     return tTable;
   }
-  
+
   /**
    * This function should only be called by Java serialization.
    */
   public void setTTable(org.apache.hadoop.hive.metastore.api.Table tTable) {
     this.tTable = tTable;
   }
-  
+
   /**
   * Initialize an empty table.
    */
-  static org.apache.hadoop.hive.metastore.api.Table getEmptyTable(String name) {
+  static org.apache.hadoop.hive.metastore.api.Table getEmptyTable(
+      String databaseName, String tableName) {
     StorageDescriptor sd = new StorageDescriptor();
     {
       sd.setSerdeInfo(new SerDeInfo());
@@ -136,15 +137,16 @@ public class Table implements Serializab
       sd.setInputFormat(SequenceFileInputFormat.class.getName());
       sd.setOutputFormat(HiveSequenceFileOutputFormat.class.getName());
     }
-    
+
     org.apache.hadoop.hive.metastore.api.Table t = new org.apache.hadoop.hive.metastore.api.Table();
     {
       t.setSd(sd);
       t.setPartitionKeys(new ArrayList<FieldSchema>());
       t.setParameters(new HashMap<String, String>());
       t.setTableType(TableType.MANAGED_TABLE.toString());
-      t.setTableName(name);
-      t.setDbName(MetaStoreUtils.DEFAULT_DATABASE_NAME);
+      t.setDbName(databaseName);
+      t.setTableName(tableName);
     }
     return t;
   }
@@ -179,7 +181,7 @@ public class Table implements Serializab
       assert(getViewOriginalText() == null);
       assert(getViewExpandedText() == null);
     }
-    
+
     Iterator<FieldSchema> iterCols = getCols().iterator();
     List<String> colNames = new ArrayList<String>();
     while (iterCols.hasNext()) {
@@ -246,7 +248,7 @@ public class Table implements Serializab
   }
 
   final public Deserializer getDeserializer() {
-    if (deserializer == null) { 
+    if (deserializer == null) {
       try {
         deserializer = MetaStoreUtils.getDeserializer(Hive.get().getConf(), tTable);
       } catch (MetaException e) {
@@ -290,12 +292,12 @@ public class Table implements Serializab
         throw new RuntimeException(e);
       }
     }
-    return inputFormatClass; 
+    return inputFormatClass;
   }
 
   final public Class<? extends HiveOutputFormat> getOutputFormatClass() {
     // Replace FileOutputFormat for backward compatibility
-    
+
     if (outputFormatClass == null) {
       try {
         String className = tTable.getSd().getOutputFormat();
@@ -490,7 +492,7 @@ public class Table implements Serializab
   /**
    * Returns a list of all the columns of the table (data columns + partition
   * columns in that order).
-   * 
+   *
    * @return List<FieldSchema>
    */
   public List<FieldSchema> getAllCols() {
@@ -515,7 +517,7 @@ public class Table implements Serializab
   /**
    * Replaces files in the partition with new data set specified by srcf. Works
    * by moving files
-   * 
+   *
    * @param srcf
    *          Files to be replaced. Leaf directories or globbed file paths
    * @param tmpd
@@ -533,7 +535,7 @@ public class Table implements Serializab
 
   /**
    * Inserts files specified into the partition. Works by moving files
-   * 
+   *
    * @param srcf
    *          Files to be moved. Leaf directories or globbed file paths
    */
@@ -662,15 +664,15 @@ public class Table implements Serializab
   public void setTableName(String tableName) {
     tTable.setTableName(tableName);
   }
-  
+
   public void setDbName(String databaseName) {
     tTable.setDbName(databaseName);
   }
-  
+
   public List<FieldSchema> getPartitionKeys() {
     return tTable.getPartitionKeys();
   }
-  
+
   /**
    * @return the original view text, or null if this table is not a view
    */
@@ -713,7 +715,7 @@ public class Table implements Serializab
 
   /**
    * Creates a partition name -> value spec map object
-   * 
+   *
    * @param tp
    *          Use the information from this partition.
    * @return Partition name to value mapping.
@@ -735,7 +737,7 @@ public class Table implements Serializab
   public Table copy() throws HiveException {
     return new Table(tTable.clone());
   }
-  
+
   public void setCreateTime(int createTime) {
     tTable.setCreateTime(createTime);
   }

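With Table(String) replaced by Table(String databaseName, String tableName),
every caller now names the database explicitly, usually via
Hive.getCurrentDatabase() as in the DDLTask changes above. A short sketch of
the new constructor in use; the "events" table and its column are
placeholders, not part of the patch.

    import org.apache.hadoop.hive.metastore.api.FieldSchema;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.Table;

    public class TableConstructorSketch {
      public static void run(Hive db) throws HiveException {
        // getEmptyTable() seeds a MANAGED_TABLE with SequenceFile input and
        // HiveSequenceFileOutputFormat, so only the columns are added here.
        Table tbl = new Table(db.getCurrentDatabase(), "events"); // placeholder name
        tbl.getCols().add(new FieldSchema("id", "string", "placeholder column"));
        db.createTable(tbl, true); // ifNotExists = true
      }
    }
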
Modified: hadoop/hive/branches/branch-0.6/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/branches/branch-0.6/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java?rev=997851&r1=997850&r2=997851&view=diff
==============================================================================
--- hadoop/hive/branches/branch-0.6/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java (original)
+++ hadoop/hive/branches/branch-0.6/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java Thu Sep 16 17:09:41 2010
@@ -31,7 +31,6 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.ql.Context;
@@ -457,8 +456,7 @@ public abstract class BaseSemanticAnalyz
               + tableName;
         }
 
-        tableHandle = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME,
-            tableName);
+        tableHandle = db.getTable(tableName);
       } catch (InvalidTableException ite) {
         throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(ast
             .getChild(0)), ite);