Posted to commits@hive.apache.org by gu...@apache.org on 2014/02/28 03:13:46 UTC

svn commit: r1572806 [2/2] - in /hive/branches/tez: ./ common/src/java/org/apache/hadoop/hive/conf/ itests/hive-unit/src/test/java/org/apache/hive/jdbc/ metastore/scripts/upgrade/derby/ metastore/scripts/upgrade/mysql/ metastore/scripts/upgrade/oracle/...

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java Fri Feb 28 02:13:45 2014
@@ -47,371 +47,344 @@ import org.codehaus.jackson.map.ObjectMa
  * json.
  */
 public class JsonMetaDataFormatter implements MetaDataFormatter {
-    private static final Log LOG = LogFactory.getLog(JsonMetaDataFormatter.class);
+  private static final Log LOG = LogFactory.getLog(JsonMetaDataFormatter.class);
 
-    /**
-     * Convert the map to a JSON string.
-     */
-    private void asJson(OutputStream out, Map<String, Object> data)
-        throws HiveException
-    {
-        try {
-            new ObjectMapper().writeValue(out, data);
-        } catch (IOException e) {
-            throw new HiveException("Unable to convert to json", e);
-        }
+  /**
+   * Convert the map to a JSON string.
+   */
+  private void asJson(OutputStream out, Map<String, Object> data)
+      throws HiveException
+      {
+    try {
+      new ObjectMapper().writeValue(out, data);
+    } catch (IOException e) {
+      throw new HiveException("Unable to convert to json", e);
     }
+      }
 
-    /**
-     * Write an error message.
-     */
-    @Override
-    public void error(OutputStream out, String msg, int errorCode, String sqlState)
-        throws HiveException
-    {
-        error(out, msg, errorCode, sqlState, null);
-    }
-    @Override
-    public void error(OutputStream out, String errorMessage, int errorCode, String sqlState, String errorDetail) throws HiveException {
-        MapBuilder mb = MapBuilder.create().put("error", errorMessage);
-        if(errorDetail != null) {
-            mb.put("errorDetail", errorDetail);
-        }
-        mb.put("errorCode", errorCode);
-        if(sqlState != null) {
-          mb.put("sqlState", sqlState);
-        }
-        asJson(out,mb.build());
+  /**
+   * Write an error message.
+   */
+  @Override
+  public void error(OutputStream out, String msg, int errorCode, String sqlState)
+      throws HiveException
+      {
+    error(out, msg, errorCode, sqlState, null);
+      }
+  @Override
+  public void error(OutputStream out, String errorMessage, int errorCode, String sqlState, String errorDetail) throws HiveException {
+    MapBuilder mb = MapBuilder.create().put("error", errorMessage);
+    if(errorDetail != null) {
+      mb.put("errorDetail", errorDetail);
+    }
+    mb.put("errorCode", errorCode);
+    if(sqlState != null) {
+      mb.put("sqlState", sqlState);
+    }
+    asJson(out,mb.build());
+  }
+
+  /**
+   * Show a list of tables.
+   */
+  @Override
+  public void showTables(DataOutputStream out, Set<String> tables)
+      throws HiveException {
+    asJson(out, MapBuilder.create().put("tables", tables).build());
+  }
+
+  /**
+   * Describe table.
+   */
+  @Override
+  public void describeTable(DataOutputStream out, String colPath,
+      String tableName, Table tbl, Partition part, List<FieldSchema> cols,
+      boolean isFormatted, boolean isExt, boolean isPretty,
+      boolean isOutputPadded) throws HiveException {
+    MapBuilder builder = MapBuilder.create();
+    builder.put("columns", makeColsUnformatted(cols));
+
+    if (isExt) {
+      if (part != null) {
+        builder.put("partitionInfo", part.getTPartition());
+      }
+      else {
+        builder.put("tableInfo", tbl.getTTable());
+      }
     }
 
-    /**
-     * Show a list of tables.
-     */
-    @Override
-    public void showTables(DataOutputStream out, Set<String> tables)
-        throws HiveException
-    {
-        asJson(out,
-               MapBuilder.create()
-               .put("tables", tables)
-               .build());
-    }
-
-    /**
-     * Describe table.
-     */
-    @Override
-    public void describeTable(DataOutputStream out,
-                              String colPath, String tableName,
-                              Table tbl, Partition part, List<FieldSchema> cols,
-                              boolean isFormatted, boolean isExt,
-                              boolean isPretty)
-        throws HiveException
-    {
-        MapBuilder builder = MapBuilder.create();
-
-        builder.put("columns", makeColsUnformatted(cols));
-
-        if (isExt) {
-            if (part != null)
-                builder.put("partitionInfo", part.getTPartition());
-            else
-                builder.put("tableInfo", tbl.getTTable());
-        }
+    asJson(out, builder.build());
+  }
 
-        asJson(out, builder.build());
+  private List<Map<String, Object>> makeColsUnformatted(List<FieldSchema> cols) {
+    ArrayList<Map<String, Object>> res = new ArrayList<Map<String, Object>>();
+    for (FieldSchema col : cols) {
+      res.add(makeOneColUnformatted(col));
+    }
+    return res;
+  }
+
+  private Map<String, Object> makeOneColUnformatted(FieldSchema col) {
+    return MapBuilder.create()
+        .put("name", col.getName())
+        .put("type", col.getType())
+        .put("comment", col.getComment())
+        .build();
+  }
+
+  @Override
+  public void showTableStatus(DataOutputStream out, Hive db, HiveConf conf,
+      List<Table> tbls, Map<String, String> part, Partition par)
+          throws HiveException {
+    asJson(out, MapBuilder.create().put(
+        "tables", makeAllTableStatus(db, conf, tbls, part, par)).build());
+  }
+
+  private List<Map<String, Object>> makeAllTableStatus(Hive db, HiveConf conf,
+      List<Table> tbls, Map<String, String> part, Partition par)
+          throws HiveException {
+    try {
+      ArrayList<Map<String, Object>> res = new ArrayList<Map<String, Object>>();
+      for (Table tbl : tbls) {
+        res.add(makeOneTableStatus(tbl, db, conf, part, par));
+      }
+      return res;
+    } catch(IOException e) {
+      throw new HiveException(e);
+    }
+  }
+
+  private Map<String, Object> makeOneTableStatus(Table tbl, Hive db,
+      HiveConf conf, Map<String, String> part, Partition par)
+          throws HiveException, IOException {
+    String tblLoc = null;
+    String inputFormattCls = null;
+    String outputFormattCls = null;
+    if (part != null) {
+      if (par != null) {
+        if (par.getLocation() != null) {
+          tblLoc = par.getDataLocation().toString();
+        }
+        inputFormattCls = par.getInputFormatClass().getName();
+        outputFormattCls = par.getOutputFormatClass().getName();
+      }
+    } else {
+      if (tbl.getPath() != null) {
+        tblLoc = tbl.getDataLocation().toString();
+      }
+      inputFormattCls = tbl.getInputFormatClass().getName();
+      outputFormattCls = tbl.getOutputFormatClass().getName();
     }
 
-    private List<Map<String, Object>> makeColsUnformatted(List<FieldSchema> cols) {
-        ArrayList<Map<String, Object>> res = new ArrayList<Map<String, Object>>();
-        for (FieldSchema col : cols)
-            res.add(makeOneColUnformatted(col));
-        return res;
-    }
-
-    private Map<String, Object> makeOneColUnformatted(FieldSchema col) {
-        return MapBuilder.create()
-            .put("name", col.getName())
-            .put("type", col.getType())
-            .put("comment", col.getComment())
-            .build();
-    }
-
-    @Override
-    public void showTableStatus(DataOutputStream out,
-                                Hive db,
-                                HiveConf conf,
-                                List<Table> tbls,
-                                Map<String, String> part,
-                                Partition par)
-        throws HiveException
-    {
-        asJson(out, MapBuilder
-               .create()
-               .put("tables", makeAllTableStatus(db, conf,
-                                                 tbls, part, par))
-               .build());
-    }
-
-    private List<Map<String, Object>> makeAllTableStatus(Hive db,
-                                    HiveConf conf,
-                                    List<Table> tbls,
-                                    Map<String, String> part,
-                                    Partition par)
-        throws HiveException
-    {
-        try {
-            ArrayList<Map<String, Object>> res = new ArrayList<Map<String, Object>>();
-            for (Table tbl : tbls)
-                res.add(makeOneTableStatus(tbl, db, conf, part, par));
-            return res;
-        } catch(IOException e) {
-            throw new HiveException(e);
-        }
+    MapBuilder builder = MapBuilder.create();
+
+    builder.put("tableName", tbl.getTableName());
+    builder.put("owner", tbl.getOwner());
+    builder.put("location", tblLoc);
+    builder.put("inputFormat", inputFormattCls);
+    builder.put("outputFormat", outputFormattCls);
+    builder.put("columns", makeColsUnformatted(tbl.getCols()));
+
+    builder.put("partitioned", tbl.isPartitioned());
+    if (tbl.isPartitioned()) {
+      builder.put("partitionColumns", makeColsUnformatted(tbl.getPartCols()));
     }
 
-    private Map<String, Object> makeOneTableStatus(Table tbl,
-                                   Hive db,
-                                   HiveConf conf,
-                                   Map<String, String> part,
-                                   Partition par)
-        throws HiveException, IOException
-    {
-        String tblLoc = null;
-        String inputFormattCls = null;
-        String outputFormattCls = null;
-        if (part != null) {
-          if (par != null) {
-            if (par.getLocation() != null) {
-              tblLoc = par.getDataLocation().toString();
-            }
-            inputFormattCls = par.getInputFormatClass().getName();
-            outputFormattCls = par.getOutputFormatClass().getName();
-          }
-        } else {
-          if (tbl.getPath() != null) {
-            tblLoc = tbl.getDataLocation().toString();
-          }
-          inputFormattCls = tbl.getInputFormatClass().getName();
-          outputFormattCls = tbl.getOutputFormatClass().getName();
-        }
+    putFileSystemsStats(builder, makeTableStatusLocations(tbl, db, par),
+        conf, tbl.getPath());
 
-        MapBuilder builder = MapBuilder.create();
+    return builder.build();
+  }
 
-        builder.put("tableName", tbl.getTableName());
-        builder.put("owner", tbl.getOwner());
-        builder.put("location", tblLoc);
-        builder.put("inputFormat", inputFormattCls);
-        builder.put("outputFormat", outputFormattCls);
-        builder.put("columns", makeColsUnformatted(tbl.getCols()));
-
-        builder.put("partitioned", tbl.isPartitioned());
-        if (tbl.isPartitioned())
-            builder.put("partitionColumns", makeColsUnformatted(tbl.getPartCols()));
-
-        putFileSystemsStats(builder, makeTableStatusLocations(tbl, db, par),
-                            conf, tbl.getPath());
-
-        return builder.build();
-    }
-
-    private List<Path> makeTableStatusLocations(Table tbl, Hive db, Partition par)
-        throws HiveException
-    {
-        // output file system information
-        Path tblPath = tbl.getPath();
-        List<Path> locations = new ArrayList<Path>();
-        if (tbl.isPartitioned()) {
-          if (par == null) {
-            for (Partition curPart : db.getPartitions(tbl)) {
-              if (curPart.getLocation() != null) {
-                locations.add(new Path(curPart.getLocation()));
-              }
-            }
-          } else {
-            if (par.getLocation() != null) {
-              locations.add(new Path(par.getLocation()));
-            }
-          }
-        } else {
-          if (tblPath != null) {
-            locations.add(tblPath);
+  private List<Path> makeTableStatusLocations(Table tbl, Hive db, Partition par)
+      throws HiveException {
+    // output file system information
+    Path tblPath = tbl.getPath();
+    List<Path> locations = new ArrayList<Path>();
+    if (tbl.isPartitioned()) {
+      if (par == null) {
+        for (Partition curPart : db.getPartitions(tbl)) {
+          if (curPart.getLocation() != null) {
+            locations.add(new Path(curPart.getLocation()));
           }
         }
+      } else {
+        if (par.getLocation() != null) {
+          locations.add(new Path(par.getLocation()));
+        }
+      }
+    } else {
+      if (tblPath != null) {
+        locations.add(tblPath);
+      }
+    }
+
+    return locations;
+  }
 
-        return locations;
+  // Duplicates logic in TextMetaDataFormatter
+  private void putFileSystemsStats(MapBuilder builder, List<Path> locations,
+      HiveConf conf, Path tblPath)
+          throws IOException {
+    long totalFileSize = 0;
+    long maxFileSize = 0;
+    long minFileSize = Long.MAX_VALUE;
+    long lastAccessTime = 0;
+    long lastUpdateTime = 0;
+    int numOfFiles = 0;
+
+    boolean unknown = false;
+    FileSystem fs = tblPath.getFileSystem(conf);
+    // in case all files in locations do not exist
+    try {
+      FileStatus tmpStatus = fs.getFileStatus(tblPath);
+      lastAccessTime = tmpStatus.getAccessTime();
+      lastUpdateTime = tmpStatus.getModificationTime();
+    } catch (IOException e) {
+      LOG.warn(
+          "Cannot access File System. File System status will be unknown: ", e);
+      unknown = true;
     }
 
-    // Duplicates logic in TextMetaDataFormatter
-    private void putFileSystemsStats(MapBuilder builder, List<Path> locations,
-                                     HiveConf conf, Path tblPath)
-        throws IOException
-    {
-      long totalFileSize = 0;
-      long maxFileSize = 0;
-      long minFileSize = Long.MAX_VALUE;
-      long lastAccessTime = 0;
-      long lastUpdateTime = 0;
-      int numOfFiles = 0;
-
-      boolean unknown = false;
-      FileSystem fs = tblPath.getFileSystem(conf);
-      // in case all files in locations do not exist
-      try {
-        FileStatus tmpStatus = fs.getFileStatus(tblPath);
-        lastAccessTime = tmpStatus.getAccessTime();
-        lastUpdateTime = tmpStatus.getModificationTime();
-      } catch (IOException e) {
-        LOG.warn(
-            "Cannot access File System. File System status will be unknown: ", e);
-        unknown = true;
-      }
-
-      if (!unknown) {
-        for (Path loc : locations) {
-          try {
-            FileStatus status = fs.getFileStatus(tblPath);
-            FileStatus[] files = fs.listStatus(loc);
-            long accessTime = status.getAccessTime();
-            long updateTime = status.getModificationTime();
-            // no matter loc is the table location or part location, it must be a
-            // directory.
-            if (!status.isDir()) {
+    if (!unknown) {
+      for (Path loc : locations) {
+        try {
+          FileStatus status = fs.getFileStatus(tblPath);
+          FileStatus[] files = fs.listStatus(loc);
+          long accessTime = status.getAccessTime();
+          long updateTime = status.getModificationTime();
+          // no matter loc is the table location or part location, it must be a
+          // directory.
+          if (!status.isDir()) {
+            continue;
+          }
+          if (accessTime > lastAccessTime) {
+            lastAccessTime = accessTime;
+          }
+          if (updateTime > lastUpdateTime) {
+            lastUpdateTime = updateTime;
+          }
+          for (FileStatus currentStatus : files) {
+            if (currentStatus.isDir()) {
               continue;
             }
+            numOfFiles++;
+            long fileLen = currentStatus.getLen();
+            totalFileSize += fileLen;
+            if (fileLen > maxFileSize) {
+              maxFileSize = fileLen;
+            }
+            if (fileLen < minFileSize) {
+              minFileSize = fileLen;
+            }
+            accessTime = currentStatus.getAccessTime();
+            updateTime = currentStatus.getModificationTime();
             if (accessTime > lastAccessTime) {
               lastAccessTime = accessTime;
             }
             if (updateTime > lastUpdateTime) {
               lastUpdateTime = updateTime;
             }
-            for (FileStatus currentStatus : files) {
-              if (currentStatus.isDir()) {
-                continue;
-              }
-              numOfFiles++;
-              long fileLen = currentStatus.getLen();
-              totalFileSize += fileLen;
-              if (fileLen > maxFileSize) {
-                maxFileSize = fileLen;
-              }
-              if (fileLen < minFileSize) {
-                minFileSize = fileLen;
-              }
-              accessTime = currentStatus.getAccessTime();
-              updateTime = currentStatus.getModificationTime();
-              if (accessTime > lastAccessTime) {
-                lastAccessTime = accessTime;
-              }
-              if (updateTime > lastUpdateTime) {
-                lastUpdateTime = updateTime;
-              }
-            }
-          } catch (IOException e) {
-            // ignore
           }
+        } catch (IOException e) {
+          // ignore
         }
       }
-
-      builder
-          .put("totalNumberFiles", numOfFiles, ! unknown)
-          .put("totalFileSize",    totalFileSize, ! unknown)
-          .put("maxFileSize",      maxFileSize, ! unknown)
-          .put("minFileSize",      numOfFiles > 0 ? minFileSize : 0, ! unknown)
-          .put("lastAccessTime",   lastAccessTime, ! (unknown  || lastAccessTime < 0))
-          .put("lastUpdateTime",   lastUpdateTime, ! unknown);
-    }
-
-    /**
-     * Show the table partitions.
-     */
-    @Override
-    public void showTablePartitons(DataOutputStream out, List<String> parts)
-        throws HiveException
-    {
-        asJson(out,
-               MapBuilder.create()
-               .put("partitions", makeTablePartions(parts))
-               .build());
-    }
-
-    private List<Map<String, Object>> makeTablePartions(List<String> parts)
-        throws HiveException
-    {
-        try {
-            ArrayList<Map<String, Object>> res = new ArrayList<Map<String, Object>>();
-            for (String part : parts)
-                res.add(makeOneTablePartition(part));
-            return res;
-        } catch (UnsupportedEncodingException e) {
-            throw new HiveException(e);
-        }
     }
 
-    // This seems like a very wrong implementation.
-    private Map<String, Object> makeOneTablePartition(String partIdent)
-        throws UnsupportedEncodingException
-    {
-        ArrayList<Map<String, Object>> res = new ArrayList<Map<String, Object>>();
-
-        ArrayList<String> names = new ArrayList<String>();
-        for (String part : StringUtils.split(partIdent, "/")) {
-            String name = part;
-            String val = null;
-            String[] kv = StringUtils.split(part, "=", 2);
-            if (kv != null) {
-                name = kv[0];
-                if (kv.length > 1)
-                    val = URLDecoder.decode(kv[1], "UTF-8");
-            }
-            if (val != null)
-                names.add(name + "='" + val + "'");
-            else
-                names.add(name);
-
-            res.add(MapBuilder.create()
-                    .put("columnName", name)
-                    .put("columnValue", val)
-                    .build());
-        }
+    builder
+    .put("totalNumberFiles", numOfFiles, ! unknown)
+    .put("totalFileSize",    totalFileSize, ! unknown)
+    .put("maxFileSize",      maxFileSize, ! unknown)
+    .put("minFileSize",      numOfFiles > 0 ? minFileSize : 0, ! unknown)
+    .put("lastAccessTime",   lastAccessTime, ! (unknown  || lastAccessTime < 0))
+    .put("lastUpdateTime",   lastUpdateTime, ! unknown);
+  }
+
+  /**
+   * Show the table partitions.
+   */
+  @Override
+  public void showTablePartitons(DataOutputStream out, List<String> parts)
+      throws HiveException {
+    asJson(out, MapBuilder.create().put("partitions",
+        makeTablePartions(parts)).build());
+  }
 
-        return MapBuilder.create()
-            .put("name", StringUtils.join(names, ","))
-            .put("values", res)
-            .build();
-    }
-
-    /**
-     * Show a list of databases
-     */
-    @Override
-    public void showDatabases(DataOutputStream out, List<String> databases)
-        throws HiveException
-    {
-        asJson(out,
-               MapBuilder.create()
-               .put("databases", databases)
-               .build());
-    }
-
-    /**
-     * Show the description of a database
-     */
-    @Override
-    public void showDatabaseDescription(DataOutputStream out, String database, String comment,
-      String location, String ownerName, String ownerType, Map<String, String> params)
+  private List<Map<String, Object>> makeTablePartions(List<String> parts)
       throws HiveException {
-      MapBuilder builder = MapBuilder.create().put("database", database).put("comment", comment)
-        .put("location", location);
-      if (null != ownerName) {
-        builder.put("owner", ownerName);
+    try {
+      ArrayList<Map<String, Object>> res = new ArrayList<Map<String, Object>>();
+      for (String part : parts) {
+        res.add(makeOneTablePartition(part));
+      }
+      return res;
+    } catch (UnsupportedEncodingException e) {
+      throw new HiveException(e);
+    }
+  }
+
+  // This seems like a very wrong implementation.
+  private Map<String, Object> makeOneTablePartition(String partIdent)
+      throws UnsupportedEncodingException {
+    ArrayList<Map<String, Object>> res = new ArrayList<Map<String, Object>>();
+
+    ArrayList<String> names = new ArrayList<String>();
+    for (String part : StringUtils.split(partIdent, "/")) {
+      String name = part;
+      String val = null;
+      String[] kv = StringUtils.split(part, "=", 2);
+      if (kv != null) {
+        name = kv[0];
+        if (kv.length > 1)
+          val = URLDecoder.decode(kv[1], "UTF-8");
       }
-      if (null != ownerType) {
-        builder.put("ownerType", ownerType);
+      if (val != null) {
+        names.add(name + "='" + val + "'");
       }
-      if (null != params && !params.isEmpty()) {
-        builder.put("params", params);
+      else {
+        names.add(name);
       }
-      asJson(out, builder.build());
+
+      res.add(MapBuilder.create()
+          .put("columnName", name)
+          .put("columnValue", val)
+          .build());
+    }
+
+    return MapBuilder.create()
+        .put("name", StringUtils.join(names, ","))
+        .put("values", res)
+        .build();
+  }
+
+  /**
+   * Show a list of databases
+   */
+  @Override
+  public void showDatabases(DataOutputStream out, List<String> databases)
+      throws HiveException {
+    asJson(out, MapBuilder.create().put("databases", databases).build());
+  }
+
+  /**
+   * Show the description of a database
+   */
+  @Override
+  public void showDatabaseDescription(DataOutputStream out, String database, String comment,
+      String location, String ownerName, String ownerType, Map<String, String> params)
+          throws HiveException {
+    MapBuilder builder = MapBuilder.create().put("database", database).put("comment", comment)
+        .put("location", location);
+    if (null != ownerName) {
+      builder.put("owner", ownerName);
+    }
+    if (null != ownerType) {
+      builder.put("ownerType", ownerType);
+    }
+    if (null != params && !params.isEmpty()) {
+      builder.put("params", params);
     }
+    asJson(out, builder.build());
+  }
 }
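
JsonMetaDataFormatter builds every response the same way: assemble a Map with MapBuilder, then serialize it with asJson(). A minimal standalone sketch of that pattern, using a plain LinkedHashMap in place of Hive's MapBuilder and made-up column values, shows the JSON shape that describeTable()/makeColsUnformatted() produce:

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    import org.codehaus.jackson.map.ObjectMapper;

    public class JsonColumnsSketch {
      public static void main(String[] args) throws IOException {
        // One entry per column, mirroring what makeOneColUnformatted() builds with MapBuilder.
        List<Map<String, Object>> columns = new ArrayList<Map<String, Object>>();
        Map<String, Object> col = new LinkedHashMap<String, Object>();
        col.put("name", "user_id");   // hypothetical column, for illustration only
        col.put("type", "bigint");
        col.put("comment", null);
        columns.add(col);

        Map<String, Object> payload = new LinkedHashMap<String, Object>();
        payload.put("columns", columns);

        // asJson(): the Jackson 1.x ObjectMapper serializes the map straight to the stream.
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        new ObjectMapper().writeValue(out, payload);
        System.out.println(out.toString("UTF-8"));
        // -> {"columns":[{"name":"user_id","type":"bigint","comment":null}]}
      }
    }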

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java Fri Feb 28 02:13:45 2014
@@ -63,40 +63,81 @@ public final class MetaDataFormatUtils {
     columnInformation.append(LINE_DELIM);
   }
 
+  /**
+   * Write formatted information about the given columns to a string
+   * @param cols - list of columns
+   * @param printHeader - if header should be included
+   * @param isOutputPadded - make it more human readable by setting indentation
+   *        with spaces. Turned off for use by HiveServer2
+   * @return string with formatted column information
+   */
   public static String getAllColumnsInformation(List<FieldSchema> cols,
-      boolean printHeader) {
+      boolean printHeader, boolean isOutputPadded) {
     StringBuilder columnInformation = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE);
     if(printHeader){
       formatColumnsHeader(columnInformation);
     }
-    formatAllFields(columnInformation, cols);
+    formatAllFields(columnInformation, cols, isOutputPadded);
     return columnInformation.toString();
   }
 
-  public static String getAllColumnsInformation(List<FieldSchema> cols, List<FieldSchema> partCols,
-      boolean printHeader) {
+  /**
+   * Write formatted information about the given columns, including partition
+   * columns to a string
+   * @param cols - list of columns
+   * @param partCols - list of partition columns
+   * @param printHeader - if header should be included
+   * @param isOutputPadded - make it more human readable by setting indentation
+   *        with spaces. Turned off for use by HiveServer2
+   * @return string with formatted column information
+   */
+  public static String getAllColumnsInformation(List<FieldSchema> cols,
+      List<FieldSchema> partCols, boolean printHeader, boolean isOutputPadded) {
     StringBuilder columnInformation = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE);
     if(printHeader){
       formatColumnsHeader(columnInformation);
     }
-    formatAllFields(columnInformation, cols);
+    formatAllFields(columnInformation, cols, isOutputPadded);
 
     if ((partCols != null) && (!partCols.isEmpty())) {
       columnInformation.append(LINE_DELIM).append("# Partition Information")
-        .append(LINE_DELIM);
+      .append(LINE_DELIM);
       formatColumnsHeader(columnInformation);
-      formatAllFields(columnInformation, partCols);
+      formatAllFields(columnInformation, partCols, isOutputPadded);
     }
 
     return columnInformation.toString();
   }
 
-  private static void formatAllFields(StringBuilder tableInfo, List<FieldSchema> cols) {
+  /**
+   * Write formatted column information into given StringBuilder
+   * @param tableInfo - StringBuilder to append column information into
+   * @param cols - list of columns
+   * @param isOutputPadded - make it more human readable by setting indentation
+   *        with spaces. Turned off for use by HiveServer2
+   */
+  private static void formatAllFields(StringBuilder tableInfo,
+      List<FieldSchema> cols, boolean isOutputPadded) {
     for (FieldSchema col : cols) {
-      formatOutput(col.getName(), col.getType(), getComment(col), tableInfo);
+      if(isOutputPadded) {
+        formatWithIndentation(col.getName(), col.getType(), getComment(col), tableInfo);
+      }
+      else {
+        formatWithoutIndentation(col.getName(), col.getType(), col.getComment(), tableInfo);
+      }
     }
   }
 
+  private static void formatWithoutIndentation(String name, String type, String comment,
+      StringBuilder colBuffer) {
+    colBuffer.append(name);
+    colBuffer.append(FIELD_DELIM);
+    colBuffer.append(type);
+    colBuffer.append(FIELD_DELIM);
+    colBuffer.append(comment == null ? "" : comment);
+    colBuffer.append(LINE_DELIM);
+  }
+
   public static String getAllColumnsInformation(Index index) {
     StringBuilder indexInfo = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE);
 
@@ -133,7 +174,7 @@ public final class MetaDataFormatUtils {
     formatOutput(indexColumns.toArray(new String[0]), indexInfo);
 
     return indexInfo.toString();
-}
+  }
 
   public static String getPartitionInformation(Partition part) {
     StringBuilder tableInfo = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE);
@@ -176,7 +217,7 @@ public final class MetaDataFormatUtils {
   }
 
   private static void getStorageDescriptorInfo(StringBuilder tableInfo,
-                                               StorageDescriptor storageDesc) {
+      StorageDescriptor storageDesc) {
 
     formatOutput("SerDe Library:", storageDesc.getSerdeInfo().getSerializationLib(), tableInfo);
     formatOutput("InputFormat:", storageDesc.getInputFormat(), tableInfo);
@@ -293,13 +334,13 @@ public final class MetaDataFormatUtils {
   }
 
   private static void formatOutput(String name, String value,
-                                   StringBuilder tableInfo) {
+      StringBuilder tableInfo) {
     tableInfo.append(String.format("%-" + ALIGNMENT + "s", name)).append(FIELD_DELIM);
     tableInfo.append(String.format("%-" + ALIGNMENT + "s", value)).append(LINE_DELIM);
   }
 
-  private static void formatOutput(String colName, String colType, String colComment,
-                                   StringBuilder tableInfo) {
+  private static void formatWithIndentation(String colName, String colType, String colComment,
+      StringBuilder tableInfo) {
     tableInfo.append(String.format("%-" + ALIGNMENT + "s", colName)).append(FIELD_DELIM);
     tableInfo.append(String.format("%-" + ALIGNMENT + "s", colType)).append(FIELD_DELIM);
 
@@ -313,7 +354,7 @@ public final class MetaDataFormatUtils {
     int colTypeLength = ALIGNMENT > colType.length() ? ALIGNMENT : colType.length();
     for (int i = 1; i < commentSegments.length; i++) {
       tableInfo.append(String.format("%" + colNameLength + "s" + FIELD_DELIM + "%"
-        + colTypeLength + "s" + FIELD_DELIM + "%s", "", "", commentSegments[i])).append(LINE_DELIM);
+          + colTypeLength + "s" + FIELD_DELIM + "%s", "", "", commentSegments[i])).append(LINE_DELIM);
     }
   }
 

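The new isOutputPadded flag switches between the padded formatWithIndentation() path and the new formatWithoutIndentation() path above. The following self-contained sketch illustrates the difference; FIELD_DELIM, LINE_DELIM and ALIGNMENT are assumed here to be "\t", "\n" and 20, since their actual definitions in MetaDataFormatUtils are not part of this diff, and the comment-wrapping logic of formatWithIndentation() is omitted:

    public class ColumnFormatSketch {
      private static final String FIELD_DELIM = "\t";  // assumed value
      private static final String LINE_DELIM = "\n";   // assumed value
      private static final int ALIGNMENT = 20;         // assumed value

      // Simplified mirror of formatWithIndentation(): pad name and type so columns
      // line up for a human reader at the CLI.
      static void formatWithIndentation(String name, String type, String comment, StringBuilder buf) {
        buf.append(String.format("%-" + ALIGNMENT + "s", name)).append(FIELD_DELIM);
        buf.append(String.format("%-" + ALIGNMENT + "s", type)).append(FIELD_DELIM);
        buf.append(comment == null ? "" : comment).append(LINE_DELIM);
      }

      // Mirror of formatWithoutIndentation(): raw delimited fields, used when output
      // padding is off (e.g. for HiveServer2, per the javadoc above).
      static void formatWithoutIndentation(String name, String type, String comment, StringBuilder buf) {
        buf.append(name).append(FIELD_DELIM)
           .append(type).append(FIELD_DELIM)
           .append(comment == null ? "" : comment).append(LINE_DELIM);
      }

      public static void main(String[] args) {
        StringBuilder padded = new StringBuilder();
        StringBuilder plain = new StringBuilder();
        formatWithIndentation("user_id", "bigint", "primary key", padded);
        formatWithoutIndentation("user_id", "bigint", "primary key", plain);
        System.out.print(padded); // name and type padded to ALIGNMENT chars, then the comment
        System.out.print(plain);  // "user_id\tbigint\tprimary key\n"
      }
    }
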
Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java Fri Feb 28 02:13:45 2014
@@ -23,13 +23,13 @@ import java.io.OutputStream;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
 
 /**
  * Interface to format table and index information.  We can format it
@@ -37,64 +37,75 @@ import org.apache.hadoop.hive.ql.session
  * (json).
  */
 public interface MetaDataFormatter {
-    /**
-     * Write an error message.
-     * @param sqlState if {@code null}, will be ignored
-     */
-    public void error(OutputStream out, String msg, int errorCode, String sqlState)
-        throws HiveException;
+  /**
+   * Write an error message.
+   * @param sqlState if {@code null}, will be ignored
+   */
+  public void error(OutputStream out, String msg, int errorCode, String sqlState)
+      throws HiveException;
 
   /**
    * @param sqlState if {@code null}, will be skipped in output
    * @param errorDetail usually string version of some Exception, if {@code null}, will be ignored
    */
-    public void error(OutputStream out, String errorMessage, int errorCode, String sqlState, String errorDetail)
+  public void error(OutputStream out, String errorMessage, int errorCode, String sqlState, String errorDetail)
+      throws HiveException;
+
+  /**
+   * Show a list of tables.
+   */
+  public void showTables(DataOutputStream out, Set<String> tables)
+      throws HiveException;
+
+  /**
+   * Describe table.
+   * @param out
+   * @param colPath
+   * @param tableName
+   * @param tbl
+   * @param part
+   * @param cols
+   * @param isFormatted - describe with formatted keyword
+   * @param isExt
+   * @param isPretty
+   * @param isOutputPadded - if true, add spacing and indentation
+   * @throws HiveException
+   */
+  public void describeTable(DataOutputStream out, String colPath,
+      String tableName, Table tbl, Partition part, List<FieldSchema> cols,
+      boolean isFormatted, boolean isExt, boolean isPretty,
+      boolean isOutputPadded)
           throws HiveException;
 
-    /**
-     * Show a list of tables.
-     */
-    public void showTables(DataOutputStream out, Set<String> tables)
-        throws HiveException;
-
-    /**
-     * Describe table.
-     */
-    public void describeTable(DataOutputStream out,
-                              String colPath, String tableName,
-                              Table tbl, Partition part, List<FieldSchema> cols,
-                              boolean isFormatted, boolean isExt, boolean isPretty)
-        throws HiveException;
-
-   /**
-     * Show the table status.
-     */
-    public void showTableStatus(DataOutputStream out,
-                                Hive db,
-                                HiveConf conf,
-                                List<Table> tbls,
-                                Map<String, String> part,
-                                Partition par)
-        throws HiveException;
-
-    /**
-     * Show the table partitions.
-     */
-    public void showTablePartitons(DataOutputStream out,
-                                   List<String> parts)
-        throws HiveException;
-
-    /**
-     * Show the databases
-     */
-    public void showDatabases(DataOutputStream out, List<String> databases)
-        throws HiveException;
-
-    /**
-     * Describe a database.
-     */
-    public void showDatabaseDescription (DataOutputStream out, String database, String comment,
+  /**
+   * Show the table status.
+   */
+  public void showTableStatus(DataOutputStream out,
+      Hive db,
+      HiveConf conf,
+      List<Table> tbls,
+      Map<String, String> part,
+      Partition par)
+          throws HiveException;
+
+  /**
+   * Show the table partitions.
+   */
+  public void showTablePartitons(DataOutputStream out,
+      List<String> parts)
+          throws HiveException;
+
+  /**
+   * Show the databases
+   */
+  public void showDatabases(DataOutputStream out, List<String> databases)
+      throws HiveException;
+
+  /**
+   * Describe a database.
+   */
+  public void showDatabaseDescription (DataOutputStream out, String database, String comment,
       String location, String ownerName, String ownerType, Map<String, String> params)
-     throws HiveException;
+          throws HiveException;
 }
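
Callers program against this interface and pick one of the two implementations touched by this commit. A minimal sketch of the error() contract through the JSON implementation; the error code and SQL state values are invented for illustration, and passing null for errorDetail relies on the documented behaviour that null fields are skipped:

    import java.io.ByteArrayOutputStream;

    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.formatting.JsonMetaDataFormatter;
    import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatter;

    public class FormatterErrorSketch {
      public static void main(String[] args) throws HiveException {
        MetaDataFormatter formatter = new JsonMetaDataFormatter();
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        // error(out, errorMessage, errorCode, sqlState, errorDetail):
        // sqlState and errorDetail may be null and are then left out of the output.
        formatter.error(out, "Table not found", 10001, "42S02", null);
        // Prints a JSON object with "error", "errorCode" and "sqlState" fields.
        System.out.println(out.toString());
      }
    }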
 

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java Fri Feb 28 02:13:45 2014
@@ -48,416 +48,415 @@ import org.apache.hadoop.hive.ql.session
  * simple lines of text.
  */
 class TextMetaDataFormatter implements MetaDataFormatter {
-    private static final Log LOG = LogFactory.getLog(TextMetaDataFormatter.class);
+  private static final Log LOG = LogFactory.getLog(TextMetaDataFormatter.class);
 
-    private static final int separator = Utilities.tabCode;
-    private static final int terminator = Utilities.newLineCode;
+  private static final int separator = Utilities.tabCode;
+  private static final int terminator = Utilities.newLineCode;
 
-    /** The number of columns to be used in pretty formatting metadata output.
-     * If -1, then the current terminal width is auto-detected and used.
-     */
-    private final int prettyOutputNumCols;
+  /** The number of columns to be used in pretty formatting metadata output.
+   * If -1, then the current terminal width is auto-detected and used.
+   */
+  private final int prettyOutputNumCols;
 
-    public TextMetaDataFormatter(int prettyOutputNumCols) {
-      this.prettyOutputNumCols = prettyOutputNumCols;
-    }
+  public TextMetaDataFormatter(int prettyOutputNumCols) {
+    this.prettyOutputNumCols = prettyOutputNumCols;
+  }
 
-    /**
-     * Write an error message.
-     */
-    @Override
-    public void error(OutputStream out, String msg, int errorCode, String sqlState)
-        throws HiveException
-    {
-        error(out, msg, errorCode, sqlState, null);
-    }
+  /**
+   * Write an error message.
+   */
+  @Override
+  public void error(OutputStream out, String msg, int errorCode, String sqlState)
+      throws HiveException
+      {
+    error(out, msg, errorCode, sqlState, null);
+      }
 
-    @Override
-    public void error(OutputStream out, String errorMessage, int errorCode, String sqlState, String errorDetail)
-          throws HiveException
-    {
-      try {
-        out.write(errorMessage.getBytes("UTF-8"));
-        if(errorDetail != null) {
-          out.write(errorDetail.getBytes("UTF-8"));
-        }
-        out.write(errorCode);
-        if(sqlState != null) {
-          out.write(sqlState.getBytes("UTF-8"));//this breaks all the tests in .q files
-        }
-        out.write(terminator);
-      } catch (Exception e) {
-          throw new HiveException(e);
-        }
+  @Override
+  public void error(OutputStream out, String errorMessage, int errorCode, String sqlState, String errorDetail)
+      throws HiveException
+      {
+    try {
+      out.write(errorMessage.getBytes("UTF-8"));
+      if(errorDetail != null) {
+        out.write(errorDetail.getBytes("UTF-8"));
+      }
+      out.write(errorCode);
+      if(sqlState != null) {
+        out.write(sqlState.getBytes("UTF-8"));//this breaks all the tests in .q files
+      }
+      out.write(terminator);
+    } catch (Exception e) {
+      throw new HiveException(e);
     }
-    /**
-     * Show a list of tables.
-     */
-    @Override
-    public void showTables(DataOutputStream out, Set<String> tables)
-        throws HiveException
-    {
-        Iterator<String> iterTbls = tables.iterator();
-
-        try {
-            while (iterTbls.hasNext()) {
-                // create a row per table name
-                out.writeBytes(iterTbls.next());
-                out.write(terminator);
-            }
-        } catch (IOException e) {
-           throw new HiveException(e);
-        }
+      }
+  /**
+   * Show a list of tables.
+   */
+  @Override
+  public void showTables(DataOutputStream out, Set<String> tables)
+      throws HiveException
+      {
+    Iterator<String> iterTbls = tables.iterator();
+
+    try {
+      while (iterTbls.hasNext()) {
+        // create a row per table name
+        out.writeBytes(iterTbls.next());
+        out.write(terminator);
+      }
+    } catch (IOException e) {
+      throw new HiveException(e);
     }
+      }
 
-    @Override
-    public void describeTable(DataOutputStream outStream,
-                              String colPath, String tableName,
-                              Table tbl, Partition part, List<FieldSchema> cols,
-                              boolean isFormatted, boolean isExt, boolean isPretty)
-         throws HiveException {
-        try {
-          String output;
-          if (colPath.equals(tableName)) {
-            List<FieldSchema> partCols = tbl.isPartitioned() ? tbl.getPartCols() : null;
-            output = isPretty ?
-                MetaDataPrettyFormatUtils.getAllColumnsInformation(
-                    cols, partCols, prettyOutputNumCols)
+  @Override
+  public void describeTable(DataOutputStream outStream,  String colPath,
+      String tableName, Table tbl, Partition part, List<FieldSchema> cols,
+      boolean isFormatted, boolean isExt, boolean isPretty,
+      boolean isOutputPadded) throws HiveException {
+    try {
+      String output;
+      if (colPath.equals(tableName)) {
+        List<FieldSchema> partCols = tbl.isPartitioned() ? tbl.getPartCols() : null;
+        output = isPretty ?
+            MetaDataPrettyFormatUtils.getAllColumnsInformation(
+                cols, partCols, prettyOutputNumCols)
                 :
-                MetaDataFormatUtils.getAllColumnsInformation(cols, partCols, isFormatted);
+                  MetaDataFormatUtils.getAllColumnsInformation(cols, partCols, isFormatted, isOutputPadded);
+      } else {
+        output = MetaDataFormatUtils.getAllColumnsInformation(cols, isFormatted, isOutputPadded);
+      }
+      outStream.write(output.getBytes("UTF-8"));
+
+      if (tableName.equals(colPath)) {
+        if (isFormatted) {
+          if (part != null) {
+            output = MetaDataFormatUtils.getPartitionInformation(part);
           } else {
-            output = MetaDataFormatUtils.getAllColumnsInformation(cols, isFormatted);
+            output = MetaDataFormatUtils.getTableInformation(tbl);
           }
           outStream.write(output.getBytes("UTF-8"));
+        }
 
-          if (tableName.equals(colPath)) {
-            if (isFormatted) {
-              if (part != null) {
-                output = MetaDataFormatUtils.getPartitionInformation(part);
-              } else {
-                output = MetaDataFormatUtils.getTableInformation(tbl);
-              }
-              outStream.write(output.getBytes("UTF-8"));
-            }
-
-          // if extended desc table then show the complete details of the table
-            if (isExt) {
-              // add empty line
-              outStream.write(terminator);
-              if (part != null) {
-                // show partition information
-                outStream.writeBytes("Detailed Partition Information");
-                outStream.write(separator);
-                outStream.write(part.getTPartition().toString().getBytes("UTF-8"));
-                outStream.write(separator);
-                // comment column is empty
-                outStream.write(terminator);
-              } else {
-                // show table information
-                outStream.writeBytes("Detailed Table Information");
-                outStream.write(separator);
-                outStream.write(tbl.getTTable().toString().getBytes("UTF-8"));
-                outStream.write(separator);
-                outStream.write(terminator);
-              }
-            }
+        // if extended desc table then show the complete details of the table
+        if (isExt) {
+          // add empty line
+          outStream.write(terminator);
+          if (part != null) {
+            // show partition information
+            outStream.writeBytes("Detailed Partition Information");
+            outStream.write(separator);
+            outStream.write(part.getTPartition().toString().getBytes("UTF-8"));
+            outStream.write(separator);
+            // comment column is empty
+            outStream.write(terminator);
+          } else {
+            // show table information
+            outStream.writeBytes("Detailed Table Information");
+            outStream.write(separator);
+            outStream.write(tbl.getTTable().toString().getBytes("UTF-8"));
+            outStream.write(separator);
+            outStream.write(terminator);
           }
-        } catch (IOException e) {
-          throw new HiveException(e);
         }
+      }
+    } catch (IOException e) {
+      throw new HiveException(e);
     }
+  }
 
-    @Override
-    public void showTableStatus(DataOutputStream outStream,
-                                Hive db,
-                                HiveConf conf,
-                                List<Table> tbls,
-                                Map<String, String> part,
-                                Partition par)
-        throws HiveException
-    {
-        try {
-            Iterator<Table> iterTables = tbls.iterator();
-            while (iterTables.hasNext()) {
-              // create a row per table name
-              Table tbl = iterTables.next();
-              String tableName = tbl.getTableName();
-              String tblLoc = null;
-              String inputFormattCls = null;
-              String outputFormattCls = null;
-              if (part != null) {
-                if (par != null) {
-                  if (par.getLocation() != null) {
-                    tblLoc = par.getDataLocation().toString();
-                  }
-                  inputFormattCls = par.getInputFormatClass().getName();
-                  outputFormattCls = par.getOutputFormatClass().getName();
-                }
-              } else {
-                if (tbl.getPath() != null) {
-                  tblLoc = tbl.getDataLocation().toString();
-                }
-                inputFormattCls = tbl.getInputFormatClass().getName();
-                outputFormattCls = tbl.getOutputFormatClass().getName();
-              }
-
-              String owner = tbl.getOwner();
-              List<FieldSchema> cols = tbl.getCols();
-              String ddlCols = MetaStoreUtils.getDDLFromFieldSchema("columns", cols);
-              boolean isPartitioned = tbl.isPartitioned();
-              String partitionCols = "";
-              if (isPartitioned) {
-                partitionCols = MetaStoreUtils.getDDLFromFieldSchema(
-                    "partition_columns", tbl.getPartCols());
-              }
+  @Override
+  public void showTableStatus(DataOutputStream outStream,
+      Hive db,
+      HiveConf conf,
+      List<Table> tbls,
+      Map<String, String> part,
+      Partition par)
+          throws HiveException
+          {
+    try {
+      Iterator<Table> iterTables = tbls.iterator();
+      while (iterTables.hasNext()) {
+        // create a row per table name
+        Table tbl = iterTables.next();
+        String tableName = tbl.getTableName();
+        String tblLoc = null;
+        String inputFormattCls = null;
+        String outputFormattCls = null;
+        if (part != null) {
+          if (par != null) {
+            if (par.getLocation() != null) {
+              tblLoc = par.getDataLocation().toString();
+            }
+            inputFormattCls = par.getInputFormatClass().getName();
+            outputFormattCls = par.getOutputFormatClass().getName();
+          }
+        } else {
+          if (tbl.getPath() != null) {
+            tblLoc = tbl.getDataLocation().toString();
+          }
+          inputFormattCls = tbl.getInputFormatClass().getName();
+          outputFormattCls = tbl.getOutputFormatClass().getName();
+        }
 
-              outStream.writeBytes("tableName:" + tableName);
-              outStream.write(terminator);
-              outStream.writeBytes("owner:" + owner);
-              outStream.write(terminator);
-              outStream.writeBytes("location:" + tblLoc);
-              outStream.write(terminator);
-              outStream.writeBytes("inputformat:" + inputFormattCls);
-              outStream.write(terminator);
-              outStream.writeBytes("outputformat:" + outputFormattCls);
-              outStream.write(terminator);
-              outStream.writeBytes("columns:" + ddlCols);
-              outStream.write(terminator);
-              outStream.writeBytes("partitioned:" + isPartitioned);
-              outStream.write(terminator);
-              outStream.writeBytes("partitionColumns:" + partitionCols);
-              outStream.write(terminator);
-              // output file system information
-              Path tblPath = tbl.getPath();
-              List<Path> locations = new ArrayList<Path>();
-              if (isPartitioned) {
-                if (par == null) {
-                  for (Partition curPart : db.getPartitions(tbl)) {
-                    if (curPart.getLocation() != null) {
-                      locations.add(new Path(curPart.getLocation()));
-                    }
-                  }
-                } else {
-                  if (par.getLocation() != null) {
-                    locations.add(new Path(par.getLocation()));
-                  }
-                }
-              } else {
-                if (tblPath != null) {
-                  locations.add(tblPath);
-                }
+        String owner = tbl.getOwner();
+        List<FieldSchema> cols = tbl.getCols();
+        String ddlCols = MetaStoreUtils.getDDLFromFieldSchema("columns", cols);
+        boolean isPartitioned = tbl.isPartitioned();
+        String partitionCols = "";
+        if (isPartitioned) {
+          partitionCols = MetaStoreUtils.getDDLFromFieldSchema(
+              "partition_columns", tbl.getPartCols());
+        }
+
+        outStream.writeBytes("tableName:" + tableName);
+        outStream.write(terminator);
+        outStream.writeBytes("owner:" + owner);
+        outStream.write(terminator);
+        outStream.writeBytes("location:" + tblLoc);
+        outStream.write(terminator);
+        outStream.writeBytes("inputformat:" + inputFormattCls);
+        outStream.write(terminator);
+        outStream.writeBytes("outputformat:" + outputFormattCls);
+        outStream.write(terminator);
+        outStream.writeBytes("columns:" + ddlCols);
+        outStream.write(terminator);
+        outStream.writeBytes("partitioned:" + isPartitioned);
+        outStream.write(terminator);
+        outStream.writeBytes("partitionColumns:" + partitionCols);
+        outStream.write(terminator);
+        // output file system information
+        Path tblPath = tbl.getPath();
+        List<Path> locations = new ArrayList<Path>();
+        if (isPartitioned) {
+          if (par == null) {
+            for (Partition curPart : db.getPartitions(tbl)) {
+              if (curPart.getLocation() != null) {
+                locations.add(new Path(curPart.getLocation()));
               }
-              if (!locations.isEmpty()) {
-                writeFileSystemStats(outStream, conf, locations, tblPath, false, 0);
-              }
-
-              outStream.write(terminator);
             }
-        } catch (IOException e) {
-            throw new HiveException(e);
+          } else {
+            if (par.getLocation() != null) {
+              locations.add(new Path(par.getLocation()));
+            }
+          }
+        } else {
+          if (tblPath != null) {
+            locations.add(tblPath);
+          }
+        }
+        if (!locations.isEmpty()) {
+          writeFileSystemStats(outStream, conf, locations, tblPath, false, 0);
         }
+
+        outStream.write(terminator);
+      }
+    } catch (IOException e) {
+      throw new HiveException(e);
     }
+          }
 
-    private void writeFileSystemStats(DataOutputStream outStream,
-                                      HiveConf conf,
-                                      List<Path> locations,
-                                      Path tblPath, boolean partSpecified, int indent)
-        throws IOException
-    {
-      long totalFileSize = 0;
-      long maxFileSize = 0;
-      long minFileSize = Long.MAX_VALUE;
-      long lastAccessTime = 0;
-      long lastUpdateTime = 0;
-      int numOfFiles = 0;
-
-      boolean unknown = false;
-      FileSystem fs = tblPath.getFileSystem(conf);
-      // in case all files in locations do not exist
-      try {
-        FileStatus tmpStatus = fs.getFileStatus(tblPath);
-        lastAccessTime = tmpStatus.getAccessTime();
-        lastUpdateTime = tmpStatus.getModificationTime();
-        if (partSpecified) {
-          // check whether the part exists or not in fs
-          tmpStatus = fs.getFileStatus(locations.get(0));
-        }
-      } catch (IOException e) {
-        LOG.warn(
-            "Cannot access File System. File System status will be unknown: ", e);
-        unknown = true;
-      }
-
-      if (!unknown) {
-        for (Path loc : locations) {
-          try {
-            FileStatus status = fs.getFileStatus(tblPath);
-            FileStatus[] files = fs.listStatus(loc);
-            long accessTime = status.getAccessTime();
-            long updateTime = status.getModificationTime();
-            // no matter loc is the table location or part location, it must be a
-            // directory.
-            if (!status.isDir()) {
+  private void writeFileSystemStats(DataOutputStream outStream,
+      HiveConf conf,
+      List<Path> locations,
+      Path tblPath, boolean partSpecified, int indent)
+          throws IOException
+          {
+    long totalFileSize = 0;
+    long maxFileSize = 0;
+    long minFileSize = Long.MAX_VALUE;
+    long lastAccessTime = 0;
+    long lastUpdateTime = 0;
+    int numOfFiles = 0;
+
+    boolean unknown = false;
+    FileSystem fs = tblPath.getFileSystem(conf);
+    // in case all files in locations do not exist
+    try {
+      FileStatus tmpStatus = fs.getFileStatus(tblPath);
+      lastAccessTime = tmpStatus.getAccessTime();
+      lastUpdateTime = tmpStatus.getModificationTime();
+      if (partSpecified) {
+        // check whether the part exists or not in fs
+        tmpStatus = fs.getFileStatus(locations.get(0));
+      }
+    } catch (IOException e) {
+      LOG.warn(
+          "Cannot access File System. File System status will be unknown: ", e);
+      unknown = true;
+    }
+
+    if (!unknown) {
+      for (Path loc : locations) {
+        try {
+          FileStatus status = fs.getFileStatus(tblPath);
+          FileStatus[] files = fs.listStatus(loc);
+          long accessTime = status.getAccessTime();
+          long updateTime = status.getModificationTime();
+          // no matter loc is the table location or part location, it must be a
+          // directory.
+          if (!status.isDir()) {
+            continue;
+          }
+          if (accessTime > lastAccessTime) {
+            lastAccessTime = accessTime;
+          }
+          if (updateTime > lastUpdateTime) {
+            lastUpdateTime = updateTime;
+          }
+          for (FileStatus currentStatus : files) {
+            if (currentStatus.isDir()) {
               continue;
             }
+            numOfFiles++;
+            long fileLen = currentStatus.getLen();
+            totalFileSize += fileLen;
+            if (fileLen > maxFileSize) {
+              maxFileSize = fileLen;
+            }
+            if (fileLen < minFileSize) {
+              minFileSize = fileLen;
+            }
+            accessTime = currentStatus.getAccessTime();
+            updateTime = currentStatus.getModificationTime();
             if (accessTime > lastAccessTime) {
               lastAccessTime = accessTime;
             }
             if (updateTime > lastUpdateTime) {
               lastUpdateTime = updateTime;
             }
-            for (FileStatus currentStatus : files) {
-              if (currentStatus.isDir()) {
-                continue;
-              }
-              numOfFiles++;
-              long fileLen = currentStatus.getLen();
-              totalFileSize += fileLen;
-              if (fileLen > maxFileSize) {
-                maxFileSize = fileLen;
-              }
-              if (fileLen < minFileSize) {
-                minFileSize = fileLen;
-              }
-              accessTime = currentStatus.getAccessTime();
-              updateTime = currentStatus.getModificationTime();
-              if (accessTime > lastAccessTime) {
-                lastAccessTime = accessTime;
-              }
-              if (updateTime > lastUpdateTime) {
-                lastUpdateTime = updateTime;
-              }
-            }
-          } catch (IOException e) {
-            // ignore
           }
+        } catch (IOException e) {
+          // ignore
         }
       }
-      String unknownString = "unknown";
+    }
+    String unknownString = "unknown";
 
-      for (int k = 0; k < indent; k++) {
-        outStream.writeBytes(Utilities.INDENT);
-      }
-      outStream.writeBytes("totalNumberFiles:");
-      outStream.writeBytes(unknown ? unknownString : "" + numOfFiles);
-      outStream.write(terminator);
+    for (int k = 0; k < indent; k++) {
+      outStream.writeBytes(Utilities.INDENT);
+    }
+    outStream.writeBytes("totalNumberFiles:");
+    outStream.writeBytes(unknown ? unknownString : "" + numOfFiles);
+    outStream.write(terminator);
 
-      for (int k = 0; k < indent; k++) {
-        outStream.writeBytes(Utilities.INDENT);
-      }
-      outStream.writeBytes("totalFileSize:");
-      outStream.writeBytes(unknown ? unknownString : "" + totalFileSize);
-      outStream.write(terminator);
+    for (int k = 0; k < indent; k++) {
+      outStream.writeBytes(Utilities.INDENT);
+    }
+    outStream.writeBytes("totalFileSize:");
+    outStream.writeBytes(unknown ? unknownString : "" + totalFileSize);
+    outStream.write(terminator);
 
-      for (int k = 0; k < indent; k++) {
-        outStream.writeBytes(Utilities.INDENT);
-      }
-      outStream.writeBytes("maxFileSize:");
-      outStream.writeBytes(unknown ? unknownString : "" + maxFileSize);
-      outStream.write(terminator);
+    for (int k = 0; k < indent; k++) {
+      outStream.writeBytes(Utilities.INDENT);
+    }
+    outStream.writeBytes("maxFileSize:");
+    outStream.writeBytes(unknown ? unknownString : "" + maxFileSize);
+    outStream.write(terminator);
 
-      for (int k = 0; k < indent; k++) {
-        outStream.writeBytes(Utilities.INDENT);
-      }
-      outStream.writeBytes("minFileSize:");
-      if (numOfFiles > 0) {
-        outStream.writeBytes(unknown ? unknownString : "" + minFileSize);
-      } else {
-        outStream.writeBytes(unknown ? unknownString : "" + 0);
-      }
-      outStream.write(terminator);
+    for (int k = 0; k < indent; k++) {
+      outStream.writeBytes(Utilities.INDENT);
+    }
+    outStream.writeBytes("minFileSize:");
+    if (numOfFiles > 0) {
+      outStream.writeBytes(unknown ? unknownString : "" + minFileSize);
+    } else {
+      outStream.writeBytes(unknown ? unknownString : "" + 0);
+    }
+    outStream.write(terminator);
 
-      for (int k = 0; k < indent; k++) {
-        outStream.writeBytes(Utilities.INDENT);
-      }
-      outStream.writeBytes("lastAccessTime:");
-      outStream.writeBytes((unknown || lastAccessTime < 0) ? unknownString : ""
-          + lastAccessTime);
-      outStream.write(terminator);
+    for (int k = 0; k < indent; k++) {
+      outStream.writeBytes(Utilities.INDENT);
+    }
+    outStream.writeBytes("lastAccessTime:");
+    outStream.writeBytes((unknown || lastAccessTime < 0) ? unknownString : ""
+        + lastAccessTime);
+    outStream.write(terminator);
 
-      for (int k = 0; k < indent; k++) {
-        outStream.writeBytes(Utilities.INDENT);
-      }
-      outStream.writeBytes("lastUpdateTime:");
-      outStream.writeBytes(unknown ? unknownString : "" + lastUpdateTime);
-      outStream.write(terminator);
-  }
+    for (int k = 0; k < indent; k++) {
+      outStream.writeBytes(Utilities.INDENT);
+    }
+    outStream.writeBytes("lastUpdateTime:");
+    outStream.writeBytes(unknown ? unknownString : "" + lastUpdateTime);
+    outStream.write(terminator);
+          }
 
-    /**
-     * Show the table partitions.
-     */
-    @Override
-    public void showTablePartitons(DataOutputStream outStream, List<String> parts)
-        throws HiveException
-    {
-        try {
-            for (String part : parts) {
-                // Partition names are URL encoded. We decode the names unless Hive
-                // is configured to use the encoded names.
-                SessionState ss = SessionState.get();
-                if (ss != null && ss.getConf() != null &&
-                      !ss.getConf().getBoolVar(HiveConf.ConfVars.HIVE_DECODE_PARTITION_NAME)) {
-                    outStream.writeBytes(part);
-                } else {
-                    outStream.writeBytes(FileUtils.unescapePathName(part));
-                }
-                outStream.write(terminator);
-            }
-        } catch (IOException e) {
-            throw new HiveException(e);
+  /**
+   * Show the table partitions.
+   */
+  @Override
+  public void showTablePartitons(DataOutputStream outStream, List<String> parts)
+      throws HiveException
+      {
+    try {
+      for (String part : parts) {
+        // Partition names are URL encoded. We decode the names unless Hive
+        // is configured to use the encoded names.
+        SessionState ss = SessionState.get();
+        if (ss != null && ss.getConf() != null &&
+            !ss.getConf().getBoolVar(HiveConf.ConfVars.HIVE_DECODE_PARTITION_NAME)) {
+          outStream.writeBytes(part);
+        } else {
+          outStream.writeBytes(FileUtils.unescapePathName(part));
         }
+        outStream.write(terminator);
+      }
+    } catch (IOException e) {
+      throw new HiveException(e);
     }
+      }
 
-    /**
-     * Show the list of databases
-     */
-    @Override
-    public void showDatabases(DataOutputStream outStream, List<String> databases)
-        throws HiveException
-        {
-        try {
-            for (String database : databases) {
-                // create a row per database name
-                outStream.writeBytes(database);
-                outStream.write(terminator);
-              }
-        } catch (IOException e) {
-            throw new HiveException(e);
-        }
+  /**
+   * Show the list of databases
+   */
+  @Override
+  public void showDatabases(DataOutputStream outStream, List<String> databases)
+      throws HiveException
+      {
+    try {
+      for (String database : databases) {
+        // create a row per database name
+        outStream.writeBytes(database);
+        outStream.write(terminator);
+      }
+    } catch (IOException e) {
+      throw new HiveException(e);
     }
+      }
 
-    /**
-     * Describe a database
-     */
-    @Override
-    public void showDatabaseDescription(DataOutputStream outStream, String database, String comment,
+  /**
+   * Describe a database
+   */
+  @Override
+  public void showDatabaseDescription(DataOutputStream outStream, String database, String comment,
       String location, String ownerName, String ownerType, Map<String, String> params)
-      throws HiveException {
-        try {
-            outStream.writeBytes(database);
-            outStream.write(separator);
-            if (comment != null) {
-              outStream.write(comment.getBytes("UTF-8"));
-            }
-            outStream.write(separator);
-            if (location != null) {
-              outStream.writeBytes(location);
-            }
-            outStream.write(separator);
-            if (ownerName != null) {
-              outStream.writeBytes(ownerName);
-            }
-            outStream.write(separator);
-            if (ownerType != null) {
-              outStream.writeBytes(ownerType);
-            }
-            outStream.write(separator);
-            if (params != null && !params.isEmpty()) {
-                outStream.writeBytes(params.toString());
-            }
-            outStream.write(terminator);
-        } catch (IOException e) {
-            throw new HiveException(e);
-        }
+          throws HiveException {
+    try {
+      outStream.writeBytes(database);
+      outStream.write(separator);
+      if (comment != null) {
+        outStream.write(comment.getBytes("UTF-8"));
+      }
+      outStream.write(separator);
+      if (location != null) {
+        outStream.writeBytes(location);
+      }
+      outStream.write(separator);
+      if (ownerName != null) {
+        outStream.writeBytes(ownerName);
+      }
+      outStream.write(separator);
+      if (ownerType != null) {
+        outStream.writeBytes(ownerType);
+      }
+      outStream.write(separator);
+      if (params != null && !params.isEmpty()) {
+        outStream.writeBytes(params.toString());
+      }
+      outStream.write(terminator);
+    } catch (IOException e) {
+      throw new HiveException(e);
     }
+  }
 }
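
The writeFileSystemStats helper above totals file counts and sizes and tracks the newest access and
modification times by listing each location through the Hadoop FileSystem API, then prints one
indented key:value line per statistic. A minimal standalone sketch of the same aggregation, with a
hypothetical directory path and no Hive dependencies, might look like this:

  // Sketch only: aggregate per-directory file statistics the same way
  // writeFileSystemStats does. The path below is hypothetical.
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class FsStatsSketch {
    public static void main(String[] args) throws Exception {
      Path loc = new Path("/tmp/example/table");               // hypothetical location
      FileSystem fs = loc.getFileSystem(new Configuration());
      long totalFileSize = 0, maxFileSize = 0, minFileSize = Long.MAX_VALUE;
      int numOfFiles = 0;
      for (FileStatus current : fs.listStatus(loc)) {
        if (current.isDir()) {
          continue;                                            // count plain files only
        }
        long len = current.getLen();
        totalFileSize += len;
        maxFileSize = Math.max(maxFileSize, len);
        minFileSize = Math.min(minFileSize, len);
        numOfFiles++;
      }
      System.out.println("totalNumberFiles:" + numOfFiles);
      System.out.println("totalFileSize:" + totalFileSize);
      System.out.println("maxFileSize:" + maxFileSize);
      System.out.println("minFileSize:" + (numOfFiles > 0 ? minFileSize : 0));
    }
  }

As in the patch, directories returned by listStatus are skipped and minFileSize is reported as 0
when no files were seen.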

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java Fri Feb 28 02:13:45 2014
@@ -88,6 +88,11 @@ public class SessionState {
    */
   protected boolean isVerbose;
 
+  /**
+   * Is the query served from HiveServer2
+   */
+  private boolean isHiveServerQuery = false;
+
   /*
    * HiveHistory Object
    */
@@ -193,6 +198,10 @@ public class SessionState {
     }
   }
 
+  public boolean isHiveServerQuery() {
+    return this.isHiveServerQuery;
+  }
+
   public void setIsSilent(boolean isSilent) {
     if(conf != null) {
       conf.setBoolVar(HiveConf.ConfVars.HIVESESSIONSILENT, isSilent);
@@ -208,6 +217,10 @@ public class SessionState {
     this.isVerbose = isVerbose;
   }
 
+  public void setIsHiveServerQuery(boolean isHiveServerQuery) {
+    this.isHiveServerQuery = isHiveServerQuery;
+  }
+
   public SessionState(HiveConf conf) {
     this(conf, null);
   }
@@ -330,7 +343,7 @@ public class SessionState {
         throw new RuntimeException(e);
       }
     } else {
-       LOG.info("No Tez session required at this point. hive.execution.engine=mr.");
+      LOG.info("No Tez session required at this point. hive.execution.engine=mr.");
     }
     return startSs;
   }
@@ -383,7 +396,7 @@ public class SessionState {
     if(LOG.isDebugEnabled()){
       Object authorizationClass = getAuthorizationMode() == AuthorizationMode.V1 ?
           getAuthorizer() : getAuthorizerV2();
-      LOG.debug("Session is using authorization class " + authorizationClass.getClass());
+          LOG.debug("Session is using authorization class " + authorizationClass.getClass());
     }
     return;
   }
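
The new isHiveServerQuery flag records whether this SessionState was created on behalf of a
HiveServer2 session; the HiveSessionImpl change later in this commit sets it to true. A hedged
sketch of how downstream code could branch on it (the class and method names below are
illustrative, not part of the patch):

  // Sketch only: reading the new SessionState flag.
  import org.apache.hadoop.hive.ql.session.SessionState;

  public class HiveServerQueryCheck {
    /** True when the current query is being served through HiveServer2. */
    public static boolean servedByHiveServer2() {
      SessionState ss = SessionState.get();
      return ss != null && ss.isHiveServerQuery();
    }
  }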

Modified: hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinEqualityTableContainer.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinEqualityTableContainer.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinEqualityTableContainer.java (original)
+++ hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinEqualityTableContainer.java Fri Feb 28 02:13:45 2014
@@ -38,7 +38,7 @@ public class TestMapJoinEqualityTableCon
   private MapJoinRowContainer rowContainer;
   @Before
   public void setup() throws Exception {
-    rowContainer = new MapJoinRowContainer();
+    rowContainer = new MapJoinEagerRowContainer();
     rowContainer.add(VALUE);
     container = new HashMapWrapper();
   }

Modified: hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinRowContainer.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinRowContainer.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinRowContainer.java (original)
+++ hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinRowContainer.java Fri Feb 28 02:13:45 2014
@@ -29,14 +29,14 @@ public class TestMapJoinRowContainer {
   
   @Test
   public void testSerialization() throws Exception {
-    MapJoinRowContainer container1 = new MapJoinRowContainer();
+    MapJoinRowContainer container1 = new MapJoinEagerRowContainer();
     container1.add(new Object[]{ new Text("f0"), null, new ShortWritable((short)0xf)});
     container1.add(Arrays.asList(new Object[]{ null, new Text("f1"), new ShortWritable((short)0xf)}));
     container1.add(new Object[]{ null, null, new ShortWritable((short)0xf)});
     container1.add(Arrays.asList(new Object[]{ new Text("f0"), new Text("f1"), new ShortWritable((short)0x1)}));
     MapJoinRowContainer container2 = Utilities.serde(container1, "f0,f1,filter", "string,string,smallint");
     Utilities.testEquality(container1, container2);
-    Assert.assertEquals(4, container1.size());
+    Assert.assertEquals(4, container1.rowCount());
     Assert.assertEquals(1, container2.getAliasFilter());
   }
 

Modified: hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinTableContainer.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinTableContainer.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinTableContainer.java (original)
+++ hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestMapJoinTableContainer.java Fri Feb 28 02:13:45 2014
@@ -45,7 +45,7 @@ public class TestMapJoinTableContainer {
   @Before
   public void setup() throws Exception {
     key = new MapJoinKey(KEY);
-    rowContainer = new MapJoinRowContainer();
+    rowContainer = new MapJoinEagerRowContainer();
     rowContainer.add(VALUE);
     baos = new ByteArrayOutputStream();
     out = new ObjectOutputStream(baos);

Modified: hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestPTFRowContainer.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestPTFRowContainer.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestPTFRowContainer.java (original)
+++ hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestPTFRowContainer.java Fri Feb 28 02:13:45 2014
@@ -85,7 +85,7 @@ public class TestPTFRowContainer {
     }
 
     // test forward scan
-    assert(rc.size() == sz);
+    assert(rc.rowCount() == sz);
     i = 0;
     row = new ArrayList<Object>();
     row = rc.first();

Modified: hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/Utilities.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/Utilities.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/Utilities.java (original)
+++ hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/Utilities.java Fri Feb 28 02:13:45 2014
@@ -26,6 +26,7 @@ import java.util.Properties;
 
 import junit.framework.Assert;
 
+import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe;
 import org.apache.hadoop.io.BytesWritable;
@@ -62,20 +63,22 @@ class Utilities {
     result.read(context, in, new BytesWritable());
     return result;
   }
-  
-  
-  static void testEquality(MapJoinRowContainer container1, MapJoinRowContainer container2) {
-    Assert.assertEquals(container1.size(), container2.size());
-    List<Object> row1 = container1.first();
-    List<Object> row2 = container2.first();
-    for (; row1 != null && row2 != null; row1 = container1.next(), row2 = container2.next()) {
+
+
+  static void testEquality(MapJoinRowContainer container1, MapJoinRowContainer container2)
+      throws HiveException {
+    Assert.assertEquals(container1.rowCount(), container2.rowCount());
+    AbstractRowContainer.RowIterator<List<Object>> iter1 = container1.rowIter(),
+        iter2 = container2.rowIter();
+    for (List<Object> row1 = iter1.first(), row2 = iter2.first();
+        row1 != null && row2 != null; row1 = iter1.next(), row2 = iter2.next()) {
       Assert.assertEquals(row1, row2);
     }
   }
-  
-  static MapJoinRowContainer serde(MapJoinRowContainer container, String columns, String types) 
-  throws Exception {
-    MapJoinRowContainer result = new MapJoinRowContainer();
+
+  static MapJoinEagerRowContainer serde(
+      MapJoinRowContainer container, String columns, String types) throws Exception {
+    MapJoinEagerRowContainer result = new MapJoinEagerRowContainer();
     ByteArrayInputStream bais;
     ObjectInputStream in;
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
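
These test updates follow an API change in the row containers: size() becomes rowCount(), and
iteration moves from first()/next() on the container itself to an explicit RowIterator obtained via
rowIter(), as the rewritten testEquality above shows. A small sketch of the new iteration pattern
(package and class names assumed from the test layout):

  // Sketch only: iterating a row container through the new RowIterator API.
  import java.util.List;
  import org.apache.hadoop.hive.ql.exec.persistence.AbstractRowContainer;
  import org.apache.hadoop.hive.ql.exec.persistence.MapJoinEagerRowContainer;
  import org.apache.hadoop.hive.ql.metadata.HiveException;

  public class RowIterSketch {
    static long countRows(MapJoinEagerRowContainer container) throws HiveException {
      AbstractRowContainer.RowIterator<List<Object>> iter = container.rowIter();
      long seen = 0;
      for (List<Object> row = iter.first(); row != null; row = iter.next()) {
        seen++;                                  // process each row here
      }
      // rowCount() replaces the old size() accessor used by the previous tests.
      return seen == container.rowCount() ? seen : -1;
    }
  }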

Modified: hive/branches/tez/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q (original)
+++ hive/branches/tez/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q Fri Feb 28 02:13:45 2014
@@ -9,3 +9,7 @@ explain select srcpart.key from srcpart 
 explain select count(*) from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) group by ds;
 
 select count(*) from srcpart join src src on (srcpart.value=src.value) join src src1 on (srcpart.key=src1.key) group by ds;
+
+set hive.mapjoin.lazy.hashtable=false;
+
+select count(*) from srcpart join src src on (srcpart.value=src.value) join src src1 on (srcpart.key=src1.key) group by ds;

Modified: hive/branches/tez/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out (original)
+++ hive/branches/tez/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out Fri Feb 28 02:13:45 2014
@@ -187,3 +187,23 @@ POSTHOOK: Input: default@srcpart@ds=2008
 #### A masked pattern was here ####
 5308
 5308
+PREHOOK: query: select count(*) from srcpart join src src on (srcpart.value=src.value) join src src1 on (srcpart.key=src1.key) group by ds
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from srcpart join src src on (srcpart.value=src.value) join src src1 on (srcpart.key=src1.key) group by ds
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+5308
+5308

Modified: hive/branches/tez/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out (original)
+++ hive/branches/tez/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out Fri Feb 28 02:13:45 2014
@@ -203,3 +203,23 @@ POSTHOOK: Input: default@srcpart@ds=2008
 #### A masked pattern was here ####
 5308
 5308
+PREHOOK: query: select count(*) from srcpart join src src on (srcpart.value=src.value) join src src1 on (srcpart.key=src1.key) group by ds
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from srcpart join src src on (srcpart.value=src.value) join src src1 on (srcpart.key=src1.key) group by ds
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+5308
+5308

Modified: hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinarySerDe.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinarySerDe.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinarySerDe.java (original)
+++ hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinarySerDe.java Fri Feb 28 02:13:45 2014
@@ -61,6 +61,7 @@ import org.apache.hadoop.hive.serde2.obj
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.io.BinaryComparable;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
@@ -159,8 +160,8 @@ public class LazyBinarySerDe extends Abs
     if (byteArrayRef == null) {
       byteArrayRef = new ByteArrayRef();
     }
-    if (field instanceof BytesWritable) {
-      BytesWritable b = (BytesWritable) field;
+    if (field instanceof BinaryComparable) {
+      BinaryComparable b = (BinaryComparable) field;
       if (b.getLength() == 0) {
         return null;
       }
@@ -176,7 +177,7 @@ public class LazyBinarySerDe extends Abs
       cachedLazyBinaryStruct.init(byteArrayRef, 0, t.getLength());
     } else {
       throw new SerDeException(getClass().toString()
-          + ": expects either BytesWritable or Text object!");
+          + ": expects either BinaryComparable or Text object!");
     }
     lastOperationSerialize = false;
     lastOperationDeserialize = true;
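
Widening the check from BytesWritable to its superclass BinaryComparable lets deserialize() accept
any writable that exposes getBytes()/getLength(); both BytesWritable and Text qualify. A tiny
sketch illustrating the relationship (not part of the patch):

  // Sketch only: both standard binary writables satisfy the widened check.
  import org.apache.hadoop.io.BinaryComparable;
  import org.apache.hadoop.io.BytesWritable;
  import org.apache.hadoop.io.Text;

  public class BinaryComparableCheck {
    public static void main(String[] args) {
      Object bytes = new BytesWritable(new byte[] {1, 2, 3});
      Object text = new Text("abc");
      // Both print true, so the instanceof test in deserialize() accepts either.
      System.out.println(bytes instanceof BinaryComparable);
      System.out.println(text instanceof BinaryComparable);
    }
  }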

Modified: hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java (original)
+++ hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java Fri Feb 28 02:13:45 2014
@@ -30,6 +30,7 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.io.DateWritable;
 import org.apache.hadoop.hive.serde2.io.HiveCharWritable;
 import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
@@ -254,8 +255,35 @@ public final class ObjectInspectorUtils 
     return copyToStandardObject(o, oi, ObjectInspectorCopyOption.JAVA);
   }
 
-  public static Object copyToStandardObject(Object o, ObjectInspector oi,
-      ObjectInspectorCopyOption objectInspectorOption) {
+  public static int getStructSize(ObjectInspector oi) throws SerDeException {
+    if (oi.getCategory() != Category.STRUCT) {
+      throw new SerDeException("Unexpected category " + oi.getCategory());
+    }
+    return ((StructObjectInspector)oi).getAllStructFieldRefs().size();
+  }
+
+  public static void copyStructToArray(Object o, ObjectInspector oi,
+      ObjectInspectorCopyOption objectInspectorOption, Object[] dest, int offset)
+          throws SerDeException {
+    if (o == null) {
+      return;
+    }
+
+    if (oi.getCategory() != Category.STRUCT) {
+      throw new SerDeException("Unexpected category " + oi.getCategory());
+    }
+
+    StructObjectInspector soi = (StructObjectInspector) oi;
+    List<? extends StructField> fields = soi.getAllStructFieldRefs();
+    for (int i = 0; i < fields.size(); ++i) {
+      StructField f = fields.get(i);
+      dest[offset + i] = copyToStandardObject(soi.getStructFieldData(o, f), f
+          .getFieldObjectInspector(), objectInspectorOption);
+    }
+  }
+
+  public static Object copyToStandardObject(
+      Object o, ObjectInspector oi, ObjectInspectorCopyOption objectInspectorOption) {
     if (o == null) {
       return null;
     }
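
The new copyStructToArray copies one struct's field values into a caller-supplied Object[] starting
at a given offset, so several structs can share a single flat destination array, while
getStructSize tells the caller how many slots to reserve. A hedged usage sketch; the inspectors and
row below are built with standard factory helpers purely for illustration:

  // Sketch only: flatten a two-field struct into a shared destination array.
  // Field names, the offset, and the example row are arbitrary.
  import java.util.Arrays;
  import java.util.List;
  import org.apache.hadoop.hive.serde2.SerDeException;
  import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
  import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
  import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
  import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
  import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
  import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

  public class CopyStructSketch {
    public static void main(String[] args) throws SerDeException {
      StructObjectInspector soi = ObjectInspectorFactory.getStandardStructObjectInspector(
          Arrays.asList("k", "v"),
          Arrays.<ObjectInspector>asList(
              PrimitiveObjectInspectorFactory.javaStringObjectInspector,
              PrimitiveObjectInspectorFactory.javaIntObjectInspector));
      List<Object> row = Arrays.asList((Object) "key1", Integer.valueOf(42));
      // Reserve two leading slots for other fields, then the struct's own fields.
      Object[] dest = new Object[2 + ObjectInspectorUtils.getStructSize(soi)];
      ObjectInspectorUtils.copyStructToArray(
          row, soi, ObjectInspectorCopyOption.JAVA, dest, 2);
      System.out.println(Arrays.toString(dest));   // [null, null, key1, 42]
    }
  }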

Modified: hive/branches/tez/service/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/tez/service/pom.xml?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/service/pom.xml (original)
+++ hive/branches/tez/service/pom.xml Fri Feb 28 02:13:45 2014
@@ -55,6 +55,11 @@
       <artifactId>commons-cli</artifactId>
       <version>${commons-cli.version}</version>
     </dependency>
+    <dependency>
+      <groupId>net.sf.jpam</groupId>
+      <artifactId>jpam</artifactId>
+      <version>${jpam.version}</version>
+    </dependency>
     <!-- used by thrift generated code -->
     <dependency>
       <groupId>commons-lang</groupId>

Modified: hive/branches/tez/service/src/java/org/apache/hive/service/auth/AuthenticationProviderFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/service/src/java/org/apache/hive/service/auth/AuthenticationProviderFactory.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/service/src/java/org/apache/hive/service/auth/AuthenticationProviderFactory.java (original)
+++ hive/branches/tez/service/src/java/org/apache/hive/service/auth/AuthenticationProviderFactory.java Fri Feb 28 02:13:45 2014
@@ -23,6 +23,7 @@ public class AuthenticationProviderFacto
 
   public static enum AuthMethods {
     LDAP("LDAP"),
+    PAM("PAM"),
     CUSTOM("CUSTOM"),
     NONE("NONE");
 
@@ -50,14 +51,20 @@ public class AuthenticationProviderFacto
   }
 
   public static PasswdAuthenticationProvider getAuthenticationProvider(AuthMethods authMethod)
-            throws AuthenticationException {
+      throws AuthenticationException {
     if (authMethod.equals(AuthMethods.LDAP)) {
       return new LdapAuthenticationProviderImpl();
-    } else if (authMethod.equals(AuthMethods.CUSTOM)) {
+    }
+    else if (authMethod.equals(AuthMethods.PAM)) {
+      return new PamAuthenticationProviderImpl();
+    }
+    else if (authMethod.equals(AuthMethods.CUSTOM)) {
       return new CustomAuthenticationProviderImpl();
-    } else if (authMethod.equals(AuthMethods.NONE)) {
+    }
+    else if (authMethod.equals(AuthMethods.NONE)) {
       return new AnonymousAuthenticationProviderImpl();
-    } else {
+    }
+    else {
       throw new AuthenticationException("Unsupported authentication method");
     }
   }
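
The added PAM branch (backed by the jpam dependency introduced in service/pom.xml above) lets
HiveServer2 delegate password checks to the operating system's PAM stack. A hedged sketch of
resolving the provider through the factory; running the authentication for real needs a host with a
configured PAM service, and the Authenticate signature and exception type are assumed from the
PasswdAuthenticationProvider interface:

  // Sketch only: obtaining and exercising the new PAM provider.
  import javax.security.sasl.AuthenticationException;
  import org.apache.hive.service.auth.AuthenticationProviderFactory;
  import org.apache.hive.service.auth.AuthenticationProviderFactory.AuthMethods;
  import org.apache.hive.service.auth.PasswdAuthenticationProvider;

  public class PamAuthSketch {
    public static void main(String[] args) throws AuthenticationException {
      PasswdAuthenticationProvider provider =
          AuthenticationProviderFactory.getAuthenticationProvider(AuthMethods.PAM);
      // Throws AuthenticationException when the OS-level check rejects the user.
      provider.Authenticate("someUser", "somePassword");
    }
  }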

Modified: hive/branches/tez/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java?rev=1572806&r1=1572805&r2=1572806&view=diff
==============================================================================
--- hive/branches/tez/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java (original)
+++ hive/branches/tez/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java Fri Feb 28 02:13:45 2014
@@ -85,6 +85,7 @@ public class HiveSessionImpl implements 
     this.sessionHandle = new SessionHandle(protocol);
     this.hiveConf = new HiveConf(serverhiveConf);
 
+    //set conf properties specified by user from client side
     if (sessionConfMap != null) {
       for (Map.Entry<String, String> entry : sessionConfMap.entrySet()) {
         hiveConf.set(entry.getKey(), entry.getValue());
@@ -98,6 +99,7 @@ public class HiveSessionImpl implements 
         FetchFormatter.ThriftFormatter.class.getName());
     hiveConf.setInt(ListSinkOperator.OUTPUT_PROTOCOL, protocol.getValue());
     sessionState = new SessionState(hiveConf, username);
+    sessionState.setIsHiveServerQuery(true);
     SessionState.start(sessionState);
   }
 
@@ -210,12 +212,12 @@ public class HiveSessionImpl implements 
 
   private OperationHandle executeStatementInternal(String statement, Map<String, String> confOverlay,
       boolean runAsync)
-      throws HiveSQLException {
+          throws HiveSQLException {
     acquire();
 
     OperationManager operationManager = getOperationManager();
     ExecuteStatementOperation operation = operationManager
-          .newExecuteStatementOperation(getSession(), statement, confOverlay, runAsync);
+        .newExecuteStatementOperation(getSession(), statement, confOverlay, runAsync);
     OperationHandle opHandle = operation.getHandle();
     try {
       operation.run();
@@ -297,7 +299,7 @@ public class HiveSessionImpl implements 
   @Override
   public OperationHandle getTables(String catalogName, String schemaName, String tableName,
       List<String> tableTypes)
-      throws HiveSQLException {
+          throws HiveSQLException {
     acquire();
 
     OperationManager operationManager = getOperationManager();
@@ -346,9 +348,9 @@ public class HiveSessionImpl implements 
         catalogName, schemaName, tableName, columnName);
     OperationHandle opHandle = operation.getHandle();
     try {
-    operation.run();
-    opHandleSet.add(opHandle);
-    return opHandle;
+      operation.run();
+      opHandleSet.add(opHandle);
+      return opHandle;
     } catch (HiveSQLException e) {
       operationManager.closeOperation(opHandle);
       throw e;
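
The constructor changes above first apply the client-supplied session conf overlay to the
per-session HiveConf and then mark the SessionState as a HiveServer2 session via
setIsHiveServerQuery(true). A hedged sketch of building such an overlay map (the property chosen is
only an example):

  // Sketch only: a per-session conf overlay like the one HiveSessionImpl applies
  // to its private HiveConf before starting the SessionState.
  import java.util.HashMap;
  import java.util.Map;

  public class SessionConfOverlaySketch {
    public static Map<String, String> exampleOverlay() {
      Map<String, String> sessionConfMap = new HashMap<String, String>();
      // Any property the user may set can be overridden for just this session.
      sessionConfMap.put("hive.exec.parallel", "true");
      return sessionConfMap;
    }
  }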