Posted to commits@hive.apache.org by ha...@apache.org on 2014/04/25 07:50:36 UTC

svn commit: r1589931 - in /hive/trunk: contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/ hbase-handler/src/java/org/apache/hadoop/hive/hbase/ itests/util/src/main/java/org/apache/hadoop/hive/ql/io/udf/ ql/src/java/org/apache/hadoop/hiv...

Author: hashutosh
Date: Fri Apr 25 05:50:35 2014
New Revision: 1589931

URL: http://svn.apache.org/r1589931
Log:
HIVE-6952 : Hive 0.13 HiveOutputFormat breaks backwards compatibility (Ashutosh Chauhan via Prasanth J)

Added:
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/StatsProvidingRecordWriter.java
Removed:
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/FSRecordWriter.java
Modified:
    hive/trunk/contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/Base64TextOutputFormat.java
    hive/trunk/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java
    hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/io/udf/Rot13OutputFormat.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveBinaryOutputFormat.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveNullValueSequenceFileOutputFormat.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveOutputFormat.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HivePassThroughOutputFormat.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HivePassThroughRecordWriter.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveSequenceFileOutputFormat.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/avro/AvroContainerOutputFormat.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/avro/AvroGenericRecordWriter.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetOutputFormat.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/ParquetRecordWriterWrapper.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
    hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
    hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java

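In practical terms, this commit restores the Hive 0.12 contract: HiveOutputFormat.getHiveRecordWriter once again returns the nested org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter type instead of the FSRecordWriter type that 0.13 introduced, which broke source compatibility for third-party output formats. As a hedged illustration (the class below is hypothetical and not part of this commit), a custom format written against the restored API follows the same wrapping pattern as the Base64TextOutputFormat and Rot13OutputFormat changes further down:

    import java.io.IOException;
    import java.util.Properties;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
    import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.Writable;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.util.Progressable;

    // Hypothetical third-party format; compiles against both the 0.12 and
    // the post-HIVE-6952 APIs, since both use FileSinkOperator.RecordWriter.
    public class UpperCaseTextOutputFormat
        extends HiveIgnoreKeyTextOutputFormat<LongWritable, Text> {

      @Override
      public RecordWriter getHiveRecordWriter(JobConf jc, Path outPath,
          Class<? extends Writable> valueClass, boolean isCompressed,
          Properties tableProperties, Progressable progress) throws IOException {
        // Wrap the built-in text writer, as Base64TextOutputFormat does below.
        final RecordWriter inner = super.getHiveRecordWriter(jc, outPath,
            valueClass, isCompressed, tableProperties, progress);
        return new RecordWriter() {
          @Override
          public void write(Writable w) throws IOException {
            // Upper-case each row before handing it to the wrapped writer.
            inner.write(new Text(w.toString().toUpperCase()));
          }

          @Override
          public void close(boolean abort) throws IOException {
            inner.close(abort);
          }
        };
      }
    }
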
Modified: hive/trunk/contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/Base64TextOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/Base64TextOutputFormat.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/Base64TextOutputFormat.java (original)
+++ hive/trunk/contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/Base64TextOutputFormat.java Fri Apr 25 05:50:35 2014
@@ -24,7 +24,7 @@ import java.util.Properties;
 
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.ql.io.FSRecordWriter;
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
@@ -53,13 +53,13 @@ public class Base64TextOutputFormat<K ex
    * Base64RecordWriter.
    *
    */
-  public static class Base64RecordWriter implements FSRecordWriter,
+  public static class Base64RecordWriter implements RecordWriter,
       JobConfigurable {
 
-    FSRecordWriter writer;
+    RecordWriter writer;
     BytesWritable bytesWritable;
 
-    public Base64RecordWriter(FSRecordWriter writer) {
+    public Base64RecordWriter(RecordWriter writer) {
       this.writer = writer;
       bytesWritable = new BytesWritable();
     }
@@ -119,7 +119,7 @@ public class Base64TextOutputFormat<K ex
   }
 
   @Override
-  public FSRecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
+  public RecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
       Class<? extends Writable> valueClass, boolean isCompressed,
       Properties tableProperties, Progressable progress) throws IOException {
 

Modified: hive/trunk/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java (original)
+++ hive/trunk/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java Fri Apr 25 05:50:35 2014
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hive.ql.io.FSRecordWriter;
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.io.Text;
@@ -71,7 +71,7 @@ public class HiveHFileOutputFormat exten
   }
 
   @Override
-  public FSRecordWriter getHiveRecordWriter(
+  public RecordWriter getHiveRecordWriter(
     final JobConf jc,
     final Path finalOutPath,
     Class<? extends Writable> valueClass,
@@ -120,7 +120,7 @@ public class HiveHFileOutputFormat exten
       ++i;
     }
 
-    return new FSRecordWriter() {
+    return new RecordWriter() {
 
       @Override
       public void close(boolean abort) throws IOException {

Modified: hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/io/udf/Rot13OutputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/io/udf/Rot13OutputFormat.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/io/udf/Rot13OutputFormat.java (original)
+++ hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/io/udf/Rot13OutputFormat.java Fri Apr 25 05:50:35 2014
@@ -22,7 +22,7 @@ import java.io.IOException;
 import java.util.Properties;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.ql.io.FSRecordWriter;
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.LongWritable;
@@ -36,20 +36,20 @@ public class Rot13OutputFormat
   extends HiveIgnoreKeyTextOutputFormat<LongWritable,Text> {
 
   @Override
-  public FSRecordWriter
+  public RecordWriter
     getHiveRecordWriter(JobConf jc,
                         Path outPath,
                         Class<? extends Writable> valueClass,
                         boolean isCompressed,
                         Properties tableProperties,
                         Progressable progress) throws IOException {
-    final FSRecordWriter result =
+    final RecordWriter result =
       super.getHiveRecordWriter(jc,outPath,valueClass,isCompressed,
         tableProperties,progress);
     final Reporter reporter = (Reporter) progress;
     reporter.setStatus("got here");
     System.out.println("Got a reporter " + reporter);
-    return new FSRecordWriter() {
+    return new RecordWriter() {
       @Override
       public void write(Writable w) throws IOException {
         if (w instanceof Text) {

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java Fri Apr 25 05:50:35 2014
@@ -35,8 +35,7 @@ import org.apache.hadoop.hive.common.Fil
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.ErrorMsg;
-import org.apache.hadoop.hive.ql.io.FSRecordWriter;
-import org.apache.hadoop.hive.ql.io.FSRecordWriter.StatsProvidingRecordWriter;
+import org.apache.hadoop.hive.ql.io.StatsProvidingRecordWriter;
 import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
 import org.apache.hadoop.hive.ql.io.HiveKey;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
@@ -88,7 +87,7 @@ public class FileSinkOperator extends Te
   protected transient int dpStartCol; // start column # for DP columns
   protected transient List<String> dpVals; // array of values corresponding to DP columns
   protected transient List<Object> dpWritables;
-  protected transient FSRecordWriter[] rowOutWriters; // row specific RecordWriters
+  protected transient RecordWriter[] rowOutWriters; // row specific RecordWriters
   protected transient int maxPartitions;
   protected transient ListBucketingCtx lbCtx;
   protected transient boolean isSkewedStoredAsSubDirectories;
@@ -117,7 +116,7 @@ public class FileSinkOperator extends Te
     Path taskOutputTempPath;
     Path[] outPaths;
     Path[] finalPaths;
-    FSRecordWriter[] outWriters;
+    RecordWriter[] outWriters;
     Stat stat;
 
     public FSPaths() {
@@ -128,7 +127,7 @@ public class FileSinkOperator extends Te
       taskOutputTempPath = Utilities.toTaskTempPath(specPath);
       outPaths = new Path[numFiles];
       finalPaths = new Path[numFiles];
-      outWriters = new FSRecordWriter[numFiles];
+      outWriters = new RecordWriter[numFiles];
       stat = new Stat();
     }
 
@@ -150,11 +149,11 @@ public class FileSinkOperator extends Te
       }
     }
 
-    public void setOutWriters(FSRecordWriter[] out) {
+    public void setOutWriters(RecordWriter[] out) {
       outWriters = out;
     }
 
-    public FSRecordWriter[] getOutWriters() {
+    public RecordWriter[] getOutWriters() {
       return outWriters;
     }
 
@@ -599,7 +598,7 @@ public class FileSinkOperator extends Te
       }
 
 
-      FSRecordWriter rowOutWriter = null;
+      RecordWriter rowOutWriter = null;
 
       if (row_count != null) {
         row_count.set(row_count.get() + 1);
@@ -757,7 +756,7 @@ public class FileSinkOperator extends Te
           // since we are closing the previous fsp's record writers, we need to see if we can get
           // stats from the record writer and store in the previous fsp that is cached
           if (conf.isGatherStats() && isCollectRWStats) {
-            FSRecordWriter outWriter = prevFsp.outWriters[0];
+            RecordWriter outWriter = prevFsp.outWriters[0];
             if (outWriter != null) {
               SerDeStats stats = ((StatsProvidingRecordWriter) outWriter).getStats();
               if (stats != null) {
@@ -851,7 +850,7 @@ public class FileSinkOperator extends Te
         // accumulated statistics which will be aggregated in case of spray writers
         if (conf.isGatherStats() && isCollectRWStats) {
           for (int idx = 0; idx < fsp.outWriters.length; idx++) {
-            FSRecordWriter outWriter = fsp.outWriters[idx];
+            RecordWriter outWriter = fsp.outWriters[idx];
             if (outWriter != null) {
               SerDeStats stats = ((StatsProvidingRecordWriter) outWriter).getStats();
               if (stats != null) {

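The stats-collection path in the two hunks above is otherwise unchanged: when stats gathering is enabled and the writers support it, the operator casts each writer to the now top-level StatsProvidingRecordWriter and reads its SerDeStats. A condensed sketch of that pattern (the accumulation calls are paraphrased from the surrounding code, which the diff truncates):

    // Condensed from FileSinkOperator: harvest writer-side stats on close.
    if (conf.isGatherStats() && isCollectRWStats) {
      for (int idx = 0; idx < fsp.outWriters.length; idx++) {
        RecordWriter outWriter = fsp.outWriters[idx];
        if (outWriter != null) {
          SerDeStats stats = ((StatsProvidingRecordWriter) outWriter).getStats();
          if (stats != null) {
            // Accumulate per-writer row count and raw size into the FSPaths stat.
            fsp.stat.addToStat(StatsSetupConst.ROW_COUNT, stats.getRowCount());
            fsp.stat.addToStat(StatsSetupConst.RAW_DATA_SIZE, stats.getRawDataSize());
          }
        }
      }
    }
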
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java Fri Apr 25 05:50:35 2014
@@ -98,7 +98,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.HiveInterruptCallback;
 import org.apache.hadoop.hive.common.HiveInterruptUtils;
 import org.apache.hadoop.hive.common.HiveStatsUtils;
@@ -111,13 +110,13 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryPlan;
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.exec.mr.ExecDriver;
 import org.apache.hadoop.hive.ql.exec.mr.ExecMapper;
 import org.apache.hadoop.hive.ql.exec.mr.ExecReducer;
 import org.apache.hadoop.hive.ql.exec.mr.MapRedTask;
 import org.apache.hadoop.hive.ql.exec.tez.TezTask;
 import org.apache.hadoop.hive.ql.io.ContentSummaryInputFormat;
-import org.apache.hadoop.hive.ql.io.FSRecordWriter;
 import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
 import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
 import org.apache.hadoop.hive.ql.io.HiveInputFormat;
@@ -1774,7 +1773,7 @@ public final class Utilities {
 
     for (String p : paths) {
       Path path = new Path(p);
-      FSRecordWriter writer = HiveFileFormatUtils.getRecordWriter(
+      RecordWriter writer = HiveFileFormatUtils.getRecordWriter(
           jc, hiveOutputFormat, outputClass, isCompressed,
           tableInfo.getProperties(), path, reporter);
       writer.close(false);
@@ -3058,7 +3057,7 @@ public final class Utilities {
     String newFile = newDir + File.separator + "emptyFile";
     Path newFilePath = new Path(newFile);
 
-    FSRecordWriter recWriter = outFileFormat.newInstance().getHiveRecordWriter(job, newFilePath,
+    RecordWriter recWriter = outFileFormat.newInstance().getHiveRecordWriter(job, newFilePath,
         Text.class, false, props, null);
     if (dummyRow) {
       // empty files are omitted at CombineHiveInputFormat.

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java Fri Apr 25 05:50:35 2014
@@ -28,8 +28,8 @@ import java.util.Properties;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.io.FSRecordWriter;
 import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.PTFDeserializer;
@@ -240,7 +240,7 @@ public class PTFRowContainer<Row extends
   }
 
 
-  private static class PTFRecordWriter implements FSRecordWriter {
+  private static class PTFRecordWriter implements RecordWriter {
     BytesWritable EMPTY_KEY = new BytesWritable();
 
     SequenceFile.Writer outStream;
@@ -249,10 +249,12 @@ public class PTFRowContainer<Row extends
       this.outStream = outStream;
     }
 
+    @Override
     public void write(Writable r) throws IOException {
       outStream.append(EMPTY_KEY, r);
     }
 
+    @Override
     public void close(boolean abort) throws IOException {
       outStream.close();
     }
@@ -262,7 +264,7 @@ public class PTFRowContainer<Row extends
     extends HiveSequenceFileOutputFormat<K,V> {
 
     @Override
-    public FSRecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
+    public RecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
         Class<? extends Writable> valueClass, boolean isCompressed,
         Properties tableProperties, Progressable progress) throws IOException {
 

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java Fri Apr 25 05:50:35 2014
@@ -30,8 +30,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.io.FSRecordWriter;
 import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -106,7 +106,7 @@ public class RowContainer<ROW extends Li
   int acutalSplitNum = 0;
   int currentSplitPointer = 0;
   org.apache.hadoop.mapred.RecordReader rr = null; // record reader
-  FSRecordWriter rw = null;
+  RecordWriter rw = null;
   InputFormat<WritableComparable, Writable> inputFormat = null;
   InputSplit[] inputSplits = null;
   private ROW dummyRow = null;
@@ -213,7 +213,7 @@ public class RowContainer<ROW extends Li
         JobConf localJc = getLocalFSJobConfClone(jc);
         if (inputSplits == null) {
           if (this.inputFormat == null) {
-            inputFormat = (InputFormat<WritableComparable, Writable>) ReflectionUtils.newInstance(
+            inputFormat = ReflectionUtils.newInstance(
                 tblDesc.getInputFileFormatClass(), localJc);
           }
 
@@ -537,7 +537,7 @@ public class RowContainer<ROW extends Li
 
   }
 
-  protected FSRecordWriter getRecordWriter() {
+  protected RecordWriter getRecordWriter() {
     return rw;
   }
 

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java Fri Apr 25 05:50:35 2014
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.ql.io;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapred.Reporter;
@@ -239,6 +240,6 @@ public interface AcidOutputFormat<V> ext
    * @return a record writer
    * @throws IOException
    */
-  public FSRecordWriter getRawRecordWriter(Path path,
+  public RecordWriter getRawRecordWriter(Path path,
                                            Options options) throws IOException;
 }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveBinaryOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveBinaryOutputFormat.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveBinaryOutputFormat.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveBinaryOutputFormat.java Fri Apr 25 05:50:35 2014
@@ -24,6 +24,7 @@ import java.util.Properties;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
@@ -58,14 +59,15 @@ public class HiveBinaryOutputFormat<K ex
    * @return the RecordWriter
    */
   @Override
-  public FSRecordWriter getHiveRecordWriter(JobConf jc, Path outPath,
+  public RecordWriter getHiveRecordWriter(JobConf jc, Path outPath,
       Class<? extends Writable> valueClass, boolean isCompressed,
       Properties tableProperties, Progressable progress) throws IOException {
 
     FileSystem fs = outPath.getFileSystem(jc);
     final OutputStream outStream = fs.create(outPath);
 
-    return new FSRecordWriter() {
+    return new RecordWriter() {
+      @Override
       public void write(Writable r) throws IOException {
         if (r instanceof Text) {
           Text tr = (Text) r;
@@ -77,6 +79,7 @@ public class HiveBinaryOutputFormat<K ex
         }
       }
 
+      @Override
       public void close(boolean abort) throws IOException {
         outStream.close();
       }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java Fri Apr 25 05:50:35 2014
@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.io.HivePassThroughOutputFormat;
@@ -116,7 +117,7 @@ public final class HiveFileFormatUtils {
    * get a RealOutputFormatClassName corresponding to the HivePassThroughOutputFormat
    */
   @SuppressWarnings("unchecked")
-  public static String getRealOutputFormatClassName() 
+  public static String getRealOutputFormatClassName()
   {
     return realoutputFormat;
   }
@@ -245,7 +246,7 @@ public final class HiveFileFormatUtils {
     return true;
   }
 
-  public static FSRecordWriter getHiveRecordWriter(JobConf jc,
+  public static RecordWriter getHiveRecordWriter(JobConf jc,
       TableDesc tableInfo, Class<? extends Writable> outputClass,
       FileSinkDesc conf, Path outPath, Reporter reporter) throws HiveException {
     boolean storagehandlerofhivepassthru = false;
@@ -286,7 +287,7 @@ public final class HiveFileFormatUtils {
     }
   }
 
-  public static FSRecordWriter getRecordWriter(JobConf jc,
+  public static RecordWriter getRecordWriter(JobConf jc,
       HiveOutputFormat<?, ?> hiveOutputFormat,
       final Class<? extends Writable> valueClass, boolean isCompressed,
       Properties tableProp, Path outPath, Reporter reporter

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java Fri Apr 25 05:50:35 2014
@@ -24,6 +24,7 @@ import java.util.Properties;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.io.BytesWritable;
@@ -62,7 +63,7 @@ public class HiveIgnoreKeyTextOutputForm
    * @return the RecordWriter
    */
   @Override
-  public FSRecordWriter getHiveRecordWriter(JobConf jc, Path outPath,
+  public RecordWriter getHiveRecordWriter(JobConf jc, Path outPath,
       Class<? extends Writable> valueClass, boolean isCompressed,
       Properties tableProperties, Progressable progress) throws IOException {
     int rowSeparator = 0;
@@ -78,7 +79,8 @@ public class HiveIgnoreKeyTextOutputForm
     FileSystem fs = outPath.getFileSystem(jc);
     final OutputStream outStream = Utilities.createCompressedStream(jc, fs
         .create(outPath), isCompressed);
-    return new FSRecordWriter() {
+    return new RecordWriter() {
+      @Override
       public void write(Writable r) throws IOException {
         if (r instanceof Text) {
           Text tr = (Text) r;
@@ -92,6 +94,7 @@ public class HiveIgnoreKeyTextOutputForm
         }
       }
 
+      @Override
       public void close(boolean abort) throws IOException {
         outStream.close();
       }
@@ -107,10 +110,12 @@ public class HiveIgnoreKeyTextOutputForm
       this.mWriter = writer;
     }
 
+    @Override
     public synchronized void write(K key, V value) throws IOException {
       this.mWriter.write(null, value);
     }
 
+    @Override
     public void close(Reporter reporter) throws IOException {
       this.mWriter.close(reporter);
     }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveNullValueSequenceFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveNullValueSequenceFileOutputFormat.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveNullValueSequenceFileOutputFormat.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveNullValueSequenceFileOutputFormat.java Fri Apr 25 05:50:35 2014
@@ -23,6 +23,7 @@ import java.util.Properties;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.NullWritable;
@@ -47,7 +48,7 @@ public class HiveNullValueSequenceFileOu
   private boolean keyIsText;
 
   @Override
-  public FSRecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
+  public RecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
       Class<? extends Writable> valueClass, boolean isCompressed,
       Properties tableProperties, Progressable progress) throws IOException {
 
@@ -57,7 +58,8 @@ public class HiveNullValueSequenceFileOu
 
     keyWritable = new HiveKey();
     keyIsText = valueClass.equals(Text.class);
-    return new FSRecordWriter() {
+    return new RecordWriter() {
+      @Override
       public void write(Writable r) throws IOException {
         if (keyIsText) {
           Text text = (Text) r;
@@ -73,6 +75,7 @@ public class HiveNullValueSequenceFileOu
         outStream.append(keyWritable, NULL_WRITABLE);
       }
 
+      @Override
       public void close(boolean abort) throws IOException {
         outStream.close();
       }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveOutputFormat.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveOutputFormat.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveOutputFormat.java Fri Apr 25 05:50:35 2014
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.util.Properties;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.OutputFormat;
@@ -57,7 +58,7 @@ public interface HiveOutputFormat<K, V> 
    *          progress used for status report
    * @return the RecordWriter for the output file
    */
-  FSRecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
+  RecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
       final Class<? extends Writable> valueClass, boolean isCompressed,
       Properties tableProperties, Progressable progress) throws IOException;
 

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HivePassThroughOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HivePassThroughOutputFormat.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HivePassThroughOutputFormat.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HivePassThroughOutputFormat.java Fri Apr 25 05:50:35 2014
@@ -73,8 +73,7 @@ public class HivePassThroughOutputFormat
       throw new IOException(e);
     }
     OutputFormat<? super WritableComparable<?>, ? super Writable> actualOF =
-         (OutputFormat<? super WritableComparable, ? super Writable>)
-            ReflectionUtils.newInstance(cls, this.getConf());
+         ReflectionUtils.newInstance(cls, this.getConf());
     this.actualOutputFormat = actualOF;
   }
 
@@ -99,7 +98,7 @@ public class HivePassThroughOutputFormat
   }
 
   @Override
-  public FSRecordWriter getHiveRecordWriter(
+  public org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter getHiveRecordWriter(
       JobConf jc, Path finalOutPath, Class<? extends Writable> valueClass, boolean isCompressed,
       Properties tableProperties, Progressable progress) throws IOException {
     if (this.initialized == false) {
@@ -117,10 +116,12 @@ public class HivePassThroughOutputFormat
     }
   }
 
+  @Override
   public Configuration getConf() {
     return conf;
   }
 
+  @Override
   public void setConf(Configuration config) {
     if (config.get(HIVE_PASSTHROUGH_STORAGEHANDLER_OF_JOBCONFKEY) != null) {
       actualOutputFormatClass = config.get(HIVE_PASSTHROUGH_STORAGEHANDLER_OF_JOBCONFKEY);

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HivePassThroughRecordWriter.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HivePassThroughRecordWriter.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HivePassThroughRecordWriter.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HivePassThroughRecordWriter.java Fri Apr 25 05:50:35 2014
@@ -20,12 +20,13 @@ package org.apache.hadoop.hive.ql.io;
 
 import java.io.IOException;
 
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
 
 
 public class HivePassThroughRecordWriter <K extends WritableComparable<?>, V extends Writable>
-implements FSRecordWriter {
+implements RecordWriter {
 
   private final org.apache.hadoop.mapred.RecordWriter<K, V> mWriter;
 
@@ -33,11 +34,13 @@ implements FSRecordWriter {
     this.mWriter = writer;
   }
 
+  @Override
   @SuppressWarnings("unchecked")
   public void write(Writable r) throws IOException {
     mWriter.write(null, (V) r);
   }
 
+  @Override
   public void close(boolean abort) throws IOException {
     //close with null reporter
     mWriter.close(null);

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveSequenceFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveSequenceFileOutputFormat.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveSequenceFileOutputFormat.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveSequenceFileOutputFormat.java Fri Apr 25 05:50:35 2014
@@ -23,6 +23,7 @@ import java.util.Properties;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.SequenceFile;
@@ -55,7 +56,7 @@ public class HiveSequenceFileOutputForma
    * @return the RecordWriter for the output file
    */
   @Override
-  public FSRecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
+  public RecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
       Class<? extends Writable> valueClass, boolean isCompressed,
       Properties tableProperties, Progressable progress) throws IOException {
 
@@ -63,11 +64,13 @@ public class HiveSequenceFileOutputForma
     final SequenceFile.Writer outStream = Utilities.createSequenceWriter(jc,
         fs, finalOutPath, BytesWritable.class, valueClass, isCompressed);
 
-    return new FSRecordWriter() {
+    return new RecordWriter() {
+      @Override
       public void write(Writable r) throws IOException {
         outStream.append(EMPTY_KEY, r);
       }
 
+      @Override
       public void close(boolean abort) throws IOException {
         outStream.close();
       }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java Fri Apr 25 05:50:35 2014
@@ -118,7 +118,7 @@ public class RCFileOutputFormat extends
    * @throws IOException
    */
   @Override
-  public FSRecordWriter getHiveRecordWriter(
+  public org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter getHiveRecordWriter(
       JobConf jc, Path finalOutPath, Class<? extends Writable> valueClass,
       boolean isCompressed, Properties tableProperties, Progressable progress) throws IOException {
 
@@ -135,11 +135,13 @@ public class RCFileOutputFormat extends
       (jc, finalOutPath.getFileSystem(jc),
        finalOutPath, isCompressed);
 
-    return new FSRecordWriter() {
+    return new org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter() {
+      @Override
       public void write(Writable r) throws IOException {
         outWriter.append(r);
       }
 
+      @Override
       public void close(boolean abort) throws IOException {
         outWriter.close();
       }

Added: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/StatsProvidingRecordWriter.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/StatsProvidingRecordWriter.java?rev=1589931&view=auto
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/StatsProvidingRecordWriter.java (added)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/StatsProvidingRecordWriter.java Fri Apr 25 05:50:35 2014
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.io;
+
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
+import org.apache.hadoop.hive.serde2.SerDeStats;
+
+  /**
+   * If a file format (like ORC) internally gathers statistics while
+   * writing, it can expose those statistics through this record writer
+   * interface. Writer-side statistics are useful for updating the
+   * metastore with table/partition level statistics.
+   *
+   */
+  public interface StatsProvidingRecordWriter extends RecordWriter {
+    /**
+     * Returns the statistics information.
+     *
+     * @return SerDeStats
+     */
+    SerDeStats getStats();
+  }

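Formats other than ORC can opt in the same way: implement the new top-level interface and FileSinkOperator picks the stats up when it closes the writer. A minimal hypothetical writer (the byte and row counting here is illustrative; ORC's OrcRecordWriter below derives its stats from the ORC Writer itself):

    import java.io.IOException;

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.hive.ql.io.StatsProvidingRecordWriter;
    import org.apache.hadoop.hive.serde2.SerDeStats;
    import org.apache.hadoop.io.Writable;

    // Hypothetical writer that tracks its own stats for the metastore.
    public class CountingTextRecordWriter implements StatsProvidingRecordWriter {
      private final FSDataOutputStream out;
      private long rowCount = 0;
      private long rawDataSize = 0;

      public CountingTextRecordWriter(FSDataOutputStream out) {
        this.out = out;
      }

      @Override
      public void write(Writable r) throws IOException {
        byte[] bytes = r.toString().getBytes();
        out.write(bytes);
        out.write('\n');
        rowCount++;
        rawDataSize += bytes.length;
      }

      @Override
      public void close(boolean abort) throws IOException {
        out.close();
      }

      @Override
      public SerDeStats getStats() {
        // FileSinkOperator calls this after close() when gathering stats.
        SerDeStats stats = new SerDeStats();
        stats.setRowCount(rowCount);
        stats.setRawDataSize(rawDataSize);
        return stats;
      }
    }
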
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/avro/AvroContainerOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/avro/AvroContainerOutputFormat.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/avro/AvroContainerOutputFormat.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/avro/AvroContainerOutputFormat.java Fri Apr 25 05:50:35 2014
@@ -32,7 +32,6 @@ import org.apache.avro.generic.GenericDa
 import org.apache.avro.generic.GenericRecord;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.ql.io.FSRecordWriter;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.serde2.avro.AvroGenericRecordWritable;
 import org.apache.hadoop.hive.serde2.avro.AvroSerdeException;
@@ -51,7 +50,7 @@ public class AvroContainerOutputFormat
         implements HiveOutputFormat<LongWritable, AvroGenericRecordWritable> {
 
   @Override
-  public FSRecordWriter getHiveRecordWriter(JobConf jobConf,
+  public org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter getHiveRecordWriter(JobConf jobConf,
          Path path, Class<? extends Writable> valueClass, boolean isCompressed,
          Properties properties, Progressable progressable) throws IOException {
     Schema schema;
@@ -82,10 +81,12 @@ public class AvroContainerOutputFormat
   getRecordWriter(FileSystem ignored, JobConf job, String name,
       Progressable progress) {
     return new RecordWriter<LongWritable, AvroGenericRecordWritable>() {
+      @Override
       public void write(LongWritable key, AvroGenericRecordWritable value) {
         throw new RuntimeException("Should not be called");
       }
 
+      @Override
       public void close(Reporter reporter) {
       }
     };

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/avro/AvroGenericRecordWriter.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/avro/AvroGenericRecordWriter.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/avro/AvroGenericRecordWriter.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/avro/AvroGenericRecordWriter.java Fri Apr 25 05:50:35 2014
@@ -22,14 +22,14 @@ import java.io.IOException;
 
 import org.apache.avro.file.DataFileWriter;
 import org.apache.avro.generic.GenericRecord;
-import org.apache.hadoop.hive.ql.io.FSRecordWriter;
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.serde2.avro.AvroGenericRecordWritable;
 import org.apache.hadoop.io.Writable;
 
 /**
  * Write an Avro GenericRecord to an Avro data file.
  */
-public class AvroGenericRecordWriter implements FSRecordWriter{
+public class AvroGenericRecordWriter implements RecordWriter{
   final private DataFileWriter<GenericRecord> dfw;
 
   public AvroGenericRecordWriter(DataFileWriter<GenericRecord> dfw) {

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java Fri Apr 25 05:50:35 2014
@@ -21,7 +21,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
-import org.apache.hadoop.hive.ql.io.FSRecordWriter;
+import org.apache.hadoop.hive.ql.io.StatsProvidingRecordWriter;
 import org.apache.hadoop.hive.ql.io.RecordUpdater;
 import org.apache.hadoop.hive.ql.io.orc.OrcSerde.OrcSerdeRow;
 import org.apache.hadoop.hive.serde2.SerDeStats;
@@ -53,8 +53,7 @@ public class OrcOutputFormat extends Fil
 
   private static class OrcRecordWriter
       implements RecordWriter<NullWritable, OrcSerdeRow>,
-                 FSRecordWriter,
-                 FSRecordWriter.StatsProvidingRecordWriter {
+                 StatsProvidingRecordWriter {
     private Writer writer = null;
     private final Path path;
     private final OrcFile.WriterOptions options;
@@ -178,7 +177,7 @@ public class OrcOutputFormat extends Fil
 
 
   @Override
-  public FSRecordWriter
+  public StatsProvidingRecordWriter
      getHiveRecordWriter(JobConf conf,
                          Path path,
                          Class<? extends Writable> valueClass,
@@ -283,7 +282,7 @@ public class OrcOutputFormat extends Fil
   }
 
   @Override
-  public FSRecordWriter getRawRecordWriter(Path path,
+  public org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter getRawRecordWriter(Path path,
                                            Options options) throws IOException {
     final Path filename = AcidUtils.createFilename(path, options);
     final OrcFile.WriterOptions opts =
@@ -300,7 +299,7 @@ public class OrcOutputFormat extends Fil
     opts.inspector(options.getInspector())
         .callback(watcher);
     final Writer writer = OrcFile.createWriter(filename, opts);
-    return new FSRecordWriter() {
+    return new org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter() {
       @Override
       public void write(Writable w) throws IOException {
         OrcStruct orc = (OrcStruct) w;

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetOutputFormat.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetOutputFormat.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetOutputFormat.java Fri Apr 25 05:50:35 2014
@@ -23,7 +23,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.ql.io.FSRecordWriter;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.ql.io.IOConstants;
 import org.apache.hadoop.hive.ql.io.parquet.convert.HiveSchemaConverter;
@@ -83,7 +82,7 @@ public class MapredParquetOutputFormat e
    * contains the real output format
    */
   @Override
-  public FSRecordWriter getHiveRecordWriter(
+  public org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter getHiveRecordWriter(
       final JobConf jobConf,
       final Path finalOutPath,
       final Class<? extends Writable> valueClass,

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/ParquetRecordWriterWrapper.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/ParquetRecordWriterWrapper.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/ParquetRecordWriterWrapper.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/ParquetRecordWriterWrapper.java Fri Apr 25 05:50:35 2014
@@ -27,13 +27,12 @@ import org.apache.hadoop.mapreduce.Outpu
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.hive.ql.io.FSRecordWriter;
 
 import parquet.hadoop.ParquetOutputFormat;
 import parquet.hadoop.util.ContextUtil;
 
 public class ParquetRecordWriterWrapper implements RecordWriter<Void, ArrayWritable>,
-  FSRecordWriter {
+  org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter {
 
   public static final Log LOG = LogFactory.getLog(ParquetRecordWriterWrapper.class);
 
@@ -54,8 +53,7 @@ public class ParquetRecordWriterWrapper 
       taskContext = ContextUtil.newTaskAttemptContext(jobConf, taskAttemptID);
 
       LOG.info("creating real writer to write at " + name);
-      realWriter = (org.apache.hadoop.mapreduce.RecordWriter<Void, ArrayWritable>)
-          ((ParquetOutputFormat) realOutputFormat).getRecordWriter(taskContext, new Path(name));
+      realWriter = ((ParquetOutputFormat) realOutputFormat).getRecordWriter(taskContext, new Path(name));
       LOG.info("real writer: " + realWriter);
     } catch (final InterruptedException e) {
       throw new IOException(e);

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java Fri Apr 25 05:50:35 2014
@@ -23,7 +23,6 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.ValidTxnListImpl;
@@ -31,28 +30,25 @@ import org.apache.hadoop.hive.conf.HiveC
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.io.AcidInputFormat;
 import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
-import org.apache.hadoop.hive.ql.io.FSRecordWriter;
 import org.apache.hadoop.hive.ql.io.RecordIdentifier;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Writable;
-
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.JobClient;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.JobContext;
-import org.apache.hadoop.mapred.JobStatus;
 import org.apache.hadoop.mapred.Mapper;
 import org.apache.hadoop.mapred.OutputCollector;
 import org.apache.hadoop.mapred.OutputCommitter;
 import org.apache.hadoop.mapred.RecordReader;
 import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapred.RunningJob;
 import org.apache.hadoop.mapred.TaskAttemptContext;
 import org.apache.hadoop.mapred.lib.NullOutputFormat;
 import org.apache.hadoop.util.StringUtils;
@@ -343,6 +339,7 @@ public class CompactorMR {
       return deltas;
     }
 
+    @Override
     public String toString() {
       StringBuilder builder = new StringBuilder();
       builder.append("CompactorInputSplit{base: ");
@@ -491,7 +488,7 @@ public class CompactorMR {
       implements Mapper<NullWritable, CompactorInputSplit,  NullWritable,  NullWritable> {
 
     JobConf jobConf;
-    FSRecordWriter writer;
+    RecordWriter writer;
 
     @Override
     public void map(NullWritable key, CompactorInputSplit split,

Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java Fri Apr 25 05:50:35 2014
@@ -64,7 +64,6 @@ import org.apache.hadoop.hive.ql.exec.ve
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
 import org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-import org.apache.hadoop.hive.ql.io.FSRecordWriter;
 import org.apache.hadoop.hive.ql.io.HiveInputFormat;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.ql.io.InputFormatChecker;
@@ -440,6 +439,7 @@ public class TestInputOutputFormat {
       this.hosts = hosts;
     }
 
+    @Override
     public String toString() {
       StringBuilder buffer = new StringBuilder();
       buffer.append("block{offset: ");
@@ -480,6 +480,7 @@ public class TestInputOutputFormat {
       }
     }
 
+    @Override
     public String toString() {
       StringBuilder buffer = new StringBuilder();
       buffer.append("mockFile{path: ");
@@ -564,6 +565,7 @@ public class TestInputOutputFormat {
       }
     }
 
+    @Override
     public void close() throws IOException {
       super.close();
       DataOutputBuffer buf = (DataOutputBuffer) getWrappedStream();
@@ -581,6 +583,7 @@ public class TestInputOutputFormat {
       // empty
     }
 
+    @Override
     public void initialize(URI uri, Configuration conf) {
       setConf(conf);
     }
@@ -924,7 +927,7 @@ public class TestInputOutputFormat {
     }
     SerDe serde = new OrcSerde();
     HiveOutputFormat<?, ?> outFormat = new OrcOutputFormat();
-    FSRecordWriter writer =
+    org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter writer =
         outFormat.getHiveRecordWriter(conf, testFilePath, MyRow.class, true,
             properties, Reporter.NULL);
     writer.write(serde.serialize(new MyRow(1,2), inspector));
@@ -1088,7 +1091,7 @@ public class TestInputOutputFormat {
   public void testEmptyFile() throws Exception {
     Properties properties = new Properties();
     HiveOutputFormat<?, ?> outFormat = new OrcOutputFormat();
-    FSRecordWriter writer =
+    org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter writer =
         outFormat.getHiveRecordWriter(conf, testFilePath, MyRow.class, true,
             properties, Reporter.NULL);
     writer.close(true);
@@ -1133,7 +1136,7 @@ public class TestInputOutputFormat {
     }
     SerDe serde = new OrcSerde();
     HiveOutputFormat<?, ?> outFormat = new OrcOutputFormat();
-    FSRecordWriter writer =
+    org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter writer =
         outFormat.getHiveRecordWriter(conf, testFilePath, StringRow.class,
             true, properties, Reporter.NULL);
     writer.write(serde.serialize(new StringRow("owen"), inspector));

Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java?rev=1589931&r1=1589930&r2=1589931&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java Fri Apr 25 05:50:35 2014
@@ -32,7 +32,6 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.ql.io.AcidInputFormat;
 import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
-import org.apache.hadoop.hive.ql.io.FSRecordWriter;
 import org.apache.hadoop.hive.ql.io.RecordIdentifier;
 import org.apache.hadoop.hive.ql.io.RecordUpdater;
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
@@ -65,8 +64,8 @@ public abstract class CompactorTest {
   protected IMetaStoreClient ms;
   protected long sleepTime = 1000;
 
-  private MetaStoreThread.BooleanPointer stop = new MetaStoreThread.BooleanPointer();
-  private File tmpdir;
+  private final MetaStoreThread.BooleanPointer stop = new MetaStoreThread.BooleanPointer();
+  private final File tmpdir;
 
   protected CompactorTest() throws Exception {
     HiveConf conf = new HiveConf();
@@ -329,10 +328,10 @@ public abstract class CompactorTest {
   }
 
   static class MockRawReader implements AcidInputFormat.RawReader<Text> {
-    private Stack<Path> filesToRead;
-    private Configuration conf;
+    private final Stack<Path> filesToRead;
+    private final Configuration conf;
     private FSDataInputStream is = null;
-    private FileSystem fs;
+    private final FileSystem fs;
 
     MockRawReader(Configuration conf, List<Path> files) throws IOException {
       filesToRead = new Stack<Path>();
@@ -408,12 +407,12 @@ public abstract class CompactorTest {
     }
 
     @Override
-    public FSRecordWriter getRawRecordWriter(Path path, Options options) throws IOException {
+    public org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter getRawRecordWriter(Path path, Options options) throws IOException {
       return new MockRecordWriter(path, options);
     }
 
     @Override
-    public FSRecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
+    public org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
                                               Class<? extends Writable> valueClass,
                                               boolean isCompressed, Properties tableProperties,
                                               Progressable progress) throws IOException {
@@ -437,8 +436,8 @@ public abstract class CompactorTest {
   // This class isn't used and I suspect does totally the wrong thing.  It's only here so that I
   // can provide some output format to the tables and partitions I create.  I actually write to
   // those tables' directories.
-  static class MockRecordWriter implements FSRecordWriter {
-    private FSDataOutputStream os;
+  static class MockRecordWriter implements org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter {
+    private final FSDataOutputStream os;
 
     MockRecordWriter(Path basedir, AcidOutputFormat.Options options) throws IOException {
       FileSystem fs = FileSystem.get(options.getConfiguration());