Posted to commits@hbase.apache.org by bu...@apache.org on 2016/07/18 11:23:29 UTC
[40/52] [partial] hbase-site git commit: Published site at 9bc7ecfb9dec6bfe14a12b6d3bfd11392d7752b8.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5f066b2e/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
----------------------------------------------------------------------
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
index 3d4d3d3..37af0c3 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
@@ -75,734 +75,765 @@
<span class="sourceLineNo">067</span>import org.apache.hadoop.hbase.regionserver.StoreFile;<a name="line.67"></a>
<span class="sourceLineNo">068</span>import org.apache.hadoop.hbase.regionserver.StoreFileWriter;<a name="line.68"></a>
<span class="sourceLineNo">069</span>import org.apache.hadoop.hbase.util.Bytes;<a name="line.69"></a>
-<span class="sourceLineNo">070</span>import org.apache.hadoop.io.NullWritable;<a name="line.70"></a>
-<span class="sourceLineNo">071</span>import org.apache.hadoop.io.SequenceFile;<a name="line.71"></a>
-<span class="sourceLineNo">072</span>import org.apache.hadoop.io.Text;<a name="line.72"></a>
-<span class="sourceLineNo">073</span>import org.apache.hadoop.mapreduce.Job;<a name="line.73"></a>
-<span class="sourceLineNo">074</span>import org.apache.hadoop.mapreduce.OutputFormat;<a name="line.74"></a>
-<span class="sourceLineNo">075</span>import org.apache.hadoop.mapreduce.RecordWriter;<a name="line.75"></a>
-<span class="sourceLineNo">076</span>import org.apache.hadoop.mapreduce.TaskAttemptContext;<a name="line.76"></a>
-<span class="sourceLineNo">077</span>import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;<a name="line.77"></a>
-<span class="sourceLineNo">078</span>import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;<a name="line.78"></a>
-<span class="sourceLineNo">079</span>import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;<a name="line.79"></a>
-<span class="sourceLineNo">080</span><a name="line.80"></a>
-<span class="sourceLineNo">081</span>import com.google.common.annotations.VisibleForTesting;<a name="line.81"></a>
-<span class="sourceLineNo">082</span><a name="line.82"></a>
-<span class="sourceLineNo">083</span>/**<a name="line.83"></a>
-<span class="sourceLineNo">084</span> * Writes HFiles. Passed Cells must arrive in order.<a name="line.84"></a>
-<span class="sourceLineNo">085</span> * Writes current time as the sequence id for the file. Sets the major compacted<a name="line.85"></a>
-<span class="sourceLineNo">086</span> * attribute on created @{link {@link HFile}s. Calling write(null,null) will forcibly roll<a name="line.86"></a>
-<span class="sourceLineNo">087</span> * all HFiles being written.<a name="line.87"></a>
-<span class="sourceLineNo">088</span> * <p><a name="line.88"></a>
-<span class="sourceLineNo">089</span> * Using this class as part of a MapReduce job is best done<a name="line.89"></a>
-<span class="sourceLineNo">090</span> * using {@link #configureIncrementalLoad(Job, HTableDescriptor, RegionLocator, Class)}.<a name="line.90"></a>
-<span class="sourceLineNo">091</span> */<a name="line.91"></a>
-<span class="sourceLineNo">092</span>@InterfaceAudience.Public<a name="line.92"></a>
-<span class="sourceLineNo">093</span>@InterfaceStability.Evolving<a name="line.93"></a>
-<span class="sourceLineNo">094</span>public class HFileOutputFormat2<a name="line.94"></a>
-<span class="sourceLineNo">095</span> extends FileOutputFormat<ImmutableBytesWritable, Cell> {<a name="line.95"></a>
-<span class="sourceLineNo">096</span> private static final Log LOG = LogFactory.getLog(HFileOutputFormat2.class);<a name="line.96"></a>
-<span class="sourceLineNo">097</span><a name="line.97"></a>
-<span class="sourceLineNo">098</span> // The following constants are private since these are used by<a name="line.98"></a>
-<span class="sourceLineNo">099</span> // HFileOutputFormat2 to internally transfer data between job setup and<a name="line.99"></a>
-<span class="sourceLineNo">100</span> // reducer run using conf.<a name="line.100"></a>
-<span class="sourceLineNo">101</span> // These should not be changed by the client.<a name="line.101"></a>
-<span class="sourceLineNo">102</span> private static final String COMPRESSION_FAMILIES_CONF_KEY =<a name="line.102"></a>
-<span class="sourceLineNo">103</span> "hbase.hfileoutputformat.families.compression";<a name="line.103"></a>
-<span class="sourceLineNo">104</span> private static final String BLOOM_TYPE_FAMILIES_CONF_KEY =<a name="line.104"></a>
-<span class="sourceLineNo">105</span> "hbase.hfileoutputformat.families.bloomtype";<a name="line.105"></a>
-<span class="sourceLineNo">106</span> private static final String BLOCK_SIZE_FAMILIES_CONF_KEY =<a name="line.106"></a>
-<span class="sourceLineNo">107</span> "hbase.mapreduce.hfileoutputformat.blocksize";<a name="line.107"></a>
-<span class="sourceLineNo">108</span> private static final String DATABLOCK_ENCODING_FAMILIES_CONF_KEY =<a name="line.108"></a>
-<span class="sourceLineNo">109</span> "hbase.mapreduce.hfileoutputformat.families.datablock.encoding";<a name="line.109"></a>
-<span class="sourceLineNo">110</span><a name="line.110"></a>
-<span class="sourceLineNo">111</span> // This constant is public since the client can modify this when setting<a name="line.111"></a>
-<span class="sourceLineNo">112</span> // up their conf object and thus refer to this symbol.<a name="line.112"></a>
-<span class="sourceLineNo">113</span> // It is present for backwards compatibility reasons. Use it only to<a name="line.113"></a>
-<span class="sourceLineNo">114</span> // override the auto-detection of datablock encoding.<a name="line.114"></a>
-<span class="sourceLineNo">115</span> public static final String DATABLOCK_ENCODING_OVERRIDE_CONF_KEY =<a name="line.115"></a>
-<span class="sourceLineNo">116</span> "hbase.mapreduce.hfileoutputformat.datablock.encoding";<a name="line.116"></a>
-<span class="sourceLineNo">117</span><a name="line.117"></a>
-<span class="sourceLineNo">118</span> /**<a name="line.118"></a>
-<span class="sourceLineNo">119</span> * Keep locality while generating HFiles for bulkload. See HBASE-12596<a name="line.119"></a>
-<span class="sourceLineNo">120</span> */<a name="line.120"></a>
-<span class="sourceLineNo">121</span> public static final String LOCALITY_SENSITIVE_CONF_KEY =<a name="line.121"></a>
-<span class="sourceLineNo">122</span> "hbase.bulkload.locality.sensitive.enabled";<a name="line.122"></a>
-<span class="sourceLineNo">123</span> private static final boolean DEFAULT_LOCALITY_SENSITIVE = true;<a name="line.123"></a>
-<span class="sourceLineNo">124</span> private static final String OUTPUT_TABLE_NAME_CONF_KEY =<a name="line.124"></a>
-<span class="sourceLineNo">125</span> "hbase.mapreduce.hfileoutputformat.table.name";<a name="line.125"></a>
-<span class="sourceLineNo">126</span><a name="line.126"></a>
-<span class="sourceLineNo">127</span> @Override<a name="line.127"></a>
-<span class="sourceLineNo">128</span> public RecordWriter<ImmutableBytesWritable, Cell> getRecordWriter(<a name="line.128"></a>
-<span class="sourceLineNo">129</span> final TaskAttemptContext context) throws IOException, InterruptedException {<a name="line.129"></a>
-<span class="sourceLineNo">130</span> return createRecordWriter(context);<a name="line.130"></a>
-<span class="sourceLineNo">131</span> }<a name="line.131"></a>
-<span class="sourceLineNo">132</span><a name="line.132"></a>
-<span class="sourceLineNo">133</span> static <V extends Cell> RecordWriter<ImmutableBytesWritable, V><a name="line.133"></a>
-<span class="sourceLineNo">134</span> createRecordWriter(final TaskAttemptContext context)<a name="line.134"></a>
-<span class="sourceLineNo">135</span> throws IOException {<a name="line.135"></a>
-<span class="sourceLineNo">136</span><a name="line.136"></a>
-<span class="sourceLineNo">137</span> // Get the path of the temporary output file<a name="line.137"></a>
-<span class="sourceLineNo">138</span> final Path outputPath = FileOutputFormat.getOutputPath(context);<a name="line.138"></a>
-<span class="sourceLineNo">139</span> final Path outputdir = new FileOutputCommitter(outputPath, context).getWorkPath();<a name="line.139"></a>
-<span class="sourceLineNo">140</span> final Configuration conf = context.getConfiguration();<a name="line.140"></a>
-<span class="sourceLineNo">141</span> final FileSystem fs = outputdir.getFileSystem(conf);<a name="line.141"></a>
-<span class="sourceLineNo">142</span> // These configs. are from hbase-*.xml<a name="line.142"></a>
-<span class="sourceLineNo">143</span> final long maxsize = conf.getLong(HConstants.HREGION_MAX_FILESIZE,<a name="line.143"></a>
-<span class="sourceLineNo">144</span> HConstants.DEFAULT_MAX_FILE_SIZE);<a name="line.144"></a>
-<span class="sourceLineNo">145</span> // Invented config. Add to hbase-*.xml if other than default compression.<a name="line.145"></a>
-<span class="sourceLineNo">146</span> final String defaultCompressionStr = conf.get("hfile.compression",<a name="line.146"></a>
-<span class="sourceLineNo">147</span> Compression.Algorithm.NONE.getName());<a name="line.147"></a>
-<span class="sourceLineNo">148</span> final Algorithm defaultCompression = HFileWriterImpl<a name="line.148"></a>
-<span class="sourceLineNo">149</span> .compressionByName(defaultCompressionStr);<a name="line.149"></a>
-<span class="sourceLineNo">150</span> final boolean compactionExclude = conf.getBoolean(<a name="line.150"></a>
-<span class="sourceLineNo">151</span> "hbase.mapreduce.hfileoutputformat.compaction.exclude", false);<a name="line.151"></a>
-<span class="sourceLineNo">152</span><a name="line.152"></a>
-<span class="sourceLineNo">153</span> // create a map from column family to the compression algorithm<a name="line.153"></a>
-<span class="sourceLineNo">154</span> final Map<byte[], Algorithm> compressionMap = createFamilyCompressionMap(conf);<a name="line.154"></a>
-<span class="sourceLineNo">155</span> final Map<byte[], BloomType> bloomTypeMap = createFamilyBloomTypeMap(conf);<a name="line.155"></a>
-<span class="sourceLineNo">156</span> final Map<byte[], Integer> blockSizeMap = createFamilyBlockSizeMap(conf);<a name="line.156"></a>
-<span class="sourceLineNo">157</span><a name="line.157"></a>
-<span class="sourceLineNo">158</span> String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_OVERRIDE_CONF_KEY);<a name="line.158"></a>
-<span class="sourceLineNo">159</span> final Map<byte[], DataBlockEncoding> datablockEncodingMap<a name="line.159"></a>
-<span class="sourceLineNo">160</span> = createFamilyDataBlockEncodingMap(conf);<a name="line.160"></a>
-<span class="sourceLineNo">161</span> final DataBlockEncoding overriddenEncoding;<a name="line.161"></a>
-<span class="sourceLineNo">162</span> if (dataBlockEncodingStr != null) {<a name="line.162"></a>
-<span class="sourceLineNo">163</span> overriddenEncoding = DataBlockEncoding.valueOf(dataBlockEncodingStr);<a name="line.163"></a>
-<span class="sourceLineNo">164</span> } else {<a name="line.164"></a>
-<span class="sourceLineNo">165</span> overriddenEncoding = null;<a name="line.165"></a>
-<span class="sourceLineNo">166</span> }<a name="line.166"></a>
-<span class="sourceLineNo">167</span><a name="line.167"></a>
-<span class="sourceLineNo">168</span> return new RecordWriter<ImmutableBytesWritable, V>() {<a name="line.168"></a>
-<span class="sourceLineNo">169</span> // Map of families to writers and how much has been output on the writer.<a name="line.169"></a>
-<span class="sourceLineNo">170</span> private final Map<byte [], WriterLength> writers =<a name="line.170"></a>
-<span class="sourceLineNo">171</span> new TreeMap<byte [], WriterLength>(Bytes.BYTES_COMPARATOR);<a name="line.171"></a>
-<span class="sourceLineNo">172</span> private byte [] previousRow = HConstants.EMPTY_BYTE_ARRAY;<a name="line.172"></a>
-<span class="sourceLineNo">173</span> private final byte [] now = Bytes.toBytes(System.currentTimeMillis());<a name="line.173"></a>
-<span class="sourceLineNo">174</span> private boolean rollRequested = false;<a name="line.174"></a>
+<span class="sourceLineNo">070</span>import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;<a name="line.70"></a>
+<span class="sourceLineNo">071</span>import org.apache.hadoop.io.NullWritable;<a name="line.71"></a>
+<span class="sourceLineNo">072</span>import org.apache.hadoop.io.SequenceFile;<a name="line.72"></a>
+<span class="sourceLineNo">073</span>import org.apache.hadoop.io.Text;<a name="line.73"></a>
+<span class="sourceLineNo">074</span>import org.apache.hadoop.mapreduce.Job;<a name="line.74"></a>
+<span class="sourceLineNo">075</span>import org.apache.hadoop.mapreduce.OutputFormat;<a name="line.75"></a>
+<span class="sourceLineNo">076</span>import org.apache.hadoop.mapreduce.RecordWriter;<a name="line.76"></a>
+<span class="sourceLineNo">077</span>import org.apache.hadoop.mapreduce.TaskAttemptContext;<a name="line.77"></a>
+<span class="sourceLineNo">078</span>import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;<a name="line.78"></a>
+<span class="sourceLineNo">079</span>import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;<a name="line.79"></a>
+<span class="sourceLineNo">080</span>import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;<a name="line.80"></a>
+<span class="sourceLineNo">081</span><a name="line.81"></a>
+<span class="sourceLineNo">082</span>import com.google.common.annotations.VisibleForTesting;<a name="line.82"></a>
+<span class="sourceLineNo">083</span><a name="line.83"></a>
+<span class="sourceLineNo">084</span>/**<a name="line.84"></a>
+<span class="sourceLineNo">085</span> * Writes HFiles. Passed Cells must arrive in order.<a name="line.85"></a>
+<span class="sourceLineNo">086</span> * Writes current time as the sequence id for the file. Sets the major compacted<a name="line.86"></a>
+<span class="sourceLineNo">087</span> * attribute on created @{link {@link HFile}s. Calling write(null,null) will forcibly roll<a name="line.87"></a>
+<span class="sourceLineNo">088</span> * all HFiles being written.<a name="line.88"></a>
+<span class="sourceLineNo">089</span> * <p><a name="line.89"></a>
+<span class="sourceLineNo">090</span> * Using this class as part of a MapReduce job is best done<a name="line.90"></a>
+<span class="sourceLineNo">091</span> * using {@link #configureIncrementalLoad(Job, HTableDescriptor, RegionLocator, Class)}.<a name="line.91"></a>
+<span class="sourceLineNo">092</span> */<a name="line.92"></a>
+<span class="sourceLineNo">093</span>@InterfaceAudience.Public<a name="line.93"></a>
+<span class="sourceLineNo">094</span>@InterfaceStability.Evolving<a name="line.94"></a>
+<span class="sourceLineNo">095</span>public class HFileOutputFormat2<a name="line.95"></a>
+<span class="sourceLineNo">096</span> extends FileOutputFormat<ImmutableBytesWritable, Cell> {<a name="line.96"></a>
+<span class="sourceLineNo">097</span> private static final Log LOG = LogFactory.getLog(HFileOutputFormat2.class);<a name="line.97"></a>
+<span class="sourceLineNo">098</span><a name="line.98"></a>
+<span class="sourceLineNo">099</span> // The following constants are private since these are used by<a name="line.99"></a>
+<span class="sourceLineNo">100</span> // HFileOutputFormat2 to internally transfer data between job setup and<a name="line.100"></a>
+<span class="sourceLineNo">101</span> // reducer run using conf.<a name="line.101"></a>
+<span class="sourceLineNo">102</span> // These should not be changed by the client.<a name="line.102"></a>
+<span class="sourceLineNo">103</span> private static final String COMPRESSION_FAMILIES_CONF_KEY =<a name="line.103"></a>
+<span class="sourceLineNo">104</span> "hbase.hfileoutputformat.families.compression";<a name="line.104"></a>
+<span class="sourceLineNo">105</span> private static final String BLOOM_TYPE_FAMILIES_CONF_KEY =<a name="line.105"></a>
+<span class="sourceLineNo">106</span> "hbase.hfileoutputformat.families.bloomtype";<a name="line.106"></a>
+<span class="sourceLineNo">107</span> private static final String BLOCK_SIZE_FAMILIES_CONF_KEY =<a name="line.107"></a>
+<span class="sourceLineNo">108</span> "hbase.mapreduce.hfileoutputformat.blocksize";<a name="line.108"></a>
+<span class="sourceLineNo">109</span> private static final String DATABLOCK_ENCODING_FAMILIES_CONF_KEY =<a name="line.109"></a>
+<span class="sourceLineNo">110</span> "hbase.mapreduce.hfileoutputformat.families.datablock.encoding";<a name="line.110"></a>
+<span class="sourceLineNo">111</span><a name="line.111"></a>
+<span class="sourceLineNo">112</span> // This constant is public since the client can modify this when setting<a name="line.112"></a>
+<span class="sourceLineNo">113</span> // up their conf object and thus refer to this symbol.<a name="line.113"></a>
+<span class="sourceLineNo">114</span> // It is present for backwards compatibility reasons. Use it only to<a name="line.114"></a>
+<span class="sourceLineNo">115</span> // override the auto-detection of datablock encoding.<a name="line.115"></a>
+<span class="sourceLineNo">116</span> public static final String DATABLOCK_ENCODING_OVERRIDE_CONF_KEY =<a name="line.116"></a>
+<span class="sourceLineNo">117</span> "hbase.mapreduce.hfileoutputformat.datablock.encoding";<a name="line.117"></a>
+<span class="sourceLineNo">118</span><a name="line.118"></a>
+<span class="sourceLineNo">119</span> /**<a name="line.119"></a>
+<span class="sourceLineNo">120</span> * Keep locality while generating HFiles for bulkload. See HBASE-12596<a name="line.120"></a>
+<span class="sourceLineNo">121</span> */<a name="line.121"></a>
+<span class="sourceLineNo">122</span> public static final String LOCALITY_SENSITIVE_CONF_KEY =<a name="line.122"></a>
+<span class="sourceLineNo">123</span> "hbase.bulkload.locality.sensitive.enabled";<a name="line.123"></a>
+<span class="sourceLineNo">124</span> private static final boolean DEFAULT_LOCALITY_SENSITIVE = true;<a name="line.124"></a>
+<span class="sourceLineNo">125</span> private static final String OUTPUT_TABLE_NAME_CONF_KEY =<a name="line.125"></a>
+<span class="sourceLineNo">126</span> "hbase.mapreduce.hfileoutputformat.table.name";<a name="line.126"></a>
+<span class="sourceLineNo">127</span><a name="line.127"></a>
+<span class="sourceLineNo">128</span> @Override<a name="line.128"></a>
+<span class="sourceLineNo">129</span> public RecordWriter<ImmutableBytesWritable, Cell> getRecordWriter(<a name="line.129"></a>
+<span class="sourceLineNo">130</span> final TaskAttemptContext context) throws IOException, InterruptedException {<a name="line.130"></a>
+<span class="sourceLineNo">131</span> return createRecordWriter(context);<a name="line.131"></a>
+<span class="sourceLineNo">132</span> }<a name="line.132"></a>
+<span class="sourceLineNo">133</span><a name="line.133"></a>
+<span class="sourceLineNo">134</span> static <V extends Cell> RecordWriter<ImmutableBytesWritable, V><a name="line.134"></a>
+<span class="sourceLineNo">135</span> createRecordWriter(final TaskAttemptContext context) throws IOException {<a name="line.135"></a>
+<span class="sourceLineNo">136</span> return new HFileRecordWriter<V>(context, null);<a name="line.136"></a>
+<span class="sourceLineNo">137</span> }<a name="line.137"></a>
+<span class="sourceLineNo">138</span><a name="line.138"></a>
+<span class="sourceLineNo">139</span> protected static class HFileRecordWriter<V extends Cell><a name="line.139"></a>
+<span class="sourceLineNo">140</span> extends RecordWriter<ImmutableBytesWritable, V> {<a name="line.140"></a>
+<span class="sourceLineNo">141</span> private final TaskAttemptContext context;<a name="line.141"></a>
+<span class="sourceLineNo">142</span> private final Path outputPath;<a name="line.142"></a>
+<span class="sourceLineNo">143</span> private final Path outputDir;<a name="line.143"></a>
+<span class="sourceLineNo">144</span> private final Configuration conf;<a name="line.144"></a>
+<span class="sourceLineNo">145</span> private final FileSystem fs;<a name="line.145"></a>
+<span class="sourceLineNo">146</span><a name="line.146"></a>
+<span class="sourceLineNo">147</span> private final long maxsize;<a name="line.147"></a>
+<span class="sourceLineNo">148</span><a name="line.148"></a>
+<span class="sourceLineNo">149</span> private final Algorithm defaultCompression;<a name="line.149"></a>
+<span class="sourceLineNo">150</span> private final boolean compactionExclude;<a name="line.150"></a>
+<span class="sourceLineNo">151</span><a name="line.151"></a>
+<span class="sourceLineNo">152</span> private final Map<byte[], Algorithm> compressionMap;<a name="line.152"></a>
+<span class="sourceLineNo">153</span> private final Map<byte[], BloomType> bloomTypeMap;<a name="line.153"></a>
+<span class="sourceLineNo">154</span> private final Map<byte[], Integer> blockSizeMap;<a name="line.154"></a>
+<span class="sourceLineNo">155</span><a name="line.155"></a>
+<span class="sourceLineNo">156</span> private final Map<byte[], DataBlockEncoding> datablockEncodingMap;<a name="line.156"></a>
+<span class="sourceLineNo">157</span> private final DataBlockEncoding overriddenEncoding;<a name="line.157"></a>
+<span class="sourceLineNo">158</span><a name="line.158"></a>
+<span class="sourceLineNo">159</span> private final Map<byte[], WriterLength> writers;<a name="line.159"></a>
+<span class="sourceLineNo">160</span> private byte[] previousRow;<a name="line.160"></a>
+<span class="sourceLineNo">161</span> private final byte[] now;<a name="line.161"></a>
+<span class="sourceLineNo">162</span> private boolean rollRequested;<a name="line.162"></a>
+<span class="sourceLineNo">163</span><a name="line.163"></a>
+<span class="sourceLineNo">164</span> /**<a name="line.164"></a>
+<span class="sourceLineNo">165</span> * Mapredue job will create a temp path for outputting results. If out != null, it means that<a name="line.165"></a>
+<span class="sourceLineNo">166</span> * the caller has set the temp working dir; If out == null, it means we need to set it here.<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * Used by HFileOutputFormat2 and MultiHFileOutputFormat. MultiHFileOutputFormat will give us<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * temp working dir at the table level and HFileOutputFormat2 has to set it here within this<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * constructor.<a name="line.169"></a>
+<span class="sourceLineNo">170</span> */<a name="line.170"></a>
+<span class="sourceLineNo">171</span> public HFileRecordWriter(final TaskAttemptContext taContext, final Path out)<a name="line.171"></a>
+<span class="sourceLineNo">172</span> throws IOException {<a name="line.172"></a>
+<span class="sourceLineNo">173</span> // Get the path of the temporary output file<a name="line.173"></a>
+<span class="sourceLineNo">174</span> context = taContext;<a name="line.174"></a>
<span class="sourceLineNo">175</span><a name="line.175"></a>
-<span class="sourceLineNo">176</span> @Override<a name="line.176"></a>
-<span class="sourceLineNo">177</span> public void write(ImmutableBytesWritable row, V cell)<a name="line.177"></a>
-<span class="sourceLineNo">178</span> throws IOException {<a name="line.178"></a>
-<span class="sourceLineNo">179</span> KeyValue kv = KeyValueUtil.ensureKeyValue(cell);<a name="line.179"></a>
-<span class="sourceLineNo">180</span><a name="line.180"></a>
-<span class="sourceLineNo">181</span> // null input == user explicitly wants to flush<a name="line.181"></a>
-<span class="sourceLineNo">182</span> if (row == null && kv == null) {<a name="line.182"></a>
-<span class="sourceLineNo">183</span> rollWriters();<a name="line.183"></a>
-<span class="sourceLineNo">184</span> return;<a name="line.184"></a>
-<span class="sourceLineNo">185</span> }<a name="line.185"></a>
+<span class="sourceLineNo">176</span> if (out == null) {<a name="line.176"></a>
+<span class="sourceLineNo">177</span> outputPath = FileOutputFormat.getOutputPath(context);<a name="line.177"></a>
+<span class="sourceLineNo">178</span> outputDir = new FileOutputCommitter(outputPath, context).getWorkPath();<a name="line.178"></a>
+<span class="sourceLineNo">179</span> } else {<a name="line.179"></a>
+<span class="sourceLineNo">180</span> outputPath = out;<a name="line.180"></a>
+<span class="sourceLineNo">181</span> outputDir = outputPath;<a name="line.181"></a>
+<span class="sourceLineNo">182</span> }<a name="line.182"></a>
+<span class="sourceLineNo">183</span><a name="line.183"></a>
+<span class="sourceLineNo">184</span> conf = context.getConfiguration();<a name="line.184"></a>
+<span class="sourceLineNo">185</span> fs = outputDir.getFileSystem(conf);<a name="line.185"></a>
<span class="sourceLineNo">186</span><a name="line.186"></a>
-<span class="sourceLineNo">187</span> byte [] rowKey = CellUtil.cloneRow(kv);<a name="line.187"></a>
-<span class="sourceLineNo">188</span> long length = kv.getLength();<a name="line.188"></a>
-<span class="sourceLineNo">189</span> byte [] family = CellUtil.cloneFamily(kv);<a name="line.189"></a>
-<span class="sourceLineNo">190</span> WriterLength wl = this.writers.get(family);<a name="line.190"></a>
-<span class="sourceLineNo">191</span><a name="line.191"></a>
-<span class="sourceLineNo">192</span> // If this is a new column family, verify that the directory exists<a name="line.192"></a>
-<span class="sourceLineNo">193</span> if (wl == null) {<a name="line.193"></a>
-<span class="sourceLineNo">194</span> fs.mkdirs(new Path(outputdir, Bytes.toString(family)));<a name="line.194"></a>
-<span class="sourceLineNo">195</span> }<a name="line.195"></a>
-<span class="sourceLineNo">196</span><a name="line.196"></a>
-<span class="sourceLineNo">197</span> // If any of the HFiles for the column families has reached<a name="line.197"></a>
-<span class="sourceLineNo">198</span> // maxsize, we need to roll all the writers<a name="line.198"></a>
-<span class="sourceLineNo">199</span> if (wl != null && wl.written + length >= maxsize) {<a name="line.199"></a>
-<span class="sourceLineNo">200</span> this.rollRequested = true;<a name="line.200"></a>
-<span class="sourceLineNo">201</span> }<a name="line.201"></a>
-<span class="sourceLineNo">202</span><a name="line.202"></a>
-<span class="sourceLineNo">203</span> // This can only happen once a row is finished though<a name="line.203"></a>
-<span class="sourceLineNo">204</span> if (rollRequested && Bytes.compareTo(this.previousRow, rowKey) != 0) {<a name="line.204"></a>
-<span class="sourceLineNo">205</span> rollWriters();<a name="line.205"></a>
-<span class="sourceLineNo">206</span> }<a name="line.206"></a>
-<span class="sourceLineNo">207</span><a name="line.207"></a>
-<span class="sourceLineNo">208</span> // create a new WAL writer, if necessary<a name="line.208"></a>
-<span class="sourceLineNo">209</span> if (wl == null || wl.writer == null) {<a name="line.209"></a>
-<span class="sourceLineNo">210</span> if (conf.getBoolean(LOCALITY_SENSITIVE_CONF_KEY, DEFAULT_LOCALITY_SENSITIVE)) {<a name="line.210"></a>
-<span class="sourceLineNo">211</span> HRegionLocation loc = null;<a name="line.211"></a>
-<span class="sourceLineNo">212</span> String tableName = conf.get(OUTPUT_TABLE_NAME_CONF_KEY);<a name="line.212"></a>
-<span class="sourceLineNo">213</span> if (tableName != null) {<a name="line.213"></a>
-<span class="sourceLineNo">214</span> try (Connection connection = ConnectionFactory.createConnection(conf);<a name="line.214"></a>
-<span class="sourceLineNo">215</span> RegionLocator locator =<a name="line.215"></a>
-<span class="sourceLineNo">216</span> connection.getRegionLocator(TableName.valueOf(tableName))) {<a name="line.216"></a>
-<span class="sourceLineNo">217</span> loc = locator.getRegionLocation(rowKey);<a name="line.217"></a>
-<span class="sourceLineNo">218</span> } catch (Throwable e) {<a name="line.218"></a>
-<span class="sourceLineNo">219</span> LOG.warn("there's something wrong when locating rowkey: " +<a name="line.219"></a>
-<span class="sourceLineNo">220</span> Bytes.toString(rowKey), e);<a name="line.220"></a>
-<span class="sourceLineNo">221</span> loc = null;<a name="line.221"></a>
-<span class="sourceLineNo">222</span> }<a name="line.222"></a>
-<span class="sourceLineNo">223</span> }<a name="line.223"></a>
-<span class="sourceLineNo">224</span><a name="line.224"></a>
-<span class="sourceLineNo">225</span> if (null == loc) {<a name="line.225"></a>
-<span class="sourceLineNo">226</span> if (LOG.isTraceEnabled()) {<a name="line.226"></a>
-<span class="sourceLineNo">227</span> LOG.trace("failed to get region location, so use default writer: " +<a name="line.227"></a>
-<span class="sourceLineNo">228</span> Bytes.toString(rowKey));<a name="line.228"></a>
-<span class="sourceLineNo">229</span> }<a name="line.229"></a>
-<span class="sourceLineNo">230</span> wl = getNewWriter(family, conf, null);<a name="line.230"></a>
-<span class="sourceLineNo">231</span> } else {<a name="line.231"></a>
-<span class="sourceLineNo">232</span> if (LOG.isDebugEnabled()) {<a name="line.232"></a>
-<span class="sourceLineNo">233</span> LOG.debug("first rowkey: [" + Bytes.toString(rowKey) + "]");<a name="line.233"></a>
-<span class="sourceLineNo">234</span> }<a name="line.234"></a>
-<span class="sourceLineNo">235</span> InetSocketAddress initialIsa =<a name="line.235"></a>
-<span class="sourceLineNo">236</span> new InetSocketAddress(loc.getHostname(), loc.getPort());<a name="line.236"></a>
-<span class="sourceLineNo">237</span> if (initialIsa.isUnresolved()) {<a name="line.237"></a>
-<span class="sourceLineNo">238</span> if (LOG.isTraceEnabled()) {<a name="line.238"></a>
-<span class="sourceLineNo">239</span> LOG.trace("failed to resolve bind address: " + loc.getHostname() + ":"<a name="line.239"></a>
-<span class="sourceLineNo">240</span> + loc.getPort() + ", so use default writer");<a name="line.240"></a>
-<span class="sourceLineNo">241</span> }<a name="line.241"></a>
-<span class="sourceLineNo">242</span> wl = getNewWriter(family, conf, null);<a name="line.242"></a>
-<span class="sourceLineNo">243</span> } else {<a name="line.243"></a>
-<span class="sourceLineNo">244</span> if(LOG.isDebugEnabled()) {<a name="line.244"></a>
-<span class="sourceLineNo">245</span> LOG.debug("use favored nodes writer: " + initialIsa.getHostString());<a name="line.245"></a>
-<span class="sourceLineNo">246</span> }<a name="line.246"></a>
-<span class="sourceLineNo">247</span> wl = getNewWriter(family, conf, new InetSocketAddress[] { initialIsa });<a name="line.247"></a>
-<span class="sourceLineNo">248</span> }<a name="line.248"></a>
-<span class="sourceLineNo">249</span> }<a name="line.249"></a>
-<span class="sourceLineNo">250</span> } else {<a name="line.250"></a>
-<span class="sourceLineNo">251</span> wl = getNewWriter(family, conf, null);<a name="line.251"></a>
-<span class="sourceLineNo">252</span> }<a name="line.252"></a>
-<span class="sourceLineNo">253</span> }<a name="line.253"></a>
-<span class="sourceLineNo">254</span><a name="line.254"></a>
-<span class="sourceLineNo">255</span> // we now have the proper WAL writer. full steam ahead<a name="line.255"></a>
-<span class="sourceLineNo">256</span> kv.updateLatestStamp(this.now);<a name="line.256"></a>
-<span class="sourceLineNo">257</span> wl.writer.append(kv);<a name="line.257"></a>
-<span class="sourceLineNo">258</span> wl.written += length;<a name="line.258"></a>
-<span class="sourceLineNo">259</span><a name="line.259"></a>
-<span class="sourceLineNo">260</span> // Copy the row so we know when a row transition.<a name="line.260"></a>
-<span class="sourceLineNo">261</span> this.previousRow = rowKey;<a name="line.261"></a>
-<span class="sourceLineNo">262</span> }<a name="line.262"></a>
-<span class="sourceLineNo">263</span><a name="line.263"></a>
-<span class="sourceLineNo">264</span> private void rollWriters() throws IOException {<a name="line.264"></a>
-<span class="sourceLineNo">265</span> for (WriterLength wl : this.writers.values()) {<a name="line.265"></a>
-<span class="sourceLineNo">266</span> if (wl.writer != null) {<a name="line.266"></a>
-<span class="sourceLineNo">267</span> LOG.info("Writer=" + wl.writer.getPath() +<a name="line.267"></a>
-<span class="sourceLineNo">268</span> ((wl.written == 0)? "": ", wrote=" + wl.written));<a name="line.268"></a>
-<span class="sourceLineNo">269</span> close(wl.writer);<a name="line.269"></a>
-<span class="sourceLineNo">270</span> }<a name="line.270"></a>
-<span class="sourceLineNo">271</span> wl.writer = null;<a name="line.271"></a>
-<span class="sourceLineNo">272</span> wl.written = 0;<a name="line.272"></a>
-<span class="sourceLineNo">273</span> }<a name="line.273"></a>
-<span class="sourceLineNo">274</span> this.rollRequested = false;<a name="line.274"></a>
-<span class="sourceLineNo">275</span> }<a name="line.275"></a>
-<span class="sourceLineNo">276</span><a name="line.276"></a>
-<span class="sourceLineNo">277</span> /* Create a new StoreFile.Writer.<a name="line.277"></a>
-<span class="sourceLineNo">278</span> * @param family<a name="line.278"></a>
-<span class="sourceLineNo">279</span> * @return A WriterLength, containing a new StoreFile.Writer.<a name="line.279"></a>
-<span class="sourceLineNo">280</span> * @throws IOException<a name="line.280"></a>
-<span class="sourceLineNo">281</span> */<a name="line.281"></a>
-<span class="sourceLineNo">282</span> @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="BX_UNBOXING_IMMEDIATELY_REBOXED",<a name="line.282"></a>
-<span class="sourceLineNo">283</span> justification="Not important")<a name="line.283"></a>
-<span class="sourceLineNo">284</span> private WriterLength getNewWriter(byte[] family, Configuration conf,<a name="line.284"></a>
-<span class="sourceLineNo">285</span> InetSocketAddress[] favoredNodes) throws IOException {<a name="line.285"></a>
-<span class="sourceLineNo">286</span> WriterLength wl = new WriterLength();<a name="line.286"></a>
-<span class="sourceLineNo">287</span> Path familydir = new Path(outputdir, Bytes.toString(family));<a name="line.287"></a>
-<span class="sourceLineNo">288</span> Algorithm compression = compressionMap.get(family);<a name="line.288"></a>
-<span class="sourceLineNo">289</span> compression = compression == null ? defaultCompression : compression;<a name="line.289"></a>
-<span class="sourceLineNo">290</span> BloomType bloomType = bloomTypeMap.get(family);<a name="line.290"></a>
-<span class="sourceLineNo">291</span> bloomType = bloomType == null ? BloomType.NONE : bloomType;<a name="line.291"></a>
-<span class="sourceLineNo">292</span> Integer blockSize = blockSizeMap.get(family);<a name="line.292"></a>
-<span class="sourceLineNo">293</span> blockSize = blockSize == null ? HConstants.DEFAULT_BLOCKSIZE : blockSize;<a name="line.293"></a>
-<span class="sourceLineNo">294</span> DataBlockEncoding encoding = overriddenEncoding;<a name="line.294"></a>
-<span class="sourceLineNo">295</span> encoding = encoding == null ? datablockEncodingMap.get(family) : encoding;<a name="line.295"></a>
-<span class="sourceLineNo">296</span> encoding = encoding == null ? DataBlockEncoding.NONE : encoding;<a name="line.296"></a>
-<span class="sourceLineNo">297</span> Configuration tempConf = new Configuration(conf);<a name="line.297"></a>
-<span class="sourceLineNo">298</span> tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);<a name="line.298"></a>
-<span class="sourceLineNo">299</span> HFileContextBuilder contextBuilder = new HFileContextBuilder()<a name="line.299"></a>
-<span class="sourceLineNo">300</span> .withCompression(compression)<a name="line.300"></a>
-<span class="sourceLineNo">301</span> .withChecksumType(HStore.getChecksumType(conf))<a name="line.301"></a>
-<span class="sourceLineNo">302</span> .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))<a name="line.302"></a>
-<span class="sourceLineNo">303</span> .withBlockSize(blockSize);<a name="line.303"></a>
-<span class="sourceLineNo">304</span><a name="line.304"></a>
-<span class="sourceLineNo">305</span> if (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS) {<a name="line.305"></a>
-<span class="sourceLineNo">306</span> contextBuilder.withIncludesTags(true);<a name="line.306"></a>
+<span class="sourceLineNo">187</span> // These configs. are from hbase-*.xml<a name="line.187"></a>
+<span class="sourceLineNo">188</span> maxsize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE);<a name="line.188"></a>
+<span class="sourceLineNo">189</span><a name="line.189"></a>
+<span class="sourceLineNo">190</span> // Invented config. Add to hbase-*.xml if other than default compression.<a name="line.190"></a>
+<span class="sourceLineNo">191</span> String defaultCompressionStr = conf.get("hfile.compression", Compression.Algorithm.NONE.getName());<a name="line.191"></a>
+<span class="sourceLineNo">192</span> defaultCompression = HFileWriterImpl.compressionByName(defaultCompressionStr);<a name="line.192"></a>
+<span class="sourceLineNo">193</span> compactionExclude =<a name="line.193"></a>
+<span class="sourceLineNo">194</span> conf.getBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", false);<a name="line.194"></a>
+<span class="sourceLineNo">195</span><a name="line.195"></a>
+<span class="sourceLineNo">196</span> // create a map from column family to the compression algorithm<a name="line.196"></a>
+<span class="sourceLineNo">197</span> compressionMap = createFamilyCompressionMap(conf);<a name="line.197"></a>
+<span class="sourceLineNo">198</span> bloomTypeMap = createFamilyBloomTypeMap(conf);<a name="line.198"></a>
+<span class="sourceLineNo">199</span> blockSizeMap = createFamilyBlockSizeMap(conf);<a name="line.199"></a>
+<span class="sourceLineNo">200</span><a name="line.200"></a>
+<span class="sourceLineNo">201</span> // Config for data block encoding<a name="line.201"></a>
+<span class="sourceLineNo">202</span> String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_OVERRIDE_CONF_KEY);<a name="line.202"></a>
+<span class="sourceLineNo">203</span> datablockEncodingMap = createFamilyDataBlockEncodingMap(conf);<a name="line.203"></a>
+<span class="sourceLineNo">204</span> if (dataBlockEncodingStr != null) {<a name="line.204"></a>
+<span class="sourceLineNo">205</span> overriddenEncoding = DataBlockEncoding.valueOf(dataBlockEncodingStr);<a name="line.205"></a>
+<span class="sourceLineNo">206</span> } else {<a name="line.206"></a>
+<span class="sourceLineNo">207</span> overriddenEncoding = null;<a name="line.207"></a>
+<span class="sourceLineNo">208</span> }<a name="line.208"></a>
+<span class="sourceLineNo">209</span><a name="line.209"></a>
+<span class="sourceLineNo">210</span> writers = new TreeMap<byte[], WriterLength>(Bytes.BYTES_COMPARATOR);<a name="line.210"></a>
+<span class="sourceLineNo">211</span> previousRow = HConstants.EMPTY_BYTE_ARRAY;<a name="line.211"></a>
+<span class="sourceLineNo">212</span> now = Bytes.toBytes(EnvironmentEdgeManager.currentTime());<a name="line.212"></a>
+<span class="sourceLineNo">213</span> rollRequested = false;<a name="line.213"></a>
+<span class="sourceLineNo">214</span> }<a name="line.214"></a>
+<span class="sourceLineNo">215</span><a name="line.215"></a>
+<span class="sourceLineNo">216</span> @Override<a name="line.216"></a>
+<span class="sourceLineNo">217</span> public void write(ImmutableBytesWritable row, V cell) throws IOException {<a name="line.217"></a>
+<span class="sourceLineNo">218</span> KeyValue kv = KeyValueUtil.ensureKeyValue(cell);<a name="line.218"></a>
+<span class="sourceLineNo">219</span><a name="line.219"></a>
+<span class="sourceLineNo">220</span> // null input == user explicitly wants to flush<a name="line.220"></a>
+<span class="sourceLineNo">221</span> if (row == null && kv == null) {<a name="line.221"></a>
+<span class="sourceLineNo">222</span> rollWriters();<a name="line.222"></a>
+<span class="sourceLineNo">223</span> return;<a name="line.223"></a>
+<span class="sourceLineNo">224</span> }<a name="line.224"></a>
+<span class="sourceLineNo">225</span><a name="line.225"></a>
+<span class="sourceLineNo">226</span> byte[] rowKey = CellUtil.cloneRow(kv);<a name="line.226"></a>
+<span class="sourceLineNo">227</span> long length = kv.getLength();<a name="line.227"></a>
+<span class="sourceLineNo">228</span> byte[] family = CellUtil.cloneFamily(kv);<a name="line.228"></a>
+<span class="sourceLineNo">229</span> WriterLength wl = this.writers.get(family);<a name="line.229"></a>
+<span class="sourceLineNo">230</span><a name="line.230"></a>
+<span class="sourceLineNo">231</span> // If this is a new column family, verify that the directory exists<a name="line.231"></a>
+<span class="sourceLineNo">232</span> if (wl == null) {<a name="line.232"></a>
+<span class="sourceLineNo">233</span> fs.mkdirs(new Path(outputDir, Bytes.toString(family)));<a name="line.233"></a>
+<span class="sourceLineNo">234</span> }<a name="line.234"></a>
+<span class="sourceLineNo">235</span><a name="line.235"></a>
+<span class="sourceLineNo">236</span> // If any of the HFiles for the column families has reached<a name="line.236"></a>
+<span class="sourceLineNo">237</span> // maxsize, we need to roll all the writers<a name="line.237"></a>
+<span class="sourceLineNo">238</span> if (wl != null && wl.written + length >= maxsize) {<a name="line.238"></a>
+<span class="sourceLineNo">239</span> this.rollRequested = true;<a name="line.239"></a>
+<span class="sourceLineNo">240</span> }<a name="line.240"></a>
+<span class="sourceLineNo">241</span><a name="line.241"></a>
+<span class="sourceLineNo">242</span> // This can only happen once a row is finished though<a name="line.242"></a>
+<span class="sourceLineNo">243</span> if (rollRequested && Bytes.compareTo(this.previousRow, rowKey) != 0) {<a name="line.243"></a>
+<span class="sourceLineNo">244</span> rollWriters();<a name="line.244"></a>
+<span class="sourceLineNo">245</span> }<a name="line.245"></a>
+<span class="sourceLineNo">246</span><a name="line.246"></a>
+<span class="sourceLineNo">247</span> // create a new WAL writer, if necessary<a name="line.247"></a>
+<span class="sourceLineNo">248</span> if (wl == null || wl.writer == null) {<a name="line.248"></a>
+<span class="sourceLineNo">249</span> if (conf.getBoolean(LOCALITY_SENSITIVE_CONF_KEY, DEFAULT_LOCALITY_SENSITIVE)) {<a name="line.249"></a>
+<span class="sourceLineNo">250</span> HRegionLocation loc = null;<a name="line.250"></a>
+<span class="sourceLineNo">251</span> String tableName = conf.get(OUTPUT_TABLE_NAME_CONF_KEY);<a name="line.251"></a>
+<span class="sourceLineNo">252</span> if (tableName != null) {<a name="line.252"></a>
+<span class="sourceLineNo">253</span> try (Connection connection = ConnectionFactory.createConnection(conf);<a name="line.253"></a>
+<span class="sourceLineNo">254</span> RegionLocator locator = connection.getRegionLocator(TableName.valueOf(tableName))) {<a name="line.254"></a>
+<span class="sourceLineNo">255</span> loc = locator.getRegionLocation(rowKey);<a name="line.255"></a>
+<span class="sourceLineNo">256</span> } catch (Throwable e) {<a name="line.256"></a>
+<span class="sourceLineNo">257</span> LOG.warn("there's something wrong when locating rowkey: " + Bytes.toString(rowKey),<a name="line.257"></a>
+<span class="sourceLineNo">258</span> e);<a name="line.258"></a>
+<span class="sourceLineNo">259</span> loc = null;<a name="line.259"></a>
+<span class="sourceLineNo">260</span> }<a name="line.260"></a>
+<span class="sourceLineNo">261</span> }<a name="line.261"></a>
+<span class="sourceLineNo">262</span><a name="line.262"></a>
+<span class="sourceLineNo">263</span> if (null == loc) {<a name="line.263"></a>
+<span class="sourceLineNo">264</span> if (LOG.isTraceEnabled()) {<a name="line.264"></a>
+<span class="sourceLineNo">265</span> LOG.trace(<a name="line.265"></a>
+<span class="sourceLineNo">266</span> "failed to get region location, so use default writer: " + Bytes.toString(rowKey));<a name="line.266"></a>
+<span class="sourceLineNo">267</span> }<a name="line.267"></a>
+<span class="sourceLineNo">268</span> wl = getNewWriter(family, conf, null);<a name="line.268"></a>
+<span class="sourceLineNo">269</span> } else {<a name="line.269"></a>
+<span class="sourceLineNo">270</span> if (LOG.isDebugEnabled()) {<a name="line.270"></a>
+<span class="sourceLineNo">271</span> LOG.debug("first rowkey: [" + Bytes.toString(rowKey) + "]");<a name="line.271"></a>
+<span class="sourceLineNo">272</span> }<a name="line.272"></a>
+<span class="sourceLineNo">273</span> InetSocketAddress initialIsa = new InetSocketAddress(loc.getHostname(), loc.getPort());<a name="line.273"></a>
+<span class="sourceLineNo">274</span> if (initialIsa.isUnresolved()) {<a name="line.274"></a>
+<span class="sourceLineNo">275</span> if (LOG.isTraceEnabled()) {<a name="line.275"></a>
+<span class="sourceLineNo">276</span> LOG.trace("failed to resolve bind address: " + loc.getHostname() + ":"<a name="line.276"></a>
+<span class="sourceLineNo">277</span> + loc.getPort() + ", so use default writer");<a name="line.277"></a>
+<span class="sourceLineNo">278</span> }<a name="line.278"></a>
+<span class="sourceLineNo">279</span> wl = getNewWriter(family, conf, null);<a name="line.279"></a>
+<span class="sourceLineNo">280</span> } else {<a name="line.280"></a>
+<span class="sourceLineNo">281</span> if (LOG.isDebugEnabled()) {<a name="line.281"></a>
+<span class="sourceLineNo">282</span> LOG.debug("use favored nodes writer: " + initialIsa.getHostString());<a name="line.282"></a>
+<span class="sourceLineNo">283</span> }<a name="line.283"></a>
+<span class="sourceLineNo">284</span> wl = getNewWriter(family, conf, new InetSocketAddress[] { initialIsa });<a name="line.284"></a>
+<span class="sourceLineNo">285</span> }<a name="line.285"></a>
+<span class="sourceLineNo">286</span> }<a name="line.286"></a>
+<span class="sourceLineNo">287</span> } else {<a name="line.287"></a>
+<span class="sourceLineNo">288</span> wl = getNewWriter(family, conf, null);<a name="line.288"></a>
+<span class="sourceLineNo">289</span> }<a name="line.289"></a>
+<span class="sourceLineNo">290</span> }<a name="line.290"></a>
+<span class="sourceLineNo">291</span><a name="line.291"></a>
+<span class="sourceLineNo">292</span> // we now have the proper WAL writer. full steam ahead<a name="line.292"></a>
+<span class="sourceLineNo">293</span> kv.updateLatestStamp(this.now);<a name="line.293"></a>
+<span class="sourceLineNo">294</span> wl.writer.append(kv);<a name="line.294"></a>
+<span class="sourceLineNo">295</span> wl.written += length;<a name="line.295"></a>
+<span class="sourceLineNo">296</span><a name="line.296"></a>
+<span class="sourceLineNo">297</span> // Copy the row so we know when a row transition.<a name="line.297"></a>
+<span class="sourceLineNo">298</span> this.previousRow = rowKey;<a name="line.298"></a>
+<span class="sourceLineNo">299</span> }<a name="line.299"></a>
+<span class="sourceLineNo">300</span><a name="line.300"></a>
+<span class="sourceLineNo">301</span> private void rollWriters() throws IOException {<a name="line.301"></a>
+<span class="sourceLineNo">302</span> for (WriterLength wl : this.writers.values()) {<a name="line.302"></a>
+<span class="sourceLineNo">303</span> if (wl.writer != null) {<a name="line.303"></a>
+<span class="sourceLineNo">304</span> LOG.info(<a name="line.304"></a>
+<span class="sourceLineNo">305</span> "Writer=" + wl.writer.getPath() + ((wl.written == 0) ? "" : ", wrote=" + wl.written));<a name="line.305"></a>
+<span class="sourceLineNo">306</span> close(wl.writer);<a name="line.306"></a>
<span class="sourceLineNo">307</span> }<a name="line.307"></a>
-<span class="sourceLineNo">308</span><a name="line.308"></a>
-<span class="sourceLineNo">309</span> contextBuilder.withDataBlockEncoding(encoding);<a name="line.309"></a>
-<span class="sourceLineNo">310</span> HFileContext hFileContext = contextBuilder.build();<a name="line.310"></a>
-<span class="sourceLineNo">311</span> <a name="line.311"></a>
-<span class="sourceLineNo">312</span> if (null == favoredNodes) {<a name="line.312"></a>
-<span class="sourceLineNo">313</span> wl.writer =<a name="line.313"></a>
-<span class="sourceLineNo">314</span> new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), fs)<a name="line.314"></a>
-<span class="sourceLineNo">315</span> .withOutputDir(familydir).withBloomType(bloomType)<a name="line.315"></a>
-<span class="sourceLineNo">316</span> .withComparator(CellComparator.COMPARATOR).withFileContext(hFileContext).build();<a name="line.316"></a>
-<span class="sourceLineNo">317</span> } else {<a name="line.317"></a>
-<span class="sourceLineNo">318</span> wl.writer =<a name="line.318"></a>
-<span class="sourceLineNo">319</span> new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), new HFileSystem(fs))<a name="line.319"></a>
-<span class="sourceLineNo">320</span> .withOutputDir(familydir).withBloomType(bloomType)<a name="line.320"></a>
-<span class="sourceLineNo">321</span> .withComparator(CellComparator.COMPARATOR).withFileContext(hFileContext)<a name="line.321"></a>
-<span class="sourceLineNo">322</span> .withFavoredNodes(favoredNodes).build();<a name="line.322"></a>
-<span class="sourceLineNo">323</span> }<a name="line.323"></a>
-<span class="sourceLineNo">324</span><a name="line.324"></a>
-<span class="sourceLineNo">325</span> this.writers.put(family, wl);<a name="line.325"></a>
-<span class="sourceLineNo">326</span> return wl;<a name="line.326"></a>
-<span class="sourceLineNo">327</span> }<a name="line.327"></a>
-<span class="sourceLineNo">328</span><a name="line.328"></a>
-<span class="sourceLineNo">329</span> private void close(final StoreFileWriter w) throws IOException {<a name="line.329"></a>
-<span class="sourceLineNo">330</span> if (w != null) {<a name="line.330"></a>
-<span class="sourceLineNo">331</span> w.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY,<a name="line.331"></a>
-<span class="sourceLineNo">332</span> Bytes.toBytes(System.currentTimeMillis()));<a name="line.332"></a>
-<span class="sourceLineNo">333</span> w.appendFileInfo(StoreFile.BULKLOAD_TASK_KEY,<a name="line.333"></a>
-<span class="sourceLineNo">334</span> Bytes.toBytes(context.getTaskAttemptID().toString()));<a name="line.334"></a>
-<span class="sourceLineNo">335</span> w.appendFileInfo(StoreFile.MAJOR_COMPACTION_KEY,<a name="line.335"></a>
-<span class="sourceLineNo">336</span> Bytes.toBytes(true));<a name="line.336"></a>
-<span class="sourceLineNo">337</span> w.appendFileInfo(StoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY,<a name="line.337"></a>
-<span class="sourceLineNo">338</span> Bytes.toBytes(compactionExclude));<a name="line.338"></a>
-<span class="sourceLineNo">339</span> w.appendTrackedTimestampsToMetadata();<a name="line.339"></a>
-<span class="sourceLineNo">340</span> w.close();<a name="line.340"></a>
-<span class="sourceLineNo">341</span> }<a name="line.341"></a>
-<span class="sourceLineNo">342</span> }<a name="line.342"></a>
-<span class="sourceLineNo">343</span><a name="line.343"></a>
-<span class="sourceLineNo">344</span> @Override<a name="line.344"></a>
-<span class="sourceLineNo">345</span> public void close(TaskAttemptContext c)<a name="line.345"></a>
-<span class="sourceLineNo">346</span> throws IOException, InterruptedException {<a name="line.346"></a>
-<span class="sourceLineNo">347</span> for (WriterLength wl: this.writers.values()) {<a name="line.347"></a>
-<span class="sourceLineNo">348</span> close(wl.writer);<a name="line.348"></a>
-<span class="sourceLineNo">349</span> }<a name="line.349"></a>
-<span class="sourceLineNo">350</span> }<a name="line.350"></a>
-<span class="sourceLineNo">351</span> };<a name="line.351"></a>
-<span class="sourceLineNo">352</span> }<a name="line.352"></a>
-<span class="sourceLineNo">353</span><a name="line.353"></a>
-<span class="sourceLineNo">354</span> /*<a name="line.354"></a>
-<span class="sourceLineNo">355</span> * Data structure to hold a Writer and amount of data written on it.<a name="line.355"></a>
-<span class="sourceLineNo">356</span> */<a name="line.356"></a>
-<span class="sourceLineNo">357</span> static class WriterLength {<a name="line.357"></a>
-<span class="sourceLineNo">358</span> long written = 0;<a name="line.358"></a>
-<span class="sourceLineNo">359</span> StoreFileWriter writer = null;<a name="line.359"></a>
-<span class="sourceLineNo">360</span> }<a name="line.360"></a>
-<span class="sourceLineNo">361</span><a name="line.361"></a>
-<span class="sourceLineNo">362</span> /**<a name="line.362"></a>
-<span class="sourceLineNo">363</span> * Return the start keys of all of the regions in this table,<a name="line.363"></a>
-<span class="sourceLineNo">364</span> * as a list of ImmutableBytesWritable.<a name="line.364"></a>
-<span class="sourceLineNo">365</span> */<a name="line.365"></a>
-<span class="sourceLineNo">366</span> private static List<ImmutableBytesWritable> getRegionStartKeys(RegionLocator table)<a name="line.366"></a>
-<span class="sourceLineNo">367</span> throws IOException {<a name="line.367"></a>
-<span class="sourceLineNo">368</span> byte[][] byteKeys = table.getStartKeys();<a name="line.368"></a>
-<span class="sourceLineNo">369</span> ArrayList<ImmutableBytesWritable> ret =<a name="line.369"></a>
-<span class="sourceLineNo">370</span> new ArrayList<ImmutableBytesWritable>(byteKeys.length);<a name="line.370"></a>
-<span class="sourceLineNo">371</span> for (byte[] byteKey : byteKeys) {<a name="line.371"></a>
-<span class="sourceLineNo">372</span> ret.add(new ImmutableBytesWritable(byteKey));<a name="line.372"></a>
-<span class="sourceLineNo">373</span> }<a name="line.373"></a>
-<span class="sourceLineNo">374</span> return ret;<a name="line.374"></a>
-<span class="sourceLineNo">375</span> }<a name="line.375"></a>
+<span class="sourceLineNo">308</span> wl.writer = null;<a name="line.308"></a>
+<span class="sourceLineNo">309</span> wl.written = 0;<a name="line.309"></a>
+<span class="sourceLineNo">310</span> }<a name="line.310"></a>
+<span class="sourceLineNo">311</span> this.rollRequested = false;<a name="line.311"></a>
+<span class="sourceLineNo">312</span> }<a name="line.312"></a>
+<span class="sourceLineNo">313</span><a name="line.313"></a>
+<span class="sourceLineNo">314</span> /*<a name="line.314"></a>
+<span class="sourceLineNo">315</span> * Create a new StoreFile.Writer.<a name="line.315"></a>
+<span class="sourceLineNo">316</span> * @param family<a name="line.316"></a>
+<span class="sourceLineNo">317</span> * @return A WriterLength, containing a new StoreFile.Writer.<a name="line.317"></a>
+<span class="sourceLineNo">318</span> * @throws IOException<a name="line.318"></a>
+<span class="sourceLineNo">319</span> */<a name="line.319"></a>
+<span class="sourceLineNo">320</span> @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "BX_UNBOXING_IMMEDIATELY_REBOXED",<a name="line.320"></a>
+<span class="sourceLineNo">321</span> justification = "Not important")<a name="line.321"></a>
+<span class="sourceLineNo">322</span> private WriterLength getNewWriter(byte[] family, Configuration conf,<a name="line.322"></a>
+<span class="sourceLineNo">323</span> InetSocketAddress[] favoredNodes) throws IOException {<a name="line.323"></a>
+<span class="sourceLineNo">324</span> WriterLength wl = new WriterLength();<a name="line.324"></a>
+<span class="sourceLineNo">325</span> Path familyDir = new Path(outputDir, Bytes.toString(family));<a name="line.325"></a>
+<span class="sourceLineNo">326</span> Algorithm compression = compressionMap.get(family);<a name="line.326"></a>
+<span class="sourceLineNo">327</span> compression = compression == null ? defaultCompression : compression;<a name="line.327"></a>
+<span class="sourceLineNo">328</span> BloomType bloomType = bloomTypeMap.get(family);<a name="line.328"></a>
+<span class="sourceLineNo">329</span> bloomType = bloomType == null ? BloomType.NONE : bloomType;<a name="line.329"></a>
+<span class="sourceLineNo">330</span> Integer blockSize = blockSizeMap.get(family);<a name="line.330"></a>
+<span class="sourceLineNo">331</span> blockSize = blockSize == null ? HConstants.DEFAULT_BLOCKSIZE : blockSize;<a name="line.331"></a>
+<span class="sourceLineNo">332</span> DataBlockEncoding encoding = overriddenEncoding;<a name="line.332"></a>
+<span class="sourceLineNo">333</span> encoding = encoding == null ? datablockEncodingMap.get(family) : encoding;<a name="line.333"></a>
+<span class="sourceLineNo">334</span> encoding = encoding == null ? DataBlockEncoding.NONE : encoding;<a name="line.334"></a>
+<span class="sourceLineNo">335</span> Configuration tempConf = new Configuration(conf);<a name="line.335"></a>
+<span class="sourceLineNo">336</span> tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);<a name="line.336"></a>
+<span class="sourceLineNo">337</span> HFileContextBuilder contextBuilder = new HFileContextBuilder().withCompression(compression)<a name="line.337"></a>
+<span class="sourceLineNo">338</span> .withChecksumType(HStore.getChecksumType(conf))<a name="line.338"></a>
+<span class="sourceLineNo">339</span> .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf)).withBlockSize(blockSize);<a name="line.339"></a>
+<span class="sourceLineNo">340</span><a name="line.340"></a>
+<span class="sourceLineNo">341</span> if (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS) {<a name="line.341"></a>
+<span class="sourceLineNo">342</span> contextBuilder.withIncludesTags(true);<a name="line.342"></a>
+<span class="sourceLineNo">343</span> }<a name="line.343"></a>
+<span class="sourceLineNo">344</span><a name="line.344"></a>
+<span class="sourceLineNo">345</span> contextBuilder.withDataBlockEncoding(encoding);<a name="line.345"></a>
+<span class="sourceLineNo">346</span> HFileContext hFileContext = contextBuilder.build();<a name="line.346"></a>
+<span class="sourceLineNo">347</span><a name="line.347"></a>
+<span class="sourceLineNo">348</span> if (null == favoredNodes) {<a name="line.348"></a>
+<span class="sourceLineNo">349</span> wl.writer = new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), fs)<a name="line.349"></a>
+<span class="sourceLineNo">350</span> .withOutputDir(familyDir).withBloomType(bloomType)<a name="line.350"></a>
+<span class="sourceLineNo">351</span> .withComparator(CellComparator.COMPARATOR).withFileContext(hFileContext).build();<a name="line.351"></a>
+<span class="sourceLineNo">352</span> } else {<a name="line.352"></a>
+<span class="sourceLineNo">353</span> wl.writer =<a name="line.353"></a>
+<span class="sourceLineNo">354</span> new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), new HFileSystem(fs))<a name="line.354"></a>
+<span class="sourceLineNo">355</span> .withOutputDir(familyDir).withBloomType(bloomType)<a name="line.355"></a>
+<span class="sourceLineNo">356</span> .withComparator(CellComparator.COMPARATOR).withFileContext(hFileContext)<a name="line.356"></a>
+<span class="sourceLineNo">357</span> .withFavoredNodes(favoredNodes).build();<a name="line.357"></a>
+<span class="sourceLineNo">358</span> }<a name="line.358"></a>
+<span class="sourceLineNo">359</span><a name="line.359"></a>
+<span class="sourceLineNo">360</span> this.writers.put(family, wl);<a name="line.360"></a>
+<span class="sourceLineNo">361</span> return wl;<a name="line.361"></a>
+<span class="sourceLineNo">362</span> }<a name="line.362"></a>
+<span class="sourceLineNo">363</span><a name="line.363"></a>
+<span class="sourceLineNo">364</span> private void close(final StoreFileWriter w) throws IOException {<a name="line.364"></a>
+<span class="sourceLineNo">365</span> if (w != null) {<a name="line.365"></a>
+<span class="sourceLineNo">366</span> w.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(EnvironmentEdgeManager.currentTime()));<a name="line.366"></a>
+<span class="sourceLineNo">367</span> w.appendFileInfo(StoreFile.BULKLOAD_TASK_KEY,<a name="line.367"></a>
+<span class="sourceLineNo">368</span> Bytes.toBytes(context.getTaskAttemptID().toString()));<a name="line.368"></a>
+<span class="sourceLineNo">369</span> w.appendFileInfo(StoreFile.MAJOR_COMPACTION_KEY, Bytes.toBytes(true));<a name="line.369"></a>
+<span class="sourceLineNo">370</span> w.appendFileInfo(StoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY,<a name="line.370"></a>
+<span class="sourceLineNo">371</span> Bytes.toBytes(compactionExclude));<a name="line.371"></a>
+<span class="sourceLineNo">372</span> w.appendTrackedTimestampsToMetadata();<a name="line.372"></a>
+<span class="sourceLineNo">373</span> w.close();<a name="line.373"></a>
+<span class="sourceLineNo">374</span> }<a name="line.374"></a>
+<span class="sourceLineNo">375</span> }<a name="line.375"></a>
<span class="sourceLineNo">376</span><a name="line.376"></a>
-<span class="sourceLineNo">377</span> /**<a name="line.377"></a>
-<span class="sourceLineNo">378</span> * Write out a {@link SequenceFile} that can be read by<a name="line.378"></a>
-<span class="sourceLineNo">379</span> * {@link TotalOrderPartitioner} that contains the split points in startKeys.<a name="line.379"></a>
-<span class="sourceLineNo">380</span> */<a name="line.380"></a>
-<span class="sourceLineNo">381</span> @SuppressWarnings("deprecation")<a name="line.381"></a>
-<span class="sourceLineNo">382</span> private static void writePartitions(Configuration conf, Path partitionsPath,<a name="line.382"></a>
-<span class="sourceLineNo">383</span> List<ImmutableBytesWritable> startKeys) throws IOException {<a name="line.383"></a>
-<span class="sourceLineNo">384</span> LOG.info("Writing partition information to " + partitionsPath);<a name="line.384"></a>
-<span class="sourceLineNo">385</span> if (startKeys.isEmpty()) {<a name="line.385"></a>
-<span class="sourceLineNo">386</span> throw new IllegalArgumentException("No regions passed");<a name="line.386"></a>
-<span class="sourceLineNo">387</span> }<a name="line.387"></a>
-<span class="sourceLineNo">388</span><a name="line.388"></a>
-<span class="sourceLineNo">389</span> // We're generating a list of split points, and we don't ever<a name="line.389"></a>
-<span class="sourceLineNo">390</span> // have keys < the first region (which has an empty start key)<a name="line.390"></a>
-<span class="sourceLineNo">391</span> // so we need to remove it. Otherwise we would end up with an<a name="line.391"></a>
-<span class="sourceLineNo">392</span> // empty reducer with index 0<a name="line.392"></a>
-<span class="sourceLineNo">393</span> TreeSet<ImmutableBytesWritable> sorted =<a name="line.393"></a>
-<span class="sourceLineNo">394</span> new TreeSet<ImmutableBytesWritable>(startKeys);<a name="line.394"></a>
-<span class="sourceLineNo">395</span><a name="line.395"></a>
-<span class="sourceLineNo">396</span> ImmutableBytesWritable first = sorted.first();<a name="line.396"></a>
-<span class="sourceLineNo">397</span> if (!first.equals(HConstants.EMPTY_BYTE_ARRAY)) {<a name="line.397"></a>
-<span class="sourceLineNo">398</span> throw new IllegalArgumentException(<a name="line.398"></a>
-<span class="sourceLineNo">399</span> "First region of table should have empty start key. Instead has: "<a name="line.399"></a>
-<span class="sourceLineNo">400</span> + Bytes.toStringBinary(first.get()));<a name="line.400"></a>
-<span class="sourceLineNo">401</span> }<a name="line.401"></a>
-<span class="sourceLineNo">402</span> sorted.remove(first);<a name="line.402"></a>
-<span class="sourceLineNo">403</span><a name="line.403"></a>
-<span class="sourceLineNo">404</span> // Write the actual file<a name="line.404"></a>
-<span class="sourceLineNo">405</span> FileSystem fs = partitionsPath.getFileSystem(conf);<a name="line.405"></a>
-<span class="sourceLineNo">406</span> SequenceFile.Writer writer = SequenceFile.createWriter(<a name="line.406"></a>
-<span class="sourceLineNo">407</span> fs, conf, partitionsPath, ImmutableBytesWritable.class,<a name="line.407"></a>
-<span class="sourceLineNo">408</span> NullWritable.class);<a name="line.408"></a>
-<span class="sourceLineNo">409</span><a name="line.409"></a>
-<span class="sourceLineNo">410</span> try {<a name="line.410"></a>
-<span class="sourceLineNo">411</span> for (ImmutableBytesWritable startKey : sorted) {<a name="line.411"></a>
-<span class="sourceLineNo">412</span> writer.append(startKey, NullWritable.get());<a name="line.412"></a>
-<span class="sourceLineNo">413</span> }<a name="line.413"></a>
-<span class="sourceLineNo">414</span> } finally {<a name="line.414"></a>
-<span class="sourceLineNo">415</span> writer.close();<a name="line.415"></a>
-<span class="sourceLineNo">416</span> }<a name="line.416"></a>
-<span class="sourceLineNo">417</span> }<a name="line.417"></a>
-<span class="sourceLineNo">418</span><a name="line.418"></a>
-<span class="sourceLineNo">419</span> /**<a name="line.419"></a>
-<span class="sourceLineNo">420</span> * Configure a MapReduce Job to perform an incremental load into the given<a name="line.420"></a>
-<span class="sourceLineNo">421</span> * table. This<a name="line.421"></a>
-<span class="sourceLineNo">422</span> * <ul><a name="line.422"></a>
-<span class="sourceLineNo">423</span> * <li>Inspects the table to configure a total order partitioner</li><a name="line.423"></a>
-<span class="sourceLineNo">424</span> * <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li><a name="line.424"></a>
-<span class="sourceLineNo">425</span> * <li>Sets the number of reduce tasks to match the current number of regions</li><a name="line.425"></a>
-<span class="sourceLineNo">426</span> * <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li><a name="line.426"></a>
-<span class="sourceLineNo">427</span> * <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or<a name="line.427"></a>
-<span class="sourceLineNo">428</span> * PutSortReducer)</li><a name="line.428"></a>
-<span class="sourceLineNo">429</span> * </ul><a name="line.429"></a>
-<span class="sourceLineNo">430</span> * The user should be sure to set the map output value class to either KeyValue or Put before<a name="line.430"></a>
-<span class="sourceLineNo">431</span> * running this function.<a name="line.431"></a>
-<span class="sourceLineNo">432</span> */<a name="line.432"></a>
-<span class="sourceLineNo">433</span> public static void configureIncrementalLoad(Job job, Table table, RegionLocator regionLocator)<a name="line.433"></a>
-<span class="sourceLineNo">434</span> throws IOException {<a name="line.434"></a>
-<span class="sourceLineNo">435</span> configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator);<a name="line.435"></a>
-<span class="sourceLineNo">436</span> }<a name="line.436"></a>
-<span class="sourceLineNo">437</span><a name="line.437"></a>
-<span class="sourceLineNo">438</span> /**<a name="line.438"></a>
-<span class="sourceLineNo">439</span> * Configure a MapReduce Job to perform an incremental load into the given<a name="line.439"></a>
-<span class="sourceLineNo">440</span> * table. This<a name="line.440"></a>
-<span class="sourceLineNo">441</span> * <ul><a name="line.441"></a>
-<span class="sourceLineNo">442</span> * <li>Inspects the table to configure a total order partitioner</li><a name="line.442"></a>
-<span class="sourceLineNo">443</span> * <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li><a name="line.443"></a>
-<span class="sourceLineNo">444</span> * <li>Sets the number of reduce tasks to match the current number of regions</li><a name="line.444"></a>
-<span class="sourceLineNo">445</span> * <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li><a name="line.445"></a>
-<span class="sourceLineNo">446</span> * <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or<a name="line.446"></a>
-<span class="sourceLineNo">447</span> * PutSortReducer)</li><a name="line.447"></a>
-<span class="sourceLineNo">448</span> * </ul><a name="line.448"></a>
-<span class="sourceLineNo">449</span> * The user should be sure to set the map output value class to either KeyValue or Put before<a name="line.449"></a>
-<span class="sourceLineNo">450</span> * running this function.<a name="line.450"></a>
-<span class="sourceLineNo">451</span> */<a name="line.451"></a>
-<span class="sourceLineNo">452</span> public static void configureIncrementalLoad(Job job, HTableDescriptor tableDescriptor,<a name="line.452"></a>
-<span class="sourceLineNo">453</span> RegionLocator regionLocator) throws IOException {<a name="line.453"></a>
-<span class="sourceLineNo">454</span> configureIncrementalLoad(job, tableDescriptor, regionLocator, HFileOutputFormat2.class);<a name="line.454"></a>
-<span class="sourceLineNo">455</span> }<a name="line.455"></a>
-<span class="sourceLineNo">456</span><a name="line.456"></a>
-<span class="sourceLineNo">457</span> static void configureIncrementalLoad(Job job, HTableDescriptor tableDescriptor,<a name="line.457"></a>
-<span class="sourceLineNo">458</span> RegionLocator regionLocator, Class<? extends OutputFormat<?, ?>> cls) throws IOException,<a name="line.458"></a>
-<span class="sourceLineNo">459</span> UnsupportedEncodingException {<a name="line.459"></a>
-<span class="sourceLineNo">460</span> Configuration conf = job.getConfiguration();<a name="line.460"></a>
-<span class="sourceLineNo">461</span> job.setOutputKeyClass(ImmutableBytesWritable.class);<a name="line.461"></a>
-<span class="sourceLineNo">462</span> job.setOutputValueClass(KeyValue.class);<a name="line.462"></a>
-<span class="sourceLineNo">463</span> job.setOutputFormatClass(cls);<a name="line.463"></a>
-<span class="sourceLineNo">464</span><a name="line.464"></a>
-<span class="sourceLineNo">465</span> // Based on the configured map output class, set the correct reducer to properly<a name="line.465"></a>
-<span class="sourceLineNo">466</span> // sort the incoming values.<a name="line.466"></a>
-<span class="sourceLineNo">467</span> // TODO it would be nice to pick one or the other of these formats.<a name="line.467"></a>
-<span class="sourceLineNo">468</span> if (KeyValue.class.equals(job.getMapOutputValueClass())) {<a name="line.468"></a>
-<span class="sourceLineNo">469</span> job.setReducerClass(KeyValueSortReducer.class);<a name="line.469"></a>
-<span class="sourceLineNo">470</span> } else if (Put.class.equals(job.getMapOutputValueClass())) {<a name="line.470"></a>
-<span class="sourceLineNo">471</span> job.setReducerClass(PutSortReducer.class);<a name="line.471"></a>
-<span class="sourceLineNo">472</span> } else if (Text.class.equals(job.getMapOutputValueClass())) {<a name="line.472"></a>
-<span class="sourceLineNo">473</span> job.setReducerClass(TextSortReducer.class);<a name="line.473"></a>
-<span class="sourceLineNo">474</span> } else {<a name="line.474"></a>
-<span class="sourceLineNo">475</span> LOG.warn("Unknown map output value type:" + job.getMapOutputValueClass());<a name="line.475"></a>
-<span class="sourceLineNo">476</span> }<a name="line.476"></a>
-<span class="sourceLineNo">477</span><a name="line.477"></a>
-<span class="sourceLineNo">478</span> conf.setStrings("io.serializations", conf.get("io.serializations"),<a name="line.478"></a>
-<span class="sourceLineNo">479</span> MutationSerialization.class.getName(), ResultSerialization.class.getName(),<a name="line.479"></a>
-<span class="sourceLineNo">480</span> KeyValueSerialization.class.getName());<a name="line.480"></a>
-<span class="sourceLineNo">481</span><a name="line.481"></a>
-<span class="sourceLineNo">482</span> if (conf.getBoolean(LOCALITY_SENSITIVE_CONF_KEY, DEFAULT_LOCALITY_SENSITIVE)) {<a name="line.482"></a>
-<span class="sourceLineNo">483</span> // record this table name for creating writer by favored nodes<a name="line.483"></a>
-<span class="sourceLineNo">484</span> LOG.info("bulkload locality sensitive enabled");<a name="line.484"></a>
-<span class="sourceLineNo">485</span> conf.set(OUTPUT_TABLE_NAME_CONF_KEY, regionLocator.getName().getNameAsString());<a name="line.485"></a>
-<span class="sourceLineNo">486</span> }<a name="line.486"></a>
+<span class="sourceLineNo">377</span> @Override<a name="line.377"></a>
+<span class="sourceLineNo">378</span> public void close(TaskAttemptContext c) throws IOException, InterruptedException {<a name="line.378"></a>
+<span class="sourceLineNo">379</span> for (WriterLength wl : this.writers.values()) {<a name="line.379"></a>
+<span class="sourceLineNo">380</span> close(wl.writer);<a name="line.380"></a>
+<span class="sourceLineNo">381</span> }<a name="line.381"></a>
+<span class="sourceLineNo">382</span> }<a name="line.382"></a>
+<span class="sourceLineNo">383</span> }<a name="line.383"></a>
+<span class="sourceLineNo">384</span><a name="line.384"></a>
+<span class="sourceLineNo">385</span> /*<a name="line.385"></a>
+<span class="sourceLineNo">386</span> * Data structure to hold a Writer and amount of data written on it.<a name="line.386"></a>
+<span class="sourceLineNo">387</span> */<a name="line.387"></a>
+<span class="sourceLineNo">388</span> static class WriterLength {<a name="line.388"></a>
+<span class="sourceLineNo">389</span> long written = 0;<a name="line.389"></a>
+<span class="sourceLineNo">390</span> StoreFileWriter writer = null;<a name="line.390"></a>
+<span class="sourceLineNo">391</span> }<a name="line.391"></a>
+<span class="sourceLineNo">392</span><a name="line.392"></a>
+<span class="sourceLineNo">393</span> /**<a name="line.393"></a>
+<span class="sourceLineNo">394</span> * Return the start keys of all of the regions in this table,<a name="line.394"></a>
+<span class="sourceLineNo">395</span> * as a list of ImmutableBytesWritable.<a name="line.395"></a>
+<span class="sourceLineNo">396</span> */<a name="line.396"></a>
+<span class="sourceLineNo">397</span> private static List<ImmutableBytesWritable> getRegionStartKeys(RegionLocator table)<a name="line.397"></a>
+<span class="sourceLineNo">398</span> throws IOException {<a name="line.398"></a>
+<span class="sourceLineNo">399</span> byte[][] byteKeys = table.getStartKeys();<a name="line.399"></a>
+<span class="sourceLineNo">400</span> ArrayList<ImmutableBytesWritable> ret =<a name="line.400"></a>
+<span class="sourceLineNo">401</span> new ArrayList<ImmutableBytesWritable>(byteKeys.length);<a name="line.401"></a>
+<span class="sourceLineNo">402</span> for (byte[] byteKey : byteKeys) {<a name="line.402"></a>
+<span class="sourceLineNo">403</span> ret.add(new ImmutableBytesWritable(byteKey));<a name="line.403"></a>
+<span class="sourceLineNo">404</span> }<a name="line.404"></a>
+<span class="sourceLineNo">405</span> return ret;<a name="line.405"></a>
+<span class="sourceLineNo">406</span> }<a name="line.406"></a>
+<span class="sourceLineNo">407</span><a name="line.407"></a>
+<span class="sourceLineNo">408</span> /**<a name="line.408"></a>
+<span class="sourceLineNo">409</span> * Write out a {@link SequenceFile} that can be read by<a name="line.409"></a>
+<span class="sourceLineNo">410</span> * {@link TotalOrderPartitioner} that contains the split points in startKeys.<a name="line.410"></a>
+<span class="sourceLineNo">411</span> */<a name="line.411"></a>
+<span class="sourceLineNo">412</span> @SuppressWarnings("deprecation")<a name="line.412"></a>
+<span class="sourceLineNo">413</span> private static void writePartitions(Configuration conf, Path partitionsPath,<a name="line.413"></a>
+<span class="sourceLineNo">414</span> List<ImmutableBytesWritable> startKeys) throws IOException {<a name="line.414"></a>
+<span class="sourceLineNo">415</span> LOG.info("Writing partition information to " + partitionsPath);<a name="line.415"></a>
+<span class="sourceLineNo">416</span> if (startKeys.isEmpty()) {<a name="line.416"></a>
+<span class="sourceLineNo">417</span> throw new IllegalArgumentException("No regions passed");<a name="line.417"></a>
+<span class="sourceLineNo">418</span> }<a name="line.418"></a>
+<span class="sourceLineNo">419</span><a name="line.419"></a>
+<span class="sourceLineNo">420</span> // We're generating a list of split points, and we don't ever<a name="line.420"></a>
+<span class="sourceLineNo">421</span> // have keys < the first region (which has an empty start key)<a name="line.421"></a>
+<span class="sourceLineNo">422</span> // so we need to remove it. Otherwise we would end up with an<a name="line.422"></a>
+<span class="sourceLineNo">423</span> // empty reducer with index 0<a name="line.423"></a>
+<span class="sourceLineNo">424</span> TreeSet<ImmutableBytesWritable> sorted =<a name="line.424"></a>
+<span class="sourceLineNo">425</span> new TreeSet<ImmutableBytesWritable>(startKeys);<a name="line.425"></a>
+<span class="sourceLineNo">426</span><a name="line.426"></a>
+<span class="sourceLineNo">427</span> ImmutableBytesWritable first = sorted.first();<a name="line.427"></a>
+<span class="sourceLineNo">428</span> if (!first.equals(HConstants.EMPTY_BYTE_ARRAY)) {<a name="line.428"></a>
+<span class="sourceLineNo">429</span> throw new IllegalArgumentException(<a name="line.429"></a>
+<span class="sourceLineNo">430</span> "First region of table should have empty start key. Instead has: "<a name="line.430"></a>
+<span class="sourceLineNo">431</span> + Bytes.toStringBinary(first.get()));<a name="line.431"></a>
+<span class="sourceLineNo">432</span> }<a name="line.432"></a>
+<span class="sourceLineNo">433</span> sorted.remove(first);<a name="line.433"></a>
+<span class="sourceLineNo">434</span><a name="line.434"></a>
+<span class="sourceLineNo">435</span> // Write the actual file<a name="line.435"></a>
+<span class="sourceLineNo">436</span> FileSystem fs = partitionsPath.getFileSystem(conf);<a name="line.436"></a>
+<span class="sourceLineNo">437</span> SequenceFile.Writer writer = SequenceFile.createWriter(<a name="line.437"></a>
+<span class="sourceLineNo">438</span> fs, conf, partitionsPath, ImmutableBytesWritable.class,<a name="line.438"></a>
+<span class="sourceLineNo">439</span> NullWritable.class);<a name="line.439"></a>
+<span class="sourceLineNo">440</span><a name="line.440"></a>
+<span class="sourceLineNo">441</span> try {<a name="line.441"></a>
+<span class="sourceLineNo">442</span> for (ImmutableBytesWritable startKey : sorted) {<a name="line.442"></a>
+<span class="sourceLineNo">443</span> writer.append(startKey, NullWritable.get());<a name="line.443"></a>
+<span class="sourceLineNo">444</span> }<a name="line.444"></a>
+<span class="sourceLineNo">445</span> } finally {<a name="line.445"></a>
+<span class="sourceLineNo">446</span> writer.close();<a name="line.446"></a>
+<span class="sourceLineNo">447</span> }<a name="line.447"></a>
+<span class="sourceLineNo">448</span> }<a name="line.448"></a>
+<span class="sourceLineNo">449</span><a name="line.449"></a>
+<span class="sourceLineNo">450</span> /**<a name="line.450"></a>
+<span class="sourceLineNo">451</span> * Configure a MapReduce Job to perform an incremental load into the given<a name="line.451"></a>
+<span class="sourceLineNo">452</span> * table. This<a name="line.452"></a>
+<span class="sourceLineNo">453</span> * <ul><a name="line.453"></a>
+<span class="sourceLineNo">454</span> * <li>Inspects the table to configure a total order partitioner</li><a name="line.454"></a>
+<span class="sourceLineNo">455</span> * <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li><a name="line.455"></a>
+<span class="sourceLineNo">456</span> * <li>Sets the number of reduce tasks to match the current number of regions</li><a name="line.456"></a>
+<span class="sourceLineNo">457</span> * <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li><a name="line.457"></a>
+<span class="sourceLineNo">458</span> * <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or<a name="line.458"></a>
+<span class="sourceLineNo">459</span> * PutSortReducer)</li><a name="line.459"></a>
+<span class="sourceLineNo">460</span> * </ul><a name="line.460"></a>
+<span class="sourceLineNo">461</span> * The user should be sure to set the map output value class to either KeyValue or Put before<a name="line.461"></a>
+<span class="sourceLineNo">462</span> * running this function.<a name="line.462"></a>
+<span class="sourceLineNo">463</span> */<a name="line.463"></a>
+<span class="sourceLineNo">464</span> public static void configureIncrementalLoad(Job job, Table table, RegionLocator regionLocator)<a name="line.464"></a>
+<span class="sourceLineNo">465</span> throws IOException {<a name="line.465"></a>
+<span class="sourceLineNo">466</span> configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator);<a name="line.466"></a>
+<span class="sourceLineNo">467</span> }<a name="line.467"></a>
+<span class="sourceLineNo">468</span><a name="line.468"></a>
+<span class="sourceLineNo">469</span> /**<a name="line.469"></a>
+<span class="sourceLineNo">470</span> * Configure a MapReduce Job to perform an incremental load into the given<a name="line.470"></a>
+<span class="sourceLineNo">471</span> * table. This<a name="line.471"></a>
+<span class="sourceLineNo">472</span> * <ul><a name="line.472"></a>
+<span class="sourceLineNo">473</span> * <li>Inspects the table to configure a total order partitioner</li><a name="line.473"></a>
+<span class="sourceLineNo">474</span> * <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li><a name="line.474"></a>
+<span class="sourceLineNo">475</span> * <li>Sets the number of reduce tasks to match the current number of regions</li><a name="line.475"></a>
+<span class="sourceLineNo">476</span> * <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li><a name="line.476"></a>
+<span class="sourceLineNo">477</span> * <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or<a name="line.477"></a>
+<span class="sourceLineNo">478</span> * PutSortReducer)</li><a name="line.478"></a>
+<span class="sourceLineNo">479</span> * </ul><a name="line.479"></a>
+<span class="sourceLineNo">480</span> * The user should be sure to set the map output value class to either KeyValue or Put before<a name="line.480"></a>
+<span class="sourceLineNo">481</span> * running this function.<a name="line.481"></a>
+<span class="sourceLineNo">482</span> */<a name="line.482"></a>
+<span class="sourceLineNo">483</span> public static void configureIncrementalLoad(Job job, HTableDescriptor tableDescriptor,<a name="line.483"></a>
+<span class="sourceLineNo">484</span> RegionLocator regionLocator) throws IOException {<a name="line.484"></a>
+<span class="sourceLineNo">485</span> configureIncrementalLoad(job, tableDescriptor, regionLocator, HFileOutputFormat2.class);<a name="line.485"></a>
+<span class="sourceLineNo">486</span> }<a name="line.486"></a>
<span class="sourceLineNo">487</span><a name="line.487"></a>
-<span class="sourceLineNo">488</span> // Use table's region boundaries for TOP split points.<a name="line.488"></a>
-<span class="sourceLineNo">489</span> LOG.info("Looking up current regions for table " + regionLocator.getName());<a name="line.489"></a>
-<span class="sourceLineNo">490</span> List<ImmutableBytesWritable> startKeys = getRegionStartKeys(regionLocator);<a name="line.490"></a>
-<span class="sourceLineNo">491</span> LOG.info("Configuring " + startKeys.size() + " reduce partitions " +<a name="line.491"></a>
-<span class="sourceLineNo">492</span> "to match current region count");<a name="line.492"></a>
-<span class="sourceLineNo">493</span> job.setNumReduceTasks(startKeys.size());<a name="line.493"></a>
-<span class="sourceLineNo">494</span><a name="line.494"></a>
-<span class="sourceLineNo">495</span> configurePartitioner(job, startKeys);<a name="line.495"></a>
-<span class="sourceLineNo">496</span> // Set compression algorithms based on column families<a name="line.496"></a>
-<span class="sourceLineNo">497</span> configureCompression(conf, tableDescriptor);<a name="line.497"></a>
-<span class="sourceLineNo">498</span> configureBloomType(tableDescriptor, conf);<a name="line.498"></a>
-<span class="sourceLineNo">499</span> configureBlockSize(tableDescriptor, conf);<a name="line.499"></a>
-<span class="sourceLineNo">500</span> configureDataBlockEncoding(tableDescriptor, conf);<a name="line.500"></a>
-<span class="sourceLineNo">501</span><a name="line.501"></a>
-<span class="sourceLineNo">502</span> TableMapReduceUtil.addDependencyJars(job);<a name="line.502"></a>
-<span class="sourceLineNo">503</span> TableMapReduceUtil.initCredentials(job);<a name="line.503"></a>
-<span class="sourceLineNo">504</span>
<TRUNCATED>