Posted to commits@parquet.apache.org by ju...@apache.org on 2016/10/26 16:09:55 UTC

[1/4] parquet-mr git commit: PARQUET-423: Replace old Log class with SLF4J Logging

Repository: parquet-mr
Updated Branches:
  refs/heads/master aa416b5e2 -> df9d8e415


http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-thrift/src/test/java/org/apache/parquet/hadoop/thrift/TestParquetToThriftReadWriteAndProjection.java
----------------------------------------------------------------------
diff --git a/parquet-thrift/src/test/java/org/apache/parquet/hadoop/thrift/TestParquetToThriftReadWriteAndProjection.java b/parquet-thrift/src/test/java/org/apache/parquet/hadoop/thrift/TestParquetToThriftReadWriteAndProjection.java
index 4869460..eaef499 100644
--- a/parquet-thrift/src/test/java/org/apache/parquet/hadoop/thrift/TestParquetToThriftReadWriteAndProjection.java
+++ b/parquet-thrift/src/test/java/org/apache/parquet/hadoop/thrift/TestParquetToThriftReadWriteAndProjection.java
@@ -38,10 +38,11 @@ import org.apache.thrift.protocol.TProtocol;
 import org.apache.thrift.protocol.TProtocolFactory;
 import org.apache.thrift.transport.TIOStreamTransport;
 import org.junit.Test;
-import org.apache.parquet.Log;
 import org.apache.parquet.hadoop.api.ReadSupport;
 import org.apache.parquet.hadoop.util.ContextUtil;
 import org.apache.parquet.thrift.test.*;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.ByteArrayOutputStream;
 import java.util.*;
@@ -50,7 +51,7 @@ import static org.junit.Assert.assertEquals;
 
 public class TestParquetToThriftReadWriteAndProjection {
 
-  private static final Log LOG = Log.getLog(TestParquetToThriftReadWriteAndProjection.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestParquetToThriftReadWriteAndProjection.class);
 
   @Test
   public void testThriftOptionalFieldsWithReadProjectionUsingParquetSchema() throws Exception {
@@ -374,7 +375,7 @@ public class TestParquetToThriftReadWriteAndProjection {
       reader.initialize(split, taskAttemptContext);
       if (reader.nextKeyValue()) {
         readValue = reader.getCurrentValue();
-        LOG.info(readValue);
+        LOG.info("{}", readValue);
       }
     }
     assertEquals(exptectedReadResult, readValue);
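
The pattern above recurs throughout this commit: SLF4J's Logger has no info(Object) overload, only String messages, so objects that were previously passed straight to the old org.apache.parquet.Log are now routed through a "{}" placeholder. The placeholder also defers toString() until the message is actually rendered. A minimal, self-contained sketch of the idiom (class and variable names are illustrative, not from the patch):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class PlaceholderSketch {
      private static final Logger LOG = LoggerFactory.getLogger(PlaceholderSketch.class);

      public static void main(String[] args) {
        Object readValue = java.util.Arrays.asList("a", "b", "c");
        // readValue.toString() runs only if INFO is enabled for this logger;
        // LOG.info(readValue) would not compile against org.slf4j.Logger.
        LOG.info("{}", readValue);
      }
    }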

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-thrift/src/test/java/org/apache/parquet/hadoop/thrift/TestThriftToParquetFileWriter.java
----------------------------------------------------------------------
diff --git a/parquet-thrift/src/test/java/org/apache/parquet/hadoop/thrift/TestThriftToParquetFileWriter.java b/parquet-thrift/src/test/java/org/apache/parquet/hadoop/thrift/TestThriftToParquetFileWriter.java
index 2407e61..0439686 100644
--- a/parquet-thrift/src/test/java/org/apache/parquet/hadoop/thrift/TestThriftToParquetFileWriter.java
+++ b/parquet-thrift/src/test/java/org/apache/parquet/hadoop/thrift/TestThriftToParquetFileWriter.java
@@ -50,7 +50,6 @@ import org.apache.thrift.protocol.TProtocolFactory;
 import org.apache.thrift.transport.TIOStreamTransport;
 import org.junit.Test;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.example.data.Group;
 import org.apache.parquet.hadoop.ParquetFileReader;
 import org.apache.parquet.hadoop.ParquetReader;
@@ -65,10 +64,11 @@ import com.twitter.elephantbird.thrift.test.TestListInMap;
 import com.twitter.elephantbird.thrift.test.TestMapInList;
 
 import org.apache.parquet.schema.MessageType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class TestThriftToParquetFileWriter {
-  private static final Log LOG = Log
-      .getLog(TestThriftToParquetFileWriter.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestThriftToParquetFileWriter.class);
 
   @Test
   public void testWriteFile() throws IOException, InterruptedException, TException {
@@ -270,7 +270,7 @@ public class TestThriftToParquetFileWriter {
 
   private <T extends TBase<?,?>> Path createFile(T... tObjs) throws IOException, InterruptedException, TException  {
     final Path fileToCreate = new Path("target/test/TestThriftToParquetFileWriter/"+tObjs[0].getClass()+".parquet");
-    LOG.info("File created: " + fileToCreate.toString());
+    LOG.info("File created: {}", fileToCreate.toString());
     Configuration conf = new Configuration();
     final FileSystem fs = fileToCreate.getFileSystem(conf);
     if (fs.exists(fileToCreate)) {
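
A nit on the LOG.info call in this hunk: with a parameterized message the explicit toString() is redundant, since SLF4J calls it lazily on whatever object it is given. Assuming fileToCreate is a Hadoop Path as in this test, the following is equivalent and defers the conversion entirely:

    LOG.info("File created: {}", fileToCreate);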

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-thrift/src/test/java/org/apache/parquet/thrift/TestParquetReadProtocol.java
----------------------------------------------------------------------
diff --git a/parquet-thrift/src/test/java/org/apache/parquet/thrift/TestParquetReadProtocol.java b/parquet-thrift/src/test/java/org/apache/parquet/thrift/TestParquetReadProtocol.java
index 97e0054..b713058 100644
--- a/parquet-thrift/src/test/java/org/apache/parquet/thrift/TestParquetReadProtocol.java
+++ b/parquet-thrift/src/test/java/org/apache/parquet/thrift/TestParquetReadProtocol.java
@@ -32,13 +32,14 @@ import java.util.Map;
 import java.util.Set;
 
 import org.apache.parquet.column.ParquetProperties;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import thrift.test.OneOfEach;
 
 import org.apache.thrift.TBase;
 import org.apache.thrift.TException;
 import org.junit.Test;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.column.impl.ColumnWriteStoreV1;
 import org.apache.parquet.column.page.mem.MemPageStore;
 import org.apache.parquet.io.ColumnIOFactory;
@@ -61,7 +62,7 @@ import com.twitter.elephantbird.thrift.test.TestPhoneType;
 import com.twitter.elephantbird.thrift.test.TestStructInMap;
 
 public class TestParquetReadProtocol {
-  private static final Log LOG = Log.getLog(TestParquetReadProtocol.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestParquetReadProtocol.class);
 
   @Test
   public void testList() throws TException {
@@ -146,7 +147,7 @@ public class TestParquetReadProtocol {
     final MemPageStore memPageStore = new MemPageStore(1);
     final ThriftSchemaConverter schemaConverter = new ThriftSchemaConverter();
     final MessageType schema = schemaConverter.convert(thriftClass);
-    LOG.info(schema);
+    LOG.info("{}", schema);
     final MessageColumnIO columnIO = new ColumnIOFactory(true).getColumnIO(schema);
     final ColumnWriteStoreV1 columns = new ColumnWriteStoreV1(memPageStore,
         ParquetProperties.builder()

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-thrift/src/test/java/org/apache/parquet/thrift/TestParquetWriteProtocol.java
----------------------------------------------------------------------
diff --git a/parquet-thrift/src/test/java/org/apache/parquet/thrift/TestParquetWriteProtocol.java b/parquet-thrift/src/test/java/org/apache/parquet/thrift/TestParquetWriteProtocol.java
index 320d3a3..df60766 100644
--- a/parquet-thrift/src/test/java/org/apache/parquet/thrift/TestParquetWriteProtocol.java
+++ b/parquet-thrift/src/test/java/org/apache/parquet/thrift/TestParquetWriteProtocol.java
@@ -31,6 +31,8 @@ import java.util.Set;
 import java.util.TreeMap;
 
 import org.junit.ComparisonFailure;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import thrift.test.OneOfEach;
 
 import org.apache.pig.data.Tuple;
@@ -39,7 +41,6 @@ import org.apache.thrift.TBase;
 import org.apache.thrift.TException;
 import org.junit.Test;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.io.ColumnIOFactory;
 import org.apache.parquet.io.ExpectationValidatingRecordConsumer;
 import org.apache.parquet.io.MessageColumnIO;
@@ -67,7 +68,7 @@ import com.twitter.elephantbird.thrift.test.TestStructInMap;
 
 
 public class TestParquetWriteProtocol {
-  private static final Log LOG = Log.getLog(TestParquetWriteProtocol.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestParquetWriteProtocol.class);
   @Test
   public void testMap() throws Exception {
     String[] expectations = {
@@ -523,7 +524,7 @@ public class TestParquetWriteProtocol {
 //      System.out.println(a);
     final Class<TBase<?,?>> class1 = (Class<TBase<?,?>>)a.getClass();
     final MessageType schema = thriftSchemaConverter.convert(class1);
-    LOG.info(schema);
+    LOG.info("{}", schema);
     final StructType structType = thriftSchemaConverter.toStructType(class1);
     ExpectationValidatingRecordConsumer recordConsumer = new ExpectationValidatingRecordConsumer(new ArrayDeque<String>(Arrays.asList(expectations)));
     final MessageColumnIO columnIO = new ColumnIOFactory().getColumnIO(schema);
@@ -535,14 +536,14 @@ public class TestParquetWriteProtocol {
     ThriftToPig<TBase<?,?>> thriftToPig = new ThriftToPig(a.getClass());
     ExpectationValidatingRecordConsumer recordConsumer = new ExpectationValidatingRecordConsumer(new ArrayDeque<String>(Arrays.asList(expectations)));
     Schema pigSchema = thriftToPig.toSchema();
-    LOG.info(pigSchema);
+    LOG.info("{}", pigSchema);
     MessageType schema = new PigSchemaConverter().convert(pigSchema);
-    LOG.info(schema);
+    LOG.info("{}", schema);
     TupleWriteSupport tupleWriteSupport = new TupleWriteSupport(pigSchema);
     tupleWriteSupport.init(null);
     tupleWriteSupport.prepareForWrite(recordConsumer);
     final Tuple pigTuple = thriftToPig.getPigTuple(a);
-    LOG.info(pigTuple);
+    LOG.info("{}", pigTuple);
     tupleWriteSupport.write(pigTuple);
     return schema;
   }

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-tools/src/main/java/org/apache/parquet/tools/command/MergeCommand.java
----------------------------------------------------------------------
diff --git a/parquet-tools/src/main/java/org/apache/parquet/tools/command/MergeCommand.java b/parquet-tools/src/main/java/org/apache/parquet/tools/command/MergeCommand.java
index e6d9747..73e9b44 100644
--- a/parquet-tools/src/main/java/org/apache/parquet/tools/command/MergeCommand.java
+++ b/parquet-tools/src/main/java/org/apache/parquet/tools/command/MergeCommand.java
@@ -24,10 +24,8 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.parquet.hadoop.util.HiddenFileFilter;
-import org.slf4j.Logger;
 import org.apache.parquet.hadoop.ParquetFileWriter;
 import org.apache.parquet.hadoop.metadata.FileMetaData;
-import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.ArrayList;

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 463d40c..d50c132 100644
--- a/pom.xml
+++ b/pom.xml
@@ -168,7 +168,6 @@
           <aggregate>true</aggregate>
           <instrumentation>
             <ignores>
-              <ignore>org.apache.parquet.Log.*</ignore>
               <ignore>java.lang.UnsupportedOperationException.*</ignore>
             </ignores>
             <excludes>


[2/4] parquet-mr git commit: PARQUET-423: Replace old Log class with SLF4J Logging

Posted by ju...@apache.org.
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetInputFormat.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetInputFormat.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetInputFormat.java
index 1fe57f9..7c5b5be 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetInputFormat.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetInputFormat.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 
 import org.apache.hadoop.mapreduce.lib.input.FileSplit;
-import org.apache.parquet.Log;
 import org.apache.parquet.Preconditions;
 import org.apache.parquet.filter.UnboundRecordFilter;
 import org.apache.parquet.filter2.compat.FilterCompat;
@@ -72,6 +71,8 @@ import org.apache.parquet.hadoop.util.SerializationUtil;
 import org.apache.parquet.io.ParquetDecodingException;
 import org.apache.parquet.schema.MessageType;
 import org.apache.parquet.schema.MessageTypeParser;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The input format to read a Parquet file.
@@ -93,7 +94,7 @@ import org.apache.parquet.schema.MessageTypeParser;
  */
 public class ParquetInputFormat<T> extends FileInputFormat<Void, T> {
 
-  private static final Log LOG = Log.getLog(ParquetInputFormat.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ParquetInputFormat.class);
 
   /**
    * key to configure the ReadSupport implementation
@@ -382,7 +383,7 @@ public class ParquetInputFormat<T> extends FileInputFormat<Void, T> {
         result.add(file);
       }
     }
-    LOG.info("Total input paths to process : " + result.size());
+    LOG.info("Total input paths to process : {}", result.size());
     return result;
   }
 
@@ -424,7 +425,7 @@ public class ParquetInputFormat<T> extends FileInputFormat<Void, T> {
       FileStatusWrapper statusWrapper = new FileStatusWrapper(status);
       FootersCacheValue cacheEntry =
               footersCache.getCurrentValue(statusWrapper);
-      if (Log.DEBUG) {
+      if (LOG.isDebugEnabled()) {
         LOG.debug("Cache entry " + (cacheEntry == null ? "not " : "")
                 + " found for '" + status.getPath() + "'");
       }
@@ -436,10 +437,8 @@ public class ParquetInputFormat<T> extends FileInputFormat<Void, T> {
         missingStatusesMap.put(status.getPath(), statusWrapper);
       }
     }
-    if (Log.DEBUG) {
-      LOG.debug("found " + footersMap.size() + " footers in cache and adding up "
-              + "to " + missingStatuses.size() + " missing footers to the cache");
-    }
+    LOG.debug("found {} footers in cache and adding up to {} missing footers to the cache",
+            footersMap.size(), missingStatuses.size());
 
     if (!missingStatuses.isEmpty()) {
       List<Footer> newFooters = getFooters(config, missingStatuses);
@@ -480,7 +479,7 @@ public class ParquetInputFormat<T> extends FileInputFormat<Void, T> {
    * @throws IOException
    */
   public List<Footer> getFooters(Configuration configuration, Collection<FileStatus> statuses) throws IOException {
-    if (Log.DEBUG) LOG.debug("reading " + statuses.size() + " files");
+    LOG.debug("reading {} files", statuses.size());
     boolean taskSideMetaData = isTaskSideMetaData(configuration);
     return ParquetFileReader.readAllFootersInParallelUsingSummaryFiles(configuration, statuses, taskSideMetaData);
   }
@@ -513,10 +512,9 @@ public class ParquetInputFormat<T> extends FileInputFormat<Void, T> {
     public boolean isCurrent(FileStatusWrapper key) {
       long currentModTime = key.getModificationTime();
       boolean isCurrent = modificationTime >= currentModTime;
-      if (Log.DEBUG && !isCurrent) {
-        LOG.debug("The cache value for '" + key + "' is not current: "
-                + "cached modification time=" + modificationTime + ", "
-                + "current modification time: " + currentModTime);
+      if (LOG.isDebugEnabled() && !isCurrent) {
+        LOG.debug("The cache value for '{}' is not current: cached modification time={}, current modification time: {}",
+                key, modificationTime, currentModTime);
       }
       return isCurrent;
     }
@@ -689,7 +687,7 @@ class ClientSideMetadataSplitStrategy {
     }
   }
 
-  private static final Log LOG = Log.getLog(ClientSideMetadataSplitStrategy.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ClientSideMetadataSplitStrategy.class);
 
   List<ParquetInputSplit> getSplits(Configuration configuration, List<Footer> footers,
       long maxSplitSize, long minSplitSize, ReadContext readContext)
@@ -702,7 +700,7 @@ class ClientSideMetadataSplitStrategy {
 
     for (Footer footer : footers) {
       final Path file = footer.getFile();
-      LOG.debug(file);
+      LOG.debug("{}", file);
       FileSystem fs = file.getFileSystem(configuration);
       FileStatus fileStatus = fs.getFileStatus(file);
       ParquetMetadata parquetMetaData = footer.getParquetMetadata();
@@ -733,7 +731,7 @@ class ClientSideMetadataSplitStrategy {
 
     if (rowGroupsDropped > 0 && totalRowGroups > 0) {
       int percentDropped = (int) ((((double) rowGroupsDropped) / totalRowGroups) * 100);
-      LOG.info("Dropping " + rowGroupsDropped + " row groups that do not pass filter predicate! (" + percentDropped + "%)");
+      LOG.info("Dropping {} row groups that do not pass filter predicate! ({}%)", rowGroupsDropped, percentDropped);
     } else {
       LOG.info("There were no row groups that could be dropped due to filter predicates");
     }
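
This file shows all three guard styles that coexist after the migration: a guard kept around legacy concatenation, guards dropped in favor of placeholders, and a guard kept around a parameterized call. The rule of thumb: the placeholder form alone already avoids building the message string when the level is disabled, so a guard only pays for itself when computing an argument is expensive in its own right. A short sketch under that assumption (method names are illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class GuardSketch {
      private static final Logger LOG = LoggerFactory.getLogger(GuardSketch.class);

      // Stands in for work that should only happen when DEBUG is on.
      static String expensiveSummary() {
        return "footers=3, splits=12";
      }

      public static void main(String[] args) {
        int count = 42;
        // Cheap argument: no guard needed; nothing is formatted when
        // DEBUG is disabled.
        LOG.debug("reading {} files", count);

        // Expensive argument: the guard prevents computing it at all.
        if (LOG.isDebugEnabled()) {
          LOG.debug("state: {}", expensiveSummary());
        }
      }
    }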

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetOutputCommitter.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetOutputCommitter.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetOutputCommitter.java
index 45455ef..facb978 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetOutputCommitter.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetOutputCommitter.java
@@ -29,12 +29,13 @@ import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.hadoop.ParquetOutputFormat.JobSummaryLevel;
 import org.apache.parquet.hadoop.util.ContextUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class ParquetOutputCommitter extends FileOutputCommitter {
-  private static final Log LOG = Log.getLog(ParquetOutputCommitter.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ParquetOutputCommitter.class);
 
   private final Path outputPath;
 

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetOutputFormat.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetOutputFormat.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetOutputFormat.java
index d05d41f..bd20360 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetOutputFormat.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetOutputFormat.java
@@ -18,7 +18,6 @@
  */
 package org.apache.parquet.hadoop;
 
-import static org.apache.parquet.Log.INFO;
 import static org.apache.parquet.Preconditions.checkNotNull;
 import static org.apache.parquet.hadoop.ParquetWriter.DEFAULT_BLOCK_SIZE;
 import static org.apache.parquet.hadoop.util.ContextUtil.getConfiguration;
@@ -35,7 +34,6 @@ import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.column.ParquetProperties;
 import org.apache.parquet.column.ParquetProperties.WriterVersion;
 import org.apache.parquet.hadoop.ParquetFileWriter.Mode;
@@ -44,6 +42,8 @@ import org.apache.parquet.hadoop.api.WriteSupport.WriteContext;
 import org.apache.parquet.hadoop.codec.CodecConfig;
 import org.apache.parquet.hadoop.metadata.CompressionCodecName;
 import org.apache.parquet.hadoop.util.ConfigurationUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * OutputFormat to write to a Parquet file
@@ -101,7 +101,7 @@ import org.apache.parquet.hadoop.util.ConfigurationUtil;
  * @param <T> the type of the materialized records
  */
 public class ParquetOutputFormat<T> extends FileOutputFormat<Void, T> {
-  private static final Log LOG = Log.getLog(ParquetOutputFormat.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ParquetOutputFormat.class);
 
   public static enum JobSummaryLevel {
     /**
@@ -373,17 +373,17 @@ public class ParquetOutputFormat<T> extends FileOutputFormat<Void, T> {
     int maxPaddingSize = getMaxPaddingSize(conf);
     boolean validating = getValidation(conf);
 
-    if (INFO) {
-      LOG.info("Parquet block size to " + blockSize);
-      LOG.info("Parquet page size to " + props.getPageSizeThreshold());
-      LOG.info("Parquet dictionary page size to " + props.getDictionaryPageSizeThreshold());
-      LOG.info("Dictionary is " + (props.isEnableDictionary() ? "on" : "off"));
-      LOG.info("Validation is " + (validating ? "on" : "off"));
-      LOG.info("Writer version is: " + props.getWriterVersion());
-      LOG.info("Maximum row group padding size is " + maxPaddingSize + " bytes");
-      LOG.info("Page size checking is: " + (props.estimateNextSizeCheck() ? "estimated" : "constant"));
-      LOG.info("Min row count for page size check is: " + props.getMinRowCountForPageSizeCheck());
-      LOG.info("Max row count for page size check is: " + props.getMaxRowCountForPageSizeCheck());
+    if (LOG.isInfoEnabled()) {
+      LOG.info("Parquet block size to {}", blockSize);
+      LOG.info("Parquet page size to {}", props.getPageSizeThreshold());
+      LOG.info("Parquet dictionary page size to {}", props.getDictionaryPageSizeThreshold());
+      LOG.info("Dictionary is {}", (props.isEnableDictionary() ? "on" : "off"));
+      LOG.info("Validation is {}", (validating ? "on" : "off"));
+      LOG.info("Writer version is: {}", props.getWriterVersion());
+      LOG.info("Maximum row group padding size is {} bytes", maxPaddingSize);
+      LOG.info("Page size checking is: {}", (props.estimateNextSizeCheck() ? "estimated" : "constant"));
+      LOG.info("Min row count for page size check is: {}", props.getMinRowCountForPageSizeCheck());
+      LOG.info("Max row count for page size check is: {}", props.getMaxRowCountForPageSizeCheck());
     }
 
     WriteContext init = writeSupport.init(conf);
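
The isInfoEnabled() block above is the SLF4J replacement for the old static Log.INFO flag. The parameterized calls inside are individually lazy about string construction, but the guard still skips the ten getter calls and ternaries in one branch when INFO is disabled, which is the usual justification for keeping a guard around a whole block of log statements.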

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetRecordReader.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetRecordReader.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetRecordReader.java
index eae3b4e..f2f656d 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetRecordReader.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetRecordReader.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.mapreduce.TaskInputOutputContext;
 
 import org.apache.hadoop.mapreduce.lib.input.FileSplit;
 import org.apache.parquet.CorruptDeltaByteArrays;
-import org.apache.parquet.Log;
 import org.apache.parquet.column.Encoding;
 import org.apache.parquet.filter.UnboundRecordFilter;
 import org.apache.parquet.filter2.compat.FilterCompat;
@@ -55,6 +54,8 @@ import org.apache.parquet.hadoop.metadata.FileMetaData;
 import org.apache.parquet.hadoop.util.ContextUtil;
 import org.apache.parquet.hadoop.util.counters.BenchmarkCounter;
 import org.apache.parquet.io.ParquetDecodingException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Reads the records from a block of a Parquet file
@@ -67,7 +68,7 @@ import org.apache.parquet.io.ParquetDecodingException;
  */
 public class ParquetRecordReader<T> extends RecordReader<Void, T> {
 
-  private static final Log LOG = Log.getLog(ParquetRecordReader.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ParquetRecordReader.class);
   private final InternalParquetRecordReader<T> internalReader;
 
   /**

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/UnmaterializableRecordCounter.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/UnmaterializableRecordCounter.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/UnmaterializableRecordCounter.java
index c4de8f3..4696319 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/UnmaterializableRecordCounter.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/UnmaterializableRecordCounter.java
@@ -20,9 +20,10 @@ package org.apache.parquet.hadoop;
 
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.parquet.Log;
 import org.apache.parquet.io.ParquetDecodingException;
 import org.apache.parquet.io.api.RecordMaterializer.RecordMaterializationException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 // Essentially taken from:
 // https://github.com/twitter/elephant-bird/blob/master/core/src/main/java/com/twitter/elephantbird/mapreduce/input/LzoRecordReader.java#L124
@@ -43,7 +44,7 @@ public class UnmaterializableRecordCounter {
   /* Tolerated percent bad records */
   public static final String BAD_RECORD_THRESHOLD_CONF_KEY = "parquet.read.bad.record.threshold";
 
-  private static final Log LOG = Log.getLog(UnmaterializableRecordCounter.class);
+  private static final Logger LOG = LoggerFactory.getLogger(UnmaterializableRecordCounter.class);
 
   private static final float DEFAULT_THRESHOLD =  0f;
 

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/codec/CodecConfig.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/codec/CodecConfig.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/codec/CodecConfig.java
index 9657865..e0907f9 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/codec/CodecConfig.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/codec/CodecConfig.java
@@ -22,13 +22,12 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-import org.apache.parquet.Log;
 import org.apache.parquet.hadoop.ParquetOutputFormat;
 import org.apache.parquet.hadoop.metadata.CompressionCodecName;
 import org.apache.parquet.hadoop.util.ContextUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import static org.apache.parquet.Log.INFO;
-import static org.apache.parquet.Log.WARN;
 import static org.apache.parquet.hadoop.metadata.CompressionCodecName.UNCOMPRESSED;
 
 /**
@@ -39,7 +38,7 @@ import static org.apache.parquet.hadoop.metadata.CompressionCodecName.UNCOMPRESS
  * @author Tianshuo Deng
  */
 public abstract class CodecConfig {
-  private static final Log LOG = Log.getLog(CodecConfig.class);
+  private static final Logger LOG = LoggerFactory.getLogger(CodecConfig.class);
 
   /**
    * @return if a compress flag is set from hadoop
@@ -89,11 +88,11 @@ public abstract class CodecConfig {
     } else if (isHadoopCompressionSet()) { // from hadoop config
       codec = getHadoopCompressionCodec();
     } else {
-      if (INFO) LOG.info("Compression set to false");
+      LOG.info("Compression set to false");
       codec = CompressionCodecName.UNCOMPRESSED;
     }
 
-    if (INFO) LOG.info("Compression: " + codec.name());
+    LOG.info("Compression: {}", codec.name());
     return codec;
   }
 
@@ -102,14 +101,13 @@ public abstract class CodecConfig {
     try {
       // find the right codec
       Class<?> codecClass = getHadoopOutputCompressorClass(CompressionCodecName.UNCOMPRESSED.getHadoopCompressionCodecClass());
-      if (INFO) LOG.info("Compression set through hadoop codec: " + codecClass.getName());
+      LOG.info("Compression set through hadoop codec: {}", codecClass.getName());
       codec = CompressionCodecName.fromCompressionCodec(codecClass);
     } catch (CompressionCodecNotSupportedException e) {
-      if (WARN)
-        LOG.warn("codec defined in hadoop config is not supported by parquet [" + e.getCodecClass().getName() + "] and will use UNCOMPRESSED", e);
+      LOG.warn("codec defined in hadoop config is not supported by parquet [{}] and will use UNCOMPRESSED",e.getCodecClass().getName(), e);
       codec = CompressionCodecName.UNCOMPRESSED;
     } catch (IllegalArgumentException e) {
-      if (WARN) LOG.warn("codec class not found: " + e.getMessage(), e);
+      LOG.warn("codec class not found: {}", e.getMessage(), e);
       codec = CompressionCodecName.UNCOMPRESSED;
     }
     return codec;
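
The warn calls in this hunk rely on an SLF4J convention worth calling out: when the last argument is a Throwable that has no matching "{}" placeholder, it is treated as the exception and logged with its stack trace (SLF4J 1.6 and later). A minimal sketch (the codec name and exception are illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class WarnWithCauseSketch {
      private static final Logger LOG = LoggerFactory.getLogger(WarnWithCauseSketch.class);

      public static void main(String[] args) {
        Exception e = new IllegalArgumentException("no such codec");
        // "GzipCodec" fills the single {}; the trailing Throwable has no
        // placeholder, so SLF4J logs it as the cause with a stack trace.
        LOG.warn("codec [{}] is not supported, using UNCOMPRESSED", "GzipCodec", e);
      }
    }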

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/HadoopStreams.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/HadoopStreams.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/HadoopStreams.java
index 7c321cd..8731bd6 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/HadoopStreams.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/HadoopStreams.java
@@ -20,9 +20,11 @@
 package org.apache.parquet.hadoop.util;
 
 import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.parquet.Log;
 import org.apache.parquet.io.ParquetDecodingException;
 import org.apache.parquet.io.SeekableInputStream;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
 
@@ -31,7 +33,7 @@ import java.lang.reflect.InvocationTargetException;
  */
 public class HadoopStreams {
 
-  private static final Log LOG = Log.getLog(HadoopStreams.class);
+  private static final Logger LOG = LoggerFactory.getLogger(HadoopStreams.class);
 
   private static final Class<?> byteBufferReadableClass = getReadableClass();
   static final Constructor<SeekableInputStream> h2SeekableConstructor = getH2SeekableConstructor();

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/SerializationUtil.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/SerializationUtil.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/SerializationUtil.java
index ec413ac..ffbe2a7 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/SerializationUtil.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/util/SerializationUtil.java
@@ -30,7 +30,8 @@ import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.conf.Configuration;
 
 import org.apache.parquet.Closeables;
-import org.apache.parquet.Log;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Serialization utils copied from:
@@ -39,7 +40,7 @@ import org.apache.parquet.Log;
  * TODO: Refactor elephant-bird so that we can depend on utils like this without extra baggage.
  */
 public final class SerializationUtil {
-  private static final Log LOG = Log.getLog(SerializationUtil.class);
+  private static final Logger LOG = LoggerFactory.getLogger(SerializationUtil.class);
 
   private SerializationUtil() { }
 

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/TestParquetFileWriter.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/TestParquetFileWriter.java b/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/TestParquetFileWriter.java
index c56515f..00e0c68 100644
--- a/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/TestParquetFileWriter.java
+++ b/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/TestParquetFileWriter.java
@@ -30,7 +30,6 @@ import org.apache.parquet.hadoop.ParquetOutputFormat.JobSummaryLevel;
 import org.junit.Assume;
 import org.junit.Rule;
 import org.junit.Test;
-import org.apache.parquet.Log;
 import org.apache.parquet.bytes.BytesInput;
 import org.apache.parquet.column.ColumnDescriptor;
 import org.apache.parquet.column.Encoding;
@@ -68,10 +67,12 @@ import org.apache.parquet.example.data.simple.SimpleGroup;
 import org.apache.parquet.hadoop.example.GroupWriteSupport;
 import org.junit.rules.TemporaryFolder;
 import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class TestParquetFileWriter {
 
-  private static final Log LOG = Log.getLog(TestParquetFileWriter.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestParquetFileWriter.class);
 
   private static final MessageType SCHEMA = MessageTypeParser.parseMessageType("" +
       "message m {" +
@@ -608,7 +609,7 @@ public class TestParquetFileWriter {
   }
 
   private void validateFooters(final List<Footer> metadata) {
-    LOG.debug(metadata);
+    LOG.debug("{}", metadata);
     assertEquals(String.valueOf(metadata), 3, metadata.size());
     for (Footer footer : metadata) {
       final File file = new File(footer.getFile().toUri());

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/example/TestInputOutputFormat.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/example/TestInputOutputFormat.java b/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/example/TestInputOutputFormat.java
index 5ca041b..d1b5267 100644
--- a/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/example/TestInputOutputFormat.java
+++ b/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/example/TestInputOutputFormat.java
@@ -53,7 +53,6 @@ import org.apache.parquet.filter2.predicate.FilterApi;
 import org.junit.Before;
 import org.junit.Test;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.example.data.Group;
 import org.apache.parquet.example.data.simple.SimpleGroupFactory;
 import org.apache.parquet.hadoop.ParquetInputFormat;
@@ -65,9 +64,11 @@ import org.apache.parquet.hadoop.api.ReadSupport;
 import org.apache.parquet.hadoop.metadata.CompressionCodecName;
 import org.apache.parquet.hadoop.util.ContextUtil;
 import org.apache.parquet.schema.MessageTypeParser;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class TestInputOutputFormat {
-  private static final Log LOG = Log.getLog(TestInputOutputFormat.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestInputOutputFormat.class);
   private static final Charset UTF_8 = Charset.forName("UTF-8");
   final Path parquetPath = new Path("target/test/example/TestInputOutputFormat/parquet");
   final Path inputPath = new Path("src/test/java/org/apache/parquet/hadoop/example/TestInputOutputFormat.java");
@@ -348,10 +349,10 @@ public class TestInputOutputFormat {
 
   private void waitForJob(Job job) throws InterruptedException, IOException {
     while (!job.isComplete()) {
-      LOG.debug("waiting for job " + job.getJobName());
+      LOG.debug("waiting for job {}", job.getJobName());
       sleep(100);
     }
-    LOG.info("status for job " + job.getJobName() + ": " + (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
+    LOG.info("status for job {}: {}", job.getJobName(), (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
     if (!job.isSuccessful()) {
       throw new RuntimeException("job failed " + job.getJobName());
     }

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hadoop/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/test/resources/log4j.properties b/parquet-hadoop/src/test/resources/log4j.properties
new file mode 100644
index 0000000..678fd66
--- /dev/null
+++ b/parquet-hadoop/src/test/resources/log4j.properties
@@ -0,0 +1,24 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+log4j.rootLogger=INFO, stdout
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.Target=System.out
+log4j.appender.stdout.threshold=INFO
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ABSOLUTE} %-5p %30c{1}:%4L - %m%n
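
Since SLF4J is only a facade, this file takes effect through whatever binding is on the test classpath (presumably a log4j binding such as slf4j-log4j12 elsewhere in the build). In the conversion pattern, %d{ABSOLUTE} prints an HH:mm:ss,SSS timestamp, %-5p the level left-justified in five characters, %30c{1} the last component of the logger name right-aligned in thirty, %4L the source line number, and %m%n the message plus a newline. Note that %L forces log4j to capture caller location, which is relatively expensive; that is fine for tests but usually avoided in production configs.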

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hive/parquet-hive-binding/parquet-hive-0.10-binding/src/main/java/org/apache/parquet/hive/internal/Hive010Binding.java
----------------------------------------------------------------------
diff --git a/parquet-hive/parquet-hive-binding/parquet-hive-0.10-binding/src/main/java/org/apache/parquet/hive/internal/Hive010Binding.java b/parquet-hive/parquet-hive-binding/parquet-hive-0.10-binding/src/main/java/org/apache/parquet/hive/internal/Hive010Binding.java
index 7d4ecc0..23ef5d4 100644
--- a/parquet-hive/parquet-hive-binding/parquet-hive-0.10-binding/src/main/java/org/apache/parquet/hive/internal/Hive010Binding.java
+++ b/parquet-hive/parquet-hive-binding/parquet-hive-0.10-binding/src/main/java/org/apache/parquet/hive/internal/Hive010Binding.java
@@ -37,8 +37,9 @@ import org.apache.hadoop.hive.ql.plan.PartitionDesc;
 import org.apache.hadoop.hive.ql.plan.TableScanDesc;
 import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
 import org.apache.hadoop.mapred.JobConf;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.parquet.Log;
 
 /**
  * Hive 0.10 implementation of {@link org.apache.parquet.hive.HiveBinding HiveBinding}.
@@ -46,7 +47,7 @@ import org.apache.parquet.Log;
  * <a href="http://bit.ly/1a4tcrb">ManageJobConfig</a> class.
  */
 public class Hive010Binding extends AbstractHiveBinding {
-  private static final Log LOG = Log.getLog(Hive010Binding.class);
+  private static final Logger LOG = LoggerFactory.getLogger(Hive010Binding.class);
   private final Map<String, PartitionDesc> pathToPartitionInfo =
       new LinkedHashMap<String, PartitionDesc>();
   /**

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hive/parquet-hive-binding/parquet-hive-0.12-binding/src/main/java/org/apache/parquet/hive/internal/Hive012Binding.java
----------------------------------------------------------------------
diff --git a/parquet-hive/parquet-hive-binding/parquet-hive-0.12-binding/src/main/java/org/apache/parquet/hive/internal/Hive012Binding.java b/parquet-hive/parquet-hive-binding/parquet-hive-0.12-binding/src/main/java/org/apache/parquet/hive/internal/Hive012Binding.java
index 37a2cd4..f65f7a5 100644
--- a/parquet-hive/parquet-hive-binding/parquet-hive-0.12-binding/src/main/java/org/apache/parquet/hive/internal/Hive012Binding.java
+++ b/parquet-hive/parquet-hive-binding/parquet-hive-0.12-binding/src/main/java/org/apache/parquet/hive/internal/Hive012Binding.java
@@ -38,8 +38,9 @@ import org.apache.hadoop.hive.ql.plan.PartitionDesc;
 import org.apache.hadoop.hive.ql.plan.TableScanDesc;
 import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
 import org.apache.hadoop.mapred.JobConf;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.parquet.Log;
 
 /**
  * Hive 0.12 implementation of {@link org.apache.parquet.hive.HiveBinding HiveBinding}.
@@ -47,7 +48,7 @@ import org.apache.parquet.Log;
  * <a href="http://bit.ly/1a4tcrb">ManageJobConfig</a> class.
  */
 public class Hive012Binding extends AbstractHiveBinding {
-  private static final Log LOG = Log.getLog(Hive012Binding.class);
+  private static final Logger LOG = LoggerFactory.getLogger(Hive012Binding.class);
   private final Map<String, PartitionDesc> pathToPartitionInfo =
       new LinkedHashMap<String, PartitionDesc>();
   /**

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hive/parquet-hive-binding/parquet-hive-binding-factory/src/main/java/org/apache/parquet/hive/HiveBindingFactory.java
----------------------------------------------------------------------
diff --git a/parquet-hive/parquet-hive-binding/parquet-hive-binding-factory/src/main/java/org/apache/parquet/hive/HiveBindingFactory.java b/parquet-hive/parquet-hive-binding/parquet-hive-binding-factory/src/main/java/org/apache/parquet/hive/HiveBindingFactory.java
index ba6003c..5ecc2d1 100644
--- a/parquet-hive/parquet-hive-binding/parquet-hive-binding-factory/src/main/java/org/apache/parquet/hive/HiveBindingFactory.java
+++ b/parquet-hive/parquet-hive-binding/parquet-hive-binding-factory/src/main/java/org/apache/parquet/hive/HiveBindingFactory.java
@@ -20,9 +20,10 @@ package org.apache.parquet.hive;
 
 import java.lang.reflect.Method;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.hive.internal.Hive010Binding;
 import org.apache.parquet.hive.internal.Hive012Binding;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Factory for creating HiveBinding objects based on the version of Hive
@@ -30,7 +31,7 @@ import org.apache.parquet.hive.internal.Hive012Binding;
  * to enable mocking.
  */
 public class HiveBindingFactory {
-  private static final Log LOG = Log.getLog(HiveBindingFactory.class);
+  private static final Logger LOG = LoggerFactory.getLogger(HiveBindingFactory.class);
   private static final String HIVE_VERSION_CLASS_NAME = "org.apache.hive.common.util.HiveVersionInfo";
   private static final String HIVE_VERSION_METHOD_NAME = "getVersion";
   private static final String HIVE_UTILITIES_CLASS_NAME = "org.apache.hadoop.hive.ql.exec.Utilities";
@@ -68,7 +69,7 @@ public class HiveBindingFactory {
     try {
       hiveVersionInfo = Class.forName(HIVE_VERSION_CLASS_NAME, true, classLoader);
     } catch (ClassNotFoundException e) {
-      LOG.debug("Class " + HIVE_VERSION_CLASS_NAME + ", not found, returning " + 
+      LOG.debug("Class " + HIVE_VERSION_CLASS_NAME + ", not found, returning {}",
           Hive010Binding.class.getSimpleName());
       return Hive010Binding.class;
     }
@@ -85,8 +86,7 @@ public class HiveBindingFactory {
       Method getVersionMethod = hiveVersionInfo.
           getMethod(HIVE_VERSION_METHOD_NAME, (Class[])null);
       String rawVersion = (String)getVersionMethod.invoke(null, (Object[])null);
-      LOG.debug("Raw Version from " + hiveVersionInfo.getSimpleName() + " is '" +
-          rawVersion + "'");
+      LOG.debug("Raw Version from {} is '{}'", hiveVersionInfo.getSimpleName(), rawVersion);
       hiveVersion = trimVersion(rawVersion);
     } catch (Exception e) {
       throw new UnexpectedHiveVersionProviderError("Unexpected error whilst " +
@@ -97,8 +97,7 @@ public class HiveBindingFactory {
       return createBindingForUnknownVersion();
     }
     if(hiveVersion.startsWith(HIVE_VERSION_010)) {
-      LOG.debug("Hive version " + hiveVersion + ", returning " +
-          Hive010Binding.class.getSimpleName());
+      LOG.debug("Hive version {}, returning {}", hiveVersion, Hive010Binding.class.getSimpleName());
       return Hive010Binding.class;
     } else if(hiveVersion.startsWith(HIVE_VERSION_011)) {
       LOG.debug("Hive version " + hiveVersion + ", returning " +
@@ -110,7 +109,7 @@ public class HiveBindingFactory {
           "and the parquet-hive jars from the parquet project should not be included " +
           "in Hive's classpath.");
     }
-    LOG.debug("Hive version " + hiveVersion + ", returning " +
+    LOG.debug("Hive version {}, returning {}", hiveVersion,
         Hive012Binding.class.getSimpleName());
     // as of 11/26/2013 it looks like the 0.12 binding will work for 0.13
     return Hive012Binding.class;
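
Two details in this hunk: mixing constant concatenation with placeholders, as in "Class " + HIVE_VERSION_CLASS_NAME + ", not found, returning {}", costs nothing at runtime, because HIVE_VERSION_CLASS_NAME is a static final String initialized from a literal and javac folds the concatenation at compile time; only the placeholder argument is deferred. The HIVE_VERSION_011 branch, by contrast, keeps plain runtime concatenation of hiveVersion without a guard, so it still builds its message string even when DEBUG is off.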

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetOutputFormat.java
----------------------------------------------------------------------
diff --git a/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetOutputFormat.java b/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetOutputFormat.java
index 9b87719..7d9ac8f 100644
--- a/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetOutputFormat.java
+++ b/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetOutputFormat.java
@@ -24,8 +24,6 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Properties;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
@@ -46,6 +44,8 @@ import org.apache.hadoop.mapreduce.OutputFormat;
 import org.apache.hadoop.util.Progressable;
 
 import org.apache.parquet.hadoop.ParquetOutputFormat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  *
@@ -55,7 +55,7 @@ import org.apache.parquet.hadoop.ParquetOutputFormat;
 public class MapredParquetOutputFormat extends FileOutputFormat<Void, ArrayWritable> implements
   HiveOutputFormat<Void, ArrayWritable> {
 
-  private static final Log LOG = LogFactory.getLog(MapredParquetOutputFormat.class);
+  private static final Logger LOG = LoggerFactory.getLogger(MapredParquetOutputFormat.class);
 
   protected ParquetOutputFormat<ArrayWritable> realOutputFormat;
 
@@ -96,7 +96,7 @@ public class MapredParquetOutputFormat extends FileOutputFormat<Void, ArrayWrita
       final Properties tableProperties,
       final Progressable progress) throws IOException {
 
-    LOG.info("creating new record writer..." + this);
+    LOG.info("creating new record writer...{}", this);
 
     final String columnNameProperty = tableProperties.getProperty(IOConstants.COLUMNS);
     final String columnTypeProperty = tableProperties.getProperty(IOConstants.COLUMNS_TYPES);

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java
----------------------------------------------------------------------
diff --git a/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java b/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java
index a225a95..053578d 100644
--- a/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java
+++ b/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java
@@ -22,8 +22,6 @@ import static org.apache.parquet.format.converter.ParquetMetadataConverter.SKIP_
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.io.IOConstants;
 import org.apache.hadoop.io.ArrayWritable;
@@ -46,9 +44,11 @@ import org.apache.parquet.hadoop.util.ContextUtil;
 import org.apache.parquet.hive.HiveBinding;
 import org.apache.parquet.hive.HiveBindingFactory;
 import org.apache.parquet.schema.MessageTypeParser;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class ParquetRecordReaderWrapper  implements RecordReader<Void, ArrayWritable> {
-  public static final Log LOG = LogFactory.getLog(ParquetRecordReaderWrapper.class);
+  public static final Logger LOG = LoggerFactory.getLogger(ParquetRecordReaderWrapper.class);
 
   private final long splitLen; // for getPos()
 

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/write/ParquetRecordWriterWrapper.java
----------------------------------------------------------------------
diff --git a/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/write/ParquetRecordWriterWrapper.java b/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/write/ParquetRecordWriterWrapper.java
index 117e9d4..8d4c5d7 100644
--- a/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/write/ParquetRecordWriterWrapper.java
+++ b/parquet-hive/parquet-hive-storage-handler/src/main/java/org/apache/hadoop/hive/ql/io/parquet/write/ParquetRecordWriterWrapper.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hive.ql.io.parquet.write;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.ArrayWritable;
 import org.apache.hadoop.io.Writable;
@@ -36,11 +34,13 @@ import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 
 import org.apache.parquet.hadoop.ParquetOutputFormat;
 import org.apache.parquet.hadoop.util.ContextUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class ParquetRecordWriterWrapper implements RecordWriter<Void, ArrayWritable>,
   FileSinkOperator.RecordWriter {
 
-  public static final Log LOG = LogFactory.getLog(ParquetRecordWriterWrapper.class);
+  public static final Logger LOG = LoggerFactory.getLogger(ParquetRecordWriterWrapper.class);
 
   private final org.apache.hadoop.mapreduce.RecordWriter<Void, ArrayWritable> realWriter;
   private final TaskAttemptContext taskContext;
@@ -58,10 +58,10 @@ public class ParquetRecordWriterWrapper implements RecordWriter<Void, ArrayWrita
       }
       taskContext = ContextUtil.newTaskAttemptContext(jobConf, taskAttemptID);
 
-      LOG.info("creating real writer to write at " + name);
+      LOG.info("creating real writer to write at {}", name);
       realWriter = (org.apache.hadoop.mapreduce.RecordWriter<Void, ArrayWritable>)
           ((ParquetOutputFormat) realOutputFormat).getRecordWriter(taskContext, new Path(name));
-      LOG.info("real writer: " + realWriter);
+      LOG.info("real writer: {}", realWriter);
     } catch (final InterruptedException e) {
       throw new IOException(e);
     }

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-pig/src/main/java/org/apache/parquet/pig/ParquetLoader.java
----------------------------------------------------------------------
diff --git a/parquet-pig/src/main/java/org/apache/parquet/pig/ParquetLoader.java b/parquet-pig/src/main/java/org/apache/parquet/pig/ParquetLoader.java
index be54aa8..7f87691 100644
--- a/parquet-pig/src/main/java/org/apache/parquet/pig/ParquetLoader.java
+++ b/parquet-pig/src/main/java/org/apache/parquet/pig/ParquetLoader.java
@@ -20,7 +20,6 @@ package org.apache.parquet.pig;
 
 import static java.util.Arrays.asList;
 import static org.apache.hadoop.mapreduce.lib.input.FileInputFormat.setInputPaths;
-import static org.apache.parquet.Log.DEBUG;
 import static org.apache.parquet.hadoop.util.ContextUtil.getConfiguration;
 import static org.apache.parquet.pig.PigSchemaConverter.parsePigSchema;
 import static org.apache.parquet.pig.PigSchemaConverter.pigSchemaToString;
@@ -74,10 +73,11 @@ import static org.apache.pig.Expression.Column;
 import static org.apache.pig.Expression.Const;
 import static org.apache.pig.Expression.OpType;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.hadoop.ParquetInputFormat;
 import org.apache.parquet.hadoop.metadata.GlobalMetaData;
 import org.apache.parquet.io.ParquetDecodingException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  *
@@ -88,7 +88,7 @@ import org.apache.parquet.io.ParquetDecodingException;
  *
  */
 public class ParquetLoader extends LoadFunc implements LoadMetadata, LoadPushDown, LoadPredicatePushdown {
-  private static final Log LOG = Log.getLog(ParquetLoader.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ParquetLoader.class);
 
   public static final String ENABLE_PREDICATE_FILTER_PUSHDOWN = "parquet.pig.predicate.pushdown.enable";
   private static final boolean DEFAULT_PREDICATE_PUSHDOWN_ENABLED = false;
@@ -157,9 +157,9 @@ public class ParquetLoader extends LoadFunc implements LoadMetadata, LoadPushDow
 
   @Override
   public void setLocation(String location, Job job) throws IOException {
-    if (DEBUG) {
+    if (LOG.isDebugEnabled()) {
       String jobToString = String.format("job[id=%s, name=%s]", job.getJobID(), job.getJobName());
-      LOG.debug("LoadFunc.setLocation(" + location + ", " + jobToString + ")");
+      LOG.debug("LoadFunc.setLocation({}, {})", location, jobToString);
     }
 
     setInput(location, job);
@@ -201,7 +201,7 @@ public class ParquetLoader extends LoadFunc implements LoadMetadata, LoadPushDow
 
   @Override
   public InputFormat<Void, Tuple> getInputFormat() throws IOException {
-    if (DEBUG) LOG.debug("LoadFunc.getInputFormat()");
+    LOG.debug("LoadFunc.getInputFormat()");
     return getParquetInputFormat();
   }
 
@@ -248,7 +248,7 @@ public class ParquetLoader extends LoadFunc implements LoadMetadata, LoadPushDow
   @Override
   public void prepareToRead(@SuppressWarnings("rawtypes") RecordReader reader, PigSplit split)
       throws IOException {
-    if (DEBUG) LOG.debug("LoadFunc.prepareToRead(" + reader + ", " + split + ")");
+    LOG.debug("LoadFunc.prepareToRead({}, {})", reader, split);
     this.reader = reader;
   }
 
@@ -268,9 +268,9 @@ public class ParquetLoader extends LoadFunc implements LoadMetadata, LoadPushDow
 
   @Override
   public String[] getPartitionKeys(String location, Job job) throws IOException {
-    if (DEBUG) {
+    if (LOG.isDebugEnabled()) {
       String jobToString = String.format("job[id=%s, name=%s]", job.getJobID(), job.getJobName());
-      LOG.debug("LoadMetadata.getPartitionKeys(" + location + ", " + jobToString + ")");
+      LOG.debug("LoadMetadata.getPartitionKeys({}, {})", location, jobToString);
     }
     setInput(location, job);
     return null;
@@ -278,9 +278,9 @@ public class ParquetLoader extends LoadFunc implements LoadMetadata, LoadPushDow
 
   @Override
   public ResourceSchema getSchema(String location, Job job) throws IOException {
-    if (DEBUG) {
+    if (LOG.isDebugEnabled()) {
       String jobToString = String.format("job[id=%s, name=%s]", job.getJobID(), job.getJobName());
-      LOG.debug("LoadMetadata.getSchema(" + location + ", " + jobToString + ")");
+      LOG.debug("LoadMetadata.getSchema({}, {})", location, jobToString);
     }
     setInput(location, job);
     return new ResourceSchema(schema);
@@ -323,9 +323,9 @@ public class ParquetLoader extends LoadFunc implements LoadMetadata, LoadPushDow
   @Override
   public ResourceStatistics getStatistics(String location, Job job)
       throws IOException {
-    if (DEBUG) {
+    if (LOG.isDebugEnabled()) {
       String jobToString = String.format("job[id=%s, name=%s]", job.getJobID(), job.getJobName());
-      LOG.debug("LoadMetadata.getStatistics(" + location + ", " + jobToString + ")");
+      LOG.debug("LoadMetadata.getStatistics({}, {})", location, jobToString);
     }
     /* We need to call setInput since setLocation is not
        guaranteed to be called before this */
@@ -347,7 +347,7 @@ public class ParquetLoader extends LoadFunc implements LoadMetadata, LoadPushDow
 
   @Override
   public void setPartitionFilter(Expression expression) throws IOException {
-    if (DEBUG) LOG.debug("LoadMetadata.setPartitionFilter(" + expression + ")");
+    LOG.debug("LoadMetadata.setPartitionFilter({})", expression);
   }
 
   @Override
@@ -465,10 +465,10 @@ public class ParquetLoader extends LoadFunc implements LoadMetadata, LoadPushDow
 
   @Override
   public void setPushdownPredicate(Expression e) throws IOException {
-    LOG.info("Pig pushdown expression: " + e);
+    LOG.info("Pig pushdown expression: {}", e);
 
     FilterPredicate pred = buildFilter(e);
-    LOG.info("Parquet filter predicate expression: " + pred);
+    LOG.info("Parquet filter predicate expression: {}", pred);
 
     storeInUDFContext(ParquetInputFormat.FILTER_PREDICATE, pred);
   }
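
These ParquetLoader hunks capture both halves of the migration: where the arguments are cheap, the if (DEBUG) guard is dropped entirely, because SLF4J renders {} placeholders only after the level check passes; where building an argument is itself costly (the String.format call), the guard survives as LOG.isDebugEnabled(). A minimal sketch of the two forms, with illustrative names that are not part of this commit:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggingPatterns {
      private static final Logger LOG = LoggerFactory.getLogger(LoggingPatterns.class);

      void demo(String location, Object job) {
        // Cheap arguments: no guard needed. toString() on each argument
        // runs only once SLF4J has confirmed DEBUG is enabled.
        LOG.debug("setLocation({}, {})", location, job);

        // Costly argument construction: keep the guard so String.format
        // never runs when DEBUG is disabled.
        if (LOG.isDebugEnabled()) {
          String jobToString = String.format("job[%s]", job);
          LOG.debug("setLocation({}, {})", location, jobToString);
        }
      }
    }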

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-pig/src/main/java/org/apache/parquet/pig/PigSchemaConverter.java
----------------------------------------------------------------------
diff --git a/parquet-pig/src/main/java/org/apache/parquet/pig/PigSchemaConverter.java b/parquet-pig/src/main/java/org/apache/parquet/pig/PigSchemaConverter.java
index e3e4b53..c9eb0ba 100644
--- a/parquet-pig/src/main/java/org/apache/parquet/pig/PigSchemaConverter.java
+++ b/parquet-pig/src/main/java/org/apache/parquet/pig/PigSchemaConverter.java
@@ -18,7 +18,6 @@
  */
 package org.apache.parquet.pig;
 
-import static org.apache.parquet.Log.DEBUG;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -36,7 +35,6 @@ import org.apache.pig.impl.util.Pair;
 import org.apache.pig.impl.util.Utils;
 import org.apache.pig.parser.ParserException;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.schema.ConversionPatterns;
 import org.apache.parquet.schema.GroupType;
 import org.apache.parquet.schema.MessageType;
@@ -46,6 +44,8 @@ import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
 import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeNameConverter;
 import org.apache.parquet.schema.Type;
 import org.apache.parquet.schema.Type.Repetition;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 
 /**
@@ -60,7 +60,7 @@ import org.apache.parquet.schema.Type.Repetition;
  *
  */
 public class PigSchemaConverter {
-  private static final Log LOG = Log.getLog(PigSchemaConverter.class);
+  private static final Logger LOG = LoggerFactory.getLogger(PigSchemaConverter.class);
   static final String ARRAY_VALUE_NAME = "value";
   private ColumnAccess columnAccess;
 
@@ -456,9 +456,9 @@ public class PigSchemaConverter {
    */
   public MessageType filter(MessageType schemaToFilter, Schema requestedPigSchema, RequiredFieldList requiredFieldList) {
     try {
-      if (DEBUG) LOG.debug("filtering schema:\n" + schemaToFilter + "\nwith requested pig schema:\n " + requestedPigSchema);
+      if (LOG.isDebugEnabled()) LOG.debug("filtering schema:\n" + schemaToFilter + "\nwith requested pig schema:\n " + requestedPigSchema);
       List<Type> result = columnAccess.filterTupleSchema(schemaToFilter, requestedPigSchema, requiredFieldList);
-      if (DEBUG) LOG.debug("schema:\n" + schemaToFilter + "\nfiltered to:\n" + result);
+      if (LOG.isDebugEnabled()) LOG.debug("schema:\n" + schemaToFilter + "\nfiltered to:\n" + result);
       return new MessageType(schemaToFilter.getName(), result);
     } catch (RuntimeException e) {
       throw new RuntimeException("can't filter " + schemaToFilter + " with " + requestedPigSchema, e);
@@ -466,7 +466,7 @@ public class PigSchemaConverter {
   }
 
   private Type filter(Type type, FieldSchema fieldSchema) {
-    if (DEBUG) LOG.debug("filtering type:\n" + type + "\nwith:\n " + fieldSchema);
+    if (LOG.isDebugEnabled()) LOG.debug("filtering type:\n" + type + "\nwith:\n " + fieldSchema);
     try {
       switch (fieldSchema.type) {
       case DataType.BAG:
@@ -486,12 +486,12 @@ public class PigSchemaConverter {
   }
 
   private Type filterTuple(GroupType tupleType, FieldSchema tupleFieldSchema) throws FrontendException {
-    if (DEBUG) LOG.debug("filtering TUPLE schema:\n" + tupleType + "\nwith:\n " + tupleFieldSchema);
+    if (LOG.isDebugEnabled()) LOG.debug("filtering TUPLE schema:\n" + tupleType + "\nwith:\n " + tupleFieldSchema);
     return tupleType.withNewFields(columnAccess.filterTupleSchema(tupleType, tupleFieldSchema.schema, null));
   }
 
   private Type filterMap(GroupType mapType, FieldSchema mapFieldSchema) throws FrontendException {
-    if (DEBUG) LOG.debug("filtering MAP schema:\n" + mapType + "\nwith:\n " + mapFieldSchema);
+    if (LOG.isDebugEnabled()) LOG.debug("filtering MAP schema:\n" + mapType + "\nwith:\n " + mapFieldSchema);
     if (mapType.getFieldCount() != 1) {
       throw new RuntimeException("not unwrapping the right type, this should be a Map: " + mapType);
     }
@@ -504,7 +504,7 @@ public class PigSchemaConverter {
   }
 
   private Type filterBag(GroupType bagType, FieldSchema bagFieldSchema) throws FrontendException {
-    if (DEBUG) LOG.debug("filtering BAG schema:\n" + bagType + "\nwith:\n " + bagFieldSchema);
+    if (LOG.isDebugEnabled()) LOG.debug("filtering BAG schema:\n" + bagType + "\nwith:\n " + bagFieldSchema);
     if (bagType.getFieldCount() != 1) {
       throw new RuntimeException("not unwrapping the right type, this should be a Bag: " + bagType);
     }
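
The filter methods above keep string concatenation but swap the static DEBUG flag for a LOG.isDebugEnabled() guard. An unguarded form with multiple placeholders would be equivalent, since SLF4J fills each {} in order only when DEBUG is on; a sketch under the same assumptions as the previous example (identifiers illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class FilterTrace {
      private static final Logger LOG = LoggerFactory.getLogger(FilterTrace.class);

      void traceFilter(Object schemaToFilter, Object requestedPigSchema) {
        // Guarded concatenation, as committed above:
        if (LOG.isDebugEnabled()) {
          LOG.debug("filtering schema:\n" + schemaToFilter
              + "\nwith requested pig schema:\n " + requestedPigSchema);
        }
        // Equivalent placeholder form, no guard required:
        LOG.debug("filtering schema:\n{}\nwith requested pig schema:\n {}",
            schemaToFilter, requestedPigSchema);
      }
    }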

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-pig/src/main/java/org/apache/parquet/pig/TupleReadSupport.java
----------------------------------------------------------------------
diff --git a/parquet-pig/src/main/java/org/apache/parquet/pig/TupleReadSupport.java b/parquet-pig/src/main/java/org/apache/parquet/pig/TupleReadSupport.java
index ee7c710..75bb5b5 100644
--- a/parquet-pig/src/main/java/org/apache/parquet/pig/TupleReadSupport.java
+++ b/parquet-pig/src/main/java/org/apache/parquet/pig/TupleReadSupport.java
@@ -33,7 +33,6 @@ import org.apache.pig.impl.logicalLayer.FrontendException;
 import org.apache.pig.impl.logicalLayer.schema.Schema;
 import org.apache.pig.impl.logicalLayer.schema.Schema.FieldSchema;
 import org.apache.pig.impl.util.ObjectSerializer;
-import org.apache.parquet.Log;
 import org.apache.parquet.hadoop.api.InitContext;
 import org.apache.parquet.hadoop.api.ReadSupport;
 import org.apache.parquet.io.ParquetDecodingException;
@@ -41,6 +40,8 @@ import org.apache.parquet.io.api.RecordMaterializer;
 import org.apache.parquet.pig.convert.TupleRecordMaterializer;
 import org.apache.parquet.schema.IncompatibleSchemaModificationException;
 import org.apache.parquet.schema.MessageType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Read support for Pig Tuple
@@ -54,7 +55,7 @@ public class TupleReadSupport extends ReadSupport<Tuple> {
   static final String PARQUET_COLUMN_INDEX_ACCESS = "parquet.private.pig.column.index.access";
   static final String PARQUET_PIG_REQUIRED_FIELDS = "parquet.private.pig.required.fields";
   static final String PARQUET_PIG_ELEPHANT_BIRD_COMPATIBLE = "parquet.pig.elephantbird.compatible";
-  private static final Log LOG = Log.getLog(TupleReadSupport.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TupleReadSupport.class);
 
   private static final PigSchemaConverter pigSchemaConverter = new PigSchemaConverter(false);
 

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-pig/src/test/java/org/apache/parquet/pig/PerfTest2.java
----------------------------------------------------------------------
diff --git a/parquet-pig/src/test/java/org/apache/parquet/pig/PerfTest2.java b/parquet-pig/src/test/java/org/apache/parquet/pig/PerfTest2.java
index 388d527..0b8a464 100644
--- a/parquet-pig/src/test/java/org/apache/parquet/pig/PerfTest2.java
+++ b/parquet-pig/src/test/java/org/apache/parquet/pig/PerfTest2.java
@@ -48,8 +48,9 @@ import org.apache.pig.data.TupleFactory;
 import org.apache.pig.impl.util.Utils;
 import org.apache.pig.parser.ParserException;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.hadoop.util.ContextUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  *
@@ -60,6 +61,9 @@ import org.apache.parquet.hadoop.util.ContextUtil;
  */
 public class PerfTest2 {
 
+  private static final Logger LOG = LoggerFactory.getLogger(PerfTest2.class);
+  private static final boolean DEBUG = LOG.isDebugEnabled();
+
   static final int COLUMN_COUNT = 50;
   private static final long ROW_COUNT = 100000;
   private static Configuration conf = new Configuration();
@@ -173,7 +177,7 @@ public class PerfTest2 {
       recordReader.initialize(split, taskAttemptContext);
       Tuple t;
       while ((t = loadFunc.getNext()) != null) {
-        if (Log.DEBUG) System.out.println(t);
+        if (DEBUG) System.out.println(t);
         ++i;
       }
     }
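
PerfTest2 trades flexibility for speed: caching isDebugEnabled() in a static final field lets the JIT treat the branch in the hot loop as effectively constant, but the flag is frozen when the class initializes, so a log level changed later at runtime is not observed. A sketch of that tradeoff (the loop body is illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class HotLoop {
      private static final Logger LOG = LoggerFactory.getLogger(HotLoop.class);
      // Evaluated once at class initialization; later level changes are ignored.
      private static final boolean DEBUG = LOG.isDebugEnabled();

      long count(Iterable<Object> tuples) {
        long i = 0;
        for (Object t : tuples) {
          if (DEBUG) System.out.println(t); // branch is effectively free when false
          ++i;
        }
        return i;
      }
    }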

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-pig/src/test/java/org/apache/parquet/pig/TestTupleRecordConsumer.java
----------------------------------------------------------------------
diff --git a/parquet-pig/src/test/java/org/apache/parquet/pig/TestTupleRecordConsumer.java b/parquet-pig/src/test/java/org/apache/parquet/pig/TestTupleRecordConsumer.java
index 83e1227..ef048f2 100644
--- a/parquet-pig/src/test/java/org/apache/parquet/pig/TestTupleRecordConsumer.java
+++ b/parquet-pig/src/test/java/org/apache/parquet/pig/TestTupleRecordConsumer.java
@@ -40,7 +40,6 @@ import org.apache.pig.impl.util.Utils;
 import org.apache.pig.parser.ParserException;
 import org.junit.Test;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.example.data.Group;
 import org.apache.parquet.example.data.GroupWriter;
 import org.apache.parquet.example.data.simple.SimpleGroup;
@@ -51,9 +50,11 @@ import org.apache.parquet.io.ConverterConsumer;
 import org.apache.parquet.io.RecordConsumerLoggingWrapper;
 import org.apache.parquet.io.api.RecordMaterializer;
 import org.apache.parquet.schema.MessageType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class TestTupleRecordConsumer {
-  private static final Log logger = Log.getLog(TestTupleRecordConsumer.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestTupleRecordConsumer.class);
 
   @Test
   public void testArtSchema() throws ExecException, ParserException {
@@ -127,7 +128,7 @@ public class TestTupleRecordConsumer {
     RecordMaterializer<Tuple> recordConsumer = newPigRecordConsumer(pigSchemaString);
     TupleWriteSupport tupleWriter = newTupleWriter(pigSchemaString, recordConsumer);
     for (Tuple tuple : input) {
-      logger.debug(tuple);
+      LOG.debug("{}", tuple);
       tupleWriter.write(tuple);
       tuples.add(recordConsumer.getCurrentRecord());
     }
@@ -151,14 +152,14 @@ public class TestTupleRecordConsumer {
       groupWriter.write(group);
       final Tuple tuple = pigRecordConsumer.getCurrentRecord();
       tuples.add(tuple);
-      logger.debug("in: "+group+"\nout:"+tuple);
+      LOG.debug("in: {}\nout:{}", group, tuple);
     }
 
     List<Group> groups = new ArrayList<Group>();
     GroupRecordConverter recordConsumer = new GroupRecordConverter(schema);
     TupleWriteSupport tupleWriter = newTupleWriter(pigSchemaString, recordConsumer);
     for (Tuple t : tuples) {
-      logger.debug(t);
+      LOG.debug("{}", t);
       tupleWriter.write(t);
       groups.add(recordConsumer.getCurrentRecord());
     }
@@ -166,7 +167,7 @@ public class TestTupleRecordConsumer {
     assertEquals(input.size(), groups.size());
     for (int i = 0; i < input.size(); i++) {
       Group in = input.get(i);
-      logger.debug(in);
+      LOG.debug("{}", in);
       Group out = groups.get(i);
       assertEquals(in.toString(), out.toString());
     }

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-pig/src/test/java/org/apache/parquet/pig/TupleConsumerPerfTest.java
----------------------------------------------------------------------
diff --git a/parquet-pig/src/test/java/org/apache/parquet/pig/TupleConsumerPerfTest.java b/parquet-pig/src/test/java/org/apache/parquet/pig/TupleConsumerPerfTest.java
index ff192e2..2148e06 100644
--- a/parquet-pig/src/test/java/org/apache/parquet/pig/TupleConsumerPerfTest.java
+++ b/parquet-pig/src/test/java/org/apache/parquet/pig/TupleConsumerPerfTest.java
@@ -31,7 +31,6 @@ import org.apache.pig.data.TupleFactory;
 import org.apache.pig.impl.util.Utils;
 import org.apache.pig.parser.ParserException;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.column.impl.ColumnWriteStoreV1;
 import org.apache.parquet.column.page.PageReadStore;
 import org.apache.parquet.column.page.mem.MemPageStore;

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoReadSupport.java
----------------------------------------------------------------------
diff --git a/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoReadSupport.java b/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoReadSupport.java
index e6921db..3a21d84 100644
--- a/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoReadSupport.java
+++ b/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoReadSupport.java
@@ -21,11 +21,12 @@ package org.apache.parquet.proto;
 import com.google.protobuf.Message;
 import com.twitter.elephantbird.util.Protobufs;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.parquet.Log;
 import org.apache.parquet.hadoop.api.InitContext;
 import org.apache.parquet.hadoop.api.ReadSupport;
 import org.apache.parquet.io.api.RecordMaterializer;
 import org.apache.parquet.schema.MessageType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.util.Map;
 
@@ -35,7 +36,7 @@ import java.util.Map;
  */
 public class ProtoReadSupport<T extends Message> extends ReadSupport<T> {
 
-  private static final Log LOG = Log.getLog(ProtoReadSupport.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ProtoReadSupport.class);
 
   public static final String PB_REQUESTED_PROJECTION = "parquet.proto.projection";
 
@@ -62,11 +63,11 @@ public class ProtoReadSupport<T extends Message> extends ReadSupport<T> {
 
     if (requestedProjectionString != null && !requestedProjectionString.trim().isEmpty()) {
       MessageType requestedProjection = getSchemaForRead(context.getFileSchema(), requestedProjectionString);
-      LOG.debug("Reading data with projection " + requestedProjection);
+      LOG.debug("Reading data with projection {}", requestedProjection);
       return new ReadContext(requestedProjection);
     } else {
       MessageType fileSchema = context.getFileSchema();
-      LOG.debug("Reading data with schema " + fileSchema);
+      LOG.debug("Reading data with schema {}", fileSchema);
       return new ReadContext(fileSchema);
     }
   }
@@ -85,7 +86,7 @@ public class ProtoReadSupport<T extends Message> extends ReadSupport<T> {
       throw new RuntimeException("I Need parameter " + PB_CLASS + " with Protocol Buffer class");
     }
 
-    LOG.debug("Reading data with Protocol Buffer class " + headerProtoClass);
+    LOG.debug("Reading data with Protocol Buffer class {}", headerProtoClass);
 
     MessageType requestedSchema = readContext.getRequestedSchema();
     Class<? extends Message> protobufClass = Protobufs.getProtobufClass(headerProtoClass);

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoSchemaConverter.java
----------------------------------------------------------------------
diff --git a/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoSchemaConverter.java b/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoSchemaConverter.java
index 3f6ed6b..2c4a1ca 100644
--- a/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoSchemaConverter.java
+++ b/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoSchemaConverter.java
@@ -29,7 +29,6 @@ import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.INT64;
 
 import java.util.List;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.schema.MessageType;
 import org.apache.parquet.schema.Type;
 import org.apache.parquet.schema.Types;
@@ -40,6 +39,8 @@ import com.google.protobuf.Descriptors;
 import com.google.protobuf.Descriptors.FieldDescriptor.JavaType;
 import com.google.protobuf.Message;
 import com.twitter.elephantbird.util.Protobufs;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * <p/>
@@ -49,7 +50,7 @@ import com.twitter.elephantbird.util.Protobufs;
  */
 public class ProtoSchemaConverter {
 
-  private static final Log LOG = Log.getLog(ProtoSchemaConverter.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ProtoSchemaConverter.class);
 
   public MessageType convert(Class<? extends Message> protobufClass) {
     LOG.debug("Converting protocol buffer class \"" + protobufClass + "\" to parquet schema.");

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoWriteSupport.java
----------------------------------------------------------------------
diff --git a/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoWriteSupport.java b/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoWriteSupport.java
index cef2b93..c0ed351 100644
--- a/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoWriteSupport.java
+++ b/parquet-protobuf/src/main/java/org/apache/parquet/proto/ProtoWriteSupport.java
@@ -26,7 +26,6 @@ import com.google.protobuf.MessageOrBuilder;
 import com.google.protobuf.TextFormat;
 import com.twitter.elephantbird.util.Protobufs;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.parquet.Log;
 import org.apache.parquet.hadoop.BadConfigurationException;
 import org.apache.parquet.hadoop.api.WriteSupport;
 import org.apache.parquet.io.InvalidRecordException;
@@ -36,6 +35,8 @@ import org.apache.parquet.schema.GroupType;
 import org.apache.parquet.schema.IncompatibleSchemaModificationException;
 import org.apache.parquet.schema.MessageType;
 import org.apache.parquet.schema.Type;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.lang.reflect.Array;
 import java.util.HashMap;
@@ -48,7 +49,7 @@ import java.util.Map;
  */
 public class ProtoWriteSupport<T extends MessageOrBuilder> extends WriteSupport<T> {
 
-  private static final Log LOG = Log.getLog(ProtoWriteSupport.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ProtoWriteSupport.class);
   public static final String PB_CLASS_WRITE = "parquet.proto.writeClass";
 
   private RecordConsumer recordConsumer;

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-protobuf/src/test/java/org/apache/parquet/proto/utils/WriteUsingMR.java
----------------------------------------------------------------------
diff --git a/parquet-protobuf/src/test/java/org/apache/parquet/proto/utils/WriteUsingMR.java b/parquet-protobuf/src/test/java/org/apache/parquet/proto/utils/WriteUsingMR.java
index c87cb44..d18076a 100644
--- a/parquet-protobuf/src/test/java/org/apache/parquet/proto/utils/WriteUsingMR.java
+++ b/parquet-protobuf/src/test/java/org/apache/parquet/proto/utils/WriteUsingMR.java
@@ -27,9 +27,10 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
-import org.apache.parquet.Log;
 import org.apache.parquet.proto.ProtoParquetOutputFormat;
 import org.apache.parquet.proto.TestUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -44,7 +45,7 @@ import static java.lang.Thread.sleep;
  */
 public class WriteUsingMR {
 
-  private static final Log LOG = Log.getLog(WriteUsingMR.class);
+  private static final Logger LOG = LoggerFactory.getLogger(WriteUsingMR.class);
   Configuration conf = new Configuration();
   private static List<Message> inputMessages;
   Path outputPath;
@@ -61,7 +62,7 @@ public class WriteUsingMR {
       } else {
         for (Message msg : inputMessages) {
           context.write(null, msg);
-          LOG.debug("Reading msg from mock writing mapper" + msg);
+          LOG.debug("Reading msg from mock writing mapper {}", msg);
         }
       }
     }
@@ -102,7 +103,7 @@ public class WriteUsingMR {
   static void waitForJob(Job job) throws Exception {
     job.submit();
     while (!job.isComplete()) {
-      LOG.debug("waiting for job " + job.getJobName());
+      LOG.debug("waiting for job {}", job.getJobName());
       sleep(50);
     }
     LOG.debug("status for job " + job.getJobName() + ": " + (job.isSuccessful() ? "SUCCESS" : "FAILURE"));

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/AbstractThriftWriteSupport.java
----------------------------------------------------------------------
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/AbstractThriftWriteSupport.java b/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/AbstractThriftWriteSupport.java
index 5f210d3..fe8019c 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/AbstractThriftWriteSupport.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/AbstractThriftWriteSupport.java
@@ -22,7 +22,6 @@ import org.apache.thrift.TBase;
 
 import com.twitter.elephantbird.pig.util.ThriftToPig;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.hadoop.BadConfigurationException;
 import org.apache.parquet.hadoop.api.WriteSupport;
 import org.apache.parquet.io.ColumnIOFactory;
@@ -34,11 +33,13 @@ import org.apache.parquet.thrift.ParquetWriteProtocol;
 import org.apache.parquet.thrift.ThriftMetaData;
 import org.apache.parquet.thrift.ThriftSchemaConverter;
 import org.apache.parquet.thrift.struct.ThriftType.StructType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 
 public abstract class AbstractThriftWriteSupport<T> extends WriteSupport<T> {
   public static final String PARQUET_THRIFT_CLASS = "parquet.thrift.class";
-  private static final Log LOG = Log.getLog(AbstractThriftWriteSupport.class);
+  private static final Logger LOG = LoggerFactory.getLogger(AbstractThriftWriteSupport.class);
 
   public static void setGenericThriftClass(Configuration configuration, Class<?> thriftClass) {
     configuration.set(PARQUET_THRIFT_CLASS, thriftClass.getName());

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ThriftReadSupport.java
----------------------------------------------------------------------
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ThriftReadSupport.java b/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ThriftReadSupport.java
index 1c020ae..f49fb67 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ThriftReadSupport.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/hadoop/thrift/ThriftReadSupport.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.mapred.JobConf;
 import org.apache.thrift.TBase;
 import org.apache.thrift.protocol.TProtocol;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.Strings;
 import org.apache.parquet.hadoop.api.InitContext;
 import org.apache.parquet.hadoop.api.ReadSupport;
@@ -44,9 +43,11 @@ import org.apache.parquet.thrift.projection.StrictFieldProjectionFilter;
 import org.apache.parquet.thrift.projection.ThriftProjectionException;
 import org.apache.parquet.thrift.projection.deprecated.DeprecatedFieldProjectionFilter;
 import org.apache.parquet.thrift.struct.ThriftType.StructType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class ThriftReadSupport<T> extends ReadSupport<T> {
-  private static final Log LOG = Log.getLog(ThriftReadSupport.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ThriftReadSupport.class);
 
   /**
    * Deprecated. Use {@link #STRICT_THRIFT_COLUMN_FILTER_KEY}
@@ -128,8 +129,8 @@ public class ThriftReadSupport<T> extends ReadSupport<T> {
     }
 
     if (!Strings.isNullOrEmpty(deprecated)) {
-      LOG.warn(String.format("Using %s is deprecated. Please see the docs for %s!",
-          THRIFT_COLUMN_FILTER_KEY, STRICT_THRIFT_COLUMN_FILTER_KEY));
+      LOG.warn("Using {} is deprecated. Please see the docs for {}!",
+          THRIFT_COLUMN_FILTER_KEY, STRICT_THRIFT_COLUMN_FILTER_KEY);
       return new DeprecatedFieldProjectionFilter(deprecated);
     }
 

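The warn above also drops String.format in favor of placeholders. Since WARN is almost always enabled, the win here is consistency more than speed, but the deferred form is never slower; a before/after sketch (key names illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class DeprecationWarning {
      private static final Logger LOG = LoggerFactory.getLogger(DeprecationWarning.class);

      void warn(String oldKey, String newKey) {
        // Before: the message is formatted even when WARN is disabled.
        LOG.warn(String.format("Using %s is deprecated. Please see the docs for %s!",
            oldKey, newKey));
        // After: formatting happens behind the level check.
        LOG.warn("Using {} is deprecated. Please see the docs for {}!", oldKey, newKey);
      }
    }
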
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-thrift/src/main/java/org/apache/parquet/thrift/ParquetReadProtocol.java
----------------------------------------------------------------------
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ParquetReadProtocol.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ParquetReadProtocol.java
index d3b496a..b72c85c 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ParquetReadProtocol.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ParquetReadProtocol.java
@@ -18,7 +18,6 @@
  */
 package org.apache.parquet.thrift;
 
-import static org.apache.parquet.Log.DEBUG;
 
 import java.nio.ByteBuffer;
 import java.util.Collection;
@@ -33,12 +32,12 @@ import org.apache.thrift.protocol.TMessage;
 import org.apache.thrift.protocol.TProtocol;
 import org.apache.thrift.protocol.TSet;
 import org.apache.thrift.protocol.TStruct;
-
-import org.apache.parquet.Log;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 
 class ParquetReadProtocol extends ParquetProtocol {
-  private static final Log LOG = Log.getLog(ParquetReadProtocol.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ParquetReadProtocol.class);
 
   ParquetReadProtocol() {
     super("read");
@@ -63,102 +62,102 @@ class ParquetReadProtocol extends ParquetProtocol {
   }
 
   public TMessage readMessageBegin() throws TException {
-    if (DEBUG) LOG.debug("readMessageBegin()");
+    LOG.debug("readMessageBegin()");
     return next().readMessageBegin();
   }
 
   public void readMessageEnd() throws TException {
-    if (DEBUG) LOG.debug("readMessageEnd()");
+    LOG.debug("readMessageEnd()");
     next().readMessageEnd();
   }
 
   public TStruct readStructBegin() throws TException {
-    if (DEBUG) LOG.debug("readStructBegin()");
+    LOG.debug("readStructBegin()");
     return next().readStructBegin();
   }
 
   public void readStructEnd() throws TException {
-    if (DEBUG) LOG.debug("readStructEnd()");
+    LOG.debug("readStructEnd()");
     next().readStructEnd();
   }
 
   public TField readFieldBegin() throws TException {
-    if (DEBUG) LOG.debug("readFieldBegin()");
+    LOG.debug("readFieldBegin()");
     return next().readFieldBegin();
   }
 
   public void readFieldEnd() throws TException {
-    if (DEBUG) LOG.debug("readFieldEnd()");
+    LOG.debug("readFieldEnd()");
     next().readFieldEnd();
   }
 
   public TMap readMapBegin() throws TException {
-    if (DEBUG) LOG.debug("readMapBegin()");
+    LOG.debug("readMapBegin()");
     return next().readMapBegin();
   }
 
   public void readMapEnd() throws TException {
-    if (DEBUG) LOG.debug("readMapEnd()");
+    LOG.debug("readMapEnd()");
     next().readMapEnd();
   }
 
   public TList readListBegin() throws TException {
-    if (DEBUG) LOG.debug("readListBegin()");
+    LOG.debug("readListBegin()");
     return next().readListBegin();
   }
 
   public void readListEnd() throws TException {
-    if (DEBUG) LOG.debug("readListEnd()");
+    LOG.debug("readListEnd()");
     next().readListEnd();
   }
 
   public TSet readSetBegin() throws TException {
-    if (DEBUG) LOG.debug("readSetBegin()");
+    LOG.debug("readSetBegin()");
     return next().readSetBegin();
   }
 
   public void readSetEnd() throws TException {
-    if (DEBUG) LOG.debug("readSetEnd()");
+    LOG.debug("readSetEnd()");
     next().readSetEnd();
   }
 
   public boolean readBool() throws TException {
-    if (DEBUG) LOG.debug("readBool()");
+    LOG.debug("readBool()");
     return next().readBool();
   }
 
   public byte readByte() throws TException {
-    if (DEBUG) LOG.debug("readByte()");
+    LOG.debug("readByte()");
     return next().readByte();
   }
 
   public short readI16() throws TException {
-    if (DEBUG) LOG.debug("readI16()");
+    LOG.debug("readI16()");
     return next().readI16();
   }
 
   public int readI32() throws TException {
-    if (DEBUG) LOG.debug("readI32()");
+    LOG.debug("readI32()");
     return next().readI32();
   }
 
   public long readI64() throws TException {
-    if (DEBUG) LOG.debug("readI64()");
+    LOG.debug("readI64()");
     return next().readI64();
   }
 
   public double readDouble() throws TException {
-    if (DEBUG) LOG.debug("readDouble()");
+    LOG.debug("readDouble()");
     return next().readDouble();
   }
 
   public String readString() throws TException {
-    if (DEBUG) LOG.debug("readString()");
+    LOG.debug("readString()");
     return next().readString();
   }
 
   public ByteBuffer readBinary() throws TException {
-    if (DEBUG) LOG.debug("readBinary()");
+    LOG.debug("readBinary()");
     return next().readBinary();
   }
 

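Every method in this protocol logs a constant string, so the commit deletes the guards outright: with no arguments there is nothing to format, and the level check inside SLF4J is cheap. A sketch of the resulting delegate-and-trace shape, with the protocol machinery reduced to an iterator (types simplified, not the real class):

    import java.util.Iterator;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class TracingReader {
      private static final Logger LOG = LoggerFactory.getLogger(TracingReader.class);
      private final Iterator<Boolean> events;

      TracingReader(Iterator<Boolean> events) {
        this.events = events;
      }

      boolean readBool() {
        LOG.debug("readBool()"); // constant message: nothing to build, no guard
        return events.next();    // delegate to the underlying event stream
      }
    }
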
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-thrift/src/main/java/org/apache/parquet/thrift/ParquetWriteProtocol.java
----------------------------------------------------------------------
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ParquetWriteProtocol.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ParquetWriteProtocol.java
index 40984cc..8755ee4 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ParquetWriteProtocol.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ParquetWriteProtocol.java
@@ -18,7 +18,6 @@
  */
 package org.apache.parquet.thrift;
 
-import static org.apache.parquet.Log.DEBUG;
 
 import java.nio.ByteBuffer;
 
@@ -32,7 +31,6 @@ import org.apache.thrift.protocol.TSet;
 import org.apache.thrift.protocol.TStruct;
 import org.apache.thrift.protocol.TType;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.io.ColumnIO;
 import org.apache.parquet.io.GroupColumnIO;
 import org.apache.parquet.io.MessageColumnIO;
@@ -48,6 +46,8 @@ import org.apache.parquet.thrift.struct.ThriftType.ListType;
 import org.apache.parquet.thrift.struct.ThriftType.MapType;
 import org.apache.parquet.thrift.struct.ThriftType.SetType;
 import org.apache.parquet.thrift.struct.ThriftType.StructType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class ParquetWriteProtocol extends ParquetProtocol {
 
@@ -409,7 +409,7 @@ public class ParquetWriteProtocol extends ParquetProtocol {
 
   }
 
-  private static final Log LOG = Log.getLog(ParquetWriteProtocol.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ParquetWriteProtocol.class);
 
 
   private final RecordConsumer recordConsumer;
@@ -438,7 +438,7 @@ public class ParquetWriteProtocol extends ParquetProtocol {
    */
   @Override
   public void writeMessageBegin(TMessage message) throws TException {
-    if (DEBUG) LOG.debug("writeMessageBegin("+message+")");
+    LOG.debug("writeMessageBegin({})", message);
     currentProtocol.writeMessageBegin(message);
   }
 
@@ -448,7 +448,7 @@ public class ParquetWriteProtocol extends ParquetProtocol {
    */
   @Override
   public void writeMessageEnd() throws TException {
-    if (DEBUG) LOG.debug("writeMessageEnd()");
+    LOG.debug("writeMessageEnd()");
     currentProtocol.writeMessageEnd();
   }
 
@@ -458,7 +458,7 @@ public class ParquetWriteProtocol extends ParquetProtocol {
    */
   @Override
   public void writeStructBegin(TStruct struct) throws TException {
-    if (DEBUG) LOG.debug("writeStructBegin("+toString(struct)+")");
+    if (LOG.isDebugEnabled()) LOG.debug("writeStructBegin("+toString(struct)+")");
     currentProtocol.writeStructBegin(struct);
   }
 
@@ -468,7 +468,7 @@ public class ParquetWriteProtocol extends ParquetProtocol {
    */
   @Override
   public void writeStructEnd() throws TException {
-    if (DEBUG) LOG.debug("writeStructEnd()");
+    LOG.debug("writeStructEnd()");
     currentProtocol.writeStructEnd();
   }
 
@@ -478,7 +478,7 @@ public class ParquetWriteProtocol extends ParquetProtocol {
    */
   @Override
   public void writeFieldBegin(TField field) throws TException {
-    if (DEBUG) LOG.debug("writeFieldBegin("+field+")");
+    LOG.debug("writeFieldBegin({})", field);
     currentProtocol.writeFieldBegin(field);
   }
 
@@ -488,7 +488,7 @@ public class ParquetWriteProtocol extends ParquetProtocol {
    */
   @Override
   public void writeFieldEnd() throws TException {
-    if (DEBUG) LOG.debug("writeFieldEnd()");
+    LOG.debug("writeFieldEnd()");
     currentProtocol.writeFieldEnd();
   }
 
@@ -498,7 +498,7 @@ public class ParquetWriteProtocol extends ParquetProtocol {
    */
   @Override
   public void writeFieldStop() throws TException {
-    if (DEBUG) LOG.debug("writeFieldStop()");
+    LOG.debug("writeFieldStop()");
     currentProtocol.writeFieldStop();
   }
 
@@ -508,7 +508,7 @@ public class ParquetWriteProtocol extends ParquetProtocol {
    */
   @Override
   public void writeMapBegin(TMap map) throws TException {
-    if (DEBUG) LOG.debug("writeMapBegin("+toString(map)+")");
+    if (LOG.isDebugEnabled()) LOG.debug("writeMapBegin("+toString(map)+")");
     currentProtocol.writeMapBegin(map);
   }
 
@@ -518,7 +518,7 @@ public class ParquetWriteProtocol extends ParquetProtocol {
    */
   @Override
   public void writeMapEnd() throws TException {
-    if (DEBUG) LOG.debug("writeMapEnd()");
+    LOG.debug("writeMapEnd()");
     currentProtocol.writeMapEnd();
   }
 
@@ -528,7 +528,7 @@ public class ParquetWriteProtocol extends ParquetProtocol {
    */
   @Override
   public void writeListBegin(TList list) throws TException {
-    if (DEBUG) LOG.debug("writeListBegin("+toString(list)+")");
+    if (LOG.isDebugEnabled()) LOG.debug("writeListBegin("+toString(list)+")");
     currentProtocol.writeListBegin(list);
   }
 
@@ -539,7 +539,7 @@ public class ParquetWriteProtocol extends ParquetProtocol {
    */
   @Override
   public void writeListEnd() throws TException {
-    if (DEBUG) LOG.debug("writeListEnd()");
+    LOG.debug("writeListEnd()");
     currentProtocol.writeListEnd();
   }
 
@@ -550,7 +550,7 @@ public class ParquetWriteProtocol extends ParquetProtocol {
    */
   @Override
   public void writeSetBegin(TSet set) throws TException {
-    if (DEBUG) LOG.debug("writeSetBegin("+set+")");
+    LOG.debug("writeSetBegin({})", set);
     currentProtocol.writeSetBegin(set);
   }
 
@@ -560,7 +560,7 @@ public class ParquetWriteProtocol extends ParquetProtocol {
    */
   @Override
   public void writeSetEnd() throws TException {
-    if (DEBUG) LOG.debug("writeSetEnd()");
+    LOG.debug("writeSetEnd()");
     currentProtocol.writeSetEnd();
   }
 
@@ -570,7 +570,7 @@ public class ParquetWriteProtocol extends ParquetProtocol {
    */
   @Override
   public void writeBool(boolean b) throws TException {
-    if (DEBUG) LOG.debug("writeBool("+b+")");
+    LOG.debug("writeBool({})", b);
     currentProtocol.writeBool(b);
   }
 
@@ -580,7 +580,7 @@ public class ParquetWriteProtocol extends ParquetProtocol {
    */
   @Override
   public void writeByte(byte b) throws TException {
-    if (DEBUG) LOG.debug("writeByte("+b+")");
+    LOG.debug("writeByte({})", b);
     currentProtocol.writeByte(b);
   }
 
@@ -590,7 +590,7 @@ public class ParquetWriteProtocol extends ParquetProtocol {
    */
   @Override
   public void writeI16(short i16) throws TException {
-    if (DEBUG) LOG.debug("writeI16("+i16+")");
+    LOG.debug("writeI16({})", i16);
     currentProtocol.writeI16(i16);
   }
 
@@ -600,7 +600,7 @@ public class ParquetWriteProtocol extends ParquetProtocol {
    */
   @Override
   public void writeI32(int i32) throws TException {
-    if (DEBUG) LOG.debug("writeI32("+i32+")");
+    LOG.debug("writeI32({})", i32);
     currentProtocol.writeI32(i32);
   }
 
@@ -610,7 +610,7 @@ public class ParquetWriteProtocol extends ParquetProtocol {
    */
   @Override
   public void writeI64(long i64) throws TException {
-    if (DEBUG) LOG.debug("writeI64("+i64+")");
+    LOG.debug("writeI64({})", i64);
     currentProtocol.writeI64(i64);
   }
 
@@ -620,7 +620,7 @@ public class ParquetWriteProtocol extends ParquetProtocol {
    */
   @Override
   public void writeDouble(double dub) throws TException {
-    if (DEBUG) LOG.debug("writeDouble("+dub+")");
+    LOG.debug("writeDouble({})", dub);
     currentProtocol.writeDouble(dub);
   }
 
@@ -630,7 +630,7 @@ public class ParquetWriteProtocol extends ParquetProtocol {
    */
   @Override
   public void writeString(String str) throws TException {
-    if (DEBUG) LOG.debug("writeString("+str+")");
+    LOG.debug("writeString({})", str);
     currentProtocol.writeString(str);
   }
 
@@ -640,7 +640,7 @@ public class ParquetWriteProtocol extends ParquetProtocol {
    */
   @Override
   public void writeBinary(ByteBuffer buf) throws TException {
-    if (DEBUG) LOG.debug("writeBinary("+buf+")");
+    LOG.debug("writeBinary({})", buf);
     currentProtocol.writeBinary(buf);
   }
 

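ParquetWriteProtocol shows the mixed outcome: calls whose argument goes through the private toString(...) helper keep an isDebugEnabled() guard, while the rest move to placeholders. Note that placeholder calls taking primitives (writeI32, writeBool, and so on) still autobox the argument before the level check runs, which is presumably why the commit log mentions removing debug statements from high performance sections. A sketch of the two cases, where render() is an illustrative stand-in for the helper:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class WriteTrace {
      private static final Logger LOG = LoggerFactory.getLogger(WriteTrace.class);

      void traceMap(Object map) {
        // Rendering the argument is itself costly, so the guard stays:
        if (LOG.isDebugEnabled()) {
          LOG.debug("writeMapBegin(" + render(map) + ")");
        }
      }

      void traceI32(int i32) {
        // Placeholder form: formatting is deferred, though the int is
        // still boxed to an Integer before debug() is entered.
        LOG.debug("writeI32({})", i32);
      }

      private String render(Object o) { // illustrative stand-in for toString(TMap)
        return String.valueOf(o);
      }
    }
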
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftMetaData.java
----------------------------------------------------------------------
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftMetaData.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftMetaData.java
index a7628cc..f61c311 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftMetaData.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftMetaData.java
@@ -19,11 +19,12 @@
 package org.apache.parquet.thrift;
 import java.util.*;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.hadoop.BadConfigurationException;
 import org.apache.parquet.thrift.struct.ThriftType;
 import org.apache.parquet.thrift.struct.ThriftType.StructType;
 import org.apache.thrift.TBase;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  *
@@ -33,7 +34,7 @@ import org.apache.thrift.TBase;
  *
  */
 public class ThriftMetaData {
-  private static final Log LOG = Log.getLog(ThriftMetaData.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ThriftMetaData.class);
 
   private static final String THRIFT_CLASS = "thrift.class";
   private static final String THRIFT_DESCRIPTOR = "thrift.descriptor";

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftRecordConverter.java
----------------------------------------------------------------------
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftRecordConverter.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftRecordConverter.java
index 3160d5f..0bc0455 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftRecordConverter.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/ThriftRecordConverter.java
@@ -34,7 +34,6 @@ import org.apache.thrift.protocol.TSet;
 import org.apache.thrift.protocol.TStruct;
 import org.apache.thrift.protocol.TType;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.Preconditions;
 import org.apache.parquet.io.ParquetDecodingException;
 import org.apache.parquet.io.api.Binary;
@@ -56,6 +55,8 @@ import org.apache.parquet.thrift.struct.ThriftType.MapType;
 import org.apache.parquet.thrift.struct.ThriftType.SetType;
 import org.apache.parquet.thrift.struct.ThriftType.StructType;
 import org.apache.parquet.thrift.struct.ThriftTypeID;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * converts the columnar events into a Thrift protocol.
@@ -66,7 +67,7 @@ import org.apache.parquet.thrift.struct.ThriftTypeID;
  */
 public class ThriftRecordConverter<T> extends RecordMaterializer<T> {
 
-  private static final Log LOG = Log.getLog(ThriftRecordConverter.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ThriftRecordConverter.class);
 
   public static final String IGNORE_NULL_LIST_ELEMENTS =
       "parquet.thrift.ignore-null-elements";

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/StrictFieldProjectionFilter.java
----------------------------------------------------------------------
diff --git a/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/StrictFieldProjectionFilter.java b/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/StrictFieldProjectionFilter.java
index 645ae96..b048f16 100644
--- a/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/StrictFieldProjectionFilter.java
+++ b/parquet-thrift/src/main/java/org/apache/parquet/thrift/projection/StrictFieldProjectionFilter.java
@@ -21,9 +21,10 @@ package org.apache.parquet.thrift.projection;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.Strings;
 import org.apache.parquet.glob.WildcardPath;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Stricter Implementation of {@link FieldProjectionFilter}.
@@ -38,7 +39,7 @@ import org.apache.parquet.glob.WildcardPath;
  * throw when {@link #assertNoUnmatchedPatterns()} is called.
  */
 public class StrictFieldProjectionFilter implements FieldProjectionFilter {
-  private static final Log LOG = Log.getLog(FieldProjectionFilter.class);
+  private static final Logger LOG = LoggerFactory.getLogger(FieldProjectionFilter.class);
   private static final String GLOB_SEPARATOR = ";";
 
   // use a list instead of a Set, so we can detect overlapping patterns and

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-thrift/src/test/java/org/apache/parquet/hadoop/thrift/TestInputOutputFormat.java
----------------------------------------------------------------------
diff --git a/parquet-thrift/src/test/java/org/apache/parquet/hadoop/thrift/TestInputOutputFormat.java b/parquet-thrift/src/test/java/org/apache/parquet/hadoop/thrift/TestInputOutputFormat.java
index 0835cdb..af8e60d 100644
--- a/parquet-thrift/src/test/java/org/apache/parquet/hadoop/thrift/TestInputOutputFormat.java
+++ b/parquet-thrift/src/test/java/org/apache/parquet/hadoop/thrift/TestInputOutputFormat.java
@@ -45,7 +45,6 @@ import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
 import org.apache.thrift.TBase;
 import org.junit.Test;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.example.data.Group;
 import org.apache.parquet.hadoop.metadata.CompressionCodecName;
 import org.apache.parquet.thrift.test.compat.StructV1;
@@ -56,9 +55,11 @@ import com.twitter.data.proto.tutorial.thrift.AddressBook;
 import com.twitter.data.proto.tutorial.thrift.Name;
 import com.twitter.data.proto.tutorial.thrift.Person;
 import com.twitter.data.proto.tutorial.thrift.PhoneNumber;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class TestInputOutputFormat {
-  private static final Log LOG = Log.getLog(TestInputOutputFormat.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestInputOutputFormat.class);
 
   public static AddressBook nextAddressbook(int i) {
     final ArrayList<Person> persons = new ArrayList<Person>();
@@ -245,10 +246,10 @@ public class TestInputOutputFormat {
   public static void waitForJob(Job job) throws Exception {
     job.submit();
     while (!job.isComplete()) {
-      LOG.debug("waiting for job " + job.getJobName());
+      LOG.debug("waiting for job {}", job.getJobName());
       sleep(100);
     }
-    LOG.info("status for job " + job.getJobName() + ": " + (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
+    LOG.info("status for job {}: {}", job.getJobName(), (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
     if (!job.isSuccessful()) {
       throw new RuntimeException("job failed " + job.getJobName());
     }
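
Stitched together from the + lines above, the shared test helper now reads as follows; this sketch only reassembles the hunk and assumes Hadoop's mapreduce Job API, as the tests do:

    import org.apache.hadoop.mapreduce.Job;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class JobWaiter {
      private static final Logger LOG = LoggerFactory.getLogger(JobWaiter.class);

      static void waitForJob(Job job) throws Exception {
        job.submit();
        while (!job.isComplete()) {
          LOG.debug("waiting for job {}", job.getJobName());
          Thread.sleep(100); // poll until the MR job finishes
        }
        LOG.info("status for job {}: {}", job.getJobName(),
            job.isSuccessful() ? "SUCCESS" : "FAILURE");
        if (!job.isSuccessful()) {
          throw new RuntimeException("job failed " + job.getJobName());
        }
      }
    }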


[4/4] parquet-mr git commit: PARQUET-423: Replace old Log class with SLF4J Logging

Posted by ju...@apache.org.
PARQUET-423: Replace old Log class with SLF4J Logging

And make writing files less noisy

Author: Niels Basjes <nb...@bol.com>

Closes #369 from nielsbasjes/PARQUET-423-2 and squashes the following commits:

b31e30f [Niels Basjes] Merge branch 'master' of github.com:apache/parquet-mr into PARQUET-423-2
2d4db4b [Niels Basjes] Merge branch 'PARQUET-423-2' of github.com:nielsbasjes/parquet-mr into PARQUET-423-2
49fcaa7 [Niels Basjes] PARQUET-423: Remove debug logging statements in high performance sections during build time
aaaf4a6 [Niels Basjes] Merge branch 'PARQUET-423-2' of github.com:nielsbasjes/parquet-mr into PARQUET-423-2
745666e [Niels Basjes] Undo needless change
94e0c7a [Niels Basjes] PARQUET-423: Further optimize logging performance
b72f924 [Niels Basjes] PARQUET-423: Improved the performance
cb7eb61 [Niels Basjes] PARQUET-423: Workaround AVRO errors
7d161b3 [Niels Basjes] PARQUET-423: Restore the old (obsolete) Log class
05d6a47 [Niels Basjes] PARQUET-423: Replace old Log class with SLF4J Logging
692ebfb [Niels Basjes] Undo needless change
f1ede3d [Niels Basjes] PARQUET-423: Further optimize logging performance
a0c6b59 [Niels Basjes] PARQUET-423: Improved the performance
67bef9b [Niels Basjes] PARQUET-423: Workaround AVRO errors
87cd64f [Niels Basjes] PARQUET-423: Restore the old (obsolete) Log class
96d97d5 [Niels Basjes] PARQUET-423: Replace old Log class with SLF4J Logging


Project: http://git-wip-us.apache.org/repos/asf/parquet-mr/repo
Commit: http://git-wip-us.apache.org/repos/asf/parquet-mr/commit/df9d8e41
Tree: http://git-wip-us.apache.org/repos/asf/parquet-mr/tree/df9d8e41
Diff: http://git-wip-us.apache.org/repos/asf/parquet-mr/diff/df9d8e41

Branch: refs/heads/master
Commit: df9d8e415436292ae33e1ca0b8da256640de9710
Parents: aa416b5
Author: Niels Basjes <nb...@bol.com>
Authored: Wed Oct 26 09:09:56 2016 -0700
Committer: Julien Le Dem <ju...@dremio.com>
Committed: Wed Oct 26 09:09:56 2016 -0700

----------------------------------------------------------------------
 .../parquet/avro/TestInputOutputFormat.java     |  9 ++--
 .../avro/TestReflectInputOutputFormat.java      |  9 ++--
 .../avro/TestSpecificInputOutputFormat.java     |  9 ++--
 .../apache/parquet/CorruptDeltaByteArrays.java  | 11 ++--
 .../org/apache/parquet/CorruptStatistics.java   |  4 +-
 .../parquet/column/impl/ColumnReaderImpl.java   | 20 ++++----
 .../parquet/column/impl/ColumnWriterV1.java     | 14 ++---
 .../parquet/column/impl/ColumnWriterV2.java     | 16 +++---
 .../bitpacking/BitPackingValuesReader.java      |  7 +--
 .../bitpacking/ByteBitPackingValuesReader.java  |  7 +--
 .../DeltaLengthByteArrayValuesReader.java       |  8 +--
 .../DeltaLengthByteArrayValuesWriter.java       |  7 +--
 .../dictionary/DictionaryValuesReader.java      | 11 ++--
 .../dictionary/DictionaryValuesWriter.java      | 11 ++--
 .../values/plain/BinaryPlainValuesReader.java   |  8 +--
 .../values/plain/BooleanPlainValuesReader.java  |  8 +--
 .../FixedLenByteArrayPlainValuesReader.java     |  9 ++--
 .../FixedLenByteArrayPlainValuesWriter.java     |  7 +--
 .../column/values/plain/PlainValuesReader.java  |  9 ++--
 .../column/values/plain/PlainValuesWriter.java  |  7 +--
 .../rle/RunLengthBitPackingHybridDecoder.java   | 12 ++---
 .../rle/RunLengthBitPackingHybridEncoder.java   | 12 ++---
 .../org/apache/parquet/example/data/Group.java  | 10 ++--
 .../parquet/filter2/compat/FilterCompat.java    |  9 ++--
 .../org/apache/parquet/io/BaseRecordReader.java | 28 +++++-----
 .../java/org/apache/parquet/io/ColumnIO.java    |  3 --
 .../org/apache/parquet/io/GroupColumnIO.java    |  5 +-
 .../org/apache/parquet/io/MessageColumnIO.java  | 54 +++++++++++---------
 .../io/RecordConsumerLoggingWrapper.java        | 49 +++++++++---------
 .../parquet/io/RecordReaderImplementation.java  |  7 +--
 .../parquet/io/ValidatingRecordConsumer.java    | 10 ++--
 .../parquet/schema/MessageTypeParser.java       |  5 +-
 .../parquet/column/mem/TestMemColumn.java       |  9 ++--
 .../parquet/column/page/mem/MemPageReader.java  |  8 +--
 .../parquet/column/page/mem/MemPageStore.java   | 17 +++---
 .../parquet/column/page/mem/MemPageWriter.java  | 27 +++++-----
 .../values/bitpacking/TestBitPackingColumn.java | 11 ++--
 .../java/org/apache/parquet/io/PerfTest.java    |  5 --
 .../org/apache/parquet/io/TestColumnIO.java     |  7 +--
 .../java/org/apache/parquet/Closeables.java     |  5 +-
 .../org/apache/parquet/bytes/BytesUtils.java    |  7 +--
 .../test/java/org/apache/parquet/TestLog.java   | 31 -----------
 .../org/apache/parquet/bytes/BytesInput.java    | 26 +++++-----
 .../bytes/CapacityByteArrayOutputStream.java    | 12 +++--
 .../bitpacking/ByteBasedBitPackingEncoder.java  | 14 ++---
 .../values/bitpacking/TestBitPacking.java       |  9 ++--
 .../values/bitpacking/TestByteBitPacking.java   | 43 ++++++++--------
 .../values/bitpacking/TestLemireBitPacking.java | 17 +++---
 .../dictionarylevel/DictionaryFilter.java       |  7 +--
 .../converter/ParquetMetadataConverter.java     |  9 ++--
 .../hadoop/ColumnChunkPageReadStore.java        |  5 +-
 .../hadoop/ColumnChunkPageWriteStore.java       | 10 ++--
 .../parquet/hadoop/DirectCodecFactory.java      | 11 ++--
 .../hadoop/InternalParquetRecordReader.java     | 28 +++++-----
 .../hadoop/InternalParquetRecordWriter.java     | 16 +++---
 .../org/apache/parquet/hadoop/LruCache.java     | 34 ++++++------
 .../apache/parquet/hadoop/MemoryManager.java    |  7 +--
 .../parquet/hadoop/ParquetFileReader.java       | 36 +++++--------
 .../parquet/hadoop/ParquetFileWriter.java       | 41 +++++++--------
 .../parquet/hadoop/ParquetInputFormat.java      | 30 +++++------
 .../parquet/hadoop/ParquetOutputCommitter.java  |  5 +-
 .../parquet/hadoop/ParquetOutputFormat.java     | 28 +++++-----
 .../parquet/hadoop/ParquetRecordReader.java     |  5 +-
 .../hadoop/UnmaterializableRecordCounter.java   |  5 +-
 .../parquet/hadoop/codec/CodecConfig.java       | 18 +++----
 .../parquet/hadoop/util/HadoopStreams.java      |  6 ++-
 .../parquet/hadoop/util/SerializationUtil.java  |  5 +-
 .../parquet/hadoop/TestParquetFileWriter.java   |  7 +--
 .../hadoop/example/TestInputOutputFormat.java   |  9 ++--
 .../src/test/resources/log4j.properties         | 24 +++++++++
 .../parquet/hive/internal/Hive010Binding.java   |  5 +-
 .../parquet/hive/internal/Hive012Binding.java   |  5 +-
 .../apache/parquet/hive/HiveBindingFactory.java | 15 +++---
 .../io/parquet/MapredParquetOutputFormat.java   |  8 +--
 .../read/ParquetRecordReaderWrapper.java        |  6 +--
 .../write/ParquetRecordWriterWrapper.java       | 10 ++--
 .../org/apache/parquet/pig/ParquetLoader.java   | 32 ++++++------
 .../apache/parquet/pig/PigSchemaConverter.java  | 18 +++----
 .../apache/parquet/pig/TupleReadSupport.java    |  5 +-
 .../java/org/apache/parquet/pig/PerfTest2.java  |  8 ++-
 .../parquet/pig/TestTupleRecordConsumer.java    | 13 ++---
 .../parquet/pig/TupleConsumerPerfTest.java      |  1 -
 .../apache/parquet/proto/ProtoReadSupport.java  | 11 ++--
 .../parquet/proto/ProtoSchemaConverter.java     |  5 +-
 .../apache/parquet/proto/ProtoWriteSupport.java |  5 +-
 .../parquet/proto/utils/WriteUsingMR.java       |  9 ++--
 .../thrift/AbstractThriftWriteSupport.java      |  5 +-
 .../hadoop/thrift/ThriftReadSupport.java        |  9 ++--
 .../parquet/thrift/ParquetReadProtocol.java     | 47 +++++++++--------
 .../parquet/thrift/ParquetWriteProtocol.java    | 48 ++++++++---------
 .../apache/parquet/thrift/ThriftMetaData.java   |  5 +-
 .../parquet/thrift/ThriftRecordConverter.java   |  5 +-
 .../projection/StrictFieldProjectionFilter.java |  5 +-
 .../hadoop/thrift/TestInputOutputFormat.java    |  9 ++--
 ...stParquetToThriftReadWriteAndProjection.java |  7 +--
 .../thrift/TestThriftToParquetFileWriter.java   |  8 +--
 .../parquet/thrift/TestParquetReadProtocol.java |  7 +--
 .../thrift/TestParquetWriteProtocol.java        | 13 ++---
 .../parquet/tools/command/MergeCommand.java     |  2 -
 pom.xml                                         |  1 -
 100 files changed, 670 insertions(+), 646 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-avro/src/test/java/org/apache/parquet/avro/TestInputOutputFormat.java
----------------------------------------------------------------------
diff --git a/parquet-avro/src/test/java/org/apache/parquet/avro/TestInputOutputFormat.java b/parquet-avro/src/test/java/org/apache/parquet/avro/TestInputOutputFormat.java
index 36c090f..7ba6c9b 100644
--- a/parquet-avro/src/test/java/org/apache/parquet/avro/TestInputOutputFormat.java
+++ b/parquet-avro/src/test/java/org/apache/parquet/avro/TestInputOutputFormat.java
@@ -36,14 +36,15 @@ import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
 import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
 import org.junit.Test;
-import org.apache.parquet.Log;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static java.lang.Thread.sleep;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
 
 public class TestInputOutputFormat {
-  private static final Log LOG = Log.getLog(TestInputOutputFormat.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestInputOutputFormat.class);
 
   private static Schema avroSchema;
   static {
@@ -132,10 +133,10 @@ public class TestInputOutputFormat {
   private void waitForJob(Job job) throws Exception {
     job.submit();
     while (!job.isComplete()) {
-      LOG.debug("waiting for job " + job.getJobName());
+      LOG.debug("waiting for job {}", job.getJobName());
       sleep(100);
     }
-    LOG.info("status for job " + job.getJobName() + ": " + (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
+    LOG.info("status for job {}: {}", job.getJobName(), (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
     if (!job.isSuccessful()) {
       throw new RuntimeException("job failed " + job.getJobName());
     }

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-avro/src/test/java/org/apache/parquet/avro/TestReflectInputOutputFormat.java
----------------------------------------------------------------------
diff --git a/parquet-avro/src/test/java/org/apache/parquet/avro/TestReflectInputOutputFormat.java b/parquet-avro/src/test/java/org/apache/parquet/avro/TestReflectInputOutputFormat.java
index 3e1d32e..729f24a 100644
--- a/parquet-avro/src/test/java/org/apache/parquet/avro/TestReflectInputOutputFormat.java
+++ b/parquet-avro/src/test/java/org/apache/parquet/avro/TestReflectInputOutputFormat.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
-import org.apache.parquet.Log;
 import org.apache.parquet.column.ColumnReader;
 import org.apache.parquet.filter.ColumnPredicates;
 import org.apache.parquet.filter.ColumnRecordFilter;
@@ -46,6 +45,8 @@ import org.apache.parquet.filter.UnboundRecordFilter;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static java.lang.Thread.sleep;
 import static org.junit.Assert.assertArrayEquals;
@@ -55,7 +56,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 public class TestReflectInputOutputFormat {
-  private static final Log LOG = Log.getLog(TestReflectInputOutputFormat.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestReflectInputOutputFormat.class);
 
 
   public static class Service {
@@ -477,10 +478,10 @@ public class TestReflectInputOutputFormat {
   private void waitForJob(Job job) throws Exception {
     job.submit();
     while (!job.isComplete()) {
-      LOG.debug("waiting for job " + job.getJobName());
+      LOG.debug("waiting for job {}", job.getJobName());
       sleep(100);
     }
-    LOG.info("status for job " + job.getJobName() + ": " + (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
+    LOG.info("status for job {}: {}", job.getJobName(), (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
     if (!job.isSuccessful()) {
       throw new RuntimeException("job failed " + job.getJobName());
     }

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-avro/src/test/java/org/apache/parquet/avro/TestSpecificInputOutputFormat.java
----------------------------------------------------------------------
diff --git a/parquet-avro/src/test/java/org/apache/parquet/avro/TestSpecificInputOutputFormat.java b/parquet-avro/src/test/java/org/apache/parquet/avro/TestSpecificInputOutputFormat.java
index 17a0af1..a0b58f3 100644
--- a/parquet-avro/src/test/java/org/apache/parquet/avro/TestSpecificInputOutputFormat.java
+++ b/parquet-avro/src/test/java/org/apache/parquet/avro/TestSpecificInputOutputFormat.java
@@ -39,15 +39,16 @@ import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
-import org.apache.parquet.Log;
 import org.apache.parquet.column.ColumnReader;
 import org.apache.parquet.filter.ColumnPredicates;
 import org.apache.parquet.filter.ColumnRecordFilter;
 import org.apache.parquet.filter.RecordFilter;
 import org.apache.parquet.filter.UnboundRecordFilter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class TestSpecificInputOutputFormat {
-  private static final Log LOG = Log.getLog(TestSpecificInputOutputFormat.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestSpecificInputOutputFormat.class);
 
   public static Car nextRecord(int i) {
     String vin = "1VXBR12EXCP000000";
@@ -268,10 +269,10 @@ public class TestSpecificInputOutputFormat {
   private void waitForJob(Job job) throws Exception {
     job.submit();
     while (!job.isComplete()) {
-      LOG.debug("waiting for job " + job.getJobName());
+      LOG.debug("waiting for job {}", job.getJobName());
       sleep(100);
     }
-    LOG.info("status for job " + job.getJobName() + ": " + (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
+    LOG.info("status for job {}: {}", job.getJobName(), (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
     if (!job.isSuccessful()) {
       throw new RuntimeException("job failed " + job.getJobName());
     }

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/CorruptDeltaByteArrays.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/CorruptDeltaByteArrays.java b/parquet-column/src/main/java/org/apache/parquet/CorruptDeltaByteArrays.java
index 258c9ee..d19e489 100644
--- a/parquet-column/src/main/java/org/apache/parquet/CorruptDeltaByteArrays.java
+++ b/parquet-column/src/main/java/org/apache/parquet/CorruptDeltaByteArrays.java
@@ -20,9 +20,11 @@ package org.apache.parquet;
 
 import org.apache.parquet.VersionParser.ParsedVersion;
 import org.apache.parquet.column.Encoding;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class CorruptDeltaByteArrays {
-  private static final Log LOG = Log.getLog(CorruptStatistics.class);
+  private static final Logger LOG = LoggerFactory.getLogger(CorruptDeltaByteArrays.class);
 
   private static final SemanticVersion PARQUET_246_FIXED_VERSION =
       new SemanticVersion(1, 8, 0);
@@ -43,7 +45,7 @@ public class CorruptDeltaByteArrays {
 
     if (!version.hasSemanticVersion()) {
       LOG.warn("Requiring sequential reads because created_by did not " +
-          "contain a valid version (see PARQUET-246): " + version.version);
+          "contain a valid version (see PARQUET-246): {}", version.version);
       return true;
     }
 
@@ -61,7 +63,7 @@ public class CorruptDeltaByteArrays {
 
     if (semver.compareTo(PARQUET_246_FIXED_VERSION) < 0) {
       LOG.info("Requiring sequential reads because this file was created " +
-          "prior to " + PARQUET_246_FIXED_VERSION + ". See PARQUET-246" );
+          "prior to {}. See PARQUET-246", PARQUET_246_FIXED_VERSION );
       return true;
     }
 
@@ -75,8 +77,7 @@ public class CorruptDeltaByteArrays {
     }
 
     if (Strings.isNullOrEmpty(createdBy)) {
-      LOG.info("Requiring sequential reads because file version is empty. " +
-          "See PARQUET-246");
+      LOG.info("Requiring sequential reads because file version is empty. See PARQUET-246");
       return true;
     }
 

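Worth remembering when reading loggers like the one above: LoggerFactory.getLogger(Class) uses the class name as the logger name, which is what log configurations filter on and what appears in the output. A class that passes a neighboring class (an easy copy-paste slip) silently files its messages under the wrong category. A small sketch (names are hypothetical):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class CategoryExample {
      // Each class should pass itself; passing SomeOtherClass.class here
      // would route these messages to SomeOtherClass's logger instead.
      private static final Logger LOG = LoggerFactory.getLogger(CategoryExample.class);

      void warnOnce() {
        LOG.warn("logged under {}", LOG.getName()); // prints "CategoryExample"
      }
    }
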
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/CorruptStatistics.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/CorruptStatistics.java b/parquet-column/src/main/java/org/apache/parquet/CorruptStatistics.java
index 3b90338..3e3aa3c 100644
--- a/parquet-column/src/main/java/org/apache/parquet/CorruptStatistics.java
+++ b/parquet-column/src/main/java/org/apache/parquet/CorruptStatistics.java
@@ -24,6 +24,8 @@ import org.apache.parquet.SemanticVersion.SemanticVersionParseException;
 import org.apache.parquet.VersionParser.ParsedVersion;
 import org.apache.parquet.VersionParser.VersionParseException;
 import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * There was a bug (PARQUET-251) that caused the statistics metadata
@@ -35,7 +37,7 @@ import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
 public class CorruptStatistics {
   private static final AtomicBoolean alreadyLogged = new AtomicBoolean(false);
 
-  private static final Log LOG = Log.getLog(CorruptStatistics.class);
+  private static final Logger LOG = LoggerFactory.getLogger(CorruptStatistics.class);
 
   // the version in which the bug described by jira: PARQUET-251 was fixed
   // the bug involved writing invalid binary statistics, so stats written prior to this

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnReaderImpl.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnReaderImpl.java b/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnReaderImpl.java
index 3fc327e..931b4b1 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnReaderImpl.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnReaderImpl.java
@@ -19,7 +19,6 @@
 package org.apache.parquet.column.impl;
 
 import static java.lang.String.format;
-import static org.apache.parquet.Log.DEBUG;
 import static org.apache.parquet.Preconditions.checkNotNull;
 import static org.apache.parquet.column.ValuesType.DEFINITION_LEVEL;
 import static org.apache.parquet.column.ValuesType.REPETITION_LEVEL;
@@ -30,7 +29,6 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 
 import org.apache.parquet.CorruptDeltaByteArrays;
-import org.apache.parquet.Log;
 import org.apache.parquet.VersionParser.ParsedVersion;
 import org.apache.parquet.bytes.BytesInput;
 import org.apache.parquet.bytes.BytesUtils;
@@ -51,6 +49,8 @@ import org.apache.parquet.io.api.Binary;
 import org.apache.parquet.io.api.PrimitiveConverter;
 import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
 import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeNameConverter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * ColumnReader implementation
@@ -59,7 +59,7 @@ import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeNameConverter;
  *
  */
 public class ColumnReaderImpl implements ColumnReader {
-  private static final Log LOG = Log.getLog(ColumnReaderImpl.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ColumnReaderImpl.class);
 
   /**
    * binds the lower level page decoder to the record converter materializing the records
@@ -523,7 +523,7 @@ public class ColumnReaderImpl implements ColumnReader {
   private void checkRead() {
     if (isPageFullyConsumed()) {
       if (isFullyConsumed()) {
-        if (DEBUG) LOG.debug("end reached");
+        LOG.debug("end reached");
         repetitionLevel = 0; // the next repetition level
         return;
       }
@@ -533,7 +533,7 @@ public class ColumnReaderImpl implements ColumnReader {
   }
 
   private void readPage() {
-    if (DEBUG) LOG.debug("loading page");
+    LOG.debug("loading page");
     DataPage page = pageReader.readPage();
     page.accept(new DataPage.Visitor<Void>() {
       @Override
@@ -590,14 +590,14 @@ public class ColumnReaderImpl implements ColumnReader {
     this.definitionLevelColumn = new ValuesReaderIntIterator(dlReader);
     try {
       ByteBuffer bytes = page.getBytes().toByteBuffer();
-      if (DEBUG) LOG.debug("page size " + bytes.remaining() + " bytes and " + pageValueCount + " records");
-      if (DEBUG) LOG.debug("reading repetition levels at 0");
+      LOG.debug("page size {} bytes and {} records", bytes.remaining(), pageValueCount);
+      LOG.debug("reading repetition levels at 0");
       rlReader.initFromPage(pageValueCount, bytes, 0);
       int next = rlReader.getNextOffset();
-      if (DEBUG) LOG.debug("reading definition levels at " + next);
+      LOG.debug("reading definition levels at {}", next);
       dlReader.initFromPage(pageValueCount, bytes, next);
       next = dlReader.getNextOffset();
-      if (DEBUG) LOG.debug("reading data at " + next);
+      LOG.debug("reading data at {}", next);
       initDataReader(page.getValueEncoding(), bytes, next, page.getValueCount());
     } catch (IOException e) {
       throw new ParquetDecodingException("could not read page " + page + " in col " + path, e);
@@ -608,7 +608,7 @@ public class ColumnReaderImpl implements ColumnReader {
     this.repetitionLevelColumn = newRLEIterator(path.getMaxRepetitionLevel(), page.getRepetitionLevels());
     this.definitionLevelColumn = newRLEIterator(path.getMaxDefinitionLevel(), page.getDefinitionLevels());
     try {
-      if (DEBUG) LOG.debug("page data size " + page.getData().size() + " bytes and " + pageValueCount + " records");
+      LOG.debug("page data size {} bytes and {} records", page.getData().size(), pageValueCount);
       initDataReader(page.getDataEncoding(), page.getData().toByteBuffer(), 0, page.getValueCount());
     } catch (IOException e) {
       throw new ParquetDecodingException("could not read page " + page + " in col " + path, e);

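The v1 page path above reads one contiguous buffer laid out as repetition levels, then definition levels, then values; each reader reports the offset where it stopped so the next reader can start there. A rough sketch of that offset chaining, with a hypothetical Section interface standing in for the real ValuesReader:

    import java.nio.ByteBuffer;

    class OffsetChainingSketch {
      // Hypothetical stand-in for ValuesReader.initFromPage/getNextOffset.
      interface Section {
        int initFromPage(ByteBuffer bytes, int offset); // returns the next offset
      }

      static void readPage(ByteBuffer bytes, Section rl, Section dl, Section data) {
        int next = rl.initFromPage(bytes, 0); // repetition levels start at 0
        next = dl.initFromPage(bytes, next);  // definition levels follow
        data.initFromPage(bytes, next);       // values occupy the rest
      }
    }
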
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnWriterV1.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnWriterV1.java b/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnWriterV1.java
index dc6ebec..c5b3884 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnWriterV1.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnWriterV1.java
@@ -22,7 +22,6 @@ import static org.apache.parquet.bytes.BytesInput.concat;
 
 import java.io.IOException;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.column.ColumnDescriptor;
 import org.apache.parquet.column.ColumnWriter;
 import org.apache.parquet.column.ParquetProperties;
@@ -32,8 +31,8 @@ import org.apache.parquet.column.statistics.Statistics;
 import org.apache.parquet.column.values.ValuesWriter;
 import org.apache.parquet.io.ParquetEncodingException;
 import org.apache.parquet.io.api.Binary;
-
-import static java.lang.Math.max;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Writes (repetition level, definition level, value) triplets and deals with writing pages to the underlying layer.
@@ -42,8 +41,11 @@ import static java.lang.Math.max;
  *
  */
 final class ColumnWriterV1 implements ColumnWriter {
-  private static final Log LOG = Log.getLog(ColumnWriterV1.class);
-  private static final boolean DEBUG = Log.DEBUG;
+  private static final Logger LOG = LoggerFactory.getLogger(ColumnWriterV1.class);
+
+  // Debugging is disabled by default; guarding log calls with "if (DEBUG)" on this compile-time
+  // constant lets the java compiler (not the JIT) strip the unused statements at build time.
+  private static final boolean DEBUG = false;
 
   private final ColumnDescriptor path;
   private final PageWriter pageWriter;
@@ -74,7 +76,7 @@ final class ColumnWriterV1 implements ColumnWriter {
   }
 
   private void log(Object value, int r, int d) {
-    LOG.debug(path + " " + value + " r:" + r + " d:" + d);
+    if (DEBUG) LOG.debug( "{} {} r:{} d:{}", path, value, r, d);
   }
 
   private void resetStatistics() {

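The DEBUG comment above relies on a Java language guarantee: when the condition of an if statement is the compile-time constant false, javac omits the guarded statements from the generated bytecode entirely (the spec carves out this case precisely to allow conditional compilation), so the calls cost nothing at runtime. A tiny self-contained demonstration:

    public class ConstantFoldingExample {
      private static final boolean DEBUG = false; // compile-time constant

      public static void main(String[] args) {
        if (DEBUG) {
          // javac drops this whole block from the .class file, so
          // buildMessage() is never called at runtime.
          System.out.println(buildMessage());
        }
        System.out.println("done");
      }

      private static String buildMessage() {
        return "only present when DEBUG is true at compile time";
      }
    }
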
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnWriterV2.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnWriterV2.java b/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnWriterV2.java
index 396d53a..c6fd91b 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnWriterV2.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/impl/ColumnWriterV2.java
@@ -18,15 +18,10 @@
  */
 package org.apache.parquet.column.impl;
 
-import static java.lang.Math.max;
-import static org.apache.parquet.bytes.BytesUtils.getWidthFromMaxInt;
-
 import java.io.IOException;
 
 import org.apache.parquet.Ints;
-import org.apache.parquet.Log;
 import org.apache.parquet.bytes.BytesInput;
-import org.apache.parquet.bytes.CapacityByteArrayOutputStream;
 import org.apache.parquet.column.ColumnDescriptor;
 import org.apache.parquet.column.ColumnWriter;
 import org.apache.parquet.column.Encoding;
@@ -38,6 +33,8 @@ import org.apache.parquet.column.values.ValuesWriter;
 import org.apache.parquet.column.values.rle.RunLengthBitPackingHybridEncoder;
 import org.apache.parquet.io.ParquetEncodingException;
 import org.apache.parquet.io.api.Binary;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Writes (repetition level, definition level, value) triplets and deals with writing pages to the underlying layer.
@@ -46,8 +43,11 @@ import org.apache.parquet.io.api.Binary;
  *
  */
 final class ColumnWriterV2 implements ColumnWriter {
-  private static final Log LOG = Log.getLog(ColumnWriterV2.class);
-  private static final boolean DEBUG = Log.DEBUG;
+  private static final Logger LOG = LoggerFactory.getLogger(ColumnWriterV2.class);
+
+  // Debugging is disabled by default; guarding log calls with "if (DEBUG)" on this compile-time
+  // constant lets the java compiler (not the JIT) strip the unused statements at build time.
+  private static final boolean DEBUG = false;
 
   private final ColumnDescriptor path;
   private final PageWriter pageWriter;
@@ -73,7 +73,7 @@ final class ColumnWriterV2 implements ColumnWriter {
   }
 
   private void log(Object value, int r, int d) {
-    LOG.debug(path + " " + value + " r:" + r + " d:" + d);
+    LOG.debug("{} {} r:{} d:{}", path, value, r, d);
   }
 
   private void resetStatistics() {

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/column/values/bitpacking/BitPackingValuesReader.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/bitpacking/BitPackingValuesReader.java b/parquet-column/src/main/java/org/apache/parquet/column/values/bitpacking/BitPackingValuesReader.java
index f540c39..a5608cb 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/bitpacking/BitPackingValuesReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/bitpacking/BitPackingValuesReader.java
@@ -25,11 +25,12 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 
 import org.apache.parquet.bytes.ByteBufferInputStream;
-import org.apache.parquet.Log;
 import org.apache.parquet.bytes.BytesUtils;
 import org.apache.parquet.column.values.ValuesReader;
 import org.apache.parquet.column.values.bitpacking.BitPacking.BitPackingReader;
 import org.apache.parquet.io.ParquetDecodingException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * a column reader that packs the ints in the number of bits required based on the maximum size.
@@ -38,7 +39,7 @@ import org.apache.parquet.io.ParquetDecodingException;
  *
  */
 public class BitPackingValuesReader extends ValuesReader {
-  private static final Log LOG = Log.getLog(BitPackingValuesReader.class);
+  private static final Logger LOG = LoggerFactory.getLogger(BitPackingValuesReader.class);
 
   private ByteBufferInputStream in;
   private BitPackingReader bitPackingReader;
@@ -73,7 +74,7 @@ public class BitPackingValuesReader extends ValuesReader {
   public void initFromPage(int valueCount, ByteBuffer in, int offset) throws IOException {
     int effectiveBitLength = valueCount * bitsPerValue;
     int length = BytesUtils.paddedByteCountFromBits(effectiveBitLength);
-    if (Log.DEBUG) LOG.debug("reading " + length + " bytes for " + valueCount + " values of size " + bitsPerValue + " bits." );
+    LOG.debug("reading {} bytes for {} values of size {} bits.", length, valueCount, bitsPerValue);
     this.in = new ByteBufferInputStream(in, offset, length);
     this.bitPackingReader = createBitPackingReader(bitsPerValue, this.in, valueCount);
     this.nextOffset = offset + length;

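The length computation above is a plain ceiling division: packed values rarely end on a byte boundary, so the bit length is rounded up to whole bytes before reading. For example, 10 values at 3 bits each is 30 bits, which occupies 4 bytes. The same arithmetic, spelled out:

    public class PaddedByteCountExample {
      // Mirrors BytesUtils.paddedByteCountFromBits: ceil(bitLength / 8).
      static int paddedByteCountFromBits(int bitLength) {
        return (bitLength + 7) / 8;
      }

      public static void main(String[] args) {
        int valueCount = 10, bitsPerValue = 3;
        System.out.println(paddedByteCountFromBits(valueCount * bitsPerValue)); // 4
      }
    }
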
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/column/values/bitpacking/ByteBitPackingValuesReader.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/bitpacking/ByteBitPackingValuesReader.java b/parquet-column/src/main/java/org/apache/parquet/column/values/bitpacking/ByteBitPackingValuesReader.java
index f4c8c8e..7c19340 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/bitpacking/ByteBitPackingValuesReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/bitpacking/ByteBitPackingValuesReader.java
@@ -22,14 +22,15 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.nio.ByteBuffer;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.bytes.BytesUtils;
 import org.apache.parquet.column.values.ValuesReader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class ByteBitPackingValuesReader extends ValuesReader {
   private static final int VALUES_AT_A_TIME = 8; // because we're using unpack8Values()
 
-  private static final Log LOG = Log.getLog(ByteBitPackingValuesReader.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ByteBitPackingValuesReader.class);
 
   private final int bitWidth;
   private final BytePacker packer;
@@ -69,7 +70,7 @@ public class ByteBitPackingValuesReader extends ValuesReader {
       throws IOException {
     int effectiveBitLength = valueCount * bitWidth;
     int length = BytesUtils.paddedByteCountFromBits(effectiveBitLength); // ceil
-    if (Log.DEBUG) LOG.debug("reading " + length + " bytes for " + valueCount + " values of size " + bitWidth + " bits." );
+    LOG.debug("reading {} bytes for {} values of size {} bits.", length, valueCount, bitWidth);
     this.encoded = page;
     this.encodedPos = offset;
     this.decodedPosition = VALUES_AT_A_TIME - 1;

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/column/values/deltalengthbytearray/DeltaLengthByteArrayValuesReader.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/deltalengthbytearray/DeltaLengthByteArrayValuesReader.java b/parquet-column/src/main/java/org/apache/parquet/column/values/deltalengthbytearray/DeltaLengthByteArrayValuesReader.java
index 41f221d..d810ba8 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/deltalengthbytearray/DeltaLengthByteArrayValuesReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/deltalengthbytearray/DeltaLengthByteArrayValuesReader.java
@@ -18,15 +18,15 @@
  */
 package org.apache.parquet.column.values.deltalengthbytearray;
 
-import static org.apache.parquet.Log.DEBUG;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.column.values.ValuesReader;
 import org.apache.parquet.column.values.delta.DeltaBinaryPackingValuesReader;
 import org.apache.parquet.io.api.Binary;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Reads binary data written by {@link DeltaLengthByteArrayValuesWriter}
@@ -36,7 +36,7 @@ import org.apache.parquet.io.api.Binary;
  */
 public class DeltaLengthByteArrayValuesReader extends ValuesReader {
 
-  private static final Log LOG = Log.getLog(DeltaLengthByteArrayValuesReader.class);
+  private static final Logger LOG = LoggerFactory.getLogger(DeltaLengthByteArrayValuesReader.class);
   private ValuesReader lengthReader;
   private ByteBuffer in;
   private int offset;
@@ -48,7 +48,7 @@ public class DeltaLengthByteArrayValuesReader extends ValuesReader {
   @Override
   public void initFromPage(int valueCount, ByteBuffer in, int offset)
       throws IOException {
-    if (DEBUG) LOG.debug("init from page at offset "+ offset + " for length " + (in.limit() - offset));
+    LOG.debug("init from page at offset {} for length {}", offset, (in.limit() - offset));
     lengthReader.initFromPage(valueCount, in, offset);
     offset = lengthReader.getNextOffset();
     this.in = in;

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/column/values/deltalengthbytearray/DeltaLengthByteArrayValuesWriter.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/deltalengthbytearray/DeltaLengthByteArrayValuesWriter.java b/parquet-column/src/main/java/org/apache/parquet/column/values/deltalengthbytearray/DeltaLengthByteArrayValuesWriter.java
index f7ad912..118153c 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/deltalengthbytearray/DeltaLengthByteArrayValuesWriter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/deltalengthbytearray/DeltaLengthByteArrayValuesWriter.java
@@ -21,7 +21,6 @@ package org.apache.parquet.column.values.deltalengthbytearray;
 import java.io.IOException;
 
 import org.apache.parquet.bytes.ByteBufferAllocator;
-import org.apache.parquet.Log;
 import org.apache.parquet.bytes.BytesInput;
 import org.apache.parquet.bytes.CapacityByteArrayOutputStream;
 import org.apache.parquet.bytes.LittleEndianDataOutputStream;
@@ -31,6 +30,8 @@ import org.apache.parquet.column.values.delta.DeltaBinaryPackingValuesWriter;
 import org.apache.parquet.column.values.delta.DeltaBinaryPackingValuesWriterForInteger;
 import org.apache.parquet.io.ParquetEncodingException;
 import org.apache.parquet.io.api.Binary;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Write lengths of byte-arrays using delta encoding, followed by concatenated byte-arrays
@@ -44,7 +45,7 @@ import org.apache.parquet.io.api.Binary;
  */
 public class DeltaLengthByteArrayValuesWriter extends ValuesWriter {
 
-  private static final Log LOG = Log.getLog(DeltaLengthByteArrayValuesWriter.class);
+  private static final Logger LOG = LoggerFactory.getLogger(DeltaLengthByteArrayValuesWriter.class);
 
   private ValuesWriter lengthWriter;
   private CapacityByteArrayOutputStream arrayOut;
@@ -81,7 +82,7 @@ public class DeltaLengthByteArrayValuesWriter extends ValuesWriter {
     } catch (IOException e) {
       throw new ParquetEncodingException("could not write page", e);
     }
-    if (Log.DEBUG) LOG.debug("writing a buffer of size " + arrayOut.size());
+    LOG.debug("writing a buffer of size {}", arrayOut.size());
     return BytesInput.concat(lengthWriter.getBytes(), BytesInput.from(arrayOut));
   }
 

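As getBytes() above shows, a delta-length byte array page is two concatenated streams: the value lengths (delta encoded by lengthWriter) followed by the raw value bytes accumulated in arrayOut. A runnable sketch of the layout, using a plain list and stream where the real writer uses delta/bit-packed encodings:

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.util.ArrayList;
    import java.util.List;

    public class DeltaLengthLayoutSketch {
      public static void main(String[] args) throws IOException {
        String[] values = {"a", "bc", "def"};
        List<Integer> lengths = new ArrayList<Integer>();          // stands in for lengthWriter
        ByteArrayOutputStream bytes = new ByteArrayOutputStream(); // stands in for arrayOut
        for (String v : values) {
          byte[] b = v.getBytes(StandardCharsets.UTF_8);
          lengths.add(b.length);
          bytes.write(b);
        }
        System.out.println(lengths);                 // [1, 2, 3]
        System.out.println(bytes.toString("UTF-8")); // abcdef
      }
    }
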
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/DictionaryValuesReader.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/DictionaryValuesReader.java b/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/DictionaryValuesReader.java
index e421da9..19ff47c 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/DictionaryValuesReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/DictionaryValuesReader.java
@@ -18,19 +18,19 @@
  */
 package org.apache.parquet.column.values.dictionary;
 
-import static org.apache.parquet.Log.DEBUG;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
 import org.apache.parquet.bytes.ByteBufferInputStream;
-import org.apache.parquet.Log;
 import org.apache.parquet.bytes.BytesUtils;
 import org.apache.parquet.column.Dictionary;
 import org.apache.parquet.column.values.ValuesReader;
 import org.apache.parquet.column.values.rle.RunLengthBitPackingHybridDecoder;
 import org.apache.parquet.io.ParquetDecodingException;
 import org.apache.parquet.io.api.Binary;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Reads values that have been dictionary encoded
@@ -39,7 +39,7 @@ import org.apache.parquet.io.api.Binary;
  *
  */
 public class DictionaryValuesReader extends ValuesReader {
-  private static final Log LOG = Log.getLog(DictionaryValuesReader.class);
+  private static final Logger LOG = LoggerFactory.getLogger(DictionaryValuesReader.class);
 
   private ByteBufferInputStream in;
 
@@ -56,10 +56,9 @@ public class DictionaryValuesReader extends ValuesReader {
       throws IOException {
     this.in = new ByteBufferInputStream(page, offset, page.limit() - offset);
     if (page.limit() - offset > 0) {
-      if (DEBUG)
-        LOG.debug("init from page at offset " + offset + " for length " + (page.limit() - offset));
+      LOG.debug("init from page at offset {} for length {}", offset, (page.limit() - offset));
       int bitWidth = BytesUtils.readIntLittleEndianOnOneByte(in);
-      if (DEBUG) LOG.debug("bit width " + bitWidth);
+      LOG.debug("bit width {}", bitWidth);
       decoder = new RunLengthBitPackingHybridDecoder(bitWidth, in);
     } else {
       decoder = new RunLengthBitPackingHybridDecoder(1, in) {

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/DictionaryValuesWriter.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/DictionaryValuesWriter.java b/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/DictionaryValuesWriter.java
index 86edd79..5ef7712 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/DictionaryValuesWriter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/dictionary/DictionaryValuesWriter.java
@@ -18,7 +18,6 @@
  */
 package org.apache.parquet.column.values.dictionary;
 
-import static org.apache.parquet.Log.DEBUG;
 import static org.apache.parquet.bytes.BytesInput.concat;
 import it.unimi.dsi.fastutil.doubles.Double2IntLinkedOpenHashMap;
 import it.unimi.dsi.fastutil.doubles.Double2IntMap;
@@ -41,7 +40,6 @@ import java.util.Iterator;
 import java.util.List;
 
 import org.apache.parquet.bytes.ByteBufferAllocator;
-import org.apache.parquet.Log;
 import org.apache.parquet.bytes.BytesInput;
 import org.apache.parquet.bytes.BytesUtils;
 import org.apache.parquet.bytes.CapacityByteArrayOutputStream;
@@ -56,6 +54,9 @@ import org.apache.parquet.column.values.rle.RunLengthBitPackingHybridEncoder;
 import org.apache.parquet.io.ParquetEncodingException;
 import org.apache.parquet.io.api.Binary;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * Will attempt to encode values using a dictionary and fall back to plain encoding
  *  if the dictionary gets too big
@@ -64,7 +65,7 @@ import org.apache.parquet.io.api.Binary;
  *
  */
 public abstract class DictionaryValuesWriter extends ValuesWriter implements RequiresFallback {
-  private static final Log LOG = Log.getLog(DictionaryValuesWriter.class);
+  private static final Logger LOG = LoggerFactory.getLogger(DictionaryValuesWriter.class);
 
   /* max entries allowed for the dictionary will fail over to plain encoding if reached */
   private static final int MAX_DICTIONARY_ENTRIES = Integer.MAX_VALUE - 1;
@@ -158,7 +159,7 @@ public abstract class DictionaryValuesWriter extends ValuesWriter implements Req
   @Override
   public BytesInput getBytes() {
     int maxDicId = getDictionarySize() - 1;
-    if (DEBUG) LOG.debug("max dic id " + maxDicId);
+    LOG.debug("max dic id {}", maxDicId);
     int bitWidth = BytesUtils.getWidthFromMaxInt(maxDicId);
     int initialSlabSize =
         CapacityByteArrayOutputStream.initialSlabSizeHeuristic(MIN_INITIAL_SLAB_SIZE, maxDictionaryByteSize, 10);
@@ -174,7 +175,7 @@ public abstract class DictionaryValuesWriter extends ValuesWriter implements Req
       // encodes the bit width
       byte[] bytesHeader = new byte[] { (byte) bitWidth };
       BytesInput rleEncodedBytes = encoder.toBytes();
-      if (DEBUG) LOG.debug("rle encoded bytes " + rleEncodedBytes.size());
+      LOG.debug("rle encoded bytes {}", rleEncodedBytes.size());
       BytesInput bytes = concat(BytesInput.from(bytesHeader), rleEncodedBytes);
       // remember size of dictionary when we last wrote a page
       lastUsedDictionarySize = getDictionarySize();

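The bit width picked above is the smallest that can represent the largest dictionary id; getWidthFromMaxInt amounts to 32 minus the leading-zero count of the maximum value. For instance, a 6-entry dictionary has maxDicId = 5, which fits in 3 bits:

    public class BitWidthExample {
      // The same computation as BytesUtils.getWidthFromMaxInt for non-negative ids.
      static int widthFromMaxInt(int maxValue) {
        return 32 - Integer.numberOfLeadingZeros(maxValue);
      }

      public static void main(String[] args) {
        System.out.println(widthFromMaxInt(5));   // 3 bits cover ids 0..5
        System.out.println(widthFromMaxInt(255)); // 8
      }
    }
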
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/column/values/plain/BinaryPlainValuesReader.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/BinaryPlainValuesReader.java b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/BinaryPlainValuesReader.java
index 26f5e29..82e5551 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/BinaryPlainValuesReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/BinaryPlainValuesReader.java
@@ -18,19 +18,19 @@
  */
 package org.apache.parquet.column.values.plain;
 
-import static org.apache.parquet.Log.DEBUG;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.bytes.BytesUtils;
 import org.apache.parquet.column.values.ValuesReader;
 import org.apache.parquet.io.ParquetDecodingException;
 import org.apache.parquet.io.api.Binary;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class BinaryPlainValuesReader extends ValuesReader {
-  private static final Log LOG = Log.getLog(BinaryPlainValuesReader.class);
+  private static final Logger LOG = LoggerFactory.getLogger(BinaryPlainValuesReader.class);
   private ByteBuffer in;
   private int offset;
 
@@ -63,7 +63,7 @@ public class BinaryPlainValuesReader extends ValuesReader {
   @Override
   public void initFromPage(int valueCount, ByteBuffer in, int offset)
       throws IOException {
-    if (DEBUG) LOG.debug("init from page at offset "+ offset + " for length " + (in.limit() - offset));
+    LOG.debug("init from page at offset {} for length {}", offset, (in.limit() - offset));
     this.in = in;
     this.offset = offset;
   }

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/column/values/plain/BooleanPlainValuesReader.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/BooleanPlainValuesReader.java b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/BooleanPlainValuesReader.java
index a279938..1f8fc2c 100755
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/BooleanPlainValuesReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/BooleanPlainValuesReader.java
@@ -18,15 +18,15 @@
  */
 package org.apache.parquet.column.values.plain;
 
-import static org.apache.parquet.Log.DEBUG;
 import static org.apache.parquet.column.values.bitpacking.Packer.LITTLE_ENDIAN;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.column.values.ValuesReader;
 import org.apache.parquet.column.values.bitpacking.ByteBitPackingValuesReader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * encodes boolean for the plain encoding: one bit at a time (0 = false)
@@ -35,7 +35,7 @@ import org.apache.parquet.column.values.bitpacking.ByteBitPackingValuesReader;
  *
  */
 public class BooleanPlainValuesReader extends ValuesReader {
-  private static final Log LOG = Log.getLog(BooleanPlainValuesReader.class);
+  private static final Logger LOG = LoggerFactory.getLogger(BooleanPlainValuesReader.class);
 
   private ByteBitPackingValuesReader in = new ByteBitPackingValuesReader(1, LITTLE_ENDIAN);
 
@@ -64,7 +64,7 @@ public class BooleanPlainValuesReader extends ValuesReader {
    */
   @Override
   public void initFromPage(int valueCount, ByteBuffer in, int offset) throws IOException {
-    if (DEBUG) LOG.debug("init from page at offset "+ offset + " for length " + (in.limit() - offset));
+    LOG.debug("init from page at offset {} for length {}", offset, (in.limit() - offset));
     this.in.initFromPage(valueCount, in, offset);
   }
   

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/column/values/plain/FixedLenByteArrayPlainValuesReader.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/FixedLenByteArrayPlainValuesReader.java b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/FixedLenByteArrayPlainValuesReader.java
index 8496e7e..7a14f81 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/FixedLenByteArrayPlainValuesReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/FixedLenByteArrayPlainValuesReader.java
@@ -20,12 +20,11 @@ package org.apache.parquet.column.values.plain;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
-import org.apache.parquet.Log;
 import org.apache.parquet.column.values.ValuesReader;
 import org.apache.parquet.io.ParquetDecodingException;
 import org.apache.parquet.io.api.Binary;
-
-import static org.apache.parquet.Log.DEBUG;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * ValuesReader for FIXED_LEN_BYTE_ARRAY.
@@ -33,7 +32,7 @@ import static org.apache.parquet.Log.DEBUG;
  * @author David Z. Chen <dc...@linkedin.com>
  */
 public class FixedLenByteArrayPlainValuesReader extends ValuesReader {
-  private static final Log LOG = Log.getLog(FixedLenByteArrayPlainValuesReader.class);
+  private static final Logger LOG = LoggerFactory.getLogger(FixedLenByteArrayPlainValuesReader.class);
   private ByteBuffer in;
   private int offset;
   private int length;
@@ -61,7 +60,7 @@ public class FixedLenByteArrayPlainValuesReader extends ValuesReader {
   @Override
   public void initFromPage(int valueCount, ByteBuffer in, int offset)
       throws IOException {
-    if (DEBUG) LOG.debug("init from page at offset "+ offset + " for length " + (in.limit() - offset));
+    LOG.debug("init from page at offset {} for length {}", offset, (in.limit() - offset));
     this.in = in;
     this.offset = offset;
   }

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/column/values/plain/FixedLenByteArrayPlainValuesWriter.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/FixedLenByteArrayPlainValuesWriter.java b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/FixedLenByteArrayPlainValuesWriter.java
index 6ab2dea..d7b2deb 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/FixedLenByteArrayPlainValuesWriter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/FixedLenByteArrayPlainValuesWriter.java
@@ -21,7 +21,6 @@ package org.apache.parquet.column.values.plain;
 import java.io.IOException;
 
 import org.apache.parquet.bytes.ByteBufferAllocator;
-import org.apache.parquet.Log;
 import org.apache.parquet.bytes.BytesInput;
 import org.apache.parquet.bytes.CapacityByteArrayOutputStream;
 import org.apache.parquet.bytes.LittleEndianDataOutputStream;
@@ -29,6 +28,8 @@ import org.apache.parquet.column.values.ValuesWriter;
 import org.apache.parquet.column.Encoding;
 import org.apache.parquet.io.ParquetEncodingException;
 import org.apache.parquet.io.api.Binary;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * ValuesWriter for FIXED_LEN_BYTE_ARRAY.
@@ -36,7 +37,7 @@ import org.apache.parquet.io.api.Binary;
  * @author David Z. Chen <dc...@linkedin.com>
  */
 public class FixedLenByteArrayPlainValuesWriter extends ValuesWriter {
-  private static final Log LOG = Log.getLog(PlainValuesWriter.class);
+  private static final Logger LOG = LoggerFactory.getLogger(FixedLenByteArrayPlainValuesWriter.class);
 
   private CapacityByteArrayOutputStream arrayOut;
   private LittleEndianDataOutputStream out;
@@ -76,7 +77,7 @@ public class FixedLenByteArrayPlainValuesWriter extends ValuesWriter {
     } catch (IOException e) {
       throw new ParquetEncodingException("could not write page", e);
     }
-    if (Log.DEBUG) LOG.debug("writing a buffer of size " + arrayOut.size());
+    LOG.debug("writing a buffer of size {}", arrayOut.size());
     return BytesInput.from(arrayOut);
   }
 

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/column/values/plain/PlainValuesReader.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/PlainValuesReader.java b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/PlainValuesReader.java
index c8fb303..e79cbb2 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/PlainValuesReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/PlainValuesReader.java
@@ -18,16 +18,15 @@
  */
 package org.apache.parquet.column.values.plain;
 
-import static org.apache.parquet.Log.DEBUG;
-
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
 import org.apache.parquet.bytes.ByteBufferInputStream;
-import org.apache.parquet.Log;
 import org.apache.parquet.bytes.LittleEndianDataInputStream;
 import org.apache.parquet.column.values.ValuesReader;
 import org.apache.parquet.io.ParquetDecodingException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Plain encoding for float, double, int, long
@@ -36,7 +35,7 @@ import org.apache.parquet.io.ParquetDecodingException;
  *
  */
 abstract public class PlainValuesReader extends ValuesReader {
-  private static final Log LOG = Log.getLog(PlainValuesReader.class);
+  private static final Logger LOG = LoggerFactory.getLogger(PlainValuesReader.class);
 
   protected LittleEndianDataInputStream in;
 
@@ -46,7 +45,7 @@ abstract public class PlainValuesReader extends ValuesReader {
    */
   @Override
   public void initFromPage(int valueCount, ByteBuffer in, int offset) throws IOException {
-    if (DEBUG) LOG.debug("init from page at offset "+ offset + " for length " + (in.limit() - offset));
+    LOG.debug("init from page at offset {} for length {}", offset , (in.limit() - offset));
     this.in = new LittleEndianDataInputStream(toInputStream(in, offset));
   }
 

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/column/values/plain/PlainValuesWriter.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/PlainValuesWriter.java b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/PlainValuesWriter.java
index add5495..aa96cb6 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/plain/PlainValuesWriter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/plain/PlainValuesWriter.java
@@ -22,7 +22,6 @@ import java.io.IOException;
 import java.nio.charset.Charset;
 
 import org.apache.parquet.bytes.ByteBufferAllocator;
-import org.apache.parquet.Log;
 import org.apache.parquet.bytes.BytesInput;
 import org.apache.parquet.bytes.CapacityByteArrayOutputStream;
 import org.apache.parquet.bytes.LittleEndianDataOutputStream;
@@ -30,6 +29,8 @@ import org.apache.parquet.column.Encoding;
 import org.apache.parquet.column.values.ValuesWriter;
 import org.apache.parquet.io.ParquetEncodingException;
 import org.apache.parquet.io.api.Binary;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Plain encoding except for booleans
@@ -38,7 +39,7 @@ import org.apache.parquet.io.api.Binary;
  *
  */
 public class PlainValuesWriter extends ValuesWriter {
-  private static final Log LOG = Log.getLog(PlainValuesWriter.class);
+  private static final Logger LOG = LoggerFactory.getLogger(PlainValuesWriter.class);
 
   public static final Charset CHARSET = Charset.forName("UTF-8");
 
@@ -117,7 +118,7 @@ public class PlainValuesWriter extends ValuesWriter {
     } catch (IOException e) {
       throw new ParquetEncodingException("could not write page", e);
     }
-    if (Log.DEBUG) LOG.debug("writing a buffer of size " + arrayOut.size());
+    if (LOG.isDebugEnabled()) LOG.debug("writing a buffer of size {}", arrayOut.size());
     return BytesInput.from(arrayOut);
   }
 

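One subtlety the placeholder style does not remove: argument expressions are still evaluated before debug() decides to discard the message. That is harmless for cheap calls like arrayOut.size(), but when computing an argument is expensive an explicit isDebugEnabled() guard is still worthwhile. A sketch of the trade-off (expensiveSummary is hypothetical):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class GuardExample {
      private static final Logger LOG = LoggerFactory.getLogger(GuardExample.class);

      void write(int size) {
        // Cheap argument: the placeholder form alone is fine.
        LOG.debug("writing a buffer of size {}", size);

        // Costly argument: guard so expensiveSummary() only runs when needed.
        if (LOG.isDebugEnabled()) {
          LOG.debug("buffer contents: {}", expensiveSummary());
        }
      }

      private String expensiveSummary() { return "..."; } // hypothetical placeholder
    }
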
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridDecoder.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridDecoder.java b/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridDecoder.java
index 1280e8d..6daa349 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridDecoder.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridDecoder.java
@@ -18,7 +18,6 @@
  */
 package org.apache.parquet.column.values.rle;
 
-import static org.apache.parquet.Log.DEBUG;
 
 import java.io.DataInputStream;
 import java.io.IOException;
@@ -26,12 +25,13 @@ import java.io.InputStream;
 import java.nio.ByteBuffer;
 
 import org.apache.parquet.bytes.ByteBufferInputStream;
-import org.apache.parquet.Log;
 import org.apache.parquet.Preconditions;
 import org.apache.parquet.bytes.BytesUtils;
 import org.apache.parquet.column.values.bitpacking.BytePacker;
 import org.apache.parquet.column.values.bitpacking.Packer;
 import org.apache.parquet.io.ParquetDecodingException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Decodes values written in the grammar described in {@link RunLengthBitPackingHybridEncoder}
@@ -39,7 +39,7 @@ import org.apache.parquet.io.ParquetDecodingException;
  * @author Julien Le Dem
  */
 public class RunLengthBitPackingHybridDecoder {
-  private static final Log LOG = Log.getLog(RunLengthBitPackingHybridDecoder.class);
+  private static final Logger LOG = LoggerFactory.getLogger(RunLengthBitPackingHybridDecoder.class);
 
   private static enum MODE { RLE, PACKED }
 
@@ -53,7 +53,7 @@ public class RunLengthBitPackingHybridDecoder {
   private int[] currentBuffer;
 
   public RunLengthBitPackingHybridDecoder(int bitWidth, InputStream in) {
-    if (DEBUG) LOG.debug("decoding bitWidth " + bitWidth);
+    LOG.debug("decoding bitWidth {}", bitWidth);
 
     Preconditions.checkArgument(bitWidth >= 0 && bitWidth <= 32, "bitWidth must be >= 0 and <= 32");
     this.bitWidth = bitWidth;
@@ -87,13 +87,13 @@ public class RunLengthBitPackingHybridDecoder {
     switch (mode) {
     case RLE:
       currentCount = header >>> 1;
-      if (DEBUG) LOG.debug("reading " + currentCount + " values RLE");
+      LOG.debug("reading {} values RLE", currentCount);
       currentValue = BytesUtils.readIntLittleEndianPaddedOnBitWidth(in, bitWidth);
       break;
     case PACKED:
       int numGroups = header >>> 1;
       currentCount = numGroups * 8;
-      if (DEBUG) LOG.debug("reading " + currentCount + " values BIT PACKED");
+      LOG.debug("reading {} values BIT PACKED", currentCount);
       currentBuffer = new int[currentCount]; // TODO: reuse a buffer
       byte[] bytes = new byte[numGroups * bitWidth];
       // At the end of the file RLE data though, there might not be that many bytes left.

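The header decoded above packs the run mode into its lowest bit: 0 means an RLE run of (header >>> 1) copies of a single value, 1 means (header >>> 1) groups of 8 bit-packed values. A minimal sketch of that split:

    public class HybridHeaderExample {
      public static void main(String[] args) {
        int header = 0b1011; // an example already-decoded varint header
        boolean bitPacked = (header & 1) == 1;
        int count = header >>> 1;
        if (bitPacked) {
          System.out.println("bit packed: " + count + " groups = " + (count * 8) + " values");
        } else {
          System.out.println("RLE: " + count + " repeated values");
        }
      }
    }
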
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridEncoder.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridEncoder.java b/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridEncoder.java
index 001d3f6..5fba70a 100644
--- a/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridEncoder.java
+++ b/parquet-column/src/main/java/org/apache/parquet/column/values/rle/RunLengthBitPackingHybridEncoder.java
@@ -21,15 +21,15 @@ package org.apache.parquet.column.values.rle;
 import java.io.IOException;
 
 import org.apache.parquet.bytes.ByteBufferAllocator;
-import org.apache.parquet.Log;
 import org.apache.parquet.Preconditions;
 import org.apache.parquet.bytes.BytesInput;
 import org.apache.parquet.bytes.BytesUtils;
 import org.apache.parquet.bytes.CapacityByteArrayOutputStream;
 import org.apache.parquet.column.values.bitpacking.BytePacker;
 import org.apache.parquet.column.values.bitpacking.Packer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import static org.apache.parquet.Log.DEBUG;
 
 /**
  * Encodes values using a combination of run length encoding and bit packing,
@@ -60,7 +60,7 @@ import static org.apache.parquet.Log.DEBUG;
  * @author Alex Levenson
  */
 public class RunLengthBitPackingHybridEncoder {
-  private static final Log LOG = Log.getLog(RunLengthBitPackingHybridEncoder.class);
+  private static final Logger LOG = LoggerFactory.getLogger(RunLengthBitPackingHybridEncoder.class);
 
   private final BytePacker packer;
 
@@ -118,10 +118,8 @@ public class RunLengthBitPackingHybridEncoder {
   private boolean toBytesCalled;
 
   public RunLengthBitPackingHybridEncoder(int bitWidth, int initialCapacity, int pageSize, ByteBufferAllocator allocator) {
-    if (DEBUG) {
-      LOG.debug(String.format("Encoding: RunLengthBitPackingHybridEncoder with "
-        + "bithWidth: %d initialCapacity %d", bitWidth, initialCapacity));
-    }
+    LOG.debug("Encoding: RunLengthBitPackingHybridEncoder with "
+      + "bithWidth: {} initialCapacity {}", bitWidth, initialCapacity);
 
     Preconditions.checkArgument(bitWidth >= 0 && bitWidth <= 32, "bitWidth must be >= 0 and <= 32");
 

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/example/data/Group.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/example/data/Group.java b/parquet-column/src/main/java/org/apache/parquet/example/data/Group.java
index 3fb7d4d..61f6317 100644
--- a/parquet-column/src/main/java/org/apache/parquet/example/data/Group.java
+++ b/parquet-column/src/main/java/org/apache/parquet/example/data/Group.java
@@ -18,14 +18,14 @@
  */
 package org.apache.parquet.example.data;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.example.data.simple.NanoTime;
 import org.apache.parquet.io.api.Binary;
 import org.apache.parquet.io.api.RecordConsumer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 abstract public class Group extends GroupValueSource {
-  private static final Log logger = Log.getLog(Group.class);
-  private static final boolean DEBUG = Log.DEBUG;
+  private static final Logger LOG = LoggerFactory.getLogger(Group.class);
 
   public void add(String field, int value) {
     add(getType().getFieldIndex(field), value);
@@ -64,7 +64,9 @@ abstract public class Group extends GroupValueSource {
   }
 
   public Group addGroup(String field) {
-    if (DEBUG) logger.debug("add group "+field+" to "+getType().getName());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("add group {} to {}", field, getType().getName());
+    }
     return addGroup(getType().getFieldIndex(field));
   }
 

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/filter2/compat/FilterCompat.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/filter2/compat/FilterCompat.java b/parquet-column/src/main/java/org/apache/parquet/filter2/compat/FilterCompat.java
index 2efcc39..17bd2e1 100644
--- a/parquet-column/src/main/java/org/apache/parquet/filter2/compat/FilterCompat.java
+++ b/parquet-column/src/main/java/org/apache/parquet/filter2/compat/FilterCompat.java
@@ -18,10 +18,11 @@
  */
 package org.apache.parquet.filter2.compat;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.filter.UnboundRecordFilter;
 import org.apache.parquet.filter2.predicate.FilterPredicate;
 import org.apache.parquet.filter2.predicate.LogicalInverseRewriter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.parquet.Preconditions.checkArgument;
 import static org.apache.parquet.Preconditions.checkNotNull;
@@ -40,7 +41,7 @@ import static org.apache.parquet.Preconditions.checkNotNull;
  * codebase.
  */
 public class FilterCompat {
-  private static final Log LOG = Log.getLog(FilterCompat.class);
+  private static final Logger LOG = LoggerFactory.getLogger(FilterCompat.class);
 
   /**
    * Anyone wanting to use a {@link Filter} need only implement this interface,
@@ -67,13 +68,13 @@ public class FilterCompat {
   public static Filter get(FilterPredicate filterPredicate) {
     checkNotNull(filterPredicate, "filterPredicate");
 
-    LOG.info("Filtering using predicate: " + filterPredicate);
+    LOG.info("Filtering using predicate: {}", filterPredicate);
 
     // rewrite the predicate to not include the not() operator
     FilterPredicate collapsedPredicate = LogicalInverseRewriter.rewrite(filterPredicate);
 
     if (!filterPredicate.equals(collapsedPredicate)) {
-      LOG.info("Predicate has been collapsed to: " + collapsedPredicate);
+      LOG.info("Predicate has been collapsed to: {}", collapsedPredicate);
     }
 
     return new FilterPredicateCompat(collapsedPredicate);

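The collapse logged above works by pushing not() down to the leaves: De Morgan's laws rewrite negated and/or nodes, and each negated leaf comparison flips to its inverse operator (eq becomes notEq, and so on), so downstream evaluators never see a not(). A quick check of the boolean identities the rewrite relies on (run with -ea to enable asserts):

    public class DeMorganCheck {
      public static void main(String[] args) {
        boolean[] vals = {false, true};
        for (boolean a : vals) {
          for (boolean b : vals) {
            // The identities LogicalInverseRewriter applies structurally:
            assert !(a && b) == (!a || !b); // not(and) -> or(not, not)
            assert !(a || b) == (!a && !b); // not(or)  -> and(not, not)
          }
        }
        System.out.println("De Morgan identities hold");
      }
    }
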
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/io/BaseRecordReader.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/BaseRecordReader.java b/parquet-column/src/main/java/org/apache/parquet/io/BaseRecordReader.java
index f2d88fc..8c7f390 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/BaseRecordReader.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/BaseRecordReader.java
@@ -18,17 +18,17 @@
  */
 package org.apache.parquet.io;
 
-import static org.apache.parquet.Log.DEBUG;
-import org.apache.parquet.Log;
 import org.apache.parquet.column.ColumnReadStore;
 import org.apache.parquet.io.RecordReaderImplementation.State;
 import org.apache.parquet.io.api.Binary;
 import org.apache.parquet.io.api.RecordConsumer;
 import org.apache.parquet.io.api.RecordMaterializer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 // TODO(julien): this class appears to be unused -- can it be nuked? - todd
 public abstract class BaseRecordReader<T> extends RecordReader<T> {
-  private static final Log LOG = Log.getLog(BaseRecordReader.class);
+  private static final Logger LOG = LoggerFactory.getLogger(BaseRecordReader.class);
 
   public RecordConsumer recordConsumer;
   public RecordMaterializer<T> recordMaterializer;
@@ -48,11 +48,11 @@ public abstract class BaseRecordReader<T> extends RecordReader<T> {
   private int endIndex;
 
   protected void currentLevel(int currentLevel) {
-    if (DEBUG) LOG.debug("currentLevel: "+currentLevel);
+    LOG.debug("currentLevel: {}",currentLevel);
   }
 
   protected void log(String message) {
-    if (DEBUG) LOG.debug("bc: "+message);
+    LOG.debug("bc: {}", message);
   }
 
   final protected int getCaseId(int state, int currentLevel, int d, int nextR) {
@@ -62,18 +62,18 @@ public abstract class BaseRecordReader<T> extends RecordReader<T> {
   final protected void startMessage() {
     // reset state
     endField = null;
-    if (DEBUG) LOG.debug("startMessage()");
+    LOG.debug("startMessage()");
     recordConsumer.startMessage();
   }
 
   final protected void startGroup(String field, int index) {
     startField(field, index);
-    if (DEBUG) LOG.debug("startGroup()");
+    LOG.debug("startGroup()");
     recordConsumer.startGroup();
   }
 
   private void startField(String field, int index) {
-    if (DEBUG) LOG.debug("startField("+field+","+index+")");
+    LOG.debug("startField({},{})", field, index);
     if (endField != null && index == endIndex) {
       // skip the close/open tag
       endField = null;
@@ -89,13 +89,13 @@ public abstract class BaseRecordReader<T> extends RecordReader<T> {
 
   final protected void addPrimitiveINT64(String field, int index, long value) {
     startField(field, index);
-    if (DEBUG) LOG.debug("addLong("+value+")");
+    LOG.debug("addLong({})", value);
     recordConsumer.addLong(value);
     endField(field, index);
   }
 
   private void endField(String field, int index) {
-    if (DEBUG) LOG.debug("endField("+field+","+index+")");
+    LOG.debug("endField({},{})", field, index);
     if (endField != null) {
       recordConsumer.endField(endField, endIndex);
     }
@@ -105,14 +105,14 @@ public abstract class BaseRecordReader<T> extends RecordReader<T> {
 
   final protected void addPrimitiveBINARY(String field, int index, Binary value) {
     startField(field, index);
-    if (DEBUG) LOG.debug("addBinary("+value+")");
+    LOG.debug("addBinary({})", value);
     recordConsumer.addBinary(value);
     endField(field, index);
   }
 
   final protected void addPrimitiveINT32(String field, int index, int value) {
     startField(field, index);
-    if (DEBUG) LOG.debug("addInteger("+value+")");
+    LOG.debug("addInteger({})", value);
     recordConsumer.addInteger(value);
     endField(field, index);
   }
@@ -123,7 +123,7 @@ public abstract class BaseRecordReader<T> extends RecordReader<T> {
       recordConsumer.endField(endField, endIndex);
       endField = null;
     }
-    if (DEBUG) LOG.debug("endGroup()");
+    LOG.debug("endGroup()");
     recordConsumer.endGroup();
     endField(field, index);
   }
@@ -134,7 +134,7 @@ public abstract class BaseRecordReader<T> extends RecordReader<T> {
       recordConsumer.endField(endField, endIndex);
       endField = null;
     }
-    if (DEBUG) LOG.debug("endMessage()");
+    LOG.debug("endMessage()");
     recordConsumer.endMessage();
   }
 

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/io/ColumnIO.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/ColumnIO.java b/parquet-column/src/main/java/org/apache/parquet/io/ColumnIO.java
index 95a969e..9c6e729 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/ColumnIO.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/ColumnIO.java
@@ -22,7 +22,6 @@ package org.apache.parquet.io;
 import java.util.Arrays;
 import java.util.List;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.schema.Type;
 import org.apache.parquet.schema.Type.Repetition;
 
@@ -34,8 +33,6 @@ import org.apache.parquet.schema.Type.Repetition;
  */
 abstract public class ColumnIO {
 
-  static final boolean DEBUG = Log.DEBUG;
-
   private final GroupColumnIO parent;
   private final Type type;
   private final String name;

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/io/GroupColumnIO.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/GroupColumnIO.java b/parquet-column/src/main/java/org/apache/parquet/io/GroupColumnIO.java
index 1efe0d1..14b8426 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/GroupColumnIO.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/GroupColumnIO.java
@@ -27,8 +27,9 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.schema.GroupType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Group level of the IO structure
@@ -38,7 +39,7 @@ import org.apache.parquet.schema.GroupType;
  *
  */
 public class GroupColumnIO extends ColumnIO {
-  private static final Log LOG = Log.getLog(GroupColumnIO.class);
+  private static final Logger LOG = LoggerFactory.getLogger(GroupColumnIO.class);
 
   private final Map<String, ColumnIO> childrenByName = new HashMap<String, ColumnIO>();
   private final List<ColumnIO> children = new ArrayList<ColumnIO>();

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/io/MessageColumnIO.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/MessageColumnIO.java b/parquet-column/src/main/java/org/apache/parquet/io/MessageColumnIO.java
index f962105..67efdb3 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/MessageColumnIO.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/MessageColumnIO.java
@@ -25,12 +25,10 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.column.ColumnWriteStore;
 import org.apache.parquet.column.ColumnWriter;
 import org.apache.parquet.column.impl.ColumnReadStoreImpl;
 import org.apache.parquet.column.page.PageReadStore;
-import org.apache.parquet.column.values.dictionary.IntList;
 import org.apache.parquet.filter.UnboundRecordFilter;
 import org.apache.parquet.filter2.compat.FilterCompat;
 import org.apache.parquet.filter2.compat.FilterCompat.Filter;
@@ -49,6 +47,9 @@ import org.apache.parquet.schema.MessageType;
 
 import it.unimi.dsi.fastutil.ints.IntArrayList;
 import it.unimi.dsi.fastutil.ints.IntIterator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import static org.apache.parquet.Preconditions.checkNotNull;
 
 /**
@@ -58,9 +59,9 @@ import static org.apache.parquet.Preconditions.checkNotNull;
  * @author Julien Le Dem
  */
 public class MessageColumnIO extends GroupColumnIO {
-  private static final Log logger = Log.getLog(MessageColumnIO.class);
+  private static final Logger LOG = LoggerFactory.getLogger(MessageColumnIO.class);
 
-  private static final boolean DEBUG = Log.DEBUG;
+  private static final boolean DEBUG = LOG.isDebugEnabled();
 
   private List<PrimitiveColumnIO> leaves;
 
@@ -261,20 +262,24 @@ public class MessageColumnIO extends GroupColumnIO {
       r = new int[maxDepth];
     }
 
-    public void printState() {
-      log(currentLevel + ", " + fieldsWritten[currentLevel] + ": " + Arrays.toString(currentColumnIO.getFieldPath()) + " r:" + r[currentLevel]);
-      if (r[currentLevel] > currentColumnIO.getRepetitionLevel()) {
-        // sanity check
-        throw new InvalidRecordException(r[currentLevel] + "(r) > " + currentColumnIO.getRepetitionLevel() + " ( schema r)");
+    private void printState() {
+      if (DEBUG) {
+        log(currentLevel + ", " + fieldsWritten[currentLevel] + ": " + Arrays.toString(currentColumnIO.getFieldPath()) + " r:" + r[currentLevel]);
+        if (r[currentLevel] > currentColumnIO.getRepetitionLevel()) {
+          // sanity check
+          throw new InvalidRecordException(r[currentLevel] + "(r) > " + currentColumnIO.getRepetitionLevel() + " ( schema r)");
+        }
       }
     }
 
-    private void log(Object m) {
-      String indent = "";
-      for (int i = 0; i < currentLevel; ++i) {
-        indent += "  ";
+    private void log(Object message, Object... parameters) {
+      if (DEBUG) {
+        String indent = "";
+        for (int i = 0; i < currentLevel; ++i) {
+          indent += "  ";
+        }
+        LOG.debug(indent + message, parameters);
       }
-      logger.debug(indent + m);
     }
 
     @Override
@@ -298,7 +303,7 @@ public class MessageColumnIO extends GroupColumnIO {
     @Override
     public void startField(String field, int index) {
       try {
-        if (DEBUG) log("startField(" + field + ", " + index + ")");
+        if (DEBUG) log("startField({}, {})", field, index);
         currentColumnIO = ((GroupColumnIO) currentColumnIO).getChild(index);
         emptyField = true;
         if (DEBUG) printState();
@@ -309,7 +314,7 @@ public class MessageColumnIO extends GroupColumnIO {
 
     @Override
     public void endField(String field, int index) {
-      if (DEBUG) log("endField(" + field + ", " + index + ")");
+      if (DEBUG) log("endField({}, {})",field ,index);
       currentColumnIO = currentColumnIO.getParent();
       if (emptyField) {
        throw new ParquetEncodingException("empty fields are illegal, the field should be omitted completely instead");
@@ -326,8 +331,7 @@ public class MessageColumnIO extends GroupColumnIO {
           try {
             ColumnIO undefinedField = ((GroupColumnIO) currentColumnIO).getChild(i);
             int d = currentColumnIO.getDefinitionLevel();
-            if (DEBUG)
-              log(Arrays.toString(undefinedField.getFieldPath()) + ".writeNull(" + r[currentLevel] + "," + d + ")");
+            if (DEBUG) log(Arrays.toString(undefinedField.getFieldPath()) + ".writeNull(" + r[currentLevel] + "," + d + ")");
             writeNull(undefinedField, r[currentLevel], d);
           } catch (RuntimeException e) {
             throw new ParquetEncodingException("error while writing nulls for fields of indexes " + i + " . current index: " + fieldsWritten[currentLevel], e);
@@ -372,7 +376,7 @@ public class MessageColumnIO extends GroupColumnIO {
 
     private void setRepetitionLevel() {
       r[currentLevel] = currentColumnIO.getRepetitionLevel();
-      if (DEBUG) log("r: " + r[currentLevel]);
+      if (DEBUG) log("r: {}", r[currentLevel]);
     }
 
     @Override
@@ -428,7 +432,7 @@ public class MessageColumnIO extends GroupColumnIO {
 
     @Override
     public void addInteger(int value) {
-      if (DEBUG) log("addInt(" + value + ")");
+      if (DEBUG) log("addInt({})", value);
       emptyField = false;
       getColumnWriter().write(value, r[currentLevel], currentColumnIO.getDefinitionLevel());
 
@@ -438,7 +442,7 @@ public class MessageColumnIO extends GroupColumnIO {
 
     @Override
     public void addLong(long value) {
-      if (DEBUG) log("addLong(" + value + ")");
+      if (DEBUG) log("addLong({})", value);
       emptyField = false;
       getColumnWriter().write(value, r[currentLevel], currentColumnIO.getDefinitionLevel());
 
@@ -448,7 +452,7 @@ public class MessageColumnIO extends GroupColumnIO {
 
     @Override
     public void addBoolean(boolean value) {
-      if (DEBUG) log("addBoolean(" + value + ")");
+      if (DEBUG) log("addBoolean({})", value);
       emptyField = false;
       getColumnWriter().write(value, r[currentLevel], currentColumnIO.getDefinitionLevel());
 
@@ -458,7 +462,7 @@ public class MessageColumnIO extends GroupColumnIO {
 
     @Override
     public void addBinary(Binary value) {
-      if (DEBUG) log("addBinary(" + value.length() + " bytes)");
+      if (DEBUG) log("addBinary({} bytes)", value.length());
       emptyField = false;
       getColumnWriter().write(value, r[currentLevel], currentColumnIO.getDefinitionLevel());
 
@@ -468,7 +472,7 @@ public class MessageColumnIO extends GroupColumnIO {
 
     @Override
     public void addFloat(float value) {
-      if (DEBUG) log("addFloat(" + value + ")");
+      if (DEBUG) log("addFloat({})", value);
       emptyField = false;
       getColumnWriter().write(value, r[currentLevel], currentColumnIO.getDefinitionLevel());
 
@@ -478,7 +482,7 @@ public class MessageColumnIO extends GroupColumnIO {
 
     @Override
     public void addDouble(double value) {
-      if (DEBUG) log("addDouble(" + value + ")");
+      if (DEBUG) log("addDouble({})", value);
       emptyField = false;
       getColumnWriter().write(value, r[currentLevel], currentColumnIO.getDefinitionLevel());
 

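One subtlety in MessageColumnIO: the hot write path keeps a DEBUG field, now initialized from LOG.isDebugEnabled() when the class loads. That avoids a level check per value written, at the cost that a level change made after class initialization is not observed. A sketch of the trade-off (hypothetical class, assuming per-value overhead matters more here than runtime reconfigurability):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class CachedLevelSketch {
      private static final Logger LOG = LoggerFactory.getLogger(CachedLevelSketch.class);

      // Snapshot taken once at class-initialization time; the JIT can
      // typically fold the branch away when this is false, but a later
      // level change in the logging backend will not re-enable it.
      private static final boolean DEBUG = LOG.isDebugEnabled();

      void addValueCached(int value) {
        if (DEBUG) LOG.debug("addInt({})", value);
      }

      void addValueRechecked(int value) {
        // Per-call check: slightly more work, honours runtime changes.
        if (LOG.isDebugEnabled()) LOG.debug("addInt({})", value);
      }
    }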
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/io/RecordConsumerLoggingWrapper.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/RecordConsumerLoggingWrapper.java b/parquet-column/src/main/java/org/apache/parquet/io/RecordConsumerLoggingWrapper.java
index 7a8b1c1..b90e216 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/RecordConsumerLoggingWrapper.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/RecordConsumerLoggingWrapper.java
@@ -18,10 +18,12 @@
  */
 package org.apache.parquet.io;
 
-import java.util.Arrays;
-import org.apache.parquet.Log;
 import org.apache.parquet.io.api.Binary;
 import org.apache.parquet.io.api.RecordConsumer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Arrays;
 
 /**
  * This class can be used to wrap an actual RecordConsumer and log all calls
@@ -30,8 +32,7 @@ import org.apache.parquet.io.api.RecordConsumer;
  *
  */
 public class RecordConsumerLoggingWrapper extends RecordConsumer {
-    private static final Log logger = Log.getLog(RecordConsumerLoggingWrapper.class);
-    private static final boolean DEBUG = Log.DEBUG;
+    private static final Logger LOG = LoggerFactory.getLogger(RecordConsumerLoggingWrapper.class);
 
     private final RecordConsumer delegate;
 
@@ -50,12 +51,12 @@ public class RecordConsumerLoggingWrapper extends RecordConsumer {
      */
     @Override
     public void startField(String field, int index) {
-      if (DEBUG) logOpen(field);
+      logOpen(field);
       delegate.startField(field, index);
     }
 
     private void logOpen(String field) {
-      log("<"+field+">");
+      log("<{}>", field);
     }
 
     private String indent() {
@@ -66,8 +67,10 @@ public class RecordConsumerLoggingWrapper extends RecordConsumer {
       return result.toString();
     }
 
-    private void log(Object value) {
-      logger.debug(indent() + value);
+    private void log(Object value, Object... parameters) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(indent() + value, parameters);
+      }
     }
 
     /**
@@ -75,8 +78,8 @@ public class RecordConsumerLoggingWrapper extends RecordConsumer {
      */
     @Override
     public void startGroup() {
-      if (DEBUG) ++indent;
-      if (DEBUG) log("<!-- start group -->");
+      ++indent;
+      log("<!-- start group -->");
       delegate.startGroup();
     }
 
@@ -85,7 +88,7 @@ public class RecordConsumerLoggingWrapper extends RecordConsumer {
      */
     @Override
     public void addInteger(int value) {
-      if (DEBUG) log(value);
+      log(value);
       delegate.addInteger(value);
     }
 
@@ -94,7 +97,7 @@ public class RecordConsumerLoggingWrapper extends RecordConsumer {
      */
     @Override
     public void addLong(long value) {
-      if (DEBUG) log(value);
+      log(value);
       delegate.addLong(value);
     }
 
@@ -103,7 +106,7 @@ public class RecordConsumerLoggingWrapper extends RecordConsumer {
      */
     @Override
     public void addBoolean(boolean value) {
-      if (DEBUG) log(value);
+      log(value);
       delegate.addBoolean(value);
     }
 
@@ -112,7 +115,7 @@ public class RecordConsumerLoggingWrapper extends RecordConsumer {
      */
     @Override
     public void addBinary(Binary value) {
-      if (DEBUG) log(Arrays.toString(value.getBytesUnsafe()));
+      if (LOG.isDebugEnabled()) log(Arrays.toString(value.getBytesUnsafe()));
       delegate.addBinary(value);
     }
 
@@ -121,7 +124,7 @@ public class RecordConsumerLoggingWrapper extends RecordConsumer {
      */
     @Override
     public void addFloat(float value) {
-      if (DEBUG) log(value);
+      log(value);
       delegate.addFloat(value);
     }
 
@@ -130,7 +133,7 @@ public class RecordConsumerLoggingWrapper extends RecordConsumer {
      */
     @Override
     public void addDouble(double value) {
-      if (DEBUG) log(value);
+      log(value);
       delegate.addDouble(value);
     }
 
@@ -139,7 +142,7 @@ public class RecordConsumerLoggingWrapper extends RecordConsumer {
      */
     @Override
     public void flush() {
-      if (DEBUG) log("<!-- flush -->");
+      log("<!-- flush -->");
       delegate.flush();
     }
 
@@ -148,8 +151,8 @@ public class RecordConsumerLoggingWrapper extends RecordConsumer {
      */
     @Override
     public void endGroup() {
-      if (DEBUG) log("<!-- end group -->");
-      if (DEBUG) --indent;
+      log("<!-- end group -->");
+      --indent;
       delegate.endGroup();
     }
 
@@ -158,12 +161,12 @@ public class RecordConsumerLoggingWrapper extends RecordConsumer {
      */
     @Override
     public void endField(String field, int index) {
-      if (DEBUG) logClose(field);
+      logClose(field);
       delegate.endField(field, index);
     }
 
     private void logClose(String field) {
-      log("</"+field+">");
+      log("</{}>", field);
     }
 
     /**
@@ -171,7 +174,7 @@ public class RecordConsumerLoggingWrapper extends RecordConsumer {
      */
     @Override
     public void startMessage() {
-      if (DEBUG) log("<!-- start message -->");
+      log("<!-- start message -->");
       delegate.startMessage();
     }
 
@@ -181,7 +184,7 @@ public class RecordConsumerLoggingWrapper extends RecordConsumer {
     @Override
     public void endMessage() {
       delegate.endMessage();
-      if (DEBUG) log("<!-- end message -->");
+      log("<!-- end message -->");
     }
 
 }

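RecordConsumerLoggingWrapper is a plain decorator: it logs every call at the current group indentation, then forwards to the real consumer. The "++indent"/"--indent" bookkeeping now runs unconditionally, which is harmless because the wrapper is only installed at all when debug logging is on (see the wrap() change in RecordReaderImplementation just below). A stripped-down sketch of the pattern, with the interface reduced to two methods for brevity:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggingDecoratorSketch {
      interface Consumer {            // stand-in for RecordConsumer
        void startGroup();
        void endGroup();
      }

      static class LoggingConsumer implements Consumer {
        private static final Logger LOG = LoggerFactory.getLogger(LoggingConsumer.class);
        private final Consumer delegate;
        private int indent;

        LoggingConsumer(Consumer delegate) { this.delegate = delegate; }

        public void startGroup() {
          ++indent;
          log("<!-- start group -->");
          delegate.startGroup();      // always forward, logged or not
        }

        public void endGroup() {
          log("<!-- end group -->");
          --indent;
          delegate.endGroup();
        }

        private void log(String message) {
          if (LOG.isDebugEnabled()) {
            // The indent string is only built when DEBUG is on.
            StringBuilder sb = new StringBuilder();
            for (int i = 0; i < indent; i++) sb.append("  ");
            LOG.debug(sb.append(message).toString());
          }
        }
      }
    }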
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/io/RecordReaderImplementation.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/RecordReaderImplementation.java b/parquet-column/src/main/java/org/apache/parquet/io/RecordReaderImplementation.java
index 7a87cbb..af7d4a5 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/RecordReaderImplementation.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/RecordReaderImplementation.java
@@ -26,7 +26,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.column.ColumnReader;
 import org.apache.parquet.column.impl.ColumnReadStoreImpl;
 import org.apache.parquet.io.api.Converter;
@@ -36,6 +35,8 @@ import org.apache.parquet.io.api.RecordConsumer;
 import org.apache.parquet.io.api.RecordMaterializer;
 import org.apache.parquet.schema.MessageType;
 import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 
 /**
@@ -45,7 +46,7 @@ import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
  * @param <T> the type of the materialized record
  */
 class RecordReaderImplementation<T> extends RecordReader<T> {
-  private static final Log LOG = Log.getLog(RecordReaderImplementation.class);
+  private static final Logger LOG = LoggerFactory.getLogger(RecordReaderImplementation.class);
 
   public static class Case {
 
@@ -376,7 +377,7 @@ class RecordReaderImplementation<T> extends RecordReader<T> {
   }
 
   private RecordConsumer wrap(RecordConsumer recordConsumer) {
-    if (Log.DEBUG) {
+    if (LOG.isDebugEnabled()) {
       return new RecordConsumerLoggingWrapper(recordConsumer);
     }
     return recordConsumer;

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/io/ValidatingRecordConsumer.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/io/ValidatingRecordConsumer.java b/parquet-column/src/main/java/org/apache/parquet/io/ValidatingRecordConsumer.java
index 46f0aae..c27381a 100644
--- a/parquet-column/src/main/java/org/apache/parquet/io/ValidatingRecordConsumer.java
+++ b/parquet-column/src/main/java/org/apache/parquet/io/ValidatingRecordConsumer.java
@@ -22,13 +22,14 @@ import java.util.ArrayDeque;
 import java.util.Arrays;
 import java.util.Deque;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.io.api.Binary;
 import org.apache.parquet.io.api.RecordConsumer;
 import org.apache.parquet.schema.MessageType;
 import org.apache.parquet.schema.Type;
 import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
 import org.apache.parquet.schema.Type.Repetition;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.*;
 
@@ -40,8 +41,7 @@ import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.*;
  *
  */
 public class ValidatingRecordConsumer extends RecordConsumer {
-  private static final Log LOG = Log.getLog(ValidatingRecordConsumer.class);
-  private static final boolean DEBUG = Log.DEBUG;
+  private static final Logger LOG = LoggerFactory.getLogger(ValidatingRecordConsumer.class);
 
   private final RecordConsumer delegate;
 
@@ -139,7 +139,7 @@ public class ValidatingRecordConsumer extends RecordConsumer {
     Type currentType = types.peek().asGroupType().getType(fields.peek());
     int c = fieldValueCount.pop() + 1;
     fieldValueCount.push(c);
-    if (DEBUG) LOG.debug("validate " + p + " for " + currentType.getName());
+    LOG.debug("validate {} for {}",p ,currentType.getName());
     switch (currentType.getRepetition()) {
       case OPTIONAL:
       case REQUIRED:
@@ -161,7 +161,7 @@ public class ValidatingRecordConsumer extends RecordConsumer {
     Type currentType = types.peek().asGroupType().getType(fields.peek());
     int c = fieldValueCount.pop() + 1;
     fieldValueCount.push(c);
-    if (DEBUG) LOG.debug("validate " + Arrays.toString(ptypes) + " for " + currentType.getName());
+    if (LOG.isDebugEnabled()) LOG.debug("validate " + Arrays.toString(ptypes) + " for " + currentType.getName());
     switch (currentType.getRepetition()) {
       case OPTIONAL:
       case REQUIRED:

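The last two hunks show when an explicit guard is still worth keeping: "{}" placeholders defer formatting, but an argument such as Arrays.toString(ptypes) is evaluated eagerly at the call site, so the isDebugEnabled() check remains the only way to skip that work. A short sketch of the distinction (hypothetical class):

    import java.util.Arrays;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ExpensiveArgumentSketch {
      private static final Logger LOG = LoggerFactory.getLogger(ExpensiveArgumentSketch.class);

      public static void main(String[] args) {
        int[] ptypes = {1, 2, 3};
        String name = "field";

        // Cheap arguments: no guard needed, formatting is deferred.
        LOG.debug("validate {} for {}", ptypes.length, name);

        // Expensive argument: Arrays.toString runs before debug() is
        // even entered, so guard the whole statement.
        if (LOG.isDebugEnabled()) {
          LOG.debug("validate " + Arrays.toString(ptypes) + " for " + name);
        }
      }
    }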
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/main/java/org/apache/parquet/schema/MessageTypeParser.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/org/apache/parquet/schema/MessageTypeParser.java b/parquet-column/src/main/java/org/apache/parquet/schema/MessageTypeParser.java
index b7274c2..f0c178a 100644
--- a/parquet-column/src/main/java/org/apache/parquet/schema/MessageTypeParser.java
+++ b/parquet-column/src/main/java/org/apache/parquet/schema/MessageTypeParser.java
@@ -22,11 +22,12 @@ import java.util.Arrays;
 import java.util.Locale;
 import java.util.StringTokenizer;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
 import org.apache.parquet.schema.Type.Repetition;
 import org.apache.parquet.schema.Types.GroupBuilder;
 import org.apache.parquet.schema.Types.PrimitiveBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Parses a schema from a textual format similar to that described in the Dremel paper.
@@ -34,7 +35,7 @@ import org.apache.parquet.schema.Types.PrimitiveBuilder;
  * @author Julien Le Dem
  */
 public class MessageTypeParser {
-  private static final Log LOG = Log.getLog(MessageTypeParser.class);
+  private static final Logger LOG = LoggerFactory.getLogger(MessageTypeParser.class);
 
   private static class Tokenizer {
 


[3/4] parquet-mr git commit: PARQUET-423: Replace old Log class with SLF4J Logging

Posted by ju...@apache.org.
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/test/java/org/apache/parquet/column/mem/TestMemColumn.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/test/java/org/apache/parquet/column/mem/TestMemColumn.java b/parquet-column/src/test/java/org/apache/parquet/column/mem/TestMemColumn.java
index 42c1776..c855339 100644
--- a/parquet-column/src/test/java/org/apache/parquet/column/mem/TestMemColumn.java
+++ b/parquet-column/src/test/java/org/apache/parquet/column/mem/TestMemColumn.java
@@ -23,7 +23,6 @@ import static org.junit.Assert.assertEquals;
 import org.apache.parquet.column.ParquetProperties;
 import org.junit.Test;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.column.ColumnDescriptor;
 import org.apache.parquet.column.ColumnReader;
 import org.apache.parquet.column.ColumnWriter;
@@ -34,9 +33,11 @@ import org.apache.parquet.example.DummyRecordConverter;
 import org.apache.parquet.io.api.Binary;
 import org.apache.parquet.schema.MessageType;
 import org.apache.parquet.schema.MessageTypeParser;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class TestMemColumn {
-  private static final Log LOG = Log.getLog(TestMemColumn.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestMemColumn.class);
 
   @Test
   public void testMemColumn() throws Exception {
@@ -134,7 +135,7 @@ public class TestMemColumn {
     for (int i = 0; i < 837; i++) {
       int r = rs[i % rs.length];
       int d = ds[i % ds.length];
-      LOG.debug("write i: " + i);
+      LOG.debug("write i: {}", i);
       if (d == 2) {
         columnWriter.write((long)i, r, d);
       } else {
@@ -148,7 +149,7 @@ public class TestMemColumn {
     for (int j = 0; j < columnReader.getTotalValueCount(); j++) {
       int r = rs[i % rs.length];
       int d = ds[i % ds.length];
-      LOG.debug("read i: " + i);
+      LOG.debug("read i: {}", i);
       assertEquals("r row " + i, r, columnReader.getCurrentRepetitionLevel());
       assertEquals("d row " + i, d, columnReader.getCurrentDefinitionLevel());
       if (d == 2) {

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/test/java/org/apache/parquet/column/page/mem/MemPageReader.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/test/java/org/apache/parquet/column/page/mem/MemPageReader.java b/parquet-column/src/test/java/org/apache/parquet/column/page/mem/MemPageReader.java
index a6e8910..5373c9a 100644
--- a/parquet-column/src/test/java/org/apache/parquet/column/page/mem/MemPageReader.java
+++ b/parquet-column/src/test/java/org/apache/parquet/column/page/mem/MemPageReader.java
@@ -18,20 +18,20 @@
  */
 package org.apache.parquet.column.page.mem;
 
-import static org.apache.parquet.Log.DEBUG;
 import static org.apache.parquet.Preconditions.checkNotNull;
 
 import java.util.Iterator;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.column.page.DictionaryPage;
 import org.apache.parquet.column.page.DataPage;
 import org.apache.parquet.column.page.PageReader;
 import org.apache.parquet.io.ParquetDecodingException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 
 public class MemPageReader implements PageReader {
-  private static final Log LOG = Log.getLog(MemPageReader.class);
+  private static final Logger LOG = LoggerFactory.getLogger(MemPageReader.class);
 
   private final long totalValueCount;
   private final Iterator<DataPage> pages;
@@ -54,7 +54,7 @@ public class MemPageReader implements PageReader {
   public DataPage readPage() {
     if (pages.hasNext()) {
       DataPage next = pages.next();
-      if (DEBUG) LOG.debug("read page " + next);
+      LOG.debug("read page {}", next);
       return next;
     } else {
       throw new ParquetDecodingException("after last page");

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/test/java/org/apache/parquet/column/page/mem/MemPageStore.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/test/java/org/apache/parquet/column/page/mem/MemPageStore.java b/parquet-column/src/test/java/org/apache/parquet/column/page/mem/MemPageStore.java
index 219e5cd..cdde894 100644
--- a/parquet-column/src/test/java/org/apache/parquet/column/page/mem/MemPageStore.java
+++ b/parquet-column/src/test/java/org/apache/parquet/column/page/mem/MemPageStore.java
@@ -18,12 +18,6 @@
  */
 package org.apache.parquet.column.page.mem;
 
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.parquet.Log;
 import org.apache.parquet.column.ColumnDescriptor;
 import org.apache.parquet.column.UnknownColumnException;
 import org.apache.parquet.column.page.DataPage;
@@ -31,10 +25,17 @@ import org.apache.parquet.column.page.PageReadStore;
 import org.apache.parquet.column.page.PageReader;
 import org.apache.parquet.column.page.PageWriteStore;
 import org.apache.parquet.column.page.PageWriter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 
 
 public class MemPageStore implements PageReadStore, PageWriteStore {
-  private static final Log LOG = Log.getLog(MemPageStore.class);
+  private static final Logger LOG = LoggerFactory.getLogger(MemPageStore.class);
 
   private Map<ColumnDescriptor, MemPageWriter> pageWriters = new HashMap<ColumnDescriptor, MemPageWriter>();
 
@@ -62,7 +63,7 @@ public class MemPageStore implements PageReadStore, PageWriteStore {
       throw new UnknownColumnException(descriptor);
     }
     List<DataPage> pages = new ArrayList<DataPage>(pageWriter.getPages());
-    if (Log.DEBUG) LOG.debug("initialize page reader with "+ pageWriter.getTotalValueCount() + " values and " + pages.size() + " pages");
+    LOG.debug("initialize page reader with {} values and {} pages", pageWriter.getTotalValueCount(), pages.size());
     return new MemPageReader(pageWriter.getTotalValueCount(), pages.iterator(), pageWriter.getDictionaryPage());
   }
 

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/test/java/org/apache/parquet/column/page/mem/MemPageWriter.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/test/java/org/apache/parquet/column/page/mem/MemPageWriter.java b/parquet-column/src/test/java/org/apache/parquet/column/page/mem/MemPageWriter.java
index ddab636..be3a0f9 100644
--- a/parquet-column/src/test/java/org/apache/parquet/column/page/mem/MemPageWriter.java
+++ b/parquet-column/src/test/java/org/apache/parquet/column/page/mem/MemPageWriter.java
@@ -18,26 +18,26 @@
  */
 package org.apache.parquet.column.page.mem;
 
-import static org.apache.parquet.Log.DEBUG;
-import static org.apache.parquet.bytes.BytesInput.copy;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.parquet.Log;
 import org.apache.parquet.bytes.BytesInput;
 import org.apache.parquet.column.Encoding;
+import org.apache.parquet.column.page.DataPage;
 import org.apache.parquet.column.page.DataPageV1;
 import org.apache.parquet.column.page.DataPageV2;
 import org.apache.parquet.column.page.DictionaryPage;
-import org.apache.parquet.column.page.DataPage;
 import org.apache.parquet.column.page.PageWriter;
 import org.apache.parquet.column.statistics.Statistics;
 import org.apache.parquet.io.ParquetEncodingException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.parquet.bytes.BytesInput.copy;
 
 public class MemPageWriter implements PageWriter {
-  private static final Log LOG = Log.getLog(MemPageWriter.class);
+  private static final Logger LOG = LoggerFactory.getLogger(MemPageWriter.class);
 
   private final List<DataPage> pages = new ArrayList<DataPage>();
   private DictionaryPage dictionaryPage;
@@ -53,7 +53,7 @@ public class MemPageWriter implements PageWriter {
     memSize += bytesInput.size();
     pages.add(new DataPageV1(BytesInput.copy(bytesInput), valueCount, (int)bytesInput.size(), statistics, rlEncoding, dlEncoding, valuesEncoding));
     totalValueCount += valueCount;
-    if (DEBUG) LOG.debug("page written for " + bytesInput.size() + " bytes and " + valueCount + " records");
+    LOG.debug("page written for {} bytes and {} records", bytesInput.size(), valueCount);
   }
 
   @Override
@@ -67,8 +67,7 @@ public class MemPageWriter implements PageWriter {
     memSize += size;
     pages.add(DataPageV2.uncompressed(rowCount, nullCount, valueCount, copy(repetitionLevels), copy(definitionLevels), dataEncoding, copy(data), statistics));
     totalValueCount += valueCount;
-    if (DEBUG) LOG.debug("page written for " + size + " bytes and " + valueCount + " records");
-
+    LOG.debug("page written for {} bytes and {} records", size, valueCount);
   }
 
   @Override
@@ -101,7 +100,7 @@ public class MemPageWriter implements PageWriter {
     }
     this.memSize += dictionaryPage.getBytes().size();
     this.dictionaryPage = dictionaryPage.copy();
-    if (DEBUG) LOG.debug("dictionary page written for " + dictionaryPage.getBytes().size() + " bytes and " + dictionaryPage.getDictionarySize() + " records");
+    LOG.debug("dictionary page written for {} bytes and {} records", dictionaryPage.getBytes().size(), dictionaryPage.getDictionarySize());
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/test/java/org/apache/parquet/column/values/bitpacking/TestBitPackingColumn.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/test/java/org/apache/parquet/column/values/bitpacking/TestBitPackingColumn.java b/parquet-column/src/test/java/org/apache/parquet/column/values/bitpacking/TestBitPackingColumn.java
index aef259c..d83628a 100644
--- a/parquet-column/src/test/java/org/apache/parquet/column/values/bitpacking/TestBitPackingColumn.java
+++ b/parquet-column/src/test/java/org/apache/parquet/column/values/bitpacking/TestBitPackingColumn.java
@@ -28,12 +28,13 @@ import java.nio.ByteBuffer;
 import org.junit.Test;
 
 import org.apache.parquet.bytes.DirectByteBufferAllocator;
-import org.apache.parquet.Log;
 import org.apache.parquet.column.values.ValuesReader;
 import org.apache.parquet.column.values.ValuesWriter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class TestBitPackingColumn {
-  private static final Log LOG = Log.getLog(TestBitPackingColumn.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestBitPackingColumn.class);
 
   @Test
   public void testZero() throws IOException {
@@ -163,7 +164,7 @@ public class TestBitPackingColumn {
 
   private void validateEncodeDecode(int bitLength, int[] vals, String expected) throws IOException {
     for (PACKING_TYPE type : PACKING_TYPE.values()) {
-      LOG.debug(type);
+      LOG.debug("{}", type);
       final int bound = (int)Math.pow(2, bitLength) - 1;
       ValuesWriter w = type.getWriter(bound);
       for (int i : vals) {
@@ -171,7 +172,7 @@ public class TestBitPackingColumn {
       }
       byte[] bytes = w.getBytes().toByteArray();
       LOG.debug("vals ("+bitLength+"): " + TestBitPacking.toString(vals));
-      LOG.debug("bytes: " + TestBitPacking.toString(bytes));
+      LOG.debug("bytes: {}", TestBitPacking.toString(bytes));
       assertEquals(type.toString(), expected, TestBitPacking.toString(bytes));
       ValuesReader r = type.getReader(bound);
       r.initFromPage(vals.length, ByteBuffer.wrap(bytes), 0);
@@ -179,7 +180,7 @@ public class TestBitPackingColumn {
       for (int i = 0; i < result.length; i++) {
         result[i] = r.readInteger();
       }
-      LOG.debug("result: " + TestBitPacking.toString(result));
+      LOG.debug("result: {}", TestBitPacking.toString(result));
       assertArrayEquals(type + " result: " + TestBitPacking.toString(result), vals, result);
     }
   }

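One context line in this test (the "vals (...)" message, with a twin in TestBitPacking below) still concatenates inside LOG.debug. It only costs anything when the test logger is at DEBUG, but a parameterized form would be consistent with the rest of the patch; roughly (not part of the commit, Arrays.toString standing in for TestBitPacking.toString):

    import java.util.Arrays;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class RemainingConcatenationSketch {
      private static final Logger LOG = LoggerFactory.getLogger(RemainingConcatenationSketch.class);

      public static void main(String[] args) {
        int bitLength = 3;
        int[] vals = {1, 2, 3};

        // Form left unchanged by the commit:
        LOG.debug("vals (" + bitLength + "): " + Arrays.toString(vals));

        // Parameterized equivalent; note the toString argument is still
        // evaluated eagerly either way.
        LOG.debug("vals ({}): {}", bitLength, Arrays.toString(vals));
      }
    }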
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/test/java/org/apache/parquet/io/PerfTest.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/test/java/org/apache/parquet/io/PerfTest.java b/parquet-column/src/test/java/org/apache/parquet/io/PerfTest.java
index e4687b1..bf783df 100644
--- a/parquet-column/src/test/java/org/apache/parquet/io/PerfTest.java
+++ b/parquet-column/src/test/java/org/apache/parquet/io/PerfTest.java
@@ -24,9 +24,6 @@ import static org.apache.parquet.example.Paper.schema;
 import static org.apache.parquet.example.Paper.schema2;
 import static org.apache.parquet.example.Paper.schema3;
 
-import java.util.logging.Level;
-
-import org.apache.parquet.Log;
 import org.apache.parquet.column.ParquetProperties;
 import org.apache.parquet.column.impl.ColumnWriteStoreV1;
 import org.apache.parquet.column.page.mem.MemPageStore;
@@ -37,8 +34,6 @@ import org.apache.parquet.schema.MessageType;
 
 
 /**
- * make sure {@link Log#LEVEL} is set to {@link Level#OFF}
- *
  * @author Julien Le Dem
  *
  */

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-column/src/test/java/org/apache/parquet/io/TestColumnIO.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/test/java/org/apache/parquet/io/TestColumnIO.java b/parquet-column/src/test/java/org/apache/parquet/io/TestColumnIO.java
index e9e599a..0aa3420 100644
--- a/parquet-column/src/test/java/org/apache/parquet/io/TestColumnIO.java
+++ b/parquet-column/src/test/java/org/apache/parquet/io/TestColumnIO.java
@@ -44,7 +44,6 @@ import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.column.ColumnDescriptor;
 import org.apache.parquet.column.ColumnWriteStore;
 import org.apache.parquet.column.ColumnWriter;
@@ -67,10 +66,12 @@ import org.apache.parquet.schema.PrimitiveType;
 import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
 import org.apache.parquet.schema.Type;
 import org.apache.parquet.schema.Type.Repetition;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @RunWith(Parameterized.class)
 public class TestColumnIO {
-  private static final Log LOG = Log.getLog(TestColumnIO.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestColumnIO.class);
 
   private static final String oneOfEach =
     "message Document {\n"
@@ -492,7 +493,7 @@ public class TestColumnIO {
   }
 
   private void log(Object o) {
-    LOG.info(o);
+    LOG.info("{}", o);
   }
 
   private void validateFSA(int[][] expectedFSA, MessageColumnIO columnIO, RecordReaderImplementation<?> recordReader) {

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-common/src/main/java/org/apache/parquet/Closeables.java
----------------------------------------------------------------------
diff --git a/parquet-common/src/main/java/org/apache/parquet/Closeables.java b/parquet-common/src/main/java/org/apache/parquet/Closeables.java
index 2d8bb77..086f6cc 100644
--- a/parquet-common/src/main/java/org/apache/parquet/Closeables.java
+++ b/parquet-common/src/main/java/org/apache/parquet/Closeables.java
@@ -21,13 +21,16 @@ package org.apache.parquet;
 import java.io.Closeable;
 import java.io.IOException;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * Utility for working with {@link java.io.Closeable}s
  */
 public final class Closeables {
   private Closeables() { }
 
-  private static final Log LOG = Log.getLog(Closeables.class);
+  private static final Logger LOG = LoggerFactory.getLogger(Closeables.class);
 
   /**
    * Closes a (potentially null) closeable.

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-common/src/main/java/org/apache/parquet/bytes/BytesUtils.java
----------------------------------------------------------------------
diff --git a/parquet-common/src/main/java/org/apache/parquet/bytes/BytesUtils.java b/parquet-common/src/main/java/org/apache/parquet/bytes/BytesUtils.java
index 049f7bd..266685d 100644
--- a/parquet-common/src/main/java/org/apache/parquet/bytes/BytesUtils.java
+++ b/parquet-common/src/main/java/org/apache/parquet/bytes/BytesUtils.java
@@ -25,7 +25,8 @@ import java.io.OutputStream;
 import java.nio.ByteBuffer;
 import java.nio.charset.Charset;
 
-import org.apache.parquet.Log;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * utility methods to deal with bytes
@@ -34,7 +35,7 @@ import org.apache.parquet.Log;
  *
  */
 public class BytesUtils {
-  private static final Log LOG = Log.getLog(BytesUtils.class);
+  private static final Logger LOG = LoggerFactory.getLogger(BytesUtils.class);
 
   public static final Charset UTF8 = Charset.forName("UTF-8");
 
@@ -158,7 +159,7 @@ public class BytesUtils {
     out.write((v >>>  8) & 0xFF);
     out.write((v >>> 16) & 0xFF);
     out.write((v >>> 24) & 0xFF);
-    if (Log.DEBUG) LOG.debug("write le int: " + v + " => "+ ((v >>>  0) & 0xFF) + " " + ((v >>>  8) & 0xFF) + " " + ((v >>> 16) & 0xFF) + " " + ((v >>> 24) & 0xFF));
+    if (LOG.isDebugEnabled()) LOG.debug("write le int: " + v + " => "+ ((v >>>  0) & 0xFF) + " " + ((v >>>  8) & 0xFF) + " " + ((v >>> 16) & 0xFF) + " " + ((v >>> 24) & 0xFF));
   }
 
   /**

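The guarded statement above prints the four bytes of a little-endian int, least significant byte first. A worked example of the decomposition those shifts perform (value chosen for readability):

    public class LittleEndianSketch {
      public static void main(String[] args) {
        int v = 0x12345678;
        // Same shift/mask sequence as writeIntLittleEndian:
        System.out.printf("%02x %02x %02x %02x%n",
            (v >>> 0) & 0xFF,    // 78, written first
            (v >>> 8) & 0xFF,    // 56
            (v >>> 16) & 0xFF,   // 34
            (v >>> 24) & 0xFF);  // 12, written last
        // prints: 78 56 34 12
      }
    }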
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-common/src/test/java/org/apache/parquet/TestLog.java
----------------------------------------------------------------------
diff --git a/parquet-common/src/test/java/org/apache/parquet/TestLog.java b/parquet-common/src/test/java/org/apache/parquet/TestLog.java
deleted file mode 100644
index 4508b0d..0000000
--- a/parquet-common/src/test/java/org/apache/parquet/TestLog.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.parquet;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-public class TestLog {
-
-  @Test
-  public void test() {
-    // Use a compile time log level of INFO for performance
-    Assert.assertFalse("Do not merge in log level DEBUG", Log.DEBUG);
-  }
-}

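TestLog existed to catch a compile-time DEBUG flag accidentally left enabled before a merge. Under SLF4J there is no such constant; the effective level comes from whichever binding and configuration are on the classpath at runtime, so the test is deleted rather than ported. A deployment that wanted a comparable sanity check could only ask the logger itself, along these lines (a hypothetical sketch, not part of the commit):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LogLevelCheck {
      private static final Logger LOG = LoggerFactory.getLogger(LogLevelCheck.class);

      public static void main(String[] args) {
        // Decided by the bound implementation's runtime configuration,
        // not by a compile-time constant.
        System.out.println("debug enabled: " + LOG.isDebugEnabled());
      }
    }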
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-encoding/src/main/java/org/apache/parquet/bytes/BytesInput.java
----------------------------------------------------------------------
diff --git a/parquet-encoding/src/main/java/org/apache/parquet/bytes/BytesInput.java b/parquet-encoding/src/main/java/org/apache/parquet/bytes/BytesInput.java
index cd9c6b2..6e593c2 100644
--- a/parquet-encoding/src/main/java/org/apache/parquet/bytes/BytesInput.java
+++ b/parquet-encoding/src/main/java/org/apache/parquet/bytes/BytesInput.java
@@ -29,7 +29,8 @@ import java.nio.ByteBuffer;
 import java.nio.channels.Channels;
 import java.nio.channels.WritableByteChannel;
 
-import org.apache.parquet.Log;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 
 /**
@@ -44,7 +45,7 @@ import org.apache.parquet.Log;
  *
  */
 abstract public class BytesInput {
-  private static final Log LOG = Log.getLog(BytesInput.class);
+  private static final Logger LOG = LoggerFactory.getLogger(BytesInput.class);
   private static final boolean DEBUG = false;//Log.DEBUG;
   private static final EmptyBytesInput EMPTY_BYTES_INPUT = new EmptyBytesInput();
 
@@ -90,12 +91,12 @@ abstract public class BytesInput {
    * @return a Bytes input that will write the given bytes
    */
   public static BytesInput from(byte[] in) {
-    if (DEBUG) LOG.debug("BytesInput from array of " + in.length + " bytes");
+    LOG.debug("BytesInput from array of {} bytes", in.length);
     return new ByteArrayBytesInput(in, 0 , in.length);
   }
 
   public static BytesInput from(byte[] in, int offset, int length) {
-    if (DEBUG) LOG.debug("BytesInput from array of " + length + " bytes");
+    LOG.debug("BytesInput from array of {} bytes", length);
     return new ByteArrayBytesInput(in, offset, length);
   }
 
@@ -189,7 +190,7 @@ abstract public class BytesInput {
   public byte[] toByteArray() throws IOException {
     BAOS baos = new BAOS((int)size());
     this.writeAllTo(baos);
-    if (DEBUG) LOG.debug("converted " + size() + " to byteArray of " + baos.size() + " bytes");
+    LOG.debug("converted {} to byteArray of {} bytes", size() , baos.size());
     return baos.getBuf();
   }
 
@@ -228,7 +229,7 @@ abstract public class BytesInput {
   }
 
   private static class StreamBytesInput extends BytesInput {
-    private static final Log LOG = Log.getLog(BytesInput.StreamBytesInput.class);
+    private static final Logger LOG = LoggerFactory.getLogger(BytesInput.StreamBytesInput.class);
     private final InputStream in;
     private final int byteCount;
 
@@ -240,13 +241,13 @@ abstract public class BytesInput {
 
     @Override
     public void writeAllTo(OutputStream out) throws IOException {
-      if (DEBUG) LOG.debug("write All "+ byteCount + " bytes");
+      LOG.debug("write All {} bytes", byteCount);
       // TODO: more efficient
       out.write(this.toByteArray());
     }
 
     public byte[] toByteArray() throws IOException {
-      if (DEBUG) LOG.debug("read all "+ byteCount + " bytes");
+      LOG.debug("read all {} bytes", byteCount);
       byte[] buf = new byte[byteCount];
       new DataInputStream(in).readFully(buf);
       return buf;
@@ -260,7 +261,7 @@ abstract public class BytesInput {
   }
 
   private static class SequenceBytesIn extends BytesInput {
-    private static final Log LOG = Log.getLog(BytesInput.SequenceBytesIn.class);
+    private static final Logger LOG = LoggerFactory.getLogger(BytesInput.SequenceBytesIn.class);
 
     private final List<BytesInput> inputs;
     private final long size;
@@ -278,10 +279,10 @@
     @Override
     public void writeAllTo(OutputStream out) throws IOException {
       for (BytesInput input : inputs) {
-        if (DEBUG) LOG.debug("write " + input.size() + " bytes to out");
-        if (DEBUG && input instanceof SequenceBytesIn) LOG.debug("{");
+        LOG.debug("write {} bytes to out", input.size());
+        if (input instanceof SequenceBytesIn) LOG.debug("{");
         input.writeAllTo(out);
-        if (DEBUG && input instanceof SequenceBytesIn) LOG.debug("}");
+        if (input instanceof SequenceBytesIn) LOG.debug("}");
       }
     }
 

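BytesInput keeps a hard-coded "DEBUG = false" constant (left as a context line above). Because it is a compile-time constant, javac drops any "if (DEBUG)" body from the bytecode entirely: zero cost, but it can only be re-enabled by editing and recompiling, which is exactly the property the SLF4J migration trades for runtime configuration. A sketch of the difference (hypothetical class):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class CompileTimeFlagSketch {
      private static final Logger LOG = LoggerFactory.getLogger(CompileTimeFlagSketch.class);

      // Compile-time constant: javac removes the guarded block from the
      // bytecode, so this can never be turned on without a rebuild.
      private static final boolean DEBUG = false;

      public static void main(String[] args) {
        if (DEBUG) System.out.println("unreachable without a recompile");

        // Runtime alternative: decided by the logging configuration.
        LOG.debug("re-enable by setting the bound logger to DEBUG");
      }
    }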
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-encoding/src/main/java/org/apache/parquet/bytes/CapacityByteArrayOutputStream.java
----------------------------------------------------------------------
diff --git a/parquet-encoding/src/main/java/org/apache/parquet/bytes/CapacityByteArrayOutputStream.java b/parquet-encoding/src/main/java/org/apache/parquet/bytes/CapacityByteArrayOutputStream.java
index 6155565..92674d4 100644
--- a/parquet-encoding/src/main/java/org/apache/parquet/bytes/CapacityByteArrayOutputStream.java
+++ b/parquet-encoding/src/main/java/org/apache/parquet/bytes/CapacityByteArrayOutputStream.java
@@ -30,9 +30,11 @@ import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.OutputStreamCloseException;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * Similar to a {@link ByteArrayOutputStream}, but uses a different strategy for growing that does not involve copying.
  * Where ByteArrayOutputStream is backed by a single array that "grows" by copying into a new larger array, this output
@@ -54,7 +56,7 @@ import org.apache.parquet.OutputStreamCloseException;
  *
  */
 public class CapacityByteArrayOutputStream extends OutputStream {
-  private static final Log LOG = Log.getLog(CapacityByteArrayOutputStream.class);
+  private static final Logger LOG = LoggerFactory.getLogger(CapacityByteArrayOutputStream.class);
   private static final ByteBuffer EMPTY_SLAB = ByteBuffer.wrap(new byte[0]);
 
   private int initialSlabSize;
@@ -167,11 +169,11 @@ public class CapacityByteArrayOutputStream extends OutputStream {
     }
 
     if (nextSlabSize < minimumSize) {
-      if (Log.DEBUG) LOG.debug(format("slab size %,d too small for value of size %,d. Bumping up slab size", nextSlabSize, minimumSize));
+      LOG.debug("slab size {} too small for value of size {}. Bumping up slab size", nextSlabSize, minimumSize);
       nextSlabSize = minimumSize;
     }
 
-    if (Log.DEBUG) LOG.debug(format("used %d slabs, adding new slab of size %d", slabs.size(), nextSlabSize));
+    LOG.debug("used {} slabs, adding new slab of size {}", slabs.size(), nextSlabSize);
 
     this.currentSlab = allocator.allocate(nextSlabSize);
     this.slabs.add(currentSlab);
@@ -265,7 +267,7 @@ public class CapacityByteArrayOutputStream extends OutputStream {
     // readjust slab size.
     // 7 = 2^3 - 1 so that doubling the initial size 3 times will get to the same size
     this.initialSlabSize = max(bytesUsed / 7, initialSlabSize);
-    if (Log.DEBUG) LOG.debug(String.format("initial slab of size %d", initialSlabSize));
+    LOG.debug("initial slab of size {}", initialSlabSize);
     for (ByteBuffer slab : slabs) {
       allocator.release(slab);
     }

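Worth noting in this file: the old messages went through format("... %,d ..."), so sizes were printed with thousands separators, while SLF4J's "{}" substitutes via String.valueOf and drops the grouping. Where the formatted output is worth keeping, it can sit behind a guard, roughly (a sketch of the alternative, not what the commit does):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class GroupedNumberLoggingSketch {
      private static final Logger LOG = LoggerFactory.getLogger(GroupedNumberLoggingSketch.class);

      public static void main(String[] args) {
        int nextSlabSize = 1048576;
        int minimumSize = 2097152;

        // {} prints 1048576; %,d printed 1,048,576 (locale-dependent).
        LOG.debug("slab size {} too small for value of size {}",
            nextSlabSize, minimumSize);

        if (LOG.isDebugEnabled()) {
          LOG.debug(String.format(
              "slab size %,d too small for value of size %,d",
              nextSlabSize, minimumSize));
        }
      }
    }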
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/ByteBasedBitPackingEncoder.java
----------------------------------------------------------------------
diff --git a/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/ByteBasedBitPackingEncoder.java b/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/ByteBasedBitPackingEncoder.java
index 448c0be..cc23e8f 100644
--- a/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/ByteBasedBitPackingEncoder.java
+++ b/parquet-encoding/src/main/java/org/apache/parquet/column/values/bitpacking/ByteBasedBitPackingEncoder.java
@@ -18,16 +18,16 @@
  */
 package org.apache.parquet.column.values.bitpacking;
 
-import static org.apache.parquet.Log.DEBUG;
-import static org.apache.parquet.bytes.BytesInput.concat;
+import org.apache.parquet.bytes.BytesInput;
+import org.apache.parquet.bytes.BytesUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.parquet.Log;
-import org.apache.parquet.bytes.BytesInput;
-import org.apache.parquet.bytes.BytesUtils;
+import static org.apache.parquet.bytes.BytesInput.concat;
 
 /**
  * Uses the generated Byte based bit packing to write ints into a BytesInput
@@ -36,7 +36,7 @@ import org.apache.parquet.bytes.BytesUtils;
  *
  */
 public class ByteBasedBitPackingEncoder {
-  private static final Log LOG = Log.getLog(ByteBasedBitPackingEncoder.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ByteBasedBitPackingEncoder.class);
 
   private static final int VALUES_WRITTEN_AT_A_TIME = 8;
 
@@ -99,7 +99,7 @@ public class ByteBasedBitPackingEncoder {
   public BytesInput toBytes() throws IOException {
     int packedByteLength = packedPosition + BytesUtils.paddedByteCountFromBits(inputSize * bitWidth);
 
-    if (DEBUG) LOG.debug("writing " + (slabs.size() * slabSize + packedByteLength) + " bytes");
+    LOG.debug("writing {} bytes", (slabs.size() * slabSize + packedByteLength));
     if (inputSize > 0) {
       for (int i = inputSize; i < input.length; i++) {
         input[i] = 0;

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-encoding/src/test/java/org/apache/parquet/column/values/bitpacking/TestBitPacking.java
----------------------------------------------------------------------
diff --git a/parquet-encoding/src/test/java/org/apache/parquet/column/values/bitpacking/TestBitPacking.java b/parquet-encoding/src/test/java/org/apache/parquet/column/values/bitpacking/TestBitPacking.java
index ce9b3ac..664fb1c 100644
--- a/parquet-encoding/src/test/java/org/apache/parquet/column/values/bitpacking/TestBitPacking.java
+++ b/parquet-encoding/src/test/java/org/apache/parquet/column/values/bitpacking/TestBitPacking.java
@@ -27,12 +27,13 @@ import java.io.IOException;
 import org.junit.Assert;
 import org.junit.Test;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.column.values.bitpacking.BitPacking.BitPackingReader;
 import org.apache.parquet.column.values.bitpacking.BitPacking.BitPackingWriter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class TestBitPacking {
-  private static final Log LOG = Log.getLog(TestBitPacking.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestBitPacking.class);
 
   @Test
   public void testZero() throws IOException {
@@ -170,7 +171,7 @@ public class TestBitPacking {
     w.finish();
     byte[] bytes = baos.toByteArray();
     LOG.debug("vals ("+bitLength+"): " + toString(vals));
-    LOG.debug("bytes: " + toString(bytes));
+    LOG.debug("bytes: {}", toString(bytes));
     Assert.assertEquals(expected, toString(bytes));
     ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
     BitPackingReader r = BitPacking.createBitPackingReader(bitLength, bais, vals.length);
@@ -178,7 +179,7 @@ public class TestBitPacking {
     for (int i = 0; i < result.length; i++) {
       result[i] = r.read();
     }
-    LOG.debug("result: " + toString(result));
+    LOG.debug("result: {}", toString(result));
     assertArrayEquals(vals, result);
   }
 

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-encoding/src/test/java/org/apache/parquet/column/values/bitpacking/TestByteBitPacking.java
----------------------------------------------------------------------
diff --git a/parquet-encoding/src/test/java/org/apache/parquet/column/values/bitpacking/TestByteBitPacking.java b/parquet-encoding/src/test/java/org/apache/parquet/column/values/bitpacking/TestByteBitPacking.java
index b7dc26b..81467e6 100644
--- a/parquet-encoding/src/test/java/org/apache/parquet/column/values/bitpacking/TestByteBitPacking.java
+++ b/parquet-encoding/src/test/java/org/apache/parquet/column/values/bitpacking/TestByteBitPacking.java
@@ -26,23 +26,24 @@ import java.util.Random;
 
 import org.junit.Assert;
 import org.junit.Test;
-import org.apache.parquet.Log;
 import org.apache.parquet.column.values.bitpacking.BitPacking.BitPackingReader;
 import org.apache.parquet.column.values.bitpacking.BitPacking.BitPackingWriter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class TestByteBitPacking {
-  private static final Log LOG = Log.getLog(TestByteBitPacking.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestByteBitPacking.class);
 
   @Test
   public void testPackUnPack() {
     LOG.debug("");
     LOG.debug("testPackUnPack");
     for (int i = 1; i < 32; i++) {
-      LOG.debug("Width: " + i);
+      LOG.debug("Width: {}", i);
       int[] unpacked = new int[32];
       int[] values = generateValues(i);
       packUnpack(Packer.BIG_ENDIAN.newBytePacker(i), values, unpacked);
-      LOG.debug("Output: " + TestBitPacking.toString(unpacked));
+      LOG.debug("Output: {}", TestBitPacking.toString(unpacked));
       Assert.assertArrayEquals("width "+i, values, unpacked);
     }
   }
@@ -52,15 +53,15 @@ public class TestByteBitPacking {
     LOG.debug("");
     LOG.debug("testPackUnPackLong");
     for (int i = 1; i < 64; i++) {
-      LOG.debug("Width: " + i);
+      LOG.debug("Width: {}", i);
       long[] unpacked32 = new long[32];
       long[] unpacked8 = new long[32];
       long[] values = generateValuesLong(i);
       packUnpack32(Packer.BIG_ENDIAN.newBytePackerForLong(i), values, unpacked32);
-      LOG.debug("Output 32: " + TestBitPacking.toString(unpacked32));
+      LOG.debug("Output 32: {}", TestBitPacking.toString(unpacked32));
       Assert.assertArrayEquals("width "+i, values, unpacked32);
       packUnpack8(Packer.BIG_ENDIAN.newBytePackerForLong(i), values, unpacked8);
-      LOG.debug("Output 8: " + TestBitPacking.toString(unpacked8));
+      LOG.debug("Output 8: {}", TestBitPacking.toString(unpacked8));
       Assert.assertArrayEquals("width "+i, values, unpacked8);
     }
   }
@@ -68,14 +69,14 @@ public class TestByteBitPacking {
   private void packUnpack(BytePacker packer, int[] values, int[] unpacked) {
     byte[] packed = new byte[packer.getBitWidth() * 4];
     packer.pack32Values(values, 0, packed, 0);
-    LOG.debug("packed: " + TestBitPacking.toString(packed));
+    LOG.debug("packed: {}", TestBitPacking.toString(packed));
     packer.unpack32Values(ByteBuffer.wrap(packed), 0, unpacked, 0);
   }
 
   private void packUnpack32(BytePackerForLong packer, long[] values, long[] unpacked) {
     byte[] packed = new byte[packer.getBitWidth() * 4];
     packer.pack32Values(values, 0, packed, 0);
-    LOG.debug("packed: " + TestBitPacking.toString(packed));
+    LOG.debug("packed: {}", TestBitPacking.toString(packed));
     packer.unpack32Values(packed, 0, unpacked, 0);
   }
 
@@ -84,7 +85,7 @@ public class TestByteBitPacking {
     for (int i = 0; i < 4; i++) {
       packer.pack8Values(values,  8 * i, packed, packer.getBitWidth() * i);
     }
-    LOG.debug("packed: " + TestBitPacking.toString(packed));
+    LOG.debug("packed: {}", TestBitPacking.toString(packed));
     for (int i = 0; i < 4; i++) {
       packer.unpack8Values(packed, packer.getBitWidth() * i, unpacked, 8 * i);
     }
@@ -95,7 +96,7 @@ public class TestByteBitPacking {
     for (int j = 0; j < values.length; j++) {
       values[j] = (int)(Math.random() * 100000) % (int)Math.pow(2, bitWidth);
     }
-    LOG.debug("Input:  " + TestBitPacking.toString(values));
+    LOG.debug("Input:  {}", TestBitPacking.toString(values));
     return values;
   }
 
@@ -105,7 +106,7 @@ public class TestByteBitPacking {
     for (int j = 0; j < values.length; j++) {
       values[j] = random.nextLong() & ((1l << bitWidth) - 1l);
     }
-    LOG.debug("Input:  " + TestBitPacking.toString(values));
+    LOG.debug("Input:  {}", TestBitPacking.toString(values));
     return values;
   }
 
@@ -114,7 +115,7 @@ public class TestByteBitPacking {
     LOG.debug("");
     LOG.debug("testPackUnPackAgainstHandWritten");
     for (int i = 1; i < 8; i++) {
-      LOG.debug("Width: " + i);
+      LOG.debug("Width: {}", i);
       byte[] packed = new byte[i * 4];
       int[] unpacked = new int[32];
       int[] values = generateValues(i);
@@ -123,7 +124,7 @@ public class TestByteBitPacking {
       final BytePacker packer = Packer.BIG_ENDIAN.newBytePacker(i);
       packer.pack32Values(values, 0, packed, 0);
 
-      LOG.debug("Generated: " + TestBitPacking.toString(packed));
+      LOG.debug("Generated: {}", TestBitPacking.toString(packed));
 
       // pack manual
       final ByteArrayOutputStream manualOut = new ByteArrayOutputStream();
@@ -132,7 +133,7 @@ public class TestByteBitPacking {
         writer.write(values[j]);
       }
       final byte[] packedManualAsBytes = manualOut.toByteArray();
-      LOG.debug("Manual: " + TestBitPacking.toString(packedManualAsBytes));
+      LOG.debug("Manual: {}", TestBitPacking.toString(packedManualAsBytes));
 
       // unpack manual
       final BitPackingReader reader = BitPacking.createBitPackingReader(i, new ByteArrayInputStream(packed), 32);
@@ -140,7 +141,7 @@ public class TestByteBitPacking {
         unpacked[j] = reader.read();
       }
 
-      LOG.debug("Output: " + TestBitPacking.toString(unpacked));
+      LOG.debug("Output: {}", TestBitPacking.toString(unpacked));
       Assert.assertArrayEquals("width " + i, values, unpacked);
     }
   }
@@ -149,9 +150,9 @@ public class TestByteBitPacking {
   public void testPackUnPackAgainstLemire() throws IOException {
     for (Packer pack: Packer.values()) {
       LOG.debug("");
-      LOG.debug("testPackUnPackAgainstLemire " + pack.name());
+      LOG.debug("testPackUnPackAgainstLemire {}", pack.name());
       for (int i = 1; i < 32; i++) {
-        LOG.debug("Width: " + i);
+        LOG.debug("Width: {}", i);
         int[] packed = new int[i];
         int[] unpacked = new int[32];
         int[] values = generateValues(i);
@@ -178,17 +179,17 @@ public class TestByteBitPacking {
           }
         }
         final byte[] packedByLemireAsBytes = lemireOut.toByteArray();
-        LOG.debug("Lemire out: " + TestBitPacking.toString(packedByLemireAsBytes));
+        LOG.debug("Lemire out: {}", TestBitPacking.toString(packedByLemireAsBytes));
 
         // pack manual
         final BytePacker bytePacker = pack.newBytePacker(i);
         byte[] packedGenerated = new byte[i * 4];
         bytePacker.pack32Values(values, 0, packedGenerated, 0);
-        LOG.debug("Gener. out: " + TestBitPacking.toString(packedGenerated));
+        LOG.debug("Gener. out: {}", TestBitPacking.toString(packedGenerated));
         Assert.assertEquals(pack.name() + " width " + i, TestBitPacking.toString(packedByLemireAsBytes), TestBitPacking.toString(packedGenerated));
 
         bytePacker.unpack32Values(ByteBuffer.wrap(packedByLemireAsBytes), 0, unpacked, 0);
-        LOG.debug("Output: " + TestBitPacking.toString(unpacked));
+        LOG.debug("Output: {}", TestBitPacking.toString(unpacked));
 
         Assert.assertArrayEquals("width " + i, values, unpacked);
       }

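The hunks in these bit-packing tests are all the same mechanical change: the
org.apache.parquet.Log wrapper becomes an SLF4J Logger, and string
concatenation inside log calls becomes a "{}" placeholder. A minimal,
self-contained sketch of the pattern (the class name and message are
illustrative, not part of the patch):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingMigrationExample {
  private static final Logger LOG = LoggerFactory.getLogger(LoggingMigrationExample.class);

  public static void main(String[] args) {
    int width = 7;
    // Old style built the message eagerly: LOG.debug("Width: " + width);
    // With a placeholder, formatting is deferred until SLF4J has confirmed
    // that the DEBUG level is actually enabled.
    LOG.debug("Width: {}", width);
  }
}
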
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-encoding/src/test/java/org/apache/parquet/column/values/bitpacking/TestLemireBitPacking.java
----------------------------------------------------------------------
diff --git a/parquet-encoding/src/test/java/org/apache/parquet/column/values/bitpacking/TestLemireBitPacking.java b/parquet-encoding/src/test/java/org/apache/parquet/column/values/bitpacking/TestLemireBitPacking.java
index 2c5fa58..6a980da 100644
--- a/parquet-encoding/src/test/java/org/apache/parquet/column/values/bitpacking/TestLemireBitPacking.java
+++ b/parquet-encoding/src/test/java/org/apache/parquet/column/values/bitpacking/TestLemireBitPacking.java
@@ -26,12 +26,13 @@ import java.nio.ByteBuffer;
 import org.junit.Assert;
 import org.junit.Test;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.column.values.bitpacking.BitPacking.BitPackingReader;
 import org.apache.parquet.column.values.bitpacking.BitPacking.BitPackingWriter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class TestLemireBitPacking {
-  private static final Log LOG = Log.getLog(TestLemireBitPacking.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestLemireBitPacking.class);
 
   @Test
   public void testPackUnPack() {
@@ -39,7 +40,7 @@ public class TestLemireBitPacking {
       LOG.debug("");
       LOG.debug("testPackUnPack");
       for (int i = 1; i < 32; i++) {
-        LOG.debug("Width: " + i);
+        LOG.debug("Width: {}", i);
         int[] values = generateValues(i);
         int[] unpacked = new int[32];
         {
@@ -73,7 +74,7 @@ public class TestLemireBitPacking {
     for (int j = 0; j < values.length; j++) {
       values[j] = (int)(Math.random() * 100000) % (int)Math.pow(2, bitWidth);
     }
-    LOG.debug("Input:  " + TestBitPacking.toString(values));
+    LOG.debug("Input:  {}", TestBitPacking.toString(values));
     return values;
   }
 
@@ -82,7 +83,7 @@ public class TestLemireBitPacking {
     LOG.debug("");
     LOG.debug("testPackUnPackAgainstHandWritten");
     for (int i = 1; i < 8; i++) {
-      LOG.debug("Width: " + i);
+      LOG.debug("Width: {}", i);
       int[] packed = new int[i];
       int[] unpacked = new int[32];
       int[] values = generateValues(i);
@@ -99,7 +100,7 @@ public class TestLemireBitPacking {
         lemireOut.write((v >>>  0) & 0xFF);
       }
       final byte[] packedByLemireAsBytes = lemireOut.toByteArray();
-      LOG.debug("Lemire: " + TestBitPacking.toString(packedByLemireAsBytes));
+      LOG.debug("Lemire: {}", TestBitPacking.toString(packedByLemireAsBytes));
 
       // pack manual
       final ByteArrayOutputStream manualOut = new ByteArrayOutputStream();
@@ -108,7 +109,7 @@ public class TestLemireBitPacking {
         writer.write(values[j]);
       }
       final byte[] packedManualAsBytes = manualOut.toByteArray();
-      LOG.debug("Manual: " + TestBitPacking.toString(packedManualAsBytes));
+      LOG.debug("Manual: {}", TestBitPacking.toString(packedManualAsBytes));
 
       // unpack manual
       final BitPackingReader reader = BitPacking.createBitPackingReader(i, new ByteArrayInputStream(packedByLemireAsBytes), 32);
@@ -116,7 +117,7 @@ public class TestLemireBitPacking {
         unpacked[j] = reader.read();
       }
 
-      LOG.debug("Output: " + TestBitPacking.toString(unpacked));
+      LOG.debug("Output: {}", TestBitPacking.toString(unpacked));
       Assert.assertArrayEquals("width " + i, values, unpacked);
     }
   }

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hadoop/src/main/java/org/apache/parquet/filter2/dictionarylevel/DictionaryFilter.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/filter2/dictionarylevel/DictionaryFilter.java b/parquet-hadoop/src/main/java/org/apache/parquet/filter2/dictionarylevel/DictionaryFilter.java
index bf99435..91f3007 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/filter2/dictionarylevel/DictionaryFilter.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/filter2/dictionarylevel/DictionaryFilter.java
@@ -18,7 +18,6 @@
  */
 package org.apache.parquet.filter2.dictionarylevel;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.column.ColumnDescriptor;
 import org.apache.parquet.column.Dictionary;
 import org.apache.parquet.column.Encoding;
@@ -30,6 +29,8 @@ import org.apache.parquet.filter2.predicate.Operators.*;
 import org.apache.parquet.filter2.predicate.UserDefinedPredicate;
 import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
 import org.apache.parquet.hadoop.metadata.ColumnPath;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.HashMap;
@@ -47,7 +48,7 @@ import static org.apache.parquet.Preconditions.checkNotNull;
  */
 public class DictionaryFilter implements FilterPredicate.Visitor<Boolean> {
 
-  private static final Log LOG = Log.getLog(DictionaryFilter.class);
+  private static final Logger LOG = LoggerFactory.getLogger(DictionaryFilter.class);
   private static final boolean BLOCK_MIGHT_MATCH = false;
   private static final boolean BLOCK_CANNOT_MATCH = true;
 
@@ -99,7 +100,7 @@ public class DictionaryFilter implements FilterPredicate.Visitor<Boolean> {
         case DOUBLE: dictSet.add(dict.decodeToDouble(i));
           break;
         default:
-          LOG.warn("Unknown dictionary type" + meta.getType());
+          LOG.warn("Unknown dictionary type{}", meta.getType());
       }
     }
 

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hadoop/src/main/java/org/apache/parquet/format/converter/ParquetMetadataConverter.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/format/converter/ParquetMetadataConverter.java b/parquet-hadoop/src/main/java/org/apache/parquet/format/converter/ParquetMetadataConverter.java
index 6481b8f..bf22b61 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/format/converter/ParquetMetadataConverter.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/format/converter/ParquetMetadataConverter.java
@@ -38,7 +38,6 @@ import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.parquet.CorruptStatistics;
-import org.apache.parquet.Log;
 import org.apache.parquet.format.PageEncodingStats;
 import org.apache.parquet.hadoop.metadata.ColumnPath;
 import org.apache.parquet.format.ColumnChunk;
@@ -71,6 +70,8 @@ import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
 import org.apache.parquet.schema.Type.Repetition;
 import org.apache.parquet.schema.TypeVisitor;
 import org.apache.parquet.schema.Types;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 // TODO: This file has become too long!
 // TODO: Lets split it up: https://issues.apache.org/jira/browse/PARQUET-310
@@ -80,7 +81,7 @@ public class ParquetMetadataConverter {
   public static final MetadataFilter SKIP_ROW_GROUPS = new SkipMetadataFilter();
   public static final long MAX_STATS_SIZE = 4096; // limit stats to 4k
 
-  private static final Log LOG = Log.getLog(ParquetMetadataConverter.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ParquetMetadataConverter.class);
 
   private final boolean useSignedStringMinMax;
 
@@ -789,9 +790,9 @@ public class ParquetMetadataConverter {
         return filterFileMetaDataByMidpoint(readFileMetaData(from), filter);
       }
     });
-    if (Log.DEBUG) LOG.debug(fileMetaData);
+    LOG.debug("{}", fileMetaData);
     ParquetMetadata parquetMetadata = fromParquetMetadata(fileMetaData);
-    if (Log.DEBUG) LOG.debug(ParquetMetadata.toPrettyJSON(parquetMetadata));
+    if (LOG.isDebugEnabled()) LOG.debug(ParquetMetadata.toPrettyJSON(parquetMetadata));
     return parquetMetadata;
   }
 

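Note the asymmetry in the last ParquetMetadataConverter hunk above: the
plain fileMetaData argument loses its Log.DEBUG guard, but the toPrettyJSON
call keeps an isDebugEnabled() check. A placeholder only defers the
toString() of an argument; an expensive expression passed as an argument is
still evaluated before the level check. A self-contained sketch of the
distinction (the expensive method here is a stand-in, not Parquet API):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GuardedDebugExample {
  private static final Logger LOG = LoggerFactory.getLogger(GuardedDebugExample.class);

  // Stand-in for a costly serialization such as ParquetMetadata.toPrettyJSON.
  private static String expensivePrettyPrint(Object o) {
    return "pretty(" + o + ")";
  }

  public static void main(String[] args) {
    Object footer = "footer";
    // Cheap: footer.toString() runs only if DEBUG is enabled.
    LOG.debug("{}", footer);
    // Costly: the argument would be built before the level check if passed
    // directly, so it stays behind an explicit guard.
    if (LOG.isDebugEnabled()) {
      LOG.debug(expensivePrettyPrint(footer));
    }
  }
}
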
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ColumnChunkPageReadStore.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ColumnChunkPageReadStore.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ColumnChunkPageReadStore.java
index f428e85..f067679 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ColumnChunkPageReadStore.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ColumnChunkPageReadStore.java
@@ -25,7 +25,6 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.parquet.Ints;
-import org.apache.parquet.Log;
 import org.apache.parquet.column.ColumnDescriptor;
 import org.apache.parquet.column.page.DataPage;
 import org.apache.parquet.column.page.DataPageV1;
@@ -36,6 +35,8 @@ import org.apache.parquet.column.page.PageReadStore;
 import org.apache.parquet.column.page.PageReader;
 import org.apache.parquet.hadoop.CodecFactory.BytesDecompressor;
 import org.apache.parquet.io.ParquetDecodingException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * TODO: should this actually be called RowGroupImpl or something?
@@ -44,7 +45,7 @@ import org.apache.parquet.io.ParquetDecodingException;
  *
  */
 class ColumnChunkPageReadStore implements PageReadStore, DictionaryPageReadStore {
-  private static final Log LOG = Log.getLog(ColumnChunkPageReadStore.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ColumnChunkPageReadStore.class);
 
   /**
    * PageReader for a single column chunk. A column chunk contains

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ColumnChunkPageWriteStore.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ColumnChunkPageWriteStore.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ColumnChunkPageWriteStore.java
index 0fb9a18..ac3cd3b 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ColumnChunkPageWriteStore.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ColumnChunkPageWriteStore.java
@@ -18,7 +18,6 @@
  */
 package org.apache.parquet.hadoop;
 
-import static org.apache.parquet.Log.INFO;
 import static org.apache.parquet.column.statistics.Statistics.getStatsBasedOnType;
 
 import java.io.ByteArrayOutputStream;
@@ -30,7 +29,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.bytes.BytesInput;
 import org.apache.parquet.bytes.ConcatenatingByteArrayCollector;
 import org.apache.parquet.column.ColumnDescriptor;
@@ -44,9 +42,11 @@ import org.apache.parquet.hadoop.CodecFactory.BytesCompressor;
 import org.apache.parquet.io.ParquetEncodingException;
 import org.apache.parquet.schema.MessageType;
 import org.apache.parquet.bytes.ByteBufferAllocator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 class ColumnChunkPageWriteStore implements PageWriteStore {
-  private static final Log LOG = Log.getLog(ColumnChunkPageWriteStore.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ColumnChunkPageWriteStore.class);
 
   private static ParquetMetadataConverter parquetMetadataConverter = new ParquetMetadataConverter();
 
@@ -191,8 +191,8 @@ class ColumnChunkPageWriteStore implements PageWriteStore {
       writer.writeDataPages(buf, uncompressedLength, compressedLength, totalStatistics,
           rlEncodings, dlEncodings, dataEncodings);
       writer.endColumn();
-      if (INFO) {
-        LOG.info(
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(
             String.format(
                 "written %,dB for %s: %,d values, %,dB raw, %,dB comp, %d pages, encodings: %s",
                 buf.size(), path, totalValueCount, uncompressedLength, compressedLength, pageCount, new HashSet<Encoding>(dataEncodings))

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/DirectCodecFactory.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/DirectCodecFactory.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/DirectCodecFactory.java
index d90ab51..344f3ec 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/DirectCodecFactory.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/DirectCodecFactory.java
@@ -33,12 +33,13 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.Compressor;
 import org.apache.hadoop.io.compress.Decompressor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.xerial.snappy.Snappy;
 
 import org.apache.parquet.bytes.ByteBufferAllocator;
 import org.apache.parquet.bytes.BytesInput;
 import org.apache.parquet.hadoop.metadata.CompressionCodecName;
-import org.apache.parquet.Log;
 import org.apache.parquet.ParquetRuntimeException;
 import org.apache.parquet.Preconditions;
 
@@ -47,7 +48,7 @@ import org.apache.parquet.Preconditions;
  * direct memory, without requiring a copy into heap memory (where possible).
  */
 class DirectCodecFactory extends CodecFactory implements AutoCloseable {
-  private static final Log LOG = Log.getLog(DirectCodecFactory.class);
+  private static final Logger LOG = LoggerFactory.getLogger(DirectCodecFactory.class);
 
   private final ByteBufferAllocator allocator;
 
@@ -372,7 +373,7 @@ class DirectCodecFactory extends CodecFactory implements AutoCloseable {
             cPools.put(com.getClass(), compressorPool);
             compressorPool.returnObject(com);
           } else {
-            if (Log.DEBUG) {
+            if (LOG.isDebugEnabled()) {
               LOG.debug(String.format(BYTE_BUF_IMPL_NOT_FOUND_MSG, "compressor", codec.getClass().getName()));
             }
           }
@@ -388,7 +389,7 @@ class DirectCodecFactory extends CodecFactory implements AutoCloseable {
             dePools.put(decom.getClass(), decompressorPool);
             decompressorPool.returnObject(decom);
           } else {
-            if (Log.DEBUG) {
+            if (LOG.isDebugEnabled()) {
               LOG.debug(String.format(BYTE_BUF_IMPL_NOT_FOUND_MSG, "decompressor", codec.getClass().getName()));
             }
           }
@@ -408,7 +409,7 @@ class DirectCodecFactory extends CodecFactory implements AutoCloseable {
 
             } else {
               supportDirectDecompressor = false;
-              if (Log.DEBUG) {
+              if (LOG.isDebugEnabled()) {
                 LOG.debug(String.format(BYTE_BUF_IMPL_NOT_FOUND_MSG, "compressor", codec.getClass().getName()));
               }
             }

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/InternalParquetRecordReader.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/InternalParquetRecordReader.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/InternalParquetRecordReader.java
index 85b6691..88b3d2d 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/InternalParquetRecordReader.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/InternalParquetRecordReader.java
@@ -1,4 +1,4 @@
-/* 
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * 
+ *
  *   http://www.apache.org/licenses/LICENSE-2.0
- * 
+ *
  * Unless required by applicable law or agreed to in writing,
  * software distributed under the License is distributed on an
  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -27,7 +27,6 @@ import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.column.page.PageReadStore;
 import org.apache.parquet.filter.UnboundRecordFilter;
 import org.apache.parquet.filter2.compat.FilterCompat;
@@ -42,16 +41,17 @@ import org.apache.parquet.io.ParquetDecodingException;
 import org.apache.parquet.io.api.RecordMaterializer;
 import org.apache.parquet.io.api.RecordMaterializer.RecordMaterializationException;
 import org.apache.parquet.schema.MessageType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static java.lang.String.format;
-import static org.apache.parquet.Log.DEBUG;
 import static org.apache.parquet.Preconditions.checkNotNull;
 import static org.apache.parquet.hadoop.ParquetInputFormat.RECORD_FILTERING_ENABLED;
 import static org.apache.parquet.hadoop.ParquetInputFormat.RECORD_FILTERING_ENABLED_DEFAULT;
 import static org.apache.parquet.hadoop.ParquetInputFormat.STRICT_TYPE_CHECKING;
 
 class InternalParquetRecordReader<T> {
-  private static final Log LOG = Log.getLog(InternalParquetRecordReader.class);
+  private static final Logger LOG = LoggerFactory.getLogger(InternalParquetRecordReader.class);
 
   private ColumnIOFactory columnIOFactory = null;
   private final Filter filter;
@@ -110,7 +110,7 @@ class InternalParquetRecordReader<T> {
     if (current == totalCountLoadedSoFar) {
       if (current != 0) {
         totalTimeSpentProcessingRecords += (System.currentTimeMillis() - startedAssemblingCurrentBlockAt);
-        if (Log.INFO) {
+        if (LOG.isInfoEnabled()) {
             LOG.info("Assembled and processed " + totalCountLoadedSoFar + " records from " + columnCount + " columns in " + totalTimeSpentProcessingRecords + " ms: "+((float)totalCountLoadedSoFar / totalTimeSpentProcessingRecords) + " rec/ms, " + ((float)totalCountLoadedSoFar * columnCount / totalTimeSpentProcessingRecords) + " cell/ms");
             final long totalTime = totalTimeSpentProcessingRecords + totalTimeSpentReadingBytes;
             if (totalTime != 0) {
@@ -130,8 +130,8 @@ class InternalParquetRecordReader<T> {
       long timeSpentReading = System.currentTimeMillis() - t0;
       totalTimeSpentReadingBytes += timeSpentReading;
       BenchmarkCounter.incrementTime(timeSpentReading);
-      if (Log.INFO) LOG.info("block read in memory in " + timeSpentReading + " ms. row count = " + pages.getRowCount());
-      if (Log.DEBUG) LOG.debug("initializing Record assembly with requested schema " + requestedSchema);
+      if (LOG.isInfoEnabled()) LOG.info("block read in memory in {} ms. row count = {}", timeSpentReading, pages.getRowCount());
+      LOG.debug("initializing Record assembly with requested schema {}", requestedSchema);
       MessageColumnIO columnIO = columnIOFactory.getColumnIO(requestedSchema, fileSchema, strictTypeChecking);
       recordReader = columnIO.getRecordReader(pages, recordConverter,
           filterRecords ? filter : FilterCompat.NOOP);
@@ -180,7 +180,7 @@ class InternalParquetRecordReader<T> {
     this.filterRecords = configuration.getBoolean(
         RECORD_FILTERING_ENABLED, RECORD_FILTERING_ENABLED_DEFAULT);
     reader.setRequestedSchema(requestedSchema);
-    LOG.info("RecordReader initialized will read a total of " + total + " records.");
+    LOG.info("RecordReader initialized will read a total of {} records.", total);
   }
 
   public boolean nextKeyValue() throws IOException, InterruptedException {
@@ -199,26 +199,26 @@ class InternalParquetRecordReader<T> {
         } catch (RecordMaterializationException e) {
           // this might throw, but it's fatal if it does.
           unmaterializableRecordCounter.incErrors(e);
-          if (DEBUG) LOG.debug("skipping a corrupt record");
+          LOG.debug("skipping a corrupt record");
           continue;
         }
 
         if (recordReader.shouldSkipCurrentRecord()) {
           // this record is being filtered via the filter2 package
-          if (DEBUG) LOG.debug("skipping record");
+          LOG.debug("skipping record");
           continue;
         }
 
         if (currentValue == null) {
           // only happens with FilteredRecordReader at end of block
           current = totalCountLoadedSoFar;
-          if (DEBUG) LOG.debug("filtered record reader reached end of block");
+          LOG.debug("filtered record reader reached end of block");
           continue;
         }
 
         recordFound = true;
 
-        if (DEBUG) LOG.debug("read value: " + currentValue);
+        LOG.debug("read value: {}", currentValue);
       } catch (RuntimeException e) {
         throw new ParquetDecodingException(format("Can not read value at %d in block %d in file %s", current, currentBlock, reader.getPath()), e);
       }

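The record-reader hunks drop the static "if (DEBUG)" guards on the
per-record path entirely. That relies on two SLF4J properties: the level
check happens inside debug() itself, and the one- and two-argument
overloads avoid allocating a varargs array. A sketch of why the unguarded
calls stay cheap (the values are illustrative):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class HotPathLoggingExample {
  private static final Logger LOG = LoggerFactory.getLogger(HotPathLoggingExample.class);

  public static void main(String[] args) {
    // No guard needed: debug() performs its own level check, and with at
    // most two arguments no Object[] is allocated for the call.
    Object currentValue = "row";
    LOG.debug("read value: {}", currentValue);

    long timeSpentReading = 12;
    long rowCount = 1000;
    LOG.info("block read in memory in {} ms. row count = {}", timeSpentReading, rowCount);
  }
}
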
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/InternalParquetRecordWriter.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/InternalParquetRecordWriter.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/InternalParquetRecordWriter.java
index 6c3dac5..2a221ac 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/InternalParquetRecordWriter.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/InternalParquetRecordWriter.java
@@ -21,14 +21,12 @@ package org.apache.parquet.hadoop;
 import static java.lang.Math.max;
 import static java.lang.Math.min;
 import static java.lang.String.format;
-import static org.apache.parquet.Log.DEBUG;
 import static org.apache.parquet.Preconditions.checkNotNull;
 
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.column.ColumnWriteStore;
 import org.apache.parquet.column.ParquetProperties;
 import org.apache.parquet.hadoop.CodecFactory.BytesCompressor;
@@ -38,9 +36,11 @@ import org.apache.parquet.io.ColumnIOFactory;
 import org.apache.parquet.io.MessageColumnIO;
 import org.apache.parquet.io.api.RecordConsumer;
 import org.apache.parquet.schema.MessageType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 class InternalParquetRecordWriter<T> {
-  private static final Log LOG = Log.getLog(InternalParquetRecordWriter.class);
+  private static final Logger LOG = LoggerFactory.getLogger(InternalParquetRecordWriter.class);
 
   private static final int MINIMUM_RECORD_COUNT_FOR_CHECK = 100;
   private static final int MAXIMUM_RECORD_COUNT_FOR_CHECK = 10000;
@@ -139,7 +139,7 @@ class InternalParquetRecordWriter<T> {
       // flush the row group if it is within ~2 records of the limit
       // it is much better to be slightly under size than to be over at all
       if (memSize > (nextRowGroupSize - 2 * recordSize)) {
-        LOG.info(format("mem size %,d > %,d: flushing %,d records to disk.", memSize, nextRowGroupSize, recordCount));
+        LOG.info("mem size {} > {}: flushing {} records to disk.", memSize, nextRowGroupSize, recordCount);
         flushRowGroupToStore();
         initStore();
         recordCountForNextMemCheck = min(max(MINIMUM_RECORD_COUNT_FOR_CHECK, recordCount / 2), MAXIMUM_RECORD_COUNT_FOR_CHECK);
@@ -149,9 +149,7 @@ class InternalParquetRecordWriter<T> {
             max(MINIMUM_RECORD_COUNT_FOR_CHECK, (recordCount + (long)(nextRowGroupSize / ((float)recordSize))) / 2), // will check halfway
             recordCount + MAXIMUM_RECORD_COUNT_FOR_CHECK // will not look more than max records ahead
             );
-        if (DEBUG) {
-          LOG.debug(format("Checked mem at %,d will check again at: %,d ", recordCount, recordCountForNextMemCheck));
-        }
+        LOG.debug("Checked mem at {} will check again at: {}", recordCount, recordCountForNextMemCheck);
       }
     }
   }
@@ -159,9 +157,9 @@ class InternalParquetRecordWriter<T> {
   private void flushRowGroupToStore()
       throws IOException {
     recordConsumer.flush();
-    LOG.info(format("Flushing mem columnStore to file. allocated memory: %,d", columnStore.getAllocatedSize()));
+    LOG.info("Flushing mem columnStore to file. allocated memory: {}", columnStore.getAllocatedSize());
     if (columnStore.getAllocatedSize() > (3 * rowGroupSizeThreshold)) {
-      LOG.warn("Too much memory used: " + columnStore.memUsageString());
+      LOG.warn("Too much memory used: {}", columnStore.memUsageString());
     }
 
     if (recordCount > 0) {

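One side effect of swapping String.format for placeholders in the writer
hunks above: the "%,d" conversions printed thousands grouping, while "{}"
prints the raw number. Where grouped output is still wanted, the values can
be pre-formatted; a sketch (the numbers are made up):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GroupedNumberLoggingExample {
  private static final Logger LOG = LoggerFactory.getLogger(GroupedNumberLoggingExample.class);

  public static void main(String[] args) {
    long memSize = 134217728L;
    long nextRowGroupSize = 134000000L;
    // "{}" renders 134217728; "%,d" rendered 134,217,728.
    LOG.info("mem size {} > {}", memSize, nextRowGroupSize);
    // Pre-format when the grouped form is worth the extra work:
    LOG.info("mem size {} > {}", String.format("%,d", memSize),
        String.format("%,d", nextRowGroupSize));
  }
}
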
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/LruCache.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/LruCache.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/LruCache.java
index 44f9eca..c4e18b1 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/LruCache.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/LruCache.java
@@ -18,7 +18,8 @@
  */
 package org.apache.parquet.hadoop;
 
-import org.apache.parquet.Log;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.util.LinkedHashMap;
 import java.util.Map;
@@ -35,7 +36,7 @@ import java.util.Map;
  *           so that the "staleness" of the value can be easily determined.
  */
 final class LruCache<K, V extends LruCache.Value<K, V>> {
-  private static final Log LOG = Log.getLog(LruCache.class);
+  private static final Logger LOG = LoggerFactory.getLogger(LruCache.class);
 
   private static final float DEFAULT_LOAD_FACTOR = 0.75f;
 
@@ -65,7 +66,7 @@ final class LruCache<K, V extends LruCache.Value<K, V>> {
               public boolean removeEldestEntry(final Map.Entry<K, V> eldest) {
                 boolean result = size() > maxSize;
                 if (result) {
-                  if (Log.DEBUG) {
+                  if (LOG.isDebugEnabled()) {
                     LOG.debug("Removing eldest entry in cache: "
                             + eldest.getKey());
                   }
@@ -84,9 +85,7 @@ final class LruCache<K, V extends LruCache.Value<K, V>> {
   public V remove(final K key) {
     V oldValue = cacheMap.remove(key);
     if (oldValue != null) {
-      if (Log.DEBUG) {
-        LOG.debug("Removed cache entry for '" + key + "'");
-      }
+      LOG.debug("Removed cache entry for '{}'", key);
     }
     return oldValue;
   }
@@ -101,29 +100,29 @@ final class LruCache<K, V extends LruCache.Value<K, V>> {
    */
   public void put(final K key, final V newValue) {
     if (newValue == null || !newValue.isCurrent(key)) {
-      if (Log.WARN) {
-        LOG.warn("Ignoring new cache entry for '" + key + "' because it is "
-                + (newValue == null ? "null" : "not current"));
+      if (LOG.isWarnEnabled()) {
+        LOG.warn("Ignoring new cache entry for '{}' because it is {}", key,
+                (newValue == null ? "null" : "not current"));
       }
       return;
     }
 
     V oldValue = cacheMap.get(key);
     if (oldValue != null && oldValue.isNewerThan(newValue)) {
-      if (Log.WARN) {
-        LOG.warn("Ignoring new cache entry for '" + key + "' because "
-                + "existing cache entry is newer");
+      if (LOG.isWarnEnabled()) {
+        LOG.warn("Ignoring new cache entry for '{}' because "
+                + "existing cache entry is newer", key);
       }
       return;
     }
 
     // no existing value or new value is newer than old value
     oldValue = cacheMap.put(key, newValue);
-    if (Log.DEBUG) {
+    if (LOG.isDebugEnabled()) {
       if (oldValue == null) {
-        LOG.debug("Added new cache entry for '" + key + "'");
+        LOG.debug("Added new cache entry for '{}'", key);
       } else {
-        LOG.debug("Overwrote existing cache entry for '" + key + "'");
+        LOG.debug("Overwrote existing cache entry for '{}'", key);
       }
     }
   }
@@ -145,10 +144,7 @@ final class LruCache<K, V extends LruCache.Value<K, V>> {
    */
   public V getCurrentValue(final K key) {
     V value = cacheMap.get(key);
-    if (Log.DEBUG) {
-      LOG.debug("Value for '" + key + "' " + (value == null ? "not " : "")
-              + "in cache");
-    }
+    LOG.debug("Value for '{}' {} in cache", key, (value == null ? "not " : ""));
     if (value != null && !value.isCurrent(key)) {
       // value is not current; remove it and return null
       remove(key);

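In LruCache.put the isWarnEnabled()/isDebugEnabled() guards survive even
though the calls are parameterized, because each guard wraps a branch that
chooses between messages; one check skips the whole branch when the level
is off. A reduced sketch of that shape (key and value are placeholders):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class BranchingLogMessageExample {
  private static final Logger LOG = LoggerFactory.getLogger(BranchingLogMessageExample.class);

  public static void main(String[] args) {
    String key = "footer-cache-key";
    Object oldValue = args.length > 0 ? args[0] : null;
    // A single level check guards the whole message-selection branch.
    if (LOG.isDebugEnabled()) {
      if (oldValue == null) {
        LOG.debug("Added new cache entry for '{}'", key);
      } else {
        LOG.debug("Overwrote existing cache entry for '{}'", key);
      }
    }
  }
}
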
http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/MemoryManager.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/MemoryManager.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/MemoryManager.java
index 0c56bb2..dc5c31d 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/MemoryManager.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/MemoryManager.java
@@ -18,9 +18,10 @@
  */
 package org.apache.parquet.hadoop;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.ParquetRuntimeException;
 import org.apache.parquet.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.lang.management.ManagementFactory;
 import java.util.Collections;
@@ -40,7 +41,7 @@ import java.util.Map;
  * When the sum exceeds, decrease each writer's allocation size by a ratio.
  */
 public class MemoryManager {
-  private static final Log LOG = Log.getLog(MemoryManager.class);
+  private static final Logger LOG = LoggerFactory.getLogger(MemoryManager.class);
   static final float DEFAULT_MEMORY_POOL_RATIO = 0.95f;
   static final long DEFAULT_MIN_MEMORY_ALLOCATION = 1 * 1024 * 1024; // 1MB
   private final float memoryPoolRatio;
@@ -59,7 +60,7 @@ public class MemoryManager {
     minMemoryAllocation = minAllocation;
     totalMemoryPool = Math.round((double) ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax
         () * ratio);
-    LOG.debug(String.format("Allocated total memory pool is: %,d", totalMemoryPool));
+    LOG.debug("Allocated total memory pool is: {}", totalMemoryPool);
   }
 
   private void checkRatio(float ratio) {

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetFileReader.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetFileReader.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetFileReader.java
index 4af26d0..7b7534c 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetFileReader.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetFileReader.java
@@ -18,7 +18,6 @@
  */
 package org.apache.parquet.hadoop;
 
-import static org.apache.parquet.Log.DEBUG;
 import static org.apache.parquet.bytes.BytesUtils.readIntLittleEndian;
 import static org.apache.parquet.filter2.compat.RowGroupFilter.FilterLevel.DICTIONARY;
 import static org.apache.parquet.filter2.compat.RowGroupFilter.FilterLevel.STATISTICS;
@@ -66,7 +65,6 @@ import org.apache.parquet.column.page.DictionaryPageReadStore;
 import org.apache.parquet.filter2.compat.FilterCompat;
 import org.apache.parquet.filter2.compat.RowGroupFilter;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.bytes.BytesInput;
 import org.apache.parquet.column.ColumnDescriptor;
 import org.apache.parquet.column.page.DataPage;
@@ -97,6 +95,8 @@ import org.apache.parquet.io.ParquetDecodingException;
 import org.apache.parquet.io.InputFile;
 import org.apache.parquet.schema.MessageType;
 import org.apache.parquet.schema.PrimitiveType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Internal implementation of the Parquet file reader as a block container
@@ -106,7 +106,7 @@ import org.apache.parquet.schema.PrimitiveType;
  */
 public class ParquetFileReader implements Closeable {
 
-  private static final Log LOG = Log.getLog(ParquetFileReader.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ParquetFileReader.class);
 
   public static String PARQUET_READ_PARALLELISM = "parquet.metadata.read.parallelism";
 
@@ -204,9 +204,7 @@ public class ParquetFileReader implements Closeable {
 
     if (toRead.size() > 0) {
       // read the footers of the files that did not have a summary file
-      if (Log.INFO) {
-        LOG.info("reading another " + toRead.size() + " footers");
-      }
+      LOG.info("reading another {} footers", toRead.size());
       result.addAll(readAllFootersInParallel(configuration, toRead, skipRowGroups));
     }
 
@@ -214,7 +212,7 @@ public class ParquetFileReader implements Closeable {
   }
 
   private static <T> List<T> runAllInParallel(int parallelism, List<Callable<T>> toRun) throws ExecutionException {
-    LOG.info("Initiating action with parallelism: " + parallelism);
+    LOG.info("Initiating action with parallelism: {}", parallelism);
     ExecutorService threadPool = Executors.newFixedThreadPool(parallelism);
     try {
       List<Future<T>> futures = new ArrayList<Future<T>>();
@@ -360,12 +358,10 @@ public class ParquetFileReader implements Closeable {
     FileSystem fileSystem = basePath.getFileSystem(configuration);
     if (skipRowGroups && fileSystem.exists(commonMetaDataFile)) {
       // reading the summary file that does not contain the row groups
-      if (Log.INFO) LOG.info("reading summary file: " + commonMetaDataFile);
+      LOG.info("reading summary file: {}", commonMetaDataFile);
       return readFooter(configuration, commonMetaDataFile, filter(skipRowGroups));
     } else if (fileSystem.exists(metadataFile)) {
-      if (Log.INFO) {
-        LOG.info("reading summary file: " + metadataFile);
-      }
+      LOG.info("reading summary file: {}", metadataFile);
       return readFooter(configuration, metadataFile, filter(skipRowGroups));
     } else {
       return null;
@@ -472,17 +468,13 @@ public class ParquetFileReader implements Closeable {
    * @throws IOException if an error occurs while reading the file
    */
   private static final ParquetMetadata readFooter(ParquetMetadataConverter converter, long fileLen, String filePath, SeekableInputStream f, MetadataFilter filter) throws IOException {
-    if (Log.DEBUG) {
-      LOG.debug("File length " + fileLen);
-    }
+    LOG.debug("File length {}", fileLen);
     int FOOTER_LENGTH_SIZE = 4;
     if (fileLen < MAGIC.length + FOOTER_LENGTH_SIZE + MAGIC.length) { // MAGIC + data + footer + footerIndex + MAGIC
       throw new RuntimeException(filePath + " is not a Parquet file (too small)");
     }
     long footerLengthIndex = fileLen - FOOTER_LENGTH_SIZE - MAGIC.length;
-    if (Log.DEBUG) {
-      LOG.debug("reading footer index at " + footerLengthIndex);
-    }
+    LOG.debug("reading footer index at {}", footerLengthIndex);
 
     f.seek(footerLengthIndex);
     int footerLength = readIntLittleEndian(f);
@@ -492,9 +484,7 @@ public class ParquetFileReader implements Closeable {
       throw new RuntimeException(filePath + " is not a Parquet file. expected magic number at tail " + Arrays.toString(MAGIC) + " but found " + Arrays.toString(magic));
     }
     long footerIndex = footerLengthIndex - footerLength;
-    if (Log.DEBUG) {
-      LOG.debug("read footer length: " + footerLength + ", footer index: " + footerIndex);
-    }
+    LOG.debug("read footer length: {}, footer index: {}", footerLength, footerIndex);
     if (footerIndex < MAGIC.length || footerIndex >= footerLengthIndex) {
       throw new RuntimeException("corrupted file: the footer index is not within the file");
     }
@@ -926,9 +916,7 @@ public class ParquetFileReader implements Closeable {
             valuesCountReadSoFar += dataHeaderV2.getNum_values();
             break;
           default:
-            if (DEBUG) {
-              LOG.debug("skipping page of type " + pageHeader.getType() + " of size " + compressedPageSize);
-            }
+            LOG.debug("skipping page of type {} of size {}", pageHeader.getType(), compressedPageSize);
             this.skip(compressedPageSize);
             break;
         }
@@ -1013,7 +1001,7 @@ public class ParquetFileReader implements Closeable {
         // usually 13 to 19 bytes are missing
         int l1 = initPos + count - pos();
         int l2 = size - l1;
-        LOG.info("completed the column chunk with " + l2 + " bytes");
+        LOG.info("completed the column chunk with {} bytes", l2);
         return BytesInput.concat(super.readAsBytesInput(l1), BytesInput.copy(BytesInput.from(f, l2)));
       }
       return super.readAsBytesInput(size);

http://git-wip-us.apache.org/repos/asf/parquet-mr/blob/df9d8e41/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetFileWriter.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetFileWriter.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetFileWriter.java
index f0fa7f5..57500bf 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetFileWriter.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetFileWriter.java
@@ -18,7 +18,6 @@
  */
 package org.apache.parquet.hadoop;
 
-import static org.apache.parquet.Log.DEBUG;
 import static org.apache.parquet.format.Util.writeFileMetaData;
 import static org.apache.parquet.hadoop.ParquetWriter.DEFAULT_BLOCK_SIZE;
 import static org.apache.parquet.hadoop.ParquetWriter.MAX_PADDING_SIZE_DEFAULT;
@@ -41,7 +40,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
-import org.apache.parquet.Log;
 import org.apache.parquet.Preconditions;
 import org.apache.parquet.Strings;
 import org.apache.parquet.Version;
@@ -67,6 +65,8 @@ import org.apache.parquet.io.ParquetEncodingException;
 import org.apache.parquet.schema.MessageType;
 import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
 import org.apache.parquet.schema.TypeUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Internal implementation of the Parquet file writer as a block container
@@ -75,7 +75,7 @@ import org.apache.parquet.schema.TypeUtil;
  *
  */
 public class ParquetFileWriter {
-  private static final Log LOG = Log.getLog(ParquetFileWriter.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ParquetFileWriter.class);
 
   private static ParquetMetadataConverter metadataConverter = new ParquetMetadataConverter();
 
@@ -274,7 +274,7 @@ public class ParquetFileWriter {
    */
   public void start() throws IOException {
     state = state.start();
-    if (DEBUG) LOG.debug(out.getPos() + ": start");
+    LOG.debug("{}: start", out.getPos());
     out.write(MAGIC);
   }
 
@@ -285,7 +285,7 @@ public class ParquetFileWriter {
    */
   public void startBlock(long recordCount) throws IOException {
     state = state.startBlock();
-    if (DEBUG) LOG.debug(out.getPos() + ": start block");
+    LOG.debug("{}: start block", out.getPos());
 //    out.write(MAGIC); // TODO: add a magic delimiter
 
     alignment.alignForRowGroup(out);
@@ -325,7 +325,7 @@ public class ParquetFileWriter {
    */
   public void writeDictionaryPage(DictionaryPage dictionaryPage) throws IOException {
     state = state.write();
-    if (DEBUG) LOG.debug(out.getPos() + ": write dictionary page: " + dictionaryPage.getDictionarySize() + " values");
+    LOG.debug("{}: write dictionary page: {} values", out.getPos(), dictionaryPage.getDictionarySize());
     currentChunkDictionaryPageOffset = out.getPos();
     int uncompressedSize = dictionaryPage.getUncompressedSize();
     int compressedPageSize = (int)dictionaryPage.getBytes().size(); // TODO: fix casts
@@ -338,7 +338,7 @@ public class ParquetFileWriter {
     long headerSize = out.getPos() - currentChunkDictionaryPageOffset;
     this.uncompressedLength += uncompressedSize + headerSize;
     this.compressedLength += compressedPageSize + headerSize;
-    if (DEBUG) LOG.debug(out.getPos() + ": write dictionary page content " + compressedPageSize);
+    LOG.debug("{}: write dictionary page content {}", out.getPos(), compressedPageSize);
     dictionaryPage.getBytes().writeAllTo(out);
     encodingStatsBuilder.addDictEncoding(dictionaryPage.getEncoding());
     currentEncodings.add(dictionaryPage.getEncoding());
@@ -363,7 +363,7 @@ public class ParquetFileWriter {
       Encoding valuesEncoding) throws IOException {
     state = state.write();
     long beforeHeader = out.getPos();
-    if (DEBUG) LOG.debug(beforeHeader + ": write data page: " + valueCount + " values");
+    LOG.debug("{}: write data page: {} values", beforeHeader, valueCount);
     int compressedPageSize = (int)bytes.size();
     metadataConverter.writeDataPageHeader(
         uncompressedPageSize, compressedPageSize,
@@ -375,7 +375,7 @@ public class ParquetFileWriter {
     long headerSize = out.getPos() - beforeHeader;
     this.uncompressedLength += uncompressedPageSize + headerSize;
     this.compressedLength += compressedPageSize + headerSize;
-    if (DEBUG) LOG.debug(out.getPos() + ": write data page content " + compressedPageSize);
+    LOG.debug("{}: write data page content {}", out.getPos(), compressedPageSize);
     bytes.writeAllTo(out);
     encodingStatsBuilder.addDataEncoding(valuesEncoding);
     currentEncodings.add(rlEncoding);
@@ -401,7 +401,7 @@ public class ParquetFileWriter {
       Encoding valuesEncoding) throws IOException {
     state = state.write();
     long beforeHeader = out.getPos();
-    if (DEBUG) LOG.debug(beforeHeader + ": write data page: " + valueCount + " values");
+    LOG.debug("{}: write data page: {} values", beforeHeader, valueCount);
     int compressedPageSize = (int)bytes.size();
     metadataConverter.writeDataPageHeader(
         uncompressedPageSize, compressedPageSize,
@@ -414,7 +414,7 @@ public class ParquetFileWriter {
     long headerSize = out.getPos() - beforeHeader;
     this.uncompressedLength += uncompressedPageSize + headerSize;
     this.compressedLength += compressedPageSize + headerSize;
-    if (DEBUG) LOG.debug(out.getPos() + ": write data page content " + compressedPageSize);
+    LOG.debug("{}: write data page content {}", out.getPos(), compressedPageSize);
     bytes.writeAllTo(out);
     currentStatistics.mergeStatistics(statistics);
     encodingStatsBuilder.addDataEncoding(valuesEncoding);
@@ -438,11 +438,11 @@ public class ParquetFileWriter {
                       Set<Encoding> dlEncodings,
                       List<Encoding> dataEncodings) throws IOException {
     state = state.write();
-    if (DEBUG) LOG.debug(out.getPos() + ": write data pages");
+    LOG.debug("{}: write data pages", out.getPos());
     long headersSize = bytes.size() - compressedTotalPageSize;
     this.uncompressedLength += uncompressedTotalPageSize + headersSize;
     this.compressedLength += compressedTotalPageSize + headersSize;
-    if (DEBUG) LOG.debug(out.getPos() + ": write data pages content");
+    LOG.debug("{}: write data pages content", out.getPos());
     bytes.writeAllTo(out);
     encodingStatsBuilder.addDataEncodings(dataEncodings);
     if (rlEncodings.isEmpty()) {
@@ -460,7 +460,7 @@ public class ParquetFileWriter {
    */
   public void endColumn() throws IOException {
     state = state.endColumn();
-    if (DEBUG) LOG.debug(out.getPos() + ": end column");
+    LOG.debug("{}: end column", out.getPos());
     currentBlock.addColumn(ColumnChunkMetaData.get(
         currentChunkPath,
         currentChunkType,
@@ -484,7 +484,7 @@ public class ParquetFileWriter {
    */
   public void endBlock() throws IOException {
     state = state.endBlock();
-    if (DEBUG) LOG.debug(out.getPos() + ": end block");
+    LOG.debug("{}: end block", out.getPos());
     currentBlock.setRowCount(currentRecordCount);
     blocks.add(currentBlock);
     currentBlock = null;
@@ -611,8 +611,7 @@ public class ParquetFileWriter {
    */
   private static void copy(SeekableInputStream from, FSDataOutputStream to,
                            long start, long length) throws IOException{
-    if (DEBUG) LOG.debug(
-        "Copying " + length + " bytes at " + start + " to " + to.getPos());
+    LOG.debug("Copying {} bytes at {} to {}" ,length , start , to.getPos());
     from.seek(start);
     long bytesCopied = 0;
     byte[] buffer = COPY_BUFFER.get();
@@ -637,7 +636,7 @@ public class ParquetFileWriter {
    */
   public void end(Map<String, String> extraMetaData) throws IOException {
     state = state.end();
-    if (DEBUG) LOG.debug(out.getPos() + ": end");
+    LOG.debug("{}: end", out.getPos());
     ParquetMetadata footer = new ParquetMetadata(new FileMetaData(schema, extraMetaData, Version.FULL_VERSION), blocks);
     serializeFooter(footer, out);
     out.close();
@@ -647,7 +646,7 @@ public class ParquetFileWriter {
     long footerIndex = out.getPos();
     org.apache.parquet.format.FileMetaData parquetMetadata = metadataConverter.toParquetMetadata(CURRENT_VERSION, footer);
     writeFileMetaData(parquetMetadata, out);
-    if (DEBUG) LOG.debug(out.getPos() + ": footer length = " + (out.getPos() - footerIndex));
+    LOG.debug("{}: footer length = {}" , out.getPos(), (out.getPos() - footerIndex));
     BytesUtils.writeIntLittleEndian(out, (int) (out.getPos() - footerIndex));
     out.write(MAGIC);
   }
@@ -905,9 +904,7 @@ public class ParquetFileWriter {
       long remaining = dfsBlockSize - (out.getPos() % dfsBlockSize);
 
       if (isPaddingNeeded(remaining)) {
-        if (DEBUG) LOG.debug("Adding " + remaining + " bytes of padding (" +
-            "row group size=" + rowGroupSize + "B, " +
-            "block size=" + dfsBlockSize + "B)");
+        LOG.debug("Adding {} bytes of padding (row group size={}B, block size={}B)", remaining, rowGroupSize, dfsBlockSize);
         for (; remaining > 0; remaining -= zeros.length) {
           out.write(zeros, 0, (int) Math.min((long) zeros.length, remaining));
         }