Posted to commits@parquet.apache.org by ju...@apache.org on 2014/07/29 23:39:15 UTC

[1/4] Add a unified and optionally more constrained API for expressing filters on columns

Repository: incubator-parquet-mr
Updated Branches:
  refs/heads/master fc2c29df7 -> ad32bf0fd
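
As a quick illustration of the unified filter API this patch introduces (a minimal sketch pieced together from the tests below, not part of the commit itself): a FilterPredicate is assembled with the static methods on FilterApi, and the same predicate can then either be stored in a job Configuration via ParquetInputFormat.setFilterPredicate, where it drives both row-group (statistics) filtering and record-level filtering, or handed directly to a ParquetReader wrapped in FilterCompat. The column names "foo" and "name" and the input path taken from args[0] are assumptions for illustration only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

import parquet.example.data.Group;
import parquet.filter2.compat.FilterCompat;
import parquet.filter2.predicate.FilterPredicate;
import parquet.filter2.predicate.Operators.BinaryColumn;
import parquet.filter2.predicate.Operators.IntColumn;
import parquet.hadoop.ParquetInputFormat;
import parquet.hadoop.ParquetReader;
import parquet.hadoop.example.GroupReadSupport;
import parquet.io.api.Binary;

import static parquet.filter2.predicate.FilterApi.and;
import static parquet.filter2.predicate.FilterApi.binaryColumn;
import static parquet.filter2.predicate.FilterApi.gt;
import static parquet.filter2.predicate.FilterApi.intColumn;
import static parquet.filter2.predicate.FilterApi.notEq;

public class Filter2Example {
  public static void main(String[] args) throws Exception {
    // Columns are named by dotted path and typed up front.
    IntColumn foo = intColumn("foo");
    BinaryColumn name = binaryColumn("name");

    // Predicates compose with and/or/not and the comparison operators.
    FilterPredicate pred = and(gt(foo, 10), notEq(name, Binary.fromString("bob")));

    // Map-reduce path: store the predicate in the job configuration; it is
    // applied at the row-group (statistics) level and at the record level.
    Configuration conf = new Configuration();
    ParquetInputFormat.setFilterPredicate(conf, pred);

    // Standalone reader path: wrap the predicate in FilterCompat and pass it
    // to the ParquetReader builder.
    ParquetReader<Group> reader =
        ParquetReader.builder(new GroupReadSupport(), new Path(args[0]))
                     .withFilter(FilterCompat.get(pred))
                     .build();
    Group g;
    while ((g = reader.read()) != null) {
      System.out.println(g);
    }
    reader.close();
  }
}

Predicates containing a bare not() are rewritten by LogicalInverseRewriter before statistics filtering, and referencing a column that is missing from the file schema fails fast, as the tests below exercise.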


http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/test/java/parquet/filter2/compat/TestRowGroupFilter.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/test/java/parquet/filter2/compat/TestRowGroupFilter.java b/parquet-hadoop/src/test/java/parquet/filter2/compat/TestRowGroupFilter.java
new file mode 100644
index 0000000..a688ef8
--- /dev/null
+++ b/parquet-hadoop/src/test/java/parquet/filter2/compat/TestRowGroupFilter.java
@@ -0,0 +1,84 @@
+package parquet.filter2.compat;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.junit.Test;
+
+import parquet.column.statistics.IntStatistics;
+import parquet.filter2.predicate.Operators.IntColumn;
+import parquet.hadoop.metadata.BlockMetaData;
+import parquet.schema.MessageType;
+import parquet.schema.MessageTypeParser;
+
+import static org.junit.Assert.assertEquals;
+import static parquet.filter2.predicate.FilterApi.eq;
+import static parquet.filter2.predicate.FilterApi.intColumn;
+import static parquet.filter2.predicate.FilterApi.notEq;
+import static parquet.hadoop.TestInputFormat.makeBlockFromStats;
+
+public class TestRowGroupFilter {
+  @Test
+  public void testApplyRowGroupFilters() {
+
+    List<BlockMetaData> blocks = new ArrayList<BlockMetaData>();
+
+    IntStatistics stats1 = new IntStatistics();
+    stats1.setMinMax(10, 100);
+    stats1.setNumNulls(4);
+    BlockMetaData b1 = makeBlockFromStats(stats1, 301);
+    blocks.add(b1);
+
+    IntStatistics stats2 = new IntStatistics();
+    stats2.setMinMax(8, 102);
+    stats2.setNumNulls(0);
+    BlockMetaData b2 = makeBlockFromStats(stats2, 302);
+    blocks.add(b2);
+
+    IntStatistics stats3 = new IntStatistics();
+    stats3.setMinMax(100, 102);
+    stats3.setNumNulls(12);
+    BlockMetaData b3 = makeBlockFromStats(stats3, 303);
+    blocks.add(b3);
+
+
+    IntStatistics stats4 = new IntStatistics();
+    stats4.setMinMax(0, 0);
+    stats4.setNumNulls(304);
+    BlockMetaData b4 = makeBlockFromStats(stats4, 304);
+    blocks.add(b4);
+
+
+    IntStatistics stats5 = new IntStatistics();
+    stats5.setMinMax(50, 50);
+    stats5.setNumNulls(7);
+    BlockMetaData b5 = makeBlockFromStats(stats5, 305);
+    blocks.add(b5);
+
+    IntStatistics stats6 = new IntStatistics();
+    stats6.setMinMax(0, 0);
+    stats6.setNumNulls(12);
+    BlockMetaData b6 = makeBlockFromStats(stats6, 306);
+    blocks.add(b6);
+
+    MessageType schema = MessageTypeParser.parseMessageType("message Document { optional int32 foo; }");
+    IntColumn foo = intColumn("foo");
+
+    List<BlockMetaData> filtered = RowGroupFilter.filterRowGroups(FilterCompat.get(eq(foo, 50)), blocks, schema);
+    assertEquals(Arrays.asList(b1, b2, b5), filtered);
+
+    filtered = RowGroupFilter.filterRowGroups(FilterCompat.get(notEq(foo, 50)), blocks, schema);
+    assertEquals(Arrays.asList(b1, b2, b3, b4, b5, b6), filtered);
+
+    filtered = RowGroupFilter.filterRowGroups(FilterCompat.get(eq(foo, null)), blocks, schema);
+    assertEquals(Arrays.asList(b1, b3, b4, b5, b6), filtered);
+
+    filtered = RowGroupFilter.filterRowGroups(FilterCompat.get(notEq(foo, null)), blocks, schema);
+    assertEquals(Arrays.asList(b1, b2, b3, b5, b6), filtered);
+
+    filtered = RowGroupFilter.filterRowGroups(FilterCompat.get(eq(foo, 0)), blocks, schema);
+    assertEquals(Arrays.asList(b6), filtered);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/test/java/parquet/filter2/recordlevel/PhoneBookWriter.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/test/java/parquet/filter2/recordlevel/PhoneBookWriter.java b/parquet-hadoop/src/test/java/parquet/filter2/recordlevel/PhoneBookWriter.java
new file mode 100644
index 0000000..99f0a4e
--- /dev/null
+++ b/parquet-hadoop/src/test/java/parquet/filter2/recordlevel/PhoneBookWriter.java
@@ -0,0 +1,251 @@
+package parquet.filter2.recordlevel;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+
+import parquet.example.data.Group;
+import parquet.example.data.simple.SimpleGroup;
+import parquet.filter2.compat.FilterCompat.Filter;
+import parquet.hadoop.ParquetReader;
+import parquet.hadoop.ParquetWriter;
+import parquet.hadoop.example.GroupReadSupport;
+import parquet.hadoop.example.GroupWriteSupport;
+import parquet.schema.MessageType;
+import parquet.schema.MessageTypeParser;
+
+public class PhoneBookWriter {
+  private static final String schemaString =
+      "message user {\n"
+          + "  required int64 id;\n"
+          + "  optional binary name (UTF8);\n"
+          + "  optional group location {\n"
+          + "    optional double lon;\n"
+          + "    optional double lat;\n"
+          + "  }\n"
+          + "  optional group phoneNumbers {\n"
+          + "    repeated group phone {\n"
+          + "      required int64 number;\n"
+          + "      optional binary kind (UTF8);\n"
+          + "    }\n"
+          + "  }\n"
+          + "}\n";
+
+  private static final MessageType schema = MessageTypeParser.parseMessageType(schemaString);
+
+  public static class Location {
+    private final Double lon;
+    private final Double lat;
+
+    public Location(Double lon, Double lat) {
+      this.lon = lon;
+      this.lat = lat;
+    }
+
+    public Double getLon() {
+      return lon;
+    }
+
+    public Double getLat() {
+      return lat;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (o == null || getClass() != o.getClass()) return false;
+
+      Location location = (Location) o;
+
+      if (lat != null ? !lat.equals(location.lat) : location.lat != null) return false;
+      if (lon != null ? !lon.equals(location.lon) : location.lon != null) return false;
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      int result = lon != null ? lon.hashCode() : 0;
+      result = 31 * result + (lat != null ? lat.hashCode() : 0);
+      return result;
+    }
+  }
+
+  public static class PhoneNumber {
+    private final long number;
+    private final String kind;
+
+    public PhoneNumber(long number, String kind) {
+      this.number = number;
+      this.kind = kind;
+    }
+
+    public long getNumber() {
+      return number;
+    }
+
+    public String getKind() {
+      return kind;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (o == null || getClass() != o.getClass()) return false;
+
+      PhoneNumber that = (PhoneNumber) o;
+
+      if (number != that.number) return false;
+      if (kind != null ? !kind.equals(that.kind) : that.kind != null) return false;
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      int result = (int) (number ^ (number >>> 32));
+      result = 31 * result + (kind != null ? kind.hashCode() : 0);
+      return result;
+    }
+  }
+
+  public static class User {
+    private final long id;
+    private final String name;
+    private final List<PhoneNumber> phoneNumbers;
+    private final Location location;
+
+    public User(long id, String name, List<PhoneNumber> phoneNumbers, Location location) {
+      this.id = id;
+      this.name = name;
+      this.phoneNumbers = phoneNumbers;
+      this.location = location;
+    }
+
+    public long getId() {
+      return id;
+    }
+
+    public String getName() {
+      return name;
+    }
+
+    public List<PhoneNumber> getPhoneNumbers() {
+      return phoneNumbers;
+    }
+
+    public Location getLocation() {
+      return location;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (o == null || getClass() != o.getClass()) return false;
+
+      User user = (User) o;
+
+      if (id != user.id) return false;
+      if (location != null ? !location.equals(user.location) : user.location != null) return false;
+      if (name != null ? !name.equals(user.name) : user.name != null) return false;
+      if (phoneNumbers != null ? !phoneNumbers.equals(user.phoneNumbers) : user.phoneNumbers != null) return false;
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      int result = (int) (id ^ (id >>> 32));
+      result = 31 * result + (name != null ? name.hashCode() : 0);
+      result = 31 * result + (phoneNumbers != null ? phoneNumbers.hashCode() : 0);
+      result = 31 * result + (location != null ? location.hashCode() : 0);
+      return result;
+    }
+  }
+
+  public static SimpleGroup groupFromUser(User user) {
+    SimpleGroup root = new SimpleGroup(schema);
+    root.append("id", user.getId());
+
+    if (user.getName() != null) {
+      root.append("name", user.getName());
+    }
+
+    if (user.getPhoneNumbers() != null) {
+      Group phoneNumbers = root.addGroup("phoneNumbers");
+      for (PhoneNumber number : user.getPhoneNumbers()) {
+        Group phone = phoneNumbers.addGroup("phone");
+        phone.append("number", number.getNumber());
+        if (number.getKind() != null) {
+          phone.append("kind", number.getKind());
+        }
+      }
+    }
+
+    if (user.getLocation() != null) {
+      Group location = root.addGroup("location");
+      if (user.getLocation().getLon() != null) {
+        location.append("lon", user.getLocation().getLon());
+      }
+      if (user.getLocation().getLat() != null) {
+        location.append("lat", user.getLocation().getLat());
+      }
+    }
+    return root;
+  }
+
+  public static File writeToFile(List<User> users) throws IOException {
+    File f = File.createTempFile("phonebook", ".parquet");
+    f.deleteOnExit();
+    if (!f.delete()) {
+      throw new IOException("couldn't delete tmp file " + f);
+    }
+
+    writeToFile(f, users);
+
+    return f;
+  }
+
+  public static void writeToFile(File f, List<User> users) throws IOException {
+    Configuration conf = new Configuration();
+    GroupWriteSupport.setSchema(schema, conf);
+
+    ParquetWriter<Group> writer = new ParquetWriter<Group>(new Path(f.getAbsolutePath()), conf, new GroupWriteSupport());
+    for (User u : users) {
+      writer.write(groupFromUser(u));
+    }
+    writer.close();
+  }
+
+  public static List<Group> readFile(File f, Filter filter) throws IOException {
+    Configuration conf = new Configuration();
+    GroupWriteSupport.setSchema(schema, conf);
+
+    ParquetReader<Group> reader =
+        ParquetReader.builder(new GroupReadSupport(), new Path(f.getAbsolutePath()))
+                     .withConf(conf)
+                     .withFilter(filter)
+                     .build();
+
+    Group current;
+    List<Group> users = new ArrayList<Group>();
+
+    current = reader.read();
+    while (current != null) {
+      users.add(current);
+      current = reader.read();
+    }
+
+    return users;
+  }
+
+  public static void main(String[] args) throws IOException {
+    File f = new File(args[0]);
+    writeToFile(f, TestRecordLevelFilters.makeUsers());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/test/java/parquet/filter2/recordlevel/TestRecordLevelFilters.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/test/java/parquet/filter2/recordlevel/TestRecordLevelFilters.java b/parquet-hadoop/src/test/java/parquet/filter2/recordlevel/TestRecordLevelFilters.java
new file mode 100644
index 0000000..d771ead
--- /dev/null
+++ b/parquet-hadoop/src/test/java/parquet/filter2/recordlevel/TestRecordLevelFilters.java
@@ -0,0 +1,205 @@
+package parquet.filter2.recordlevel;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import parquet.example.data.Group;
+import parquet.filter2.compat.FilterCompat;
+import parquet.filter2.predicate.FilterPredicate;
+import parquet.filter2.predicate.Operators.BinaryColumn;
+import parquet.filter2.predicate.Operators.DoubleColumn;
+import parquet.filter2.predicate.Statistics;
+import parquet.filter2.predicate.UserDefinedPredicate;
+import parquet.filter2.recordlevel.PhoneBookWriter.Location;
+import parquet.filter2.recordlevel.PhoneBookWriter.PhoneNumber;
+import parquet.filter2.recordlevel.PhoneBookWriter.User;
+import parquet.io.api.Binary;
+
+import static org.junit.Assert.assertEquals;
+import static parquet.filter2.predicate.FilterApi.and;
+import static parquet.filter2.predicate.FilterApi.binaryColumn;
+import static parquet.filter2.predicate.FilterApi.doubleColumn;
+import static parquet.filter2.predicate.FilterApi.eq;
+import static parquet.filter2.predicate.FilterApi.gt;
+import static parquet.filter2.predicate.FilterApi.not;
+import static parquet.filter2.predicate.FilterApi.notEq;
+import static parquet.filter2.predicate.FilterApi.or;
+import static parquet.filter2.predicate.FilterApi.userDefined;
+
+public class TestRecordLevelFilters {
+
+  public static List<User> makeUsers() {
+    List<User> users = new ArrayList<User>();
+
+    users.add(new User(17, null, null, null));
+
+    users.add(new User(18, "bob", null, null));
+
+    users.add(new User(19, "alice", new ArrayList<PhoneNumber>(), null));
+
+    users.add(new User(20, "thing1", Arrays.asList(new PhoneNumber(5555555555L, null)), null));
+
+    users.add(new User(27, "thing2", Arrays.asList(new PhoneNumber(1111111111L, "home")), null));
+
+    users.add(new User(28, "popular", Arrays.asList(
+        new PhoneNumber(1111111111L, "home"),
+        new PhoneNumber(2222222222L, null),
+        new PhoneNumber(3333333333L, "mobile")
+    ), null));
+
+    users.add(new User(30, null, Arrays.asList(new PhoneNumber(1111111111L, "home")), null));
+
+    for (int i = 100; i < 200; i++) {
+      Location location = null;
+      if (i % 3 == 1) {
+        location = new Location((double)i, (double)i*2);
+      }
+      if (i % 3 == 2) {
+        location = new Location((double)i, null);
+      }
+      users.add(new User(i, "p" + i, Arrays.asList(new PhoneNumber(i, "cell")), location));
+    }
+
+    return users;
+  }
+
+  private static File phonebookFile;
+  private static List<User> users;
+
+  @BeforeClass
+  public static void setup() throws IOException{
+    users = makeUsers();
+    phonebookFile = PhoneBookWriter.writeToFile(users);
+  }
+
+  private static interface UserFilter {
+    boolean keep(User u);
+  }
+
+  private static List<Group> getExpected(UserFilter f) {
+    List<Group> expected = new ArrayList<Group>();
+    for (User u : users) {
+      if (f.keep(u)) {
+        expected.add(PhoneBookWriter.groupFromUser(u));
+      }
+    }
+    return expected;
+  }
+
+  private static void assertFilter(List<Group> found, UserFilter f) {
+    List<Group> expected = getExpected(f);
+    assertEquals(expected.size(), found.size());
+    Iterator<Group> expectedIter = expected.iterator();
+    Iterator<Group> foundIter = found.iterator();
+    while(expectedIter.hasNext()) {
+      assertEquals(expectedIter.next().toString(), foundIter.next().toString());
+    }
+  }
+
+  @Test
+  public void testNoFilter() throws Exception {
+    List<Group> found = PhoneBookWriter.readFile(phonebookFile, FilterCompat.NOOP);
+    assertFilter(found, new UserFilter() {
+      @Override
+      public boolean keep(User u) {
+        return true;
+      }
+    });
+  }
+
+  @Test
+  public void testAllFilter() throws Exception {
+    BinaryColumn name = binaryColumn("name");
+
+    FilterPredicate pred = eq(name, Binary.fromString("no matches"));
+
+    List<Group> found = PhoneBookWriter.readFile(phonebookFile, FilterCompat.get(pred));
+    assertEquals(new ArrayList<Group>(), found);
+  }
+
+  @Test
+  public void testNameNotNull() throws Exception {
+    BinaryColumn name = binaryColumn("name");
+
+    FilterPredicate pred = notEq(name, null);
+
+    List<Group> found = PhoneBookWriter.readFile(phonebookFile, FilterCompat.get(pred));
+
+    assertFilter(found, new UserFilter() {
+      @Override
+      public boolean keep(User u) {
+        return u.getName() != null;
+      }
+    });
+  }
+
+  public static class StartWithP extends UserDefinedPredicate<Binary> {
+
+    @Override
+    public boolean keep(Binary value) {
+      if (value == null) {
+        return false;
+      }
+      return value.toStringUsingUTF8().startsWith("p");
+    }
+
+    @Override
+    public boolean canDrop(Statistics<Binary> statistics) {
+      return false;
+    }
+
+    @Override
+    public boolean inverseCanDrop(Statistics<Binary> statistics) {
+      return false;
+    }
+  }
+
+  @Test
+  public void testNameNotStartWithP() throws Exception {
+    BinaryColumn name = binaryColumn("name");
+
+    FilterPredicate pred = not(userDefined(name, StartWithP.class));
+
+    List<Group> found = PhoneBookWriter.readFile(phonebookFile, FilterCompat.get(pred));
+
+    assertFilter(found, new UserFilter() {
+      @Override
+      public boolean keep(User u) {
+        return u.getName() == null || !u.getName().startsWith("p");
+      }
+    });
+  }
+
+  @Test
+  public void testComplex() throws Exception {
+    BinaryColumn name = binaryColumn("name");
+    DoubleColumn lon = doubleColumn("location.lon");
+    DoubleColumn lat = doubleColumn("location.lat");
+
+    FilterPredicate pred = or(and(gt(lon, 150.0), notEq(lat, null)), eq(name, Binary.fromString("alice")));
+
+    List<Group> found = PhoneBookWriter.readFile(phonebookFile, FilterCompat.get(pred));
+
+    assertFilter(found, new UserFilter() {
+      @Override
+      public boolean keep(User u) {
+        String name = u.getName();
+        Double lat = null;
+        Double lon = null;
+        if (u.getLocation() != null) {
+          lat = u.getLocation().getLat();
+          lon = u.getLocation().getLon();
+        }
+
+        return (lon != null && lon > 150.0 && lat != null) || "alice".equals(name);
+      }
+    });
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/test/java/parquet/filter2/statisticslevel/TestStatisticsFilter.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/test/java/parquet/filter2/statisticslevel/TestStatisticsFilter.java b/parquet-hadoop/src/test/java/parquet/filter2/statisticslevel/TestStatisticsFilter.java
new file mode 100644
index 0000000..4e75b20
--- /dev/null
+++ b/parquet-hadoop/src/test/java/parquet/filter2/statisticslevel/TestStatisticsFilter.java
@@ -0,0 +1,307 @@
+package parquet.filter2.statisticslevel;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+
+import org.junit.Test;
+
+import parquet.column.Encoding;
+import parquet.column.statistics.DoubleStatistics;
+import parquet.column.statistics.IntStatistics;
+import parquet.common.schema.ColumnPath;
+import parquet.filter2.predicate.FilterPredicate;
+import parquet.filter2.predicate.LogicalInverseRewriter;
+import parquet.filter2.predicate.Operators.DoubleColumn;
+import parquet.filter2.predicate.Operators.IntColumn;
+import parquet.filter2.predicate.Statistics;
+import parquet.filter2.predicate.UserDefinedPredicate;
+import parquet.hadoop.metadata.ColumnChunkMetaData;
+import parquet.hadoop.metadata.CompressionCodecName;
+import parquet.schema.PrimitiveType.PrimitiveTypeName;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static parquet.filter2.predicate.FilterApi.and;
+import static parquet.filter2.predicate.FilterApi.doubleColumn;
+import static parquet.filter2.predicate.FilterApi.eq;
+import static parquet.filter2.predicate.FilterApi.gt;
+import static parquet.filter2.predicate.FilterApi.gtEq;
+import static parquet.filter2.predicate.FilterApi.intColumn;
+import static parquet.filter2.predicate.FilterApi.lt;
+import static parquet.filter2.predicate.FilterApi.ltEq;
+import static parquet.filter2.predicate.FilterApi.not;
+import static parquet.filter2.predicate.FilterApi.notEq;
+import static parquet.filter2.predicate.FilterApi.or;
+import static parquet.filter2.predicate.FilterApi.userDefined;
+import static parquet.filter2.statisticslevel.StatisticsFilter.canDrop;
+
+public class TestStatisticsFilter {
+
+  private static ColumnChunkMetaData getIntColumnMeta(IntStatistics stats, long valueCount) {
+    return ColumnChunkMetaData.get(ColumnPath.get("int", "column"),
+        PrimitiveTypeName.INT32,
+        CompressionCodecName.GZIP,
+        new HashSet<Encoding>(Arrays.asList(Encoding.PLAIN)),
+        stats,
+        0L, 0L, valueCount, 0L, 0L);
+  }
+
+  private static ColumnChunkMetaData getDoubleColumnMeta(DoubleStatistics stats, long valueCount) {
+    return ColumnChunkMetaData.get(ColumnPath.get("double", "column"),
+        PrimitiveTypeName.DOUBLE,
+        CompressionCodecName.GZIP,
+        new HashSet<Encoding>(Arrays.asList(Encoding.PLAIN)),
+        stats,
+        0L, 0L, valueCount, 0L, 0L);
+  }
+
+  private static final IntColumn intColumn = intColumn("int.column");
+  private static final DoubleColumn doubleColumn = doubleColumn("double.column");
+
+  private static final IntStatistics intStats = new IntStatistics();
+  private static final IntStatistics nullIntStats = new IntStatistics();
+  private static final DoubleStatistics doubleStats = new DoubleStatistics();
+
+  static {
+    intStats.setMinMax(10, 100);
+    doubleStats.setMinMax(10, 100);
+
+    nullIntStats.setMinMax(0, 0);
+    nullIntStats.setNumNulls(177);
+  }
+
+  private static final List<ColumnChunkMetaData> columnMetas = Arrays.asList(
+      getIntColumnMeta(intStats, 177L),
+      getDoubleColumnMeta(doubleStats, 177L));
+
+  private static final List<ColumnChunkMetaData> nullColumnMetas = Arrays.asList(
+      getIntColumnMeta(nullIntStats, 177L), // column of all nulls
+      getDoubleColumnMeta(doubleStats, 177L));
+
+
+  @Test
+  public void testEqNonNull() {
+    assertTrue(canDrop(eq(intColumn, 9), columnMetas));
+    assertFalse(canDrop(eq(intColumn, 10), columnMetas));
+    assertFalse(canDrop(eq(intColumn, 100), columnMetas));
+    assertTrue(canDrop(eq(intColumn, 101), columnMetas));
+
+    // drop columns of all nulls when looking for non-null value
+    assertTrue(canDrop(eq(intColumn, 0), nullColumnMetas));
+  }
+
+  @Test
+  public void testEqNull() {
+    IntStatistics statsNoNulls = new IntStatistics();
+    statsNoNulls.setMinMax(10, 100);
+    statsNoNulls.setNumNulls(0);
+
+    IntStatistics statsSomeNulls = new IntStatistics();
+    statsSomeNulls.setMinMax(10, 100);
+    statsSomeNulls.setNumNulls(3);
+
+    assertTrue(canDrop(eq(intColumn, null), Arrays.asList(
+        getIntColumnMeta(statsNoNulls, 177L),
+        getDoubleColumnMeta(doubleStats, 177L))));
+
+    assertFalse(canDrop(eq(intColumn, null), Arrays.asList(
+        getIntColumnMeta(statsSomeNulls, 177L),
+        getDoubleColumnMeta(doubleStats, 177L))));
+
+  }
+
+  @Test
+  public void testNotEqNonNull() {
+    assertFalse(canDrop(notEq(intColumn, 9), columnMetas));
+    assertFalse(canDrop(notEq(intColumn, 10), columnMetas));
+    assertFalse(canDrop(notEq(intColumn, 100), columnMetas));
+    assertFalse(canDrop(notEq(intColumn, 101), columnMetas));
+
+    IntStatistics allSevens = new IntStatistics();
+    allSevens.setMinMax(7, 7);
+    assertTrue(canDrop(notEq(intColumn, 7), Arrays.asList(
+        getIntColumnMeta(allSevens, 177L),
+        getDoubleColumnMeta(doubleStats, 177L))));
+
+  }
+
+  @Test
+  public void testNotEqNull() {
+    IntStatistics statsNoNulls = new IntStatistics();
+    statsNoNulls.setMinMax(10, 100);
+    statsNoNulls.setNumNulls(0);
+
+    IntStatistics statsSomeNulls = new IntStatistics();
+    statsSomeNulls.setMinMax(10, 100);
+    statsSomeNulls.setNumNulls(3);
+
+    IntStatistics statsAllNulls = new IntStatistics();
+    statsAllNulls.setMinMax(0, 0);
+    statsAllNulls.setNumNulls(177);
+
+    assertFalse(canDrop(notEq(intColumn, null), Arrays.asList(
+        getIntColumnMeta(statsNoNulls, 177L),
+        getDoubleColumnMeta(doubleStats, 177L))));
+
+    assertFalse(canDrop(notEq(intColumn, null), Arrays.asList(
+        getIntColumnMeta(statsSomeNulls, 177L),
+        getDoubleColumnMeta(doubleStats, 177L))));
+
+    assertTrue(canDrop(notEq(intColumn, null), Arrays.asList(
+        getIntColumnMeta(statsAllNulls, 177L),
+        getDoubleColumnMeta(doubleStats, 177L))));
+  }
+
+  @Test
+  public void testLt() {
+    assertTrue(canDrop(lt(intColumn, 9), columnMetas));
+    assertTrue(canDrop(lt(intColumn, 10), columnMetas));
+    assertFalse(canDrop(lt(intColumn, 100), columnMetas));
+    assertFalse(canDrop(lt(intColumn, 101), columnMetas));
+
+    assertTrue(canDrop(lt(intColumn, 0), nullColumnMetas));
+    assertTrue(canDrop(lt(intColumn, 7), nullColumnMetas));
+  }
+
+  @Test
+  public void testLtEq() {
+    assertTrue(canDrop(ltEq(intColumn, 9), columnMetas));
+    assertFalse(canDrop(ltEq(intColumn, 10), columnMetas));
+    assertFalse(canDrop(ltEq(intColumn, 100), columnMetas));
+    assertFalse(canDrop(ltEq(intColumn, 101), columnMetas));
+
+    assertTrue(canDrop(ltEq(intColumn, 0), nullColumnMetas));
+    assertTrue(canDrop(ltEq(intColumn, 7), nullColumnMetas));
+  }
+
+  @Test
+  public void testGt() {
+    assertFalse(canDrop(gt(intColumn, 9), columnMetas));
+    assertFalse(canDrop(gt(intColumn, 10), columnMetas));
+    assertTrue(canDrop(gt(intColumn, 100), columnMetas));
+    assertTrue(canDrop(gt(intColumn, 101), columnMetas));
+
+    assertTrue(canDrop(gt(intColumn, 0), nullColumnMetas));
+    assertTrue(canDrop(gt(intColumn, 7), nullColumnMetas));
+  }
+
+  @Test
+  public void testGtEq() {
+    assertFalse(canDrop(gtEq(intColumn, 9), columnMetas));
+    assertFalse(canDrop(gtEq(intColumn, 10), columnMetas));
+    assertFalse(canDrop(gtEq(intColumn, 100), columnMetas));
+    assertTrue(canDrop(gtEq(intColumn, 101), columnMetas));
+
+    assertTrue(canDrop(gtEq(intColumn, 0), nullColumnMetas));
+    assertTrue(canDrop(gtEq(intColumn, 7), nullColumnMetas));
+  }
+
+  @Test
+  public void testAnd() {
+    FilterPredicate yes = eq(intColumn, 9);
+    FilterPredicate no = eq(doubleColumn, 50D);
+    assertTrue(canDrop(and(yes, yes), columnMetas));
+    assertFalse(canDrop(and(yes, no), columnMetas));
+    assertFalse(canDrop(and(no, yes), columnMetas));
+    assertFalse(canDrop(and(no, no), columnMetas));
+  }
+
+  @Test
+  public void testOr() {
+    FilterPredicate yes = eq(intColumn, 9);
+    FilterPredicate no = eq(doubleColumn, 50D);
+    assertTrue(canDrop(or(yes, yes), columnMetas));
+    assertFalse(canDrop(or(yes, no), columnMetas));
+    assertFalse(canDrop(or(no, yes), columnMetas));
+    assertFalse(canDrop(or(no, no), columnMetas));
+  }
+
+  public static class SevensAndEightsUdp extends UserDefinedPredicate<Integer> {
+
+    @Override
+    public boolean keep(Integer value) {
+      throw new RuntimeException("this method should not be called");
+    }
+
+    @Override
+    public boolean canDrop(Statistics<Integer> statistics) {
+      return statistics.getMin() == 7 && statistics.getMax() == 7;
+    }
+
+    @Override
+    public boolean inverseCanDrop(Statistics<Integer> statistics) {
+      return statistics.getMin() == 8 && statistics.getMax() == 8;
+    }
+  }
+
+  @Test
+  public void testUdp() {
+    FilterPredicate pred = userDefined(intColumn, SevensAndEightsUdp.class);
+    FilterPredicate invPred = LogicalInverseRewriter.rewrite(not(userDefined(intColumn, SevensAndEightsUdp.class)));
+
+    IntStatistics seven = new IntStatistics();
+    seven.setMinMax(7, 7);
+
+    IntStatistics eight = new IntStatistics();
+    eight.setMinMax(8, 8);
+
+    IntStatistics neither = new IntStatistics();
+    neither.setMinMax(1, 2);
+
+    assertTrue(canDrop(pred, Arrays.asList(
+        getIntColumnMeta(seven, 177L),
+        getDoubleColumnMeta(doubleStats, 177L))));
+
+    assertFalse(canDrop(pred, Arrays.asList(
+        getIntColumnMeta(eight, 177L),
+        getDoubleColumnMeta(doubleStats, 177L))));
+
+    assertFalse(canDrop(pred, Arrays.asList(
+        getIntColumnMeta(neither, 177L),
+        getDoubleColumnMeta(doubleStats, 177L))));
+
+    assertFalse(canDrop(invPred, Arrays.asList(
+        getIntColumnMeta(seven, 177L),
+        getDoubleColumnMeta(doubleStats, 177L))));
+
+    assertTrue(canDrop(invPred, Arrays.asList(
+        getIntColumnMeta(eight, 177L),
+        getDoubleColumnMeta(doubleStats, 177L))));
+
+    assertFalse(canDrop(invPred, Arrays.asList(
+        getIntColumnMeta(neither, 177L),
+        getDoubleColumnMeta(doubleStats, 177L))));
+  }
+
+  @Test
+  public void testClearExceptionForNots() {
+    List<ColumnChunkMetaData> columnMetas = Arrays.asList(
+        getIntColumnMeta(new IntStatistics(), 0L),
+        getDoubleColumnMeta(new DoubleStatistics(), 0L));
+
+    FilterPredicate pred = and(eq(intColumn, 17), not(eq(doubleColumn, 12.0)));
+
+    try {
+      canDrop(pred, columnMetas);
+      fail("This should throw");
+    } catch (IllegalArgumentException e) {
+      assertEquals("This predicate contains a not! Did you forget to run this predicate through LogicalInverseRewriter?"
+          + " not(eq(double.column, 12.0))", e.getMessage());
+    }
+  }
+
+  @Test
+  public void testMissingColumn() {
+    List<ColumnChunkMetaData> columnMetas = Arrays.asList(getIntColumnMeta(new IntStatistics(), 0L));
+    try {
+      canDrop(and(eq(intColumn, 17), eq(doubleColumn, 12.0)), columnMetas);
+      fail("This should throw");
+    } catch (IllegalArgumentException e) {
+      assertEquals("Column double.column not found in schema!", e.getMessage());
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/test/java/parquet/hadoop/TestInputFormat.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/test/java/parquet/hadoop/TestInputFormat.java b/parquet-hadoop/src/test/java/parquet/hadoop/TestInputFormat.java
index 1ab1cc5..1823716 100644
--- a/parquet-hadoop/src/test/java/parquet/hadoop/TestInputFormat.java
+++ b/parquet-hadoop/src/test/java/parquet/hadoop/TestInputFormat.java
@@ -15,18 +15,37 @@
  */
 package parquet.hadoop;
 
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.Job;
 import org.junit.Before;
 import org.junit.Test;
+
+import parquet.column.ColumnReader;
 import parquet.column.Encoding;
 import parquet.column.statistics.BinaryStatistics;
+import parquet.column.statistics.IntStatistics;
+import parquet.common.schema.ColumnPath;
+import parquet.filter.RecordFilter;
+import parquet.filter.UnboundRecordFilter;
+import parquet.filter2.compat.FilterCompat;
+import parquet.filter2.compat.FilterCompat.Filter;
+import parquet.filter2.compat.FilterCompat.FilterPredicateCompat;
+import parquet.filter2.predicate.FilterPredicate;
+import parquet.filter2.predicate.Operators.IntColumn;
 import parquet.hadoop.metadata.BlockMetaData;
 import parquet.hadoop.metadata.ColumnChunkMetaData;
-import parquet.hadoop.metadata.ColumnPath;
 import parquet.hadoop.metadata.CompressionCodecName;
 import parquet.hadoop.metadata.FileMetaData;
 import parquet.hadoop.metadata.ParquetMetadata;
@@ -35,16 +54,17 @@ import parquet.schema.MessageType;
 import parquet.schema.MessageTypeParser;
 import parquet.schema.PrimitiveType.PrimitiveTypeName;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Arrays;
-
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
+import static parquet.filter2.predicate.FilterApi.and;
+import static parquet.filter2.predicate.FilterApi.eq;
+import static parquet.filter2.predicate.FilterApi.intColumn;
+import static parquet.filter2.predicate.FilterApi.not;
+import static parquet.filter2.predicate.FilterApi.notEq;
+import static parquet.filter2.predicate.FilterApi.or;
 
 public class TestInputFormat {
 
@@ -93,6 +113,25 @@ public class TestInputFormat {
     }
   }
 
+  @Test
+  public void testGetFilter() throws IOException {
+    IntColumn intColumn = intColumn("foo");
+    FilterPredicate p = or(eq(intColumn, 7), eq(intColumn, 12));
+    Configuration conf = new Configuration();
+    ParquetInputFormat.setFilterPredicate(conf, p);
+    Filter read = ParquetInputFormat.getFilter(conf);
+    assertTrue(read instanceof FilterPredicateCompat);
+    assertEquals(p, ((FilterPredicateCompat) read).getFilterPredicate());
+
+    conf = new Configuration();
+    ParquetInputFormat.setFilterPredicate(conf, not(p));
+    read = ParquetInputFormat.getFilter(conf);
+    assertTrue(read instanceof FilterPredicateCompat);
+    assertEquals(and(notEq(intColumn, 7), notEq(intColumn, 12)), ((FilterPredicateCompat) read).getFilterPredicate());
+
+    assertEquals(FilterCompat.NOOP, ParquetInputFormat.getFilter(new Configuration()));
+  }
+
   /*
     aaaaa bbbbb
    */
@@ -246,6 +285,57 @@ public class TestInputFormat {
     shouldSplitLengthBe(splits, 20, 20, 10, 20, 20, 10);
   }
 
+  public static final class DummyUnboundRecordFilter implements UnboundRecordFilter {
+    @Override
+    public RecordFilter bind(Iterable<ColumnReader> readers) {
+      return null;
+    }
+  }
+
+  @Test
+  public void testOnlyOneKindOfFilterSupported() throws Exception {
+    IntColumn foo = intColumn("foo");
+    FilterPredicate p = or(eq(foo, 10), eq(foo, 11));
+
+    Job job = new Job();
+
+    Configuration conf = job.getConfiguration();
+    ParquetInputFormat.setUnboundRecordFilter(job, DummyUnboundRecordFilter.class);
+    try {
+      ParquetInputFormat.setFilterPredicate(conf, p);
+      fail("this should throw");
+    } catch (IllegalArgumentException e) {
+      assertEquals("You cannot provide a FilterPredicate after providing an UnboundRecordFilter", e.getMessage());
+    }
+
+    job = new Job();
+    conf = job.getConfiguration();
+
+    ParquetInputFormat.setFilterPredicate(conf, p);
+    try {
+      ParquetInputFormat.setUnboundRecordFilter(job, DummyUnboundRecordFilter.class);
+      fail("this should throw");
+    } catch (IllegalArgumentException e) {
+      assertEquals("You cannot provide an UnboundRecordFilter after providing a FilterPredicate", e.getMessage());
+    }
+
+  }
+
+  public static BlockMetaData makeBlockFromStats(IntStatistics stats, long valueCount) {
+    BlockMetaData blockMetaData = new BlockMetaData();
+
+    ColumnChunkMetaData column = ColumnChunkMetaData.get(ColumnPath.get("foo"),
+        PrimitiveTypeName.INT32,
+        CompressionCodecName.GZIP,
+        new HashSet<Encoding>(Arrays.asList(Encoding.PLAIN)),
+        stats,
+        100l, 100l, valueCount, 100l, 100l);
+    blockMetaData.addColumn(column);
+    blockMetaData.setTotalByteSize(200l);
+    blockMetaData.setRowCount(valueCount);
+    return blockMetaData;
+  }
+
   @Test
   public void testFooterCacheValueIsCurrent() throws IOException, InterruptedException {
     File tempFile = getTempFile();

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/test/java/parquet/hadoop/metadata/TestColumnChunkMetaData.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/test/java/parquet/hadoop/metadata/TestColumnChunkMetaData.java b/parquet-hadoop/src/test/java/parquet/hadoop/metadata/TestColumnChunkMetaData.java
index a449818..9ecb238 100644
--- a/parquet-hadoop/src/test/java/parquet/hadoop/metadata/TestColumnChunkMetaData.java
+++ b/parquet-hadoop/src/test/java/parquet/hadoop/metadata/TestColumnChunkMetaData.java
@@ -1,9 +1,5 @@
 package parquet.hadoop.metadata;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static parquet.schema.PrimitiveType.PrimitiveTypeName.BINARY;
-
 import java.util.HashSet;
 import java.util.Set;
 
@@ -11,8 +7,13 @@ import org.junit.Test;
 
 import parquet.column.Encoding;
 import parquet.column.statistics.BinaryStatistics;
+import parquet.common.schema.ColumnPath;
 import parquet.schema.PrimitiveType.PrimitiveTypeName;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static parquet.schema.PrimitiveType.PrimitiveTypeName.BINARY;
+
 public class TestColumnChunkMetaData {
 
 

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/test/java/parquet/hadoop/util/TestSerializationUtil.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/test/java/parquet/hadoop/util/TestSerializationUtil.java b/parquet-hadoop/src/test/java/parquet/hadoop/util/TestSerializationUtil.java
new file mode 100644
index 0000000..9b305db
--- /dev/null
+++ b/parquet-hadoop/src/test/java/parquet/hadoop/util/TestSerializationUtil.java
@@ -0,0 +1,53 @@
+package parquet.hadoop.util;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
+
+/**
+ * Serialization utils copied from:
+ * https://github.com/kevinweil/elephant-bird/blob/master/core/src/test/java/com/twitter/elephantbird/util/TestHadoopUtils.java
+ *
+ * TODO: Refactor elephant-bird so that we can depend on utils like this without extra baggage.
+ */
+public class TestSerializationUtil {
+
+  @Test
+  public void testReadWriteObjectToConfAsBase64() throws Exception {
+    Map<Integer, String> anObject = new HashMap<Integer, String>();
+    anObject.put(7, "seven");
+    anObject.put(8, "eight");
+
+    Configuration conf = new Configuration();
+
+    SerializationUtil.writeObjectToConfAsBase64("anobject", anObject, conf);
+    Map<Integer, String> copy = SerializationUtil.readObjectFromConfAsBase64("anobject", conf);
+    assertEquals(anObject, copy);
+
+    try {
+      Set<String> bad = SerializationUtil.readObjectFromConfAsBase64("anobject", conf);
+      fail("This should throw a ClassCastException");
+    } catch (ClassCastException e) {
+
+    }
+
+    conf = new Configuration();
+    Object nullObj = null;
+
+    SerializationUtil.writeObjectToConfAsBase64("anobject", null, conf);
+    Object copyObj = SerializationUtil.readObjectFromConfAsBase64("anobject", conf);
+    assertEquals(nullObj, copyObj);
+  }
+
+  @Test
+  public void readObjectFromConfAsBase64UnsetKey() throws Exception {
+    assertNull(SerializationUtil.readObjectFromConfAsBase64("non-existent-key", new Configuration()));
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hive-bundle/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-hive-bundle/pom.xml b/parquet-hive-bundle/pom.xml
index 9b55437..6b6fda2 100644
--- a/parquet-hive-bundle/pom.xml
+++ b/parquet-hive-bundle/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hive/parquet-hive-binding/parquet-hive-0.10-binding/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-hive/parquet-hive-binding/parquet-hive-0.10-binding/pom.xml b/parquet-hive/parquet-hive-binding/parquet-hive-0.10-binding/pom.xml
index 652661e..4b03f95 100644
--- a/parquet-hive/parquet-hive-binding/parquet-hive-0.10-binding/pom.xml
+++ b/parquet-hive/parquet-hive-binding/parquet-hive-0.10-binding/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet-hive-binding</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hive/parquet-hive-binding/parquet-hive-0.12-binding/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-hive/parquet-hive-binding/parquet-hive-0.12-binding/pom.xml b/parquet-hive/parquet-hive-binding/parquet-hive-0.12-binding/pom.xml
index 54abca7..b5ada25 100644
--- a/parquet-hive/parquet-hive-binding/parquet-hive-0.12-binding/pom.xml
+++ b/parquet-hive/parquet-hive-binding/parquet-hive-0.12-binding/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet-hive-binding</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hive/parquet-hive-binding/parquet-hive-binding-bundle/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-hive/parquet-hive-binding/parquet-hive-binding-bundle/pom.xml b/parquet-hive/parquet-hive-binding/parquet-hive-binding-bundle/pom.xml
index 00966c9..420beb6 100644
--- a/parquet-hive/parquet-hive-binding/parquet-hive-binding-bundle/pom.xml
+++ b/parquet-hive/parquet-hive-binding/parquet-hive-binding-bundle/pom.xml
@@ -17,7 +17,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet-hive-binding</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hive/parquet-hive-binding/parquet-hive-binding-factory/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-hive/parquet-hive-binding/parquet-hive-binding-factory/pom.xml b/parquet-hive/parquet-hive-binding/parquet-hive-binding-factory/pom.xml
index 2c30d07..2749bbc 100644
--- a/parquet-hive/parquet-hive-binding/parquet-hive-binding-factory/pom.xml
+++ b/parquet-hive/parquet-hive-binding/parquet-hive-binding-factory/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet-hive-binding</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hive/parquet-hive-binding/parquet-hive-binding-interface/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-hive/parquet-hive-binding/parquet-hive-binding-interface/pom.xml b/parquet-hive/parquet-hive-binding/parquet-hive-binding-interface/pom.xml
index d326f84..e417d13 100644
--- a/parquet-hive/parquet-hive-binding/parquet-hive-binding-interface/pom.xml
+++ b/parquet-hive/parquet-hive-binding/parquet-hive-binding-interface/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet-hive-binding</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hive/parquet-hive-binding/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-hive/parquet-hive-binding/pom.xml b/parquet-hive/parquet-hive-binding/pom.xml
index aa89505..a11ff69 100644
--- a/parquet-hive/parquet-hive-binding/pom.xml
+++ b/parquet-hive/parquet-hive-binding/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet-hive</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hive/parquet-hive-storage-handler/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-hive/parquet-hive-storage-handler/pom.xml b/parquet-hive/parquet-hive-storage-handler/pom.xml
index dfafd4a..01dbc4c 100644
--- a/parquet-hive/parquet-hive-storage-handler/pom.xml
+++ b/parquet-hive/parquet-hive-storage-handler/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet-hive</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hive/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-hive/pom.xml b/parquet-hive/pom.xml
index d7866fb..cfb3480 100644
--- a/parquet-hive/pom.xml
+++ b/parquet-hive/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-jackson/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-jackson/pom.xml b/parquet-jackson/pom.xml
index 265d48e..acffda6 100644
--- a/parquet-jackson/pom.xml
+++ b/parquet-jackson/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-pig-bundle/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-pig-bundle/pom.xml b/parquet-pig-bundle/pom.xml
index 82231d5..ab5c11d 100644
--- a/parquet-pig-bundle/pom.xml
+++ b/parquet-pig-bundle/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-pig/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-pig/pom.xml b/parquet-pig/pom.xml
index ad30441..5caf926 100644
--- a/parquet-pig/pom.xml
+++ b/parquet-pig/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-protobuf/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-protobuf/pom.xml b/parquet-protobuf/pom.xml
index e1e39dc..f46a35f 100644
--- a/parquet-protobuf/pom.xml
+++ b/parquet-protobuf/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-protobuf/src/main/java/parquet/proto/ProtoParquetReader.java
----------------------------------------------------------------------
diff --git a/parquet-protobuf/src/main/java/parquet/proto/ProtoParquetReader.java b/parquet-protobuf/src/main/java/parquet/proto/ProtoParquetReader.java
index efd6b12..6ea9d55 100644
--- a/parquet-protobuf/src/main/java/parquet/proto/ProtoParquetReader.java
+++ b/parquet-protobuf/src/main/java/parquet/proto/ProtoParquetReader.java
@@ -15,25 +15,40 @@
  */
 package parquet.proto;
 
+import java.io.IOException;
+
 import com.google.protobuf.MessageOrBuilder;
+
 import org.apache.hadoop.fs.Path;
+
 import parquet.filter.UnboundRecordFilter;
 import parquet.hadoop.ParquetReader;
-import parquet.hadoop.api.ReadSupport;
-
-import java.io.IOException;
 
 /**
  * Read Protobuf records from a Parquet file.
  */
 public class ProtoParquetReader<T extends MessageOrBuilder> extends ParquetReader<T> {
 
+  @SuppressWarnings("unchecked")
+  public Builder<T> builder(Path file) {
+    return ParquetReader.builder(new ProtoReadSupport(), file);
+  }
+
+  /**
+   * @deprecated use {@link #builder(Path)}
+   */
+  @Deprecated
+  @SuppressWarnings("unchecked")
   public ProtoParquetReader(Path file) throws IOException {
     super(file, new ProtoReadSupport());
   }
 
+  /**
+   * @deprecated use {@link #builder(Path)}
+   */
+  @Deprecated
+  @SuppressWarnings("unchecked")
   public ProtoParquetReader(Path file, UnboundRecordFilter recordFilter) throws IOException {
     super(file, new ProtoReadSupport(), recordFilter);
   }
-
 }

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-scala/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-scala/pom.xml b/parquet-scala/pom.xml
new file mode 100644
index 0000000..33663b5
--- /dev/null
+++ b/parquet-scala/pom.xml
@@ -0,0 +1,67 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <groupId>com.twitter</groupId>
+    <artifactId>parquet</artifactId>
+    <relativePath>../pom.xml</relativePath>
+    <version>1.6.0-SNAPSHOT</version>
+  </parent>
+
+  <modelVersion>4.0.0</modelVersion>
+
+  <artifactId>parquet-scala</artifactId>
+  <packaging>jar</packaging>
+
+  <name>Parquet Scala</name>
+  <url>https://github.com/Parquet/parquet-mr</url>
+
+  <repositories>
+    <repository>
+      <id>scala-tools.org</id>
+      <name>Scala-tools Maven2 Repository</name>
+      <url>http://scala-tools.org/repo-releases</url>
+    </repository>
+  </repositories>
+  <pluginRepositories>
+    <pluginRepository>
+      <id>scala-tools.org</id>
+      <name>Scala-tools Maven2 Repository</name>
+      <url>http://scala-tools.org/repo-releases</url>
+    </pluginRepository>
+  </pluginRepositories>
+    
+  <dependencies>
+    <dependency>
+      <groupId>com.twitter</groupId>
+      <artifactId>parquet-column</artifactId>
+      <version>${project.version}</version>
+    </dependency>  
+    <dependency>
+      <groupId>org.scala-lang</groupId>
+      <artifactId>scala-library</artifactId>
+      <version>${scala.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.scalatest</groupId>
+      <artifactId>scalatest_2.9.3</artifactId>
+      <version>1.9.2</version>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.scala-tools</groupId>
+        <artifactId>maven-scala-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>compile</goal>
+              <goal>testCompile</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-scala/src/main/scala/parquet/filter2/dsl/Dsl.scala
----------------------------------------------------------------------
diff --git a/parquet-scala/src/main/scala/parquet/filter2/dsl/Dsl.scala b/parquet-scala/src/main/scala/parquet/filter2/dsl/Dsl.scala
new file mode 100644
index 0000000..c60b804
--- /dev/null
+++ b/parquet-scala/src/main/scala/parquet/filter2/dsl/Dsl.scala
@@ -0,0 +1,89 @@
+package parquet.filter2.dsl
+
+import java.lang.{Boolean => JBoolean, Double => JDouble, Float => JFloat, Integer => JInt, Long => JLong}
+
+import parquet.filter2.predicate.{FilterApi, FilterPredicate, Operators, UserDefinedPredicate}
+import parquet.io.api.Binary
+
+/**
+ * Instead of using the methods in [[FilterApi]] directly in scala code,
+ * use this Dsl instead. Example usage:
+ *
+ * {{{
+ * import parquet.filter2.dsl.Dsl._
+ *
+ * val abc = IntColumn("a.b.c")
+ * val xyz = DoubleColumn("x.y.z")
+ *
+ * val myPredicate = !(abc > 10 && (xyz === 17 || ((xyz !== 13) && (xyz <= 20))))
+ *
+ * }}}
+ *
+ * Note that while the operators >, >=, <, <= all work, the == and != operators do not.
+ * Using == or != will result in a runtime exception. Instead use === and !==
+ *
+ * This is due to a limitation in overriding the equals method.
+ */
+object Dsl {
+
+  private[Dsl] trait Column[T <: Comparable[T], C <: Operators.Column[T]] {
+    val javaColumn: C
+
+    def filterBy[U <: UserDefinedPredicate[T]](clazz: Class[U]) = FilterApi.userDefined(javaColumn, clazz)
+
+    // this is not supported because it allows for easy mistakes. For example:
+    // val pred = IntColumn("foo") == "hello"
+    // will compile, but pred will be of type boolean instead of FilterPredicate
+    override def equals(x: Any) =
+      throw new UnsupportedOperationException("You probably meant to use === or !==")
+  }
+
+  case class IntColumn(columnPath: String) extends Column[JInt, Operators.IntColumn] {
+    override val javaColumn = FilterApi.intColumn(columnPath)
+  }
+
+  case class LongColumn(columnPath: String) extends Column[JLong, Operators.LongColumn] {
+    override val javaColumn = FilterApi.longColumn(columnPath)
+  }
+
+  case class FloatColumn(columnPath: String) extends Column[JFloat, Operators.FloatColumn] {
+    override val javaColumn = FilterApi.floatColumn(columnPath)
+  }
+
+  case class DoubleColumn(columnPath: String) extends Column[JDouble, Operators.DoubleColumn] {
+    override val javaColumn = FilterApi.doubleColumn(columnPath)
+  }
+
+  case class BooleanColumn(columnPath: String) extends Column[JBoolean, Operators.BooleanColumn] {
+    override val javaColumn = FilterApi.booleanColumn(columnPath)
+  }
+
+  case class BinaryColumn(columnPath: String) extends Column[Binary, Operators.BinaryColumn] {
+    override val javaColumn = FilterApi.binaryColumn(columnPath)
+  }
+
+  implicit def enrichEqNotEq[T <: Comparable[T], C <: Operators.Column[T] with Operators.SupportsEqNotEq](column: Column[T, C]): SupportsEqNotEq[T,C] = new SupportsEqNotEq(column)
+
+  class SupportsEqNotEq[T <: Comparable[T], C <: Operators.Column[T] with Operators.SupportsEqNotEq](val column: Column[T, C]) {
+    def ===(v: T) = FilterApi.eq(column.javaColumn, v)
+    def !== (v: T) = FilterApi.notEq(column.javaColumn, v)
+  }
+
+  implicit def enrichLtGt[T <: Comparable[T], C <: Operators.Column[T] with Operators.SupportsLtGt](column: Column[T, C]): SupportsLtGt[T,C] = new SupportsLtGt(column)
+
+  class SupportsLtGt[T <: Comparable[T], C <: Operators.Column[T] with Operators.SupportsLtGt](val column: Column[T, C]) {
+    def >(v: T) = FilterApi.gt(column.javaColumn, v)
+    def >=(v: T) = FilterApi.gtEq(column.javaColumn, v)
+    def <(v: T) = FilterApi.lt(column.javaColumn, v)
+    def <=(v: T) = FilterApi.ltEq(column.javaColumn, v)
+  }
+
+  implicit def enrichPredicate(pred: FilterPredicate): RichPredicate = new RichPredicate(pred)
+  
+  class RichPredicate(val pred: FilterPredicate) {
+    def &&(other: FilterPredicate) = FilterApi.and(pred, other)
+    def ||(other: FilterPredicate) = FilterApi.or(pred, other)
+    def unary_! = FilterApi.not(pred)
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-scala/src/test/scala/parquet/filter2/dsl/DslTest.scala
----------------------------------------------------------------------
diff --git a/parquet-scala/src/test/scala/parquet/filter2/dsl/DslTest.scala b/parquet-scala/src/test/scala/parquet/filter2/dsl/DslTest.scala
new file mode 100644
index 0000000..23aa537
--- /dev/null
+++ b/parquet-scala/src/test/scala/parquet/filter2/dsl/DslTest.scala
@@ -0,0 +1,61 @@
+package parquet.filter2.dsl
+
+import java.lang.{Double => JDouble, Integer => JInt}
+
+import org.junit.runner.RunWith
+import org.scalatest.FlatSpec
+import org.scalatest.junit.JUnitRunner
+import parquet.filter2.predicate.Operators.{Or, UserDefined, DoubleColumn => JDoubleColumn, IntColumn => JIntColumn}
+import parquet.filter2.predicate.{FilterApi, Statistics, UserDefinedPredicate}
+
+class DummyFilter extends UserDefinedPredicate[JInt] {
+  override def keep(value: JInt): Boolean = false
+
+  override def canDrop(statistics: Statistics[JInt]): Boolean = false
+
+  override def inverseCanDrop(statistics: Statistics[JInt]): Boolean = false
+}
+
+@RunWith(classOf[JUnitRunner])
+class DslTest extends FlatSpec{
+  import parquet.filter2.dsl.Dsl._
+
+  "predicates" should "be correctly constructed using the dsl" in {
+    val abc = IntColumn("a.b.c")
+    val xyz = DoubleColumn("x.y.z")
+
+    val complexPredicate = !(abc > 10 && (xyz === 17 || ((xyz !== 13) && (xyz <= 20))))
+    val abcGt = FilterApi.gt[JInt, JIntColumn](abc.javaColumn, 10)
+    val xyzAnd = FilterApi.and(FilterApi.notEq[JDouble, JDoubleColumn](xyz.javaColumn, 13.0),
+      FilterApi.ltEq[JDouble, JDoubleColumn](xyz.javaColumn, 20.0))
+    val xyzEq = FilterApi.eq[JDouble, JDoubleColumn](xyz.javaColumn, 17.0)
+    val xyzPred = FilterApi.or(xyzEq, xyzAnd)
+    val expected = FilterApi.not(FilterApi.and(abcGt, xyzPred))
+
+    assert(complexPredicate === expected)
+  }
+
+  "user defined predicates" should "be correctly constructed" in {
+    val abc = IntColumn("a.b.c")
+    val pred = (abc > 10) || abc.filterBy(classOf[DummyFilter])
+
+    val expected = FilterApi.or(FilterApi.gt[JInt, JIntColumn](abc.javaColumn, 10), FilterApi.userDefined(abc.javaColumn, classOf[DummyFilter]))
+    assert(pred === expected)
+    val intUserDefined = pred.asInstanceOf[Or].getRight.asInstanceOf[UserDefined[JInt, DummyFilter]]
+
+    assert(intUserDefined.getUserDefinedPredicateClass === classOf[DummyFilter])
+    assert(intUserDefined.getUserDefinedPredicate.isInstanceOf[DummyFilter])
+  }
+
+  "Column == and != " should "throw a helpful warning" in {
+    val abc = IntColumn("a.b.c")
+
+    intercept[UnsupportedOperationException] {
+      abc == 10
+    }
+
+    intercept[UnsupportedOperationException] {
+      abc != 10
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-scrooge/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-scrooge/pom.xml b/parquet-scrooge/pom.xml
index 62ac360..a4f4366 100644
--- a/parquet-scrooge/pom.xml
+++ b/parquet-scrooge/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>
@@ -86,7 +86,7 @@
     <dependency>
       <groupId>org.scala-lang</groupId>
       <artifactId>scala-library</artifactId>
-      <version>2.9.2</version>
+      <version>${scala.version}</version>
     </dependency>
     <dependency>
       <groupId>cascading</groupId>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-test-hadoop2/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-test-hadoop2/pom.xml b/parquet-test-hadoop2/pom.xml
index 6d543dc..2a32332 100644
--- a/parquet-test-hadoop2/pom.xml
+++ b/parquet-test-hadoop2/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-thrift/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-thrift/pom.xml b/parquet-thrift/pom.xml
index baf3bc5..890e402 100644
--- a/parquet-thrift/pom.xml
+++ b/parquet-thrift/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-thrift/src/main/java/parquet/thrift/ThriftParquetReader.java
----------------------------------------------------------------------
diff --git a/parquet-thrift/src/main/java/parquet/thrift/ThriftParquetReader.java b/parquet-thrift/src/main/java/parquet/thrift/ThriftParquetReader.java
index 09a5f72..fe779a8 100644
--- a/parquet-thrift/src/main/java/parquet/thrift/ThriftParquetReader.java
+++ b/parquet-thrift/src/main/java/parquet/thrift/ThriftParquetReader.java
@@ -21,13 +21,17 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.thrift.TBase;
 
+import parquet.filter2.compat.FilterCompat;
+import parquet.filter2.compat.FilterCompat.Filter;
 import parquet.hadoop.ParquetReader;
+import parquet.hadoop.api.ReadSupport;
 import parquet.hadoop.thrift.ThriftReadSupport;
 
+import static parquet.Preconditions.checkNotNull;
+
 /**
  * To read a parquet file into thrift objects
  * @author Julien Le Dem
- *
  * @param <T> the thrift type
  */
 public class ThriftParquetReader<T extends TBase<?,?>> extends ParquetReader<T> {
@@ -36,7 +40,9 @@ public class ThriftParquetReader<T extends TBase<?,?>> extends ParquetReader<T>
    * @param file the file to read
    * @param thriftClass the class used to read
    * @throws IOException
+   * @deprecated use {@link #build(Path)}
    */
+  @Deprecated
   public ThriftParquetReader(Path file, Class<T> thriftClass) throws IOException {
     super(file, new ThriftReadSupport<T>(thriftClass));
   }
@@ -46,7 +52,9 @@ public class ThriftParquetReader<T extends TBase<?,?>> extends ParquetReader<T>
    * @param file the file to read
    * @param thriftClass the class used to read
    * @throws IOException
+   * @deprecated use {@link #build(Path)}
    */
+  @Deprecated
   public ThriftParquetReader(Configuration conf, Path file, Class<T> thriftClass) throws IOException {
     super(conf, file, new ThriftReadSupport<T>(thriftClass));
   }
@@ -55,7 +63,9 @@ public class ThriftParquetReader<T extends TBase<?,?>> extends ParquetReader<T>
    * will use the thrift class based on the file metadata if a thrift class information is present
    * @param file the file to read
    * @throws IOException
+   * @deprecated use {@link #build(Path)}
    */
+  @Deprecated
   public ThriftParquetReader(Path file) throws IOException {
     super(file, new ThriftReadSupport<T>());
   }
@@ -65,9 +75,61 @@ public class ThriftParquetReader<T extends TBase<?,?>> extends ParquetReader<T>
    * @param conf the configuration
    * @param file the file to read
    * @throws IOException
+   * @deprecated use {@link #build(Path)}
    */
+  @Deprecated
   public ThriftParquetReader(Configuration conf, Path file) throws IOException {
     super(conf, file, new ThriftReadSupport<T>());
   }
 
+  public static <T extends TBase<?,?>> Builder<T> build(Path file) {
+    return new Builder<T>(file);
+  }
+
+  public static class Builder<T extends TBase<?,?>> {
+    private final Path file;
+    private Configuration conf;
+    private Filter filter;
+    private Class<T> thriftClass;
+
+    private Builder(Path file) {
+      this.file = checkNotNull(file, "file");
+      this.conf = new Configuration();
+      this.filter = FilterCompat.NOOP;
+      this.thriftClass = null;
+    }
+
+    public Builder<T> withConf(Configuration conf) {
+      this.conf = checkNotNull(conf, "conf");
+      return this;
+    }
+
+    public Builder<T> withFilter(Filter filter) {
+      this.filter = checkNotNull(filter, "filter");
+      return this;
+    }
+
+    /**
+     * If this is called, the given thrift class is used to read the records.
+     * If not, the thrift class is determined from the file metadata,
+     * provided thrift class information is present there.
+     */
+    public Builder<T> withThriftClass(Class<T> thriftClass) {
+      this.thriftClass = checkNotNull(thriftClass, "thriftClass");
+      return this;
+    }
+
+    public ParquetReader<T> build() throws IOException {
+      ReadSupport<T> readSupport;
+
+      if (thriftClass != null) {
+        readSupport = new ThriftReadSupport<T>(thriftClass);
+      } else {
+        readSupport = new ThriftReadSupport<T>();
+      }
+
+      return ParquetReader.builder(readSupport, file).withConf(conf).withFilter(filter).build();
+    }
+  }
+
 }
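
For reference, a rough sketch of reading with the builder added above. MyThriftStruct is a hypothetical thrift-generated class, and the column name and file path are placeholders; this is illustrative only, not part of the commit.

  import java.io.IOException;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;

  import parquet.filter2.compat.FilterCompat;
  import parquet.hadoop.ParquetReader;
  import parquet.thrift.ThriftParquetReader;

  import static parquet.filter2.predicate.FilterApi.eq;
  import static parquet.filter2.predicate.FilterApi.intColumn;

  public class ThriftReadSketch {
    public static void readAll(Path file) throws IOException {
      // MyThriftStruct is assumed to be a thrift-generated class (extends TBase)
      ParquetReader<MyThriftStruct> reader = ThriftParquetReader.<MyThriftStruct>build(file)
          .withConf(new Configuration())
          .withFilter(FilterCompat.get(eq(intColumn("id"), 7)))
          .withThriftClass(MyThriftStruct.class)
          .build();
      try {
        MyThriftStruct record;
        while ((record = reader.read()) != null) {
          // process record
        }
      } finally {
        reader.close();
      }
    }
  }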

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-thrift/src/main/java/parquet/thrift/ThriftRecordConverter.java
----------------------------------------------------------------------
diff --git a/parquet-thrift/src/main/java/parquet/thrift/ThriftRecordConverter.java b/parquet-thrift/src/main/java/parquet/thrift/ThriftRecordConverter.java
index bf0a7df..4157693 100644
--- a/parquet-thrift/src/main/java/parquet/thrift/ThriftRecordConverter.java
+++ b/parquet-thrift/src/main/java/parquet/thrift/ThriftRecordConverter.java
@@ -811,6 +811,11 @@ public class ThriftRecordConverter<T> extends RecordMaterializer<T> {
     }
   }
 
+  @Override
+  public void skipCurrentRecord() {
+    rootEvents.clear();
+  }
+
   /**
    *
    * {@inheritDoc}

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-tools/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-tools/pom.xml b/parquet-tools/pom.xml
index 6220deb..c649334 100644
--- a/parquet-tools/pom.xml
+++ b/parquet-tools/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-tools/src/main/java/parquet/tools/command/DumpCommand.java
----------------------------------------------------------------------
diff --git a/parquet-tools/src/main/java/parquet/tools/command/DumpCommand.java b/parquet-tools/src/main/java/parquet/tools/command/DumpCommand.java
index 5c9c6c3..387c6bb 100644
--- a/parquet-tools/src/main/java/parquet/tools/command/DumpCommand.java
+++ b/parquet-tools/src/main/java/parquet/tools/command/DumpCommand.java
@@ -314,7 +314,6 @@ public class DumpCommand extends ArgsOnlyCommand {
     }
 
     private static final class DumpConverter extends PrimitiveConverter {
-        @Override public boolean isPrimitive() { return true; }
         @Override public GroupConverter asGroupConverter() { return new DumpGroupConverter(); }
     }
 }

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 8b9595a..fddf204 100644
--- a/pom.xml
+++ b/pom.xml
@@ -3,7 +3,7 @@
 
   <groupId>com.twitter</groupId>
   <artifactId>parquet</artifactId>
-  <version>1.5.1-SNAPSHOT</version>
+  <version>1.6.0-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <name>Parquet MR</name>
@@ -87,8 +87,9 @@
     <cascading.version>2.5.3</cascading.version>
     <parquet.format.version>2.1.0</parquet.format.version>
     <log4j.version>1.2.17</log4j.version>
-    <previous.version>1.4.0</previous.version>
+    <previous.version>1.5.0</previous.version>
     <thrift.executable>thrift</thrift.executable>
+    <scala.version>2.9.2</scala.version>
   </properties>
 
   <modules>
@@ -103,6 +104,7 @@
     <module>parquet-pig</module>
     <module>parquet-pig-bundle</module>
     <module>parquet-protobuf</module>
+    <module>parquet-scala</module>
     <module>parquet-scrooge</module>
     <module>parquet-thrift</module>
     <module>parquet-test-hadoop2</module>
@@ -120,6 +122,12 @@
       <scope>test</scope>
     </dependency>
     <dependency>
+      <groupId>org.easymock</groupId>
+      <artifactId>easymock</artifactId>
+      <version>3.2</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
       <groupId>commons-httpclient</groupId>
       <artifactId>commons-httpclient</artifactId>
       <version>3.0.1</version>
@@ -201,6 +209,8 @@
                    <previousVersion>${previous.version}</previousVersion>
                    <excludes>
                      <exclude>parquet/org/**</exclude>
+                     <!-- one time exclusions that should be removed -->
+                     <exclude>parquet/io/api/Binary</exclude>
                    </excludes>
                  </requireBackwardCompatibility>
                </rules>


[4/4] git commit: Add a unified and optionally more constrained API for expressing filters on columns

Posted by ju...@apache.org.
Add a unified and optionally more constrained API for expressing filters on columns

This is a re-opened version of:
https://github.com/Parquet/parquet-mr/pull/412

The idea behind this pull request is to add a way to express filters on columns using a DSL that gives parquet visibility into what is being filtered and how. This visibility will allow us to make optimizations at read time, the biggest one being filtering entire row groups or pages of records without even reading them, based on the statistics / metadata that are stored along with each row group or page.

Included in this API are interfaces for user defined predicates, which must operate at the value level but may opt in to operating at the row group / page level as well. This should make this new API a superset of the `parquet.filter` package. This new API will need to be reconciled with the column filters currently in the `parquet.filter` package, but I wanted to get feedback on this first.

A limitation in both this API and the old one is that you can't do cross-column filters, e.g. columnX > columnY.
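
To make the shape of the new API concrete, here is a minimal sketch of constructing a predicate with FilterApi and wrapping it in the common Filter interface added by this change; the column names are illustrative only.

  import parquet.filter2.compat.FilterCompat;
  import parquet.filter2.compat.FilterCompat.Filter;
  import parquet.filter2.predicate.FilterPredicate;
  import parquet.filter2.predicate.Operators.DoubleColumn;
  import parquet.filter2.predicate.Operators.IntColumn;

  import static parquet.filter2.predicate.FilterApi.and;
  import static parquet.filter2.predicate.FilterApi.doubleColumn;
  import static parquet.filter2.predicate.FilterApi.gt;
  import static parquet.filter2.predicate.FilterApi.intColumn;
  import static parquet.filter2.predicate.FilterApi.ltEq;

  public class FilterApiExample {
    public static void main(String[] args) {
      // columns are referenced by dot-separated paths into the schema
      IntColumn foo = intColumn("foo");
      DoubleColumn bar = doubleColumn("x.y.bar");

      // keep records where foo > 10 and bar <= 17.0
      FilterPredicate pred = and(gt(foo, 10), ltEq(bar, 17.0));

      // wrap the predicate in the common Filter interface used throughout the read path
      Filter filter = FilterCompat.get(pred);
      System.out.println(pred);
    }
  }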

Author: Alex Levenson <al...@twitter.com>

Closes #4 from isnotinvain/alexlevenson/filter-api and squashes the following commits:

c1ab7e3 [Alex Levenson] Address feedback
c1bd610 [Alex Levenson] cleanup dotString in ColumnPath
418bfc1 [Alex Levenson] Update version, add temporary hacks for semantic enforcer
6643bd3 [Alex Levenson] Fix some more non backward incompatible changes
39f977f [Alex Levenson] Put a bunch of backwards compatible stuff back in, add @Deprecated
13a02c6 [Alex Levenson] Fix compile errors, add back in overloaded getRecordReader
f82edb7 [Alex Levenson] Merge branch 'master' into alexlevenson/filter-api
9bd014f [Alex Levenson] clean up TODOs and reference jiras
4cc7e87 [Alex Levenson] Add some comments
30e3d61 [Alex Levenson] Create a common interface for both kinds of filters
ac153a6 [Alex Levenson] Create a Statistics class for use in UDPs
fbbf601 [Alex Levenson] refactor IncrementallyUpdatedFilterPredicateGenerator to only generate the parts that require generation
5df47cd [Alex Levenson] Static imports of checkNotNull
c1d1823 [Alex Levenson] address some of the minor feedback items
67a3ba0 [Alex Levenson] update binary's toString
3d7372b [Alex Levenson] minor fixes
fed9531 [Alex Levenson] Add skipCurrentRecord method to clear events in thrift converter
2e632d5 [Alex Levenson] Make Binary Serializable
09c024f [Alex Levenson] update comments
3169849 [Alex Levenson] fix compilation error
0185030 [Alex Levenson] Add integration test for value level filters
4fde18c [Alex Levenson] move to right package
ae36b37 [Alex Levenson] Handle merge issues
af69486 [Alex Levenson] Merge branch 'master' into alexlevenson/filter-api
0665271 [Alex Levenson] Add tests for value inspector
c5e3b07 [Alex Levenson] Add tests for resetter and evaluator
29f677a [Alex Levenson] Fix scala DSL
8897a28 [Alex Levenson] Fix some tests
b448bee [Alex Levenson] Fix mistake in MessageColumnIO
c8133f8 [Alex Levenson] Fix some tests
4cf686d [Alex Levenson] more null checks
69e683b [Alex Levenson] check all the nulls
220a682 [Alex Levenson] more cleanup
aad5af3 [Alex Levenson] rm generated src file from git
5075243 [Alex Levenson] more minor cleanup
9966713 [Alex Levenson] Hook generation into maven build
8282725 [Alex Levenson] minor cleanup
fea3ea9 [Alex Levenson] minor cleanup
9e35406 [Alex Levenson] move statistics filter
c52750c [Alex Levenson] finish moving things around
97a6bfd [Alex Levenson] Move things around pt2
843b9fe [Alex Levenson] Move some files around pt 1
5eedcc0 [Alex Levenson] turn off dictionary support for AtomicConverter
541319e [Alex Levenson] various cleanup and fixes
08e9638 [Alex Levenson] rm ColumnPathUtil
bfe6795 [Alex Levenson] Add type bounds to FilterApi
6c831ab [Alex Levenson] don't double log exception in SerializationUtil
a7a58d1 [Alex Levenson] use ColumnPath instead of String
8f11a6b [Alex Levenson] Move ColumnPath and Canonicalizer to parquet-common
9164359 [Alex Levenson] stash
abc2be2 [Alex Levenson] Add null handling to record filters -- this impl is still broken though
90ba8f7 [Alex Levenson] Update Serialization Util
0a261f1 [Alex Levenson] Add compression in SerializationUtil
f1278be [Alex Levenson] Add comment, fix tests
cbd1a85 [Alex Levenson] Replace some specialization with generic views
e496cbf [Alex Levenson] Fix short circuiting in StatisticsFilter
db6b32d [Alex Levenson] Address some comments, fix constructor in ParquetReader
fd6f44d [Alex Levenson] Fix semver backward compat
2fdd304 [Alex Levenson] Some more cleanup
d34fb89 [Alex Levenson] Cleanup some TODOs
544499c [Alex Levenson] stash
7b32016 [Alex Levenson] Merge branch 'master' into alexlevenson/filter-api
0e31251 [Alex Levenson] First pass at values filter, needs reworking
470e409 [Alex Levenson] fix java6/7 bug, minor cleanup
ee7b221 [Alex Levenson] more InputFormat tests
5ef849e [Alex Levenson] Add guards for not specifying both kinds of filter
0186b1f [Alex Levenson] Add logging to ParquetInputFormat and tests for configuration
a622648 [Alex Levenson] cleanup imports
9b1ea88 [Alex Levenson] Add tests for statistics filter
d517373 [Alex Levenson] tests for filter validator
b25fc44 [Alex Levenson] small cleanup of filter validator
32067a1 [Alex Levenson] add test for collapse logical nots
1efc198 [Alex Levenson] Add tests for invert filter predicate
046b106 [Alex Levenson] some more fixes
d3c4d7a [Alex Levenson] fix some more types, add in test for SerializationUtil
cc51274 [Alex Levenson] fix generics in FilterPredicateInverter
ea08349 [Alex Levenson] First pass at rowgroup filter, needs testing
156d91b [Alex Levenson] Add runtime type checker
4dfb4f2 [Alex Levenson] Add serialization util
8f80b20 [Alex Levenson] update comment
7c25121 [Alex Levenson] Add class to Column struct
58f1190 [Alex Levenson] Remove filterByUniqueValues
7f20de6 [Alex Levenson] rename user predicates
af14b42 [Alex Levenson] Update dsl
04409c5 [Alex Levenson] Add generic types into Visitor
ba42884 [Alex Levenson] rm getClassName
65f8af9 [Alex Levenson] Add in support for user defined predicates on columns
6926337 [Alex Levenson] Add explicit tokens for notEq, ltEq, gtEq
667ec9f [Alex Levenson] remove test for collapsing double negation
db2f71a [Alex Levenson] rename FilterPredicatesTest
a0a0533 [Alex Levenson] Address first round of comments
b2bca94 [Alex Levenson] Add scala DSL and tests
bedda87 [Alex Levenson] Add tests for FilterPredicate building
238cbbe [Alex Levenson] Add scala dsl
39f7b24 [Alex Levenson] add scala mvn boilerplate
2ec71a7 [Alex Levenson] Add predicate API


Project: http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/commit/ad32bf0f
Tree: http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/tree/ad32bf0f
Diff: http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/diff/ad32bf0f

Branch: refs/heads/master
Commit: ad32bf0fd111ab473ad1080cde11de39e3c5a67f
Parents: fc2c29d
Author: Alex Levenson <al...@twitter.com>
Authored: Tue Jul 29 14:38:59 2014 -0700
Committer: julien <ju...@twitter.com>
Committed: Tue Jul 29 14:38:59 2014 -0700

----------------------------------------------------------------------
 parquet-avro/pom.xml                            |   2 +-
 .../java/parquet/avro/AvroParquetReader.java    |  35 +-
 parquet-cascading/pom.xml                       |   2 +-
 parquet-column/pom.xml                          |  22 +-
 .../column/statistics/BinaryStatistics.java     |  12 +-
 .../column/statistics/BooleanStatistics.java    |  12 +-
 .../column/statistics/DoubleStatistics.java     |  12 +-
 .../column/statistics/FloatStatistics.java      |  12 +-
 .../column/statistics/IntStatistics.java        |  12 +-
 .../column/statistics/LongStatistics.java       |  12 +-
 .../parquet/column/statistics/Statistics.java   |   5 +-
 .../parquet/filter2/compat/FilterCompat.java    | 140 ++++++
 .../parquet/filter2/predicate/FilterApi.java    | 177 +++++++
 .../filter2/predicate/FilterPredicate.java      |  54 +++
 .../predicate/LogicalInverseRewriter.java       |  95 ++++
 .../filter2/predicate/LogicalInverter.java      |  90 ++++
 .../parquet/filter2/predicate/Operators.java    | 455 +++++++++++++++++
 .../predicate/SchemaCompatibilityValidator.java | 172 +++++++
 .../parquet/filter2/predicate/Statistics.java   |  24 +
 .../filter2/predicate/UserDefinedPredicate.java |  90 ++++
 .../parquet/filter2/predicate/ValidTypeMap.java | 160 ++++++
 .../recordlevel/FilteringGroupConverter.java    |  97 ++++
 .../FilteringPrimitiveConverter.java            |  91 ++++
 .../FilteringRecordMaterializer.java            |  97 ++++
 .../IncrementallyUpdatedFilterPredicate.java    | 139 ++++++
 ...ntallyUpdatedFilterPredicateBuilderBase.java |  79 +++
 ...mentallyUpdatedFilterPredicateEvaluator.java |  45 ++
 ...ementallyUpdatedFilterPredicateResetter.java |  42 ++
 .../java/parquet/io/FilteredRecordReader.java   |   6 +
 .../main/java/parquet/io/MessageColumnIO.java   | 100 +++-
 .../src/main/java/parquet/io/RecordReader.java  |   9 +-
 .../parquet/io/RecordReaderImplementation.java  |  14 +-
 .../src/main/java/parquet/io/api/Binary.java    | 486 +++++++++++--------
 .../java/parquet/io/api/RecordMaterializer.java |   5 +
 .../parquet/filter2/predicate/DummyUdp.java     |  19 +
 .../filter2/predicate/TestFilterApiMethods.java | 103 ++++
 .../predicate/TestLogicalInverseRewriter.java   |  85 ++++
 .../filter2/predicate/TestLogicalInverter.java  |  76 +++
 .../TestSchemaCompatibilityValidator.java       | 124 +++++
 .../filter2/predicate/TestValidTypeMap.java     |  93 ++++
 ...mentallyUpdatedFilterPredicateEvaluator.java | 191 ++++++++
 ...ementallyUpdatedFilterPredicateResetter.java |  51 ++
 .../filter2/recordlevel/TestValueInspector.java |  79 +++
 .../src/test/java/parquet/io/TestFiltered.java  |  58 +--
 parquet-common/pom.xml                          |   2 +-
 .../src/main/java/parquet/Closeables.java       |  37 ++
 .../parquet/common/internal/Canonicalizer.java  |  59 +++
 .../java/parquet/common/schema/ColumnPath.java  |  96 ++++
 parquet-encoding/pom.xml                        |   2 +-
 parquet-generator/pom.xml                       |   2 +-
 .../main/java/parquet/encoding/Generator.java   |   2 +-
 .../main/java/parquet/filter2/Generator.java    |  10 +
 ...mentallyUpdatedFilterPredicateGenerator.java | 251 ++++++++++
 parquet-hadoop-bundle/pom.xml                   |   2 +-
 parquet-hadoop/pom.xml                          |   4 +-
 .../parquet/filter2/compat/RowGroupFilter.java  |  63 +++
 .../statisticslevel/StatisticsFilter.java       | 244 ++++++++++
 .../converter/ParquetMetadataConverter.java     |  12 +-
 .../hadoop/InternalParquetRecordReader.java     |  59 ++-
 .../java/parquet/hadoop/ParquetFileReader.java  |  12 +-
 .../java/parquet/hadoop/ParquetFileWriter.java  |   8 +-
 .../java/parquet/hadoop/ParquetInputFormat.java | 118 ++++-
 .../java/parquet/hadoop/ParquetInputSplit.java  |   2 +-
 .../main/java/parquet/hadoop/ParquetReader.java |  96 +++-
 .../parquet/hadoop/ParquetRecordReader.java     |  24 +-
 .../main/java/parquet/hadoop/ParquetWriter.java |  13 +
 .../mapred/DeprecatedParquetInputFormat.java    |  13 +-
 .../parquet/hadoop/metadata/Canonicalizer.java  |  59 ---
 .../hadoop/metadata/ColumnChunkMetaData.java    |   3 +-
 .../hadoop/metadata/ColumnChunkProperties.java  |   2 +
 .../parquet/hadoop/metadata/ColumnPath.java     |  73 ---
 .../parquet/hadoop/metadata/EncodingList.java   |   1 +
 .../parquet/hadoop/util/SerializationUtil.java  |  93 ++++
 .../filter2/compat/TestRowGroupFilter.java      |  84 ++++
 .../filter2/recordlevel/PhoneBookWriter.java    | 251 ++++++++++
 .../recordlevel/TestRecordLevelFilters.java     | 205 ++++++++
 .../statisticslevel/TestStatisticsFilter.java   | 307 ++++++++++++
 .../java/parquet/hadoop/TestInputFormat.java    | 110 ++++-
 .../metadata/TestColumnChunkMetaData.java       |   9 +-
 .../hadoop/util/TestSerializationUtil.java      |  53 ++
 parquet-hive-bundle/pom.xml                     |   2 +-
 .../parquet-hive-0.10-binding/pom.xml           |   2 +-
 .../parquet-hive-0.12-binding/pom.xml           |   2 +-
 .../parquet-hive-binding-bundle/pom.xml         |   2 +-
 .../parquet-hive-binding-factory/pom.xml        |   2 +-
 .../parquet-hive-binding-interface/pom.xml      |   2 +-
 parquet-hive/parquet-hive-binding/pom.xml       |   2 +-
 .../parquet-hive-storage-handler/pom.xml        |   2 +-
 parquet-hive/pom.xml                            |   2 +-
 parquet-jackson/pom.xml                         |   2 +-
 parquet-pig-bundle/pom.xml                      |   2 +-
 parquet-pig/pom.xml                             |   2 +-
 parquet-protobuf/pom.xml                        |   2 +-
 .../java/parquet/proto/ProtoParquetReader.java  |  23 +-
 parquet-scala/pom.xml                           |  67 +++
 .../main/scala/parquet/filter2/dsl/Dsl.scala    |  89 ++++
 .../scala/parquet/filter2/dsl/DslTest.scala     |  61 +++
 parquet-scrooge/pom.xml                         |   4 +-
 parquet-test-hadoop2/pom.xml                    |   2 +-
 parquet-thrift/pom.xml                          |   2 +-
 .../parquet/thrift/ThriftParquetReader.java     |  64 ++-
 .../parquet/thrift/ThriftRecordConverter.java   |   5 +
 parquet-tools/pom.xml                           |   2 +-
 .../java/parquet/tools/command/DumpCommand.java |   1 -
 pom.xml                                         |  14 +-
 105 files changed, 5871 insertions(+), 554 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-avro/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-avro/pom.xml b/parquet-avro/pom.xml
index 16f34f7..9556a60 100644
--- a/parquet-avro/pom.xml
+++ b/parquet-avro/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-avro/src/main/java/parquet/avro/AvroParquetReader.java
----------------------------------------------------------------------
diff --git a/parquet-avro/src/main/java/parquet/avro/AvroParquetReader.java b/parquet-avro/src/main/java/parquet/avro/AvroParquetReader.java
index f002f21..54cfc8b 100644
--- a/parquet-avro/src/main/java/parquet/avro/AvroParquetReader.java
+++ b/parquet-avro/src/main/java/parquet/avro/AvroParquetReader.java
@@ -17,34 +17,51 @@ package parquet.avro;
 
 import java.io.IOException;
 
-import org.apache.avro.Schema;
 import org.apache.avro.generic.IndexedRecord;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 
 import parquet.filter.UnboundRecordFilter;
 import parquet.hadoop.ParquetReader;
-import parquet.hadoop.api.ReadSupport;
 
 /**
  * Read Avro records from a Parquet file.
  */
 public class AvroParquetReader<T extends IndexedRecord> extends ParquetReader<T> {
 
+  public Builder<T> builder(Path file) {
+    return ParquetReader.builder(new AvroReadSupport<T>(), file);
+  }
+
+  /**
+   * @deprecated use {@link #builder(Path)}
+   */
+  @Deprecated
   public AvroParquetReader(Path file) throws IOException {
-    super(file, (ReadSupport<T>) new AvroReadSupport());
+    super(file, new AvroReadSupport<T>());
   }
 
-  public AvroParquetReader(Path file, UnboundRecordFilter recordFilter) throws IOException {
-    super(file, (ReadSupport<T>) new AvroReadSupport(), recordFilter);
+  /**
+   * @deprecated use {@link #builder(Path)}
+   */
+  @Deprecated
+  public AvroParquetReader(Path file, UnboundRecordFilter unboundRecordFilter) throws IOException {
+    super(file, new AvroReadSupport<T>(), unboundRecordFilter);
   }
 
+  /**
+   * @deprecated use {@link #builder(Path)}
+   */
+  @Deprecated
   public AvroParquetReader(Configuration conf, Path file) throws IOException {
-    super(conf, file, (ReadSupport<T>) new AvroReadSupport());
+    super(conf, file, new AvroReadSupport<T>());
   }
 
-  public AvroParquetReader(Configuration conf, Path file, UnboundRecordFilter recordFilter ) throws IOException {
-    super(conf, file, (ReadSupport<T>) new AvroReadSupport(), recordFilter);
+  /**
+   * @deprecated use {@link #builder(Path)}
+   */
+  @Deprecated
+  public AvroParquetReader(Configuration conf, Path file, UnboundRecordFilter unboundRecordFilter) throws IOException {
+    super(conf, file, new AvroReadSupport<T>(), unboundRecordFilter);
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-cascading/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-cascading/pom.xml b/parquet-cascading/pom.xml
index 6b2bd45..ad209af 100644
--- a/parquet-cascading/pom.xml
+++ b/parquet-cascading/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-column/pom.xml b/parquet-column/pom.xml
index 10f469d..94473f6 100644
--- a/parquet-column/pom.xml
+++ b/parquet-column/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>
@@ -96,6 +96,26 @@
           </execution>
         </executions>
       </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>exec-maven-plugin</artifactId>
+        <version>1.2.1</version>
+        <executions>
+          <execution>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>java</goal>
+            </goals>
+          </execution>
+        </executions>
+        <configuration>
+          <mainClass>parquet.filter2.Generator</mainClass>          
+          <arguments>
+            <argument>${basedir}/target/generated-src</argument>
+          </arguments>
+          <sourceRoot>${basedir}/target/generated-src</sourceRoot>
+        </configuration>
+      </plugin>
     </plugins>
   </build>
 </project>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/column/statistics/BinaryStatistics.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/column/statistics/BinaryStatistics.java b/parquet-column/src/main/java/parquet/column/statistics/BinaryStatistics.java
index 8072439..f125b2f 100644
--- a/parquet-column/src/main/java/parquet/column/statistics/BinaryStatistics.java
+++ b/parquet-column/src/main/java/parquet/column/statistics/BinaryStatistics.java
@@ -17,7 +17,7 @@ package parquet.column.statistics;
 
 import parquet.io.api.Binary;
 
-public class BinaryStatistics extends Statistics{
+public class BinaryStatistics extends Statistics<Binary> {
 
   private Binary max;
   private Binary min;
@@ -77,6 +77,16 @@ public class BinaryStatistics extends Statistics{
       this.markAsNotEmpty();
   }
 
+  @Override
+  public Binary genericGetMin() {
+    return min;
+  }
+
+  @Override
+  public Binary genericGetMax() {
+    return max;
+  }
+
   public Binary getMax() {
     return max;
   }

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/column/statistics/BooleanStatistics.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/column/statistics/BooleanStatistics.java b/parquet-column/src/main/java/parquet/column/statistics/BooleanStatistics.java
index 4552fd4..6741343 100644
--- a/parquet-column/src/main/java/parquet/column/statistics/BooleanStatistics.java
+++ b/parquet-column/src/main/java/parquet/column/statistics/BooleanStatistics.java
@@ -17,7 +17,7 @@ package parquet.column.statistics;
 
 import parquet.bytes.BytesUtils;
 
-public class BooleanStatistics extends Statistics{
+public class BooleanStatistics extends Statistics<Boolean> {
 
   private boolean max;
   private boolean min;
@@ -77,6 +77,16 @@ public class BooleanStatistics extends Statistics{
       this.markAsNotEmpty();
   }
 
+  @Override
+  public Boolean genericGetMin() {
+    return min;
+  }
+
+  @Override
+  public Boolean genericGetMax() {
+    return max;
+  }
+
   public boolean getMax() {
     return max;
   }

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/column/statistics/DoubleStatistics.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/column/statistics/DoubleStatistics.java b/parquet-column/src/main/java/parquet/column/statistics/DoubleStatistics.java
index 5dfe161..c9695f3 100644
--- a/parquet-column/src/main/java/parquet/column/statistics/DoubleStatistics.java
+++ b/parquet-column/src/main/java/parquet/column/statistics/DoubleStatistics.java
@@ -17,7 +17,7 @@ package parquet.column.statistics;
 
 import parquet.bytes.BytesUtils;
 
-public class DoubleStatistics extends Statistics{
+public class DoubleStatistics extends Statistics<Double> {
 
   private double max;
   private double min;
@@ -77,6 +77,16 @@ public class DoubleStatistics extends Statistics{
       this.markAsNotEmpty();
   }
 
+  @Override
+  public Double genericGetMin() {
+    return min;
+  }
+
+  @Override
+  public Double genericGetMax() {
+    return max;
+  }
+
   public double getMax() {
     return max;
   }

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/column/statistics/FloatStatistics.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/column/statistics/FloatStatistics.java b/parquet-column/src/main/java/parquet/column/statistics/FloatStatistics.java
index 1e85839..b13aafa 100644
--- a/parquet-column/src/main/java/parquet/column/statistics/FloatStatistics.java
+++ b/parquet-column/src/main/java/parquet/column/statistics/FloatStatistics.java
@@ -17,7 +17,7 @@ package parquet.column.statistics;
 
 import parquet.bytes.BytesUtils;
 
-public class FloatStatistics extends Statistics{
+public class FloatStatistics extends Statistics<Float> {
 
   private float max;
   private float min;
@@ -77,6 +77,16 @@ public class FloatStatistics extends Statistics{
       this.markAsNotEmpty();
   }
 
+  @Override
+  public Float genericGetMin() {
+    return min;
+  }
+
+  @Override
+  public Float genericGetMax() {
+    return max;
+  }
+
   public float getMax() {
     return max;
   }

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/column/statistics/IntStatistics.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/column/statistics/IntStatistics.java b/parquet-column/src/main/java/parquet/column/statistics/IntStatistics.java
index 5871553..7bdd6be 100644
--- a/parquet-column/src/main/java/parquet/column/statistics/IntStatistics.java
+++ b/parquet-column/src/main/java/parquet/column/statistics/IntStatistics.java
@@ -17,7 +17,7 @@ package parquet.column.statistics;
 
 import parquet.bytes.BytesUtils;
 
-public class IntStatistics extends Statistics{
+public class IntStatistics extends Statistics<Integer> {
 
   private int max;
   private int min;
@@ -77,6 +77,16 @@ public class IntStatistics extends Statistics{
       this.markAsNotEmpty();
   }
 
+  @Override
+  public Integer genericGetMin() {
+    return min;
+  }
+
+  @Override
+  public Integer genericGetMax() {
+    return max;
+  }
+
   public int getMax() {
     return max;
   }

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/column/statistics/LongStatistics.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/column/statistics/LongStatistics.java b/parquet-column/src/main/java/parquet/column/statistics/LongStatistics.java
index c1cf94e..bae63a9 100644
--- a/parquet-column/src/main/java/parquet/column/statistics/LongStatistics.java
+++ b/parquet-column/src/main/java/parquet/column/statistics/LongStatistics.java
@@ -17,7 +17,7 @@ package parquet.column.statistics;
 
 import parquet.bytes.BytesUtils;
 
-public class LongStatistics extends Statistics{
+public class LongStatistics extends Statistics<Long> {
 
   private long max;
   private long min;
@@ -77,6 +77,16 @@ public class LongStatistics extends Statistics{
       this.markAsNotEmpty();
   }
 
+  @Override
+  public Long genericGetMin() {
+    return min;
+  }
+
+  @Override
+  public Long genericGetMax() {
+    return max;
+  }
+
   public long getMax() {
     return max;
   }

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/column/statistics/Statistics.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/column/statistics/Statistics.java b/parquet-column/src/main/java/parquet/column/statistics/Statistics.java
index 2c5ac14..b29b76b 100644
--- a/parquet-column/src/main/java/parquet/column/statistics/Statistics.java
+++ b/parquet-column/src/main/java/parquet/column/statistics/Statistics.java
@@ -26,7 +26,7 @@ import java.util.Arrays;
  *
  * @author Katya Gonina
  */
-public abstract class Statistics {
+public abstract class Statistics<T extends Comparable<T>> {
 
   private boolean firstValueAccountedFor;
   private long num_nulls;
@@ -162,6 +162,9 @@ public abstract class Statistics {
    */
   abstract public void setMinMaxFromBytes(byte[] minBytes, byte[] maxBytes);
 
+  abstract public T genericGetMin();
+  abstract public T genericGetMax();
+
   /**
    * Abstract method to return the max value as a byte array
    * @return byte array corresponding to the max value
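
The genericGetMin/genericGetMax accessors added above let statistics-level filtering compare a predicate's value against a row group's typed min/max without knowing the concrete Statistics subclass. A rough sketch of that idea follows; it is not the actual StatisticsFilter implementation.

  import parquet.column.statistics.IntStatistics;

  public class StatsDropSketch {
    public static void main(String[] args) {
      IntStatistics stats = new IntStatistics();
      stats.setMinMax(10, 100);
      stats.setNumNulls(0);

      // for eq(column, value) with a non-null value: the row group cannot
      // contain a match if value falls outside [min, max]
      Integer value = 7;
      boolean canDrop = value.compareTo(stats.genericGetMin()) < 0
          || value.compareTo(stats.genericGetMax()) > 0;
      System.out.println(canDrop); // true: 7 is outside [10, 100]
    }
  }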

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/filter2/compat/FilterCompat.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/filter2/compat/FilterCompat.java b/parquet-column/src/main/java/parquet/filter2/compat/FilterCompat.java
new file mode 100644
index 0000000..826bd52
--- /dev/null
+++ b/parquet-column/src/main/java/parquet/filter2/compat/FilterCompat.java
@@ -0,0 +1,140 @@
+package parquet.filter2.compat;
+
+import parquet.Log;
+import parquet.filter.UnboundRecordFilter;
+import parquet.filter2.predicate.FilterPredicate;
+import parquet.filter2.predicate.LogicalInverseRewriter;
+
+import static parquet.Preconditions.checkArgument;
+import static parquet.Preconditions.checkNotNull;
+
+/**
+ * Parquet currently has two ways to specify a filter for dropping records at read time.
+ * The first way, which only supports filtering records during record assembly, is found
+ * in {@link parquet.filter}. The new API (found in {@link parquet.filter2}) also supports
+ * filtering entire row groups of records without reading them at all.
+ *
+ * This class defines a common interface that both of these filters share,
+ * {@link Filter}. A Filter can be either an {@link UnboundRecordFilter} from the old API, or
+ * a {@link FilterPredicate} from the new API, or a sentinel no-op filter.
+ *
+ * Having this common interface simplifies passing a filter through the read path of parquet's
+ * codebase.
+ */
+public class FilterCompat {
+  private static final Log LOG = Log.getLog(FilterCompat.class);
+
+  /**
+   * Anyone wanting to use a {@link Filter} need only implement this interface,
+   * per the visitor pattern.
+   */
+  public static interface Visitor<T> {
+    T visit(FilterPredicateCompat filterPredicateCompat);
+    T visit(UnboundRecordFilterCompat unboundRecordFilterCompat);
+    T visit(NoOpFilter noOpFilter);
+  }
+
+  public static interface Filter {
+    <R> R accept(Visitor<R> visitor);
+  }
+
+  // sentinel no op filter that signals "do no filtering"
+  public static final Filter NOOP = new NoOpFilter();
+
+  /**
+   * Given a FilterPredicate, return a Filter that wraps it.
+   * This method also logs the filter being used and rewrites
+   * the predicate to not include the not() operator.
+   */
+  public static Filter get(FilterPredicate filterPredicate) {
+    checkNotNull(filterPredicate, "filterPredicate");
+
+    LOG.info("Filtering using predicate: " + filterPredicate);
+
+    // rewrite the predicate to not include the not() operator
+    FilterPredicate collapsedPredicate = LogicalInverseRewriter.rewrite(filterPredicate);
+
+    if (!filterPredicate.equals(collapsedPredicate)) {
+      LOG.info("Predicate has been collapsed to: " + collapsedPredicate);
+    }
+
+    return new FilterPredicateCompat(collapsedPredicate);
+  }
+
+  /**
+   * Given an UnboundRecordFilter, return a Filter that wraps it.
+   */
+  public static Filter get(UnboundRecordFilter unboundRecordFilter) {
+    return new UnboundRecordFilterCompat(unboundRecordFilter);
+  }
+
+  /**
+   * Given either a FilterPredicate or an UnboundRecordFilter, or neither (but not both),
+   * return a Filter that wraps whichever was provided.
+   *
+   * At least one of filterPredicate and unboundRecordFilter must be null, or an exception is thrown.
+   *
+   * If both are null, the no-op filter will be returned.
+   */
+  public static Filter get(FilterPredicate filterPredicate, UnboundRecordFilter unboundRecordFilter) {
+    checkArgument(filterPredicate == null || unboundRecordFilter == null,
+        "Cannot provide both a FilterPredicate and an UnboundRecordFilter");
+
+    if (filterPredicate != null) {
+      return get(filterPredicate);
+    }
+
+    if (unboundRecordFilter != null) {
+      return get(unboundRecordFilter);
+    }
+
+    return NOOP;
+  }
+
+  // wraps a FilterPredicate
+  public static final class FilterPredicateCompat implements Filter {
+    private final FilterPredicate filterPredicate;
+
+    private FilterPredicateCompat(FilterPredicate filterPredicate) {
+      this.filterPredicate = checkNotNull(filterPredicate, "filterPredicate");
+    }
+
+    public FilterPredicate getFilterPredicate() {
+      return filterPredicate;
+    }
+
+    @Override
+    public <R> R accept(Visitor<R> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  // wraps an UnboundRecordFilter
+  public static final class UnboundRecordFilterCompat implements Filter {
+    private final UnboundRecordFilter unboundRecordFilter;
+
+    private UnboundRecordFilterCompat(UnboundRecordFilter unboundRecordFilter) {
+      this.unboundRecordFilter = checkNotNull(unboundRecordFilter, "unboundRecordFilter");
+    }
+
+    public UnboundRecordFilter getUnboundRecordFilter() {
+      return unboundRecordFilter;
+    }
+
+    @Override
+    public <R> R accept(Visitor<R> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  // sentinel no op filter
+  public static final class NoOpFilter implements Filter {
+    private NoOpFilter() {}
+
+    @Override
+    public <R> R accept(Visitor<R> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+}
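
As a sketch of how read-path code can dispatch on the common Filter interface defined above, here is a visitor that simply reports which kind of filter it was handed; the class name DescribeFilter is for illustration only.

  import parquet.filter2.compat.FilterCompat.FilterPredicateCompat;
  import parquet.filter2.compat.FilterCompat.NoOpFilter;
  import parquet.filter2.compat.FilterCompat.UnboundRecordFilterCompat;
  import parquet.filter2.compat.FilterCompat.Visitor;

  // reports which kind of filter was provided, per the visitor pattern
  public class DescribeFilter implements Visitor<String> {
    @Override
    public String visit(FilterPredicateCompat filterPredicateCompat) {
      return "filter2 predicate: " + filterPredicateCompat.getFilterPredicate();
    }

    @Override
    public String visit(UnboundRecordFilterCompat unboundRecordFilterCompat) {
      return "record assembly filter: " + unboundRecordFilterCompat.getUnboundRecordFilter();
    }

    @Override
    public String visit(NoOpFilter noOpFilter) {
      return "no filtering";
    }
  }

  // usage: String description = someFilter.accept(new DescribeFilter());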

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/filter2/predicate/FilterApi.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/filter2/predicate/FilterApi.java b/parquet-column/src/main/java/parquet/filter2/predicate/FilterApi.java
new file mode 100644
index 0000000..1dd2bbc
--- /dev/null
+++ b/parquet-column/src/main/java/parquet/filter2/predicate/FilterApi.java
@@ -0,0 +1,177 @@
+package parquet.filter2.predicate;
+
+import parquet.common.schema.ColumnPath;
+import parquet.filter2.predicate.Operators.And;
+import parquet.filter2.predicate.Operators.BinaryColumn;
+import parquet.filter2.predicate.Operators.BooleanColumn;
+import parquet.filter2.predicate.Operators.Column;
+import parquet.filter2.predicate.Operators.DoubleColumn;
+import parquet.filter2.predicate.Operators.Eq;
+import parquet.filter2.predicate.Operators.FloatColumn;
+import parquet.filter2.predicate.Operators.Gt;
+import parquet.filter2.predicate.Operators.GtEq;
+import parquet.filter2.predicate.Operators.IntColumn;
+import parquet.filter2.predicate.Operators.LongColumn;
+import parquet.filter2.predicate.Operators.Lt;
+import parquet.filter2.predicate.Operators.LtEq;
+import parquet.filter2.predicate.Operators.Not;
+import parquet.filter2.predicate.Operators.NotEq;
+import parquet.filter2.predicate.Operators.Or;
+import parquet.filter2.predicate.Operators.SupportsEqNotEq;
+import parquet.filter2.predicate.Operators.SupportsLtGt;
+import parquet.filter2.predicate.Operators.UserDefined;
+
+/**
+ * The Filter API is expressed through these static methods.
+ *
+ * Example usage:
+ * {@code
+ *
+ *   IntColumn foo = intColumn("foo");
+ *   DoubleColumn bar = doubleColumn("x.y.bar");
+ *
+ *   // foo == 10 || bar <= 17.0
+ *   FilterPredicate pred = or(eq(foo, 10), ltEq(bar, 17.0));
+ *
+ * }
+ */
+// TODO: Support repeated columns (https://issues.apache.org/jira/browse/PARQUET-34)
+//
+// TODO: Support filtering on groups (eg, filter where this group is / isn't null)
+// TODO: (https://issues.apache.org/jira/browse/PARQUET-43)
+
+// TODO: Consider adding support for more column types that aren't coupled with parquet types, eg Column<String>
+// TODO: (https://issues.apache.org/jira/browse/PARQUET-35)
+public final class FilterApi {
+  private FilterApi() { }
+
+  public static IntColumn intColumn(String columnPath) {
+    return new IntColumn(ColumnPath.fromDotString(columnPath));
+  }
+
+  public static LongColumn longColumn(String columnPath) {
+    return new LongColumn(ColumnPath.fromDotString(columnPath));
+  }
+
+  public static FloatColumn floatColumn(String columnPath) {
+    return new FloatColumn(ColumnPath.fromDotString(columnPath));
+  }
+
+  public static DoubleColumn doubleColumn(String columnPath) {
+    return new DoubleColumn(ColumnPath.fromDotString(columnPath));
+  }
+
+  public static BooleanColumn booleanColumn(String columnPath) {
+    return new BooleanColumn(ColumnPath.fromDotString(columnPath));
+  }
+
+  public static BinaryColumn binaryColumn(String columnPath) {
+    return new BinaryColumn(ColumnPath.fromDotString(columnPath));
+  }
+
+  /**
+   * Keeps records if their value is equal to the provided value.
+   * Nulls are treated the same way the java programming language does.
+   * For example:
+   *   eq(column, null) will keep all records whose value is null.
+   *   eq(column, 7) will keep all records whose value is 7, and will drop records whose value is null
+   */
+  public static <T extends Comparable<T>, C extends Column<T> & SupportsEqNotEq> Eq<T> eq(C column, T value) {
+    return new Eq<T>(column, value);
+  }
+
+  /**
+   * Keeps records if their value is not equal to the provided value.
+   * Nulls are treated the same way the java programming language does.
+   * For example:
+   *   notEq(column, null) will keep all records whose value is not null.
+   *   notEq(column, 7) will keep all records whose value is not 7, including records whose value is null.
+   *
+   *   NOTE: this is different from how some query languages handle null. For example, SQL and pig will drop
+   *   nulls when you filter by not equal to 7. To achieve similar behavior in this api, do:
+   *   and(notEq(column, 7), notEq(column, null))
+   *
+   *   NOTE: be sure to read the {@link #lt}, {@link #ltEq}, {@link #gt}, {@link #gtEq} operator's docs
+   *         for how they handle nulls
+   */
+  public static <T extends Comparable<T>, C extends Column<T> & SupportsEqNotEq> NotEq<T> notEq(C column, T value) {
+    return new NotEq<T>(column, value);
+  }
+
+  /**
+   * Keeps records if their value is less than (but not equal to) the provided value.
+   * The provided value cannot be null, as less than null has no meaning.
+   * Records with null values will be dropped.
+   * For example:
+   *   lt(column, 7) will keep all records whose value is less than (but not equal to) 7, and not null.
+   */
+  public static <T extends Comparable<T>, C extends Column<T> & SupportsLtGt> Lt<T> lt(C column, T value) {
+    return new Lt<T>(column, value);
+  }
+
+  /**
+   * Keeps records if their value is less than or equal to the provided value.
+   * The provided value cannot be null, as less than null has no meaning.
+   * Records with null values will be dropped.
+   * For example:
+   *   ltEq(column, 7) will keep all records whose value is less than or equal to 7, and not null.
+   */
+  public static <T extends Comparable<T>, C extends Column<T> & SupportsLtGt> LtEq<T> ltEq(C column, T value) {
+    return new LtEq<T>(column, value);
+  }
+
+  /**
+   * Keeps records if their value is greater than (but not equal to) the provided value.
+   * The provided value cannot be null, as greater than null has no meaning.
+   * Records with null values will be dropped.
+   * For example:
+   *   gt(column, 7) will keep all records whose value is greater than (but not equal to) 7, and not null.
+   */
+  public static <T extends Comparable<T>, C extends Column<T> & SupportsLtGt> Gt<T> gt(C column, T value) {
+    return new Gt<T>(column, value);
+  }
+
+  /**
+   * Keeps records if their value is greater than or equal to the provided value.
+   * The provided value cannot be null, as greater than null has no meaning.
+   * Records with null values will be dropped.
+   * For example:
+   *   gtEq(column, 7) will keep all records whose value is greater than or equal to 7, and not null.
+   */
+  public static <T extends Comparable<T>, C extends Column<T> & SupportsLtGt> GtEq<T> gtEq(C column, T value) {
+    return new GtEq<T>(column, value);
+  }
+
+  /**
+   * Keeps records that pass the provided {@link UserDefinedPredicate}
+   */
+  public static <T extends Comparable<T>, U extends UserDefinedPredicate<T>>
+    UserDefined<T, U> userDefined(Column<T> column, Class<U> clazz) {
+    return new UserDefined<T, U>(column, clazz);
+  }
+
+  /**
+   * Constructs the logical and of two predicates. Records will be kept if both the left and right predicate agree
+   * that the record should be kept.
+   */
+  public static FilterPredicate and(FilterPredicate left, FilterPredicate right) {
+    return new And(left, right);
+  }
+
+  /**
+   * Constructs the logical or of two predicates. Records will be kept if either the left or right predicate
+   * is satisfied (or both).
+   */
+  public static FilterPredicate or(FilterPredicate left, FilterPredicate right) {
+    return new Or(left, right);
+  }
+
+  /**
+   * Constructs the logical not (or inverse) of a predicate.
+   * Records will be kept if the provided predicate is not satisfied.
+   */
+  public static FilterPredicate not(FilterPredicate predicate) {
+    return new Not(predicate);
+  }
+
+}
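
A small sketch of the null semantics described in the eq/notEq docs above; the column name is illustrative only.

  import parquet.filter2.predicate.FilterPredicate;
  import parquet.filter2.predicate.Operators.IntColumn;

  import static parquet.filter2.predicate.FilterApi.and;
  import static parquet.filter2.predicate.FilterApi.eq;
  import static parquet.filter2.predicate.FilterApi.intColumn;
  import static parquet.filter2.predicate.FilterApi.notEq;

  public class NullSemanticsExample {
    public static void main(String[] args) {
      IntColumn foo = intColumn("foo");

      // keeps only records where foo is null
      FilterPredicate isNull = eq(foo, null);

      // keeps records where foo != 7, including records where foo is null
      FilterPredicate notSeven = notEq(foo, 7);

      // SQL-like behavior: keep records where foo != 7 and foo is not null
      FilterPredicate notSevenAndNotNull = and(notEq(foo, 7), notEq(foo, null));
      System.out.println(notSevenAndNotNull);
    }
  }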

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/filter2/predicate/FilterPredicate.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/filter2/predicate/FilterPredicate.java b/parquet-column/src/main/java/parquet/filter2/predicate/FilterPredicate.java
new file mode 100644
index 0000000..9cdaabe
--- /dev/null
+++ b/parquet-column/src/main/java/parquet/filter2/predicate/FilterPredicate.java
@@ -0,0 +1,54 @@
+package parquet.filter2.predicate;
+
+import parquet.filter2.predicate.Operators.And;
+import parquet.filter2.predicate.Operators.Eq;
+import parquet.filter2.predicate.Operators.Gt;
+import parquet.filter2.predicate.Operators.GtEq;
+import parquet.filter2.predicate.Operators.LogicalNotUserDefined;
+import parquet.filter2.predicate.Operators.Lt;
+import parquet.filter2.predicate.Operators.LtEq;
+import parquet.filter2.predicate.Operators.Not;
+import parquet.filter2.predicate.Operators.NotEq;
+import parquet.filter2.predicate.Operators.Or;
+import parquet.filter2.predicate.Operators.UserDefined;
+
+/**
+ * A FilterPredicate is an expression tree describing the criteria for which records to keep when loading data from
+ * a parquet file. These predicates are applied in multiple places. Currently, they are applied to all row groups at
+ * job submission time to see if we can potentially drop entire row groups, and then they are applied during column
+ * assembly to drop individual records that are not wanted.
+ *
+ * FilterPredicates do not contain closures or instances of anonymous classes, rather they are expressed as
+ * an expression tree of operators.
+ *
+ * FilterPredicates are implemented in terms of the visitor pattern.
+ *
+ * See {@link Operators} for the implementation of the operator tokens,
+ * and {@link FilterApi} for the dsl functions for constructing an expression tree.
+ */
+public interface FilterPredicate {
+
+  /**
+   * A FilterPredicate must accept a Visitor, per the visitor pattern.
+   */
+  <R> R accept(Visitor<R> visitor);
+
+  /**
+   * A FilterPredicate Visitor must visit all the operators in a FilterPredicate expression tree,
+   * and must handle recursion itself, per the visitor pattern.
+   */
+  public static interface Visitor<R> {
+    <T extends Comparable<T>> R visit(Eq<T> eq);
+    <T extends Comparable<T>> R visit(NotEq<T> notEq);
+    <T extends Comparable<T>> R visit(Lt<T> lt);
+    <T extends Comparable<T>> R visit(LtEq<T> ltEq);
+    <T extends Comparable<T>> R visit(Gt<T> gt);
+    <T extends Comparable<T>> R visit(GtEq<T> gtEq);
+    R visit(And and);
+    R visit(Or or);
+    R visit(Not not);
+    <T extends Comparable<T>, U extends UserDefinedPredicate<T>> R visit(UserDefined<T, U> udp);
+    <T extends Comparable<T>, U extends UserDefinedPredicate<T>> R visit(LogicalNotUserDefined<T, U> udp);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/filter2/predicate/LogicalInverseRewriter.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/filter2/predicate/LogicalInverseRewriter.java b/parquet-column/src/main/java/parquet/filter2/predicate/LogicalInverseRewriter.java
new file mode 100644
index 0000000..4151bff
--- /dev/null
+++ b/parquet-column/src/main/java/parquet/filter2/predicate/LogicalInverseRewriter.java
@@ -0,0 +1,95 @@
+package parquet.filter2.predicate;
+
+import parquet.filter2.predicate.FilterPredicate.Visitor;
+import parquet.filter2.predicate.Operators.And;
+import parquet.filter2.predicate.Operators.Eq;
+import parquet.filter2.predicate.Operators.Gt;
+import parquet.filter2.predicate.Operators.GtEq;
+import parquet.filter2.predicate.Operators.LogicalNotUserDefined;
+import parquet.filter2.predicate.Operators.Lt;
+import parquet.filter2.predicate.Operators.LtEq;
+import parquet.filter2.predicate.Operators.Not;
+import parquet.filter2.predicate.Operators.NotEq;
+import parquet.filter2.predicate.Operators.Or;
+import parquet.filter2.predicate.Operators.UserDefined;
+
+import static parquet.Preconditions.checkNotNull;
+import static parquet.filter2.predicate.FilterApi.and;
+import static parquet.filter2.predicate.FilterApi.or;
+
+/**
+ * Recursively removes all use of the not() operator in a predicate
+ * by replacing all instances of not(x) with the logical inverse of x,
+ * eg: not(and(eq(x, 1), not(eq(y, 2)))) -> or(notEq(x, 1), eq(y, 2))
+ *
+ * The returned predicate should have the same meaning as the original, but
+ * without the use of the not() operator.
+ *
+ * See also {@link LogicalInverter}, which is used
+ * to do the inversion.
+ */
+public final class LogicalInverseRewriter implements Visitor<FilterPredicate> {
+  private static final LogicalInverseRewriter INSTANCE = new LogicalInverseRewriter();
+
+  public static FilterPredicate rewrite(FilterPredicate pred) {
+    checkNotNull(pred, "pred");
+    return pred.accept(INSTANCE);
+  }
+
+  private LogicalInverseRewriter() { }
+
+  @Override
+  public <T extends Comparable<T>> FilterPredicate visit(Eq<T> eq) {
+    return eq;
+  }
+
+  @Override
+  public <T extends Comparable<T>> FilterPredicate visit(NotEq<T> notEq) {
+    return notEq;
+  }
+
+  @Override
+  public <T extends Comparable<T>> FilterPredicate visit(Lt<T> lt) {
+    return lt;
+  }
+
+  @Override
+  public <T extends Comparable<T>> FilterPredicate visit(LtEq<T> ltEq) {
+    return ltEq;
+  }
+
+  @Override
+  public <T extends Comparable<T>> FilterPredicate visit(Gt<T> gt) {
+    return gt;
+  }
+
+  @Override
+  public <T extends Comparable<T>> FilterPredicate visit(GtEq<T> gtEq) {
+    return gtEq;
+  }
+
+  @Override
+  public FilterPredicate visit(And and) {
+    return and(and.getLeft().accept(this), and.getRight().accept(this));
+  }
+
+  @Override
+  public FilterPredicate visit(Or or) {
+    return or(or.getLeft().accept(this), or.getRight().accept(this));
+  }
+
+  @Override
+  public FilterPredicate visit(Not not) {
+    return LogicalInverter.invert(not.getPredicate().accept(this));
+  }
+
+  @Override
+  public <T extends Comparable<T>, U extends UserDefinedPredicate<T>> FilterPredicate visit(UserDefined<T, U> udp) {
+    return udp;
+  }
+
+  @Override
+  public <T extends Comparable<T>, U extends UserDefinedPredicate<T>> FilterPredicate visit(LogicalNotUserDefined<T, U> udp) {
+    return udp;
+  }
+}
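
A sketch of the effect of this rewrite (static imports of the FilterApi methods assumed, columns hypothetical):

  IntColumn foo = intColumn("foo");
  IntColumn bar = intColumn("bar");

  FilterPredicate original = not(and(eq(foo, 10), not(eq(bar, 20))));
  FilterPredicate rewritten = LogicalInverseRewriter.rewrite(original);

  // rewritten is equivalent to or(notEq(foo, 10), eq(bar, 20)) and contains no not() nodes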

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/filter2/predicate/LogicalInverter.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/filter2/predicate/LogicalInverter.java b/parquet-column/src/main/java/parquet/filter2/predicate/LogicalInverter.java
new file mode 100644
index 0000000..15b0715
--- /dev/null
+++ b/parquet-column/src/main/java/parquet/filter2/predicate/LogicalInverter.java
@@ -0,0 +1,90 @@
+package parquet.filter2.predicate;
+
+import parquet.filter2.predicate.FilterPredicate.Visitor;
+import parquet.filter2.predicate.Operators.And;
+import parquet.filter2.predicate.Operators.Eq;
+import parquet.filter2.predicate.Operators.Gt;
+import parquet.filter2.predicate.Operators.GtEq;
+import parquet.filter2.predicate.Operators.LogicalNotUserDefined;
+import parquet.filter2.predicate.Operators.Lt;
+import parquet.filter2.predicate.Operators.LtEq;
+import parquet.filter2.predicate.Operators.Not;
+import parquet.filter2.predicate.Operators.NotEq;
+import parquet.filter2.predicate.Operators.Or;
+import parquet.filter2.predicate.Operators.UserDefined;
+
+import static parquet.Preconditions.checkNotNull;
+
+/**
+ * Converts a {@link FilterPredicate} to its logical inverse.
+ * The returned predicate should be equivalent to not(p), but without
+ * the use of a not() operator.
+ *
+ * See also {@link LogicalInverseRewriter}, which can remove the use
+ * of all not() operators without inverting the overall predicate.
+ */
+public final class LogicalInverter implements Visitor<FilterPredicate> {
+  private static final LogicalInverter INSTANCE = new LogicalInverter();
+
+  public static FilterPredicate invert(FilterPredicate pred) {
+    checkNotNull(pred, "pred");
+    return pred.accept(INSTANCE);
+  }
+
+  private LogicalInverter() {}
+
+  @Override
+  public <T extends Comparable<T>> FilterPredicate visit(Eq<T> eq) {
+    return new NotEq<T>(eq.getColumn(), eq.getValue());
+  }
+
+  @Override
+  public <T extends Comparable<T>> FilterPredicate visit(NotEq<T> notEq) {
+    return new Eq<T>(notEq.getColumn(), notEq.getValue());
+  }
+
+  @Override
+  public <T extends Comparable<T>> FilterPredicate visit(Lt<T> lt) {
+    return new GtEq<T>(lt.getColumn(), lt.getValue());
+  }
+
+  @Override
+  public <T extends Comparable<T>> FilterPredicate visit(LtEq<T> ltEq) {
+    return new Gt<T>(ltEq.getColumn(), ltEq.getValue());
+  }
+
+  @Override
+  public <T extends Comparable<T>> FilterPredicate visit(Gt<T> gt) {
+    return new LtEq<T>(gt.getColumn(), gt.getValue());
+  }
+
+  @Override
+  public <T extends Comparable<T>> FilterPredicate visit(GtEq<T> gtEq) {
+    return new Lt<T>(gtEq.getColumn(), gtEq.getValue());
+  }
+
+  @Override
+  public FilterPredicate visit(And and) {
+    return new Or(and.getLeft().accept(this), and.getRight().accept(this));
+  }
+
+  @Override
+  public FilterPredicate visit(Or or) {
+    return new And(or.getLeft().accept(this), or.getRight().accept(this));
+  }
+
+  @Override
+  public FilterPredicate visit(Not not) {
+    return not.getPredicate();
+  }
+
+  @Override
+  public <T extends Comparable<T>,  U extends UserDefinedPredicate<T>> FilterPredicate visit(UserDefined<T, U> udp) {
+    return new LogicalNotUserDefined<T, U>(udp);
+  }
+
+  @Override
+  public <T extends Comparable<T>,  U extends UserDefinedPredicate<T>> FilterPredicate visit(LogicalNotUserDefined<T, U> udp) {
+    return udp.getUserDefined();
+  }
+}
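
In other words (a sketch, with a hypothetical int column foo and static imports of FilterApi):

  FilterPredicate p = lt(intColumn("foo"), 7);
  FilterPredicate inverse = LogicalInverter.invert(p);

  // inverse is equivalent to gtEq(foo, 7); inverting and(a, b) yields or(invert(a), invert(b)),
  // and inverting not(x) simply unwraps x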

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/filter2/predicate/Operators.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/filter2/predicate/Operators.java b/parquet-column/src/main/java/parquet/filter2/predicate/Operators.java
new file mode 100644
index 0000000..5d13f8c
--- /dev/null
+++ b/parquet-column/src/main/java/parquet/filter2/predicate/Operators.java
@@ -0,0 +1,455 @@
+package parquet.filter2.predicate;
+
+import java.io.Serializable;
+
+import parquet.common.schema.ColumnPath;
+import parquet.io.api.Binary;
+
+import static parquet.Preconditions.checkNotNull;
+
+/**
+ * These are the operators in a filter predicate expression tree.
+ * They are constructed by using the methods in {@link FilterApi}
+ */
+public final class Operators {
+  private Operators() { }
+
+  public static abstract class Column<T extends Comparable<T>> implements Serializable {
+    private final ColumnPath columnPath;
+    private final Class<T> columnType;
+
+    protected Column(ColumnPath columnPath, Class<T> columnType) {
+      checkNotNull(columnPath, "columnPath");
+      checkNotNull(columnType, "columnType");
+      this.columnPath = columnPath;
+      this.columnType = columnType;
+    }
+
+    public Class<T> getColumnType() {
+      return columnType;
+    }
+
+    public ColumnPath getColumnPath() {
+      return columnPath;
+    }
+
+    @Override
+    public String toString() {
+      return "column(" + columnPath.toDotString() + ")";
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (o == null || getClass() != o.getClass()) return false;
+
+      Column column = (Column) o;
+
+      if (!columnType.equals(column.columnType)) return false;
+      if (!columnPath.equals(column.columnPath)) return false;
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      int result = columnPath.hashCode();
+      result = 31 * result + columnType.hashCode();
+      return result;
+    }
+  }
+
+  public static interface SupportsEqNotEq { } // marker for columns that can be used with eq() and notEq()
+  public static interface SupportsLtGt extends SupportsEqNotEq { } // marker for columns that can be used with lt(), ltEq(), gt(), gtEq()
+
+  public static final class IntColumn extends Column<Integer> implements SupportsLtGt {
+    IntColumn(ColumnPath columnPath) {
+      super(columnPath, Integer.class);
+    }
+  }
+
+  public static final class LongColumn extends Column<Long> implements SupportsLtGt {
+    LongColumn(ColumnPath columnPath) {
+      super(columnPath, Long.class);
+    }
+  }
+
+  public static final class DoubleColumn extends Column<Double> implements SupportsLtGt {
+    DoubleColumn(ColumnPath columnPath) {
+      super(columnPath, Double.class);
+    }
+  }
+
+  public static final class FloatColumn extends Column<Float> implements SupportsLtGt {
+    FloatColumn(ColumnPath columnPath) {
+      super(columnPath, Float.class);
+    }
+  }
+
+  public static final class BooleanColumn extends Column<Boolean> implements SupportsEqNotEq {
+    BooleanColumn(ColumnPath columnPath) {
+      super(columnPath, Boolean.class);
+    }
+  }
+
+  public static final class BinaryColumn extends Column<Binary> implements SupportsLtGt {
+    BinaryColumn(ColumnPath columnPath) {
+      super(columnPath, Binary.class);
+    }
+  }
+
+  // base class for Eq, NotEq, Lt, Gt, LtEq, GtEq
+  static abstract class ColumnFilterPredicate<T extends Comparable<T>> implements FilterPredicate, Serializable  {
+    private final Column<T> column;
+    private final T value;
+    private final String toString;
+
+    protected ColumnFilterPredicate(Column<T> column, T value) {
+      this.column = checkNotNull(column, "column");
+
+      // Eq and NotEq allow value to be null, Lt, Gt, LtEq, GtEq however do not, so they guard against
+      // null in their own constructors.
+      this.value = value;
+
+      String name = getClass().getSimpleName().toLowerCase();
+      this.toString = name + "(" + column.getColumnPath().toDotString() + ", " + value + ")";
+    }
+
+    public Column<T> getColumn() {
+      return column;
+    }
+
+    public T getValue() {
+      return value;
+    }
+
+    @Override
+    public String toString() {
+      return toString;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (o == null || getClass() != o.getClass()) return false;
+
+      ColumnFilterPredicate that = (ColumnFilterPredicate) o;
+
+      if (!column.equals(that.column)) return false;
+      if (value != null ? !value.equals(that.value) : that.value != null) return false;
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      int result = column.hashCode();
+      result = 31 * result + (value != null ? value.hashCode() : 0);
+      result = 31 * result + getClass().hashCode();
+      return result;
+    }
+  }
+
+  public static final class Eq<T extends Comparable<T>> extends ColumnFilterPredicate<T> {
+
+    // value can be null
+    Eq(Column<T> column, T value) {
+      super(column, value);
+    }
+
+    @Override
+    public <R> R accept(Visitor<R> visitor) {
+      return visitor.visit(this);
+    }
+
+  }
+
+  public static final class NotEq<T extends Comparable<T>> extends ColumnFilterPredicate<T> {
+
+    // value can be null
+    NotEq(Column<T> column, T value) {
+      super(column, value);
+    }
+
+    @Override
+    public <R> R accept(Visitor<R> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+
+  public static final class Lt<T extends Comparable<T>> extends ColumnFilterPredicate<T> {
+
+    // value cannot be null
+    Lt(Column<T> column, T value) {
+      super(column, checkNotNull(value, "value"));
+    }
+
+    @Override
+    public <R> R accept(Visitor<R> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  public static final class LtEq<T extends Comparable<T>> extends ColumnFilterPredicate<T> {
+
+    // value cannot be null
+    LtEq(Column<T> column, T value) {
+      super(column, checkNotNull(value, "value"));
+    }
+
+    @Override
+    public <R> R accept(Visitor<R> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+
+  public static final class Gt<T extends Comparable<T>> extends ColumnFilterPredicate<T> {
+
+    // value cannot be null
+    Gt(Column<T> column, T value) {
+      super(column, checkNotNull(value, "value"));
+    }
+
+    @Override
+    public <R> R accept(Visitor<R> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  public static final class GtEq<T extends Comparable<T>> extends ColumnFilterPredicate<T> {
+
+    // value cannot be null
+    GtEq(Column<T> column, T value) {
+      super(column, checkNotNull(value, "value"));
+    }
+
+    @Override
+    public <R> R accept(Visitor<R> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  // base class for And, Or
+  private static abstract class BinaryLogicalFilterPredicate implements FilterPredicate, Serializable {
+    private final FilterPredicate left;
+    private final FilterPredicate right;
+    private final String toString;
+
+    protected BinaryLogicalFilterPredicate(FilterPredicate left, FilterPredicate right) {
+      this.left = checkNotNull(left, "left");
+      this.right = checkNotNull(right, "right");
+      String name = getClass().getSimpleName().toLowerCase();
+      this.toString = name + "(" + left + ", " + right + ")";
+    }
+
+    public FilterPredicate getLeft() {
+      return left;
+    }
+
+    public FilterPredicate getRight() {
+      return right;
+    }
+
+    @Override
+    public String toString() {
+      return toString;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (o == null || getClass() != o.getClass()) return false;
+
+      BinaryLogicalFilterPredicate that = (BinaryLogicalFilterPredicate) o;
+
+      if (!left.equals(that.left)) return false;
+      if (!right.equals(that.right)) return false;
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      int result = left.hashCode();
+      result = 31 * result + right.hashCode();
+      result = 31 * result + getClass().hashCode();
+      return result;
+    }
+  }
+
+  public static final class And extends BinaryLogicalFilterPredicate {
+
+    And(FilterPredicate left, FilterPredicate right) {
+      super(left, right);
+    }
+
+    @Override
+    public <R> R accept(Visitor<R> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  public static final class Or extends BinaryLogicalFilterPredicate {
+
+    Or(FilterPredicate left, FilterPredicate right) {
+      super(left, right);
+    }
+
+    @Override
+    public <R> R accept(Visitor<R> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  public static class Not implements FilterPredicate, Serializable {
+    private final FilterPredicate predicate;
+    private final String toString;
+
+    Not(FilterPredicate predicate) {
+      this.predicate = checkNotNull(predicate, "predicate");
+      this.toString = "not(" + predicate + ")";
+    }
+
+    public FilterPredicate getPredicate() {
+      return predicate;
+    }
+
+    @Override
+    public String toString() {
+      return toString;
+    }
+
+    @Override
+    public <R> R accept(Visitor<R> visitor) {
+      return visitor.visit(this);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (o == null || getClass() != o.getClass()) return false;
+      Not not = (Not) o;
+      return predicate.equals(not.predicate);
+    }
+
+    @Override
+    public int hashCode() {
+      return predicate.hashCode() * 31 + getClass().hashCode();
+    }
+  }
+
+  public static final class UserDefined<T extends Comparable<T>, U extends UserDefinedPredicate<T>> implements FilterPredicate, Serializable {
+    private final Column<T> column;
+    private final Class<U> udpClass;
+    private final String toString;
+    private static final String INSTANTIATION_ERROR_MESSAGE =
+        "Could not instantiate custom filter: %s. User defined predicates must be static classes with a default constructor.";
+
+    UserDefined(Column<T> column, Class<U> udpClass) {
+      this.column = checkNotNull(column, "column");
+      this.udpClass = checkNotNull(udpClass, "udpClass");
+      String name = getClass().getSimpleName().toLowerCase();
+      this.toString = name + "(" + column.getColumnPath().toDotString() + ", " + udpClass.getName() + ")";
+
+      // defensively try to instantiate the class early to make sure that it's possible
+      getUserDefinedPredicate();
+    }
+
+    public Column<T> getColumn() {
+      return column;
+    }
+
+    public Class<U> getUserDefinedPredicateClass() {
+      return udpClass;
+    }
+
+    public U getUserDefinedPredicate() {
+      try {
+        return udpClass.newInstance();
+      } catch (InstantiationException e) {
+        throw new RuntimeException(String.format(INSTANTIATION_ERROR_MESSAGE, udpClass), e);
+      } catch (IllegalAccessException e) {
+        throw new RuntimeException(String.format(INSTANTIATION_ERROR_MESSAGE, udpClass), e);
+      }
+    }
+
+    @Override
+    public <R> R accept(Visitor<R> visitor) {
+      return visitor.visit(this);
+    }
+
+    @Override
+    public String toString() {
+      return toString;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (o == null || getClass() != o.getClass()) return false;
+
+      UserDefined that = (UserDefined) o;
+
+      if (!column.equals(that.column)) return false;
+      if (!udpClass.equals(that.udpClass)) return false;
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      int result = column.hashCode();
+      result = 31 * result + udpClass.hashCode();
+      result = result * 31 + getClass().hashCode();
+      return result;
+    }
+  }
+
+  // Represents the inverse of a UserDefined. It is equivalent to not(userDefined), without the use
+  // of the not() operator
+  public static final class LogicalNotUserDefined <T extends Comparable<T>, U extends UserDefinedPredicate<T>> implements FilterPredicate, Serializable {
+    private final UserDefined<T, U> udp;
+    private final String toString;
+
+    LogicalNotUserDefined(UserDefined<T, U> userDefined) {
+      this.udp = checkNotNull(userDefined, "userDefined");
+      this.toString = "inverted(" + udp + ")";
+    }
+
+    public UserDefined<T, U> getUserDefined() {
+      return udp;
+    }
+
+    @Override
+    public <R> R accept(Visitor<R> visitor) {
+      return visitor.visit(this);
+    }
+
+    @Override
+    public String toString() {
+      return toString;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (o == null || getClass() != o.getClass()) return false;
+
+      LogicalNotUserDefined that = (LogicalNotUserDefined) o;
+
+      if (!udp.equals(that.udp)) return false;
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      int result = udp.hashCode();
+      result = result * 31 + getClass().hashCode();
+      return result;
+    }
+  }
+
+}
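
The SupportsEqNotEq / SupportsLtGt markers are what allow the FilterApi to reject meaningless comparisons at compile time. A sketch (assuming FilterApi constrains lt()/gt() to SupportsLtGt columns, as the markers suggest, and that booleanColumn() is the BooleanColumn factory analogous to intColumn()):

  BooleanColumn flag = booleanColumn("flag");
  IntColumn count = intColumn("count");

  FilterPredicate a = eq(flag, true); // fine: BooleanColumn implements SupportsEqNotEq
  FilterPredicate b = lt(count, 10);  // fine: IntColumn implements SupportsLtGt
  // lt(flag, true) would not compile: BooleanColumn is not a SupportsLtGt column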

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/filter2/predicate/SchemaCompatibilityValidator.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/filter2/predicate/SchemaCompatibilityValidator.java b/parquet-column/src/main/java/parquet/filter2/predicate/SchemaCompatibilityValidator.java
new file mode 100644
index 0000000..da0e122
--- /dev/null
+++ b/parquet-column/src/main/java/parquet/filter2/predicate/SchemaCompatibilityValidator.java
@@ -0,0 +1,172 @@
+package parquet.filter2.predicate;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import parquet.column.ColumnDescriptor;
+import parquet.common.schema.ColumnPath;
+import parquet.filter2.predicate.Operators.And;
+import parquet.filter2.predicate.Operators.Column;
+import parquet.filter2.predicate.Operators.ColumnFilterPredicate;
+import parquet.filter2.predicate.Operators.Eq;
+import parquet.filter2.predicate.Operators.Gt;
+import parquet.filter2.predicate.Operators.GtEq;
+import parquet.filter2.predicate.Operators.LogicalNotUserDefined;
+import parquet.filter2.predicate.Operators.Lt;
+import parquet.filter2.predicate.Operators.LtEq;
+import parquet.filter2.predicate.Operators.Not;
+import parquet.filter2.predicate.Operators.NotEq;
+import parquet.filter2.predicate.Operators.Or;
+import parquet.filter2.predicate.Operators.UserDefined;
+import parquet.schema.MessageType;
+import parquet.schema.OriginalType;
+
+import static parquet.Preconditions.checkArgument;
+import static parquet.Preconditions.checkNotNull;
+
+/**
+ * Inspects the column types found in the provided {@link FilterPredicate} and compares them
+ * to the actual schema found in the parquet file. If the provided predicate's types are
+ * not consistent with the file schema, an IllegalArgumentException is thrown.
+ *
+ * Ideally, all this would be checked at compile time, and this class wouldn't be needed.
+ * If we can come up with a way to do that, we should.
+ *
+ * This class is stateful, cannot be reused, and is not thread safe.
+ *
+ * TODO: detect if a column is optional or required and validate that eq(null)
+ * TODO: is not called on required fields (is that too strict?)
+ * TODO: (https://issues.apache.org/jira/browse/PARQUET-44)
+ */
+public class SchemaCompatibilityValidator implements FilterPredicate.Visitor<Void> {
+
+  public static void validate(FilterPredicate predicate, MessageType schema) {
+    checkNotNull(predicate, "predicate");
+    checkNotNull(schema, "schema");
+    predicate.accept(new SchemaCompatibilityValidator(schema));
+  }
+
+  // A map of column name to the type the user supplied for this column.
+  // Used to validate that the user did not provide different types for the same
+  // column.
+  private final Map<ColumnPath, Class<?>> columnTypesEncountered = new HashMap<ColumnPath, Class<?>>();
+
+  // the columns (keyed by path) according to the file's schema. This is the source of truth, and
+  // we are validating that what the user provided agrees with these.
+  private final Map<ColumnPath, ColumnDescriptor> columnsAccordingToSchema = new HashMap<ColumnPath, ColumnDescriptor>();
+
+  // the original type of a column, keyed by path
+  private final Map<ColumnPath, OriginalType> originalTypes = new HashMap<ColumnPath, OriginalType>();
+
+  private SchemaCompatibilityValidator(MessageType schema) {
+
+    for (ColumnDescriptor cd : schema.getColumns()) {
+      ColumnPath columnPath = ColumnPath.get(cd.getPath());
+      columnsAccordingToSchema.put(columnPath, cd);
+
+      OriginalType ot = schema.getType(cd.getPath()).getOriginalType();
+      if (ot != null) {
+        originalTypes.put(columnPath, ot);
+      }
+    }
+  }
+
+  @Override
+  public <T extends Comparable<T>> Void visit(Eq<T> pred) {
+    validateColumnFilterPredicate(pred);
+    return null;
+  }
+
+  @Override
+  public <T extends Comparable<T>> Void visit(NotEq<T> pred) {
+    validateColumnFilterPredicate(pred);
+    return null;
+  }
+
+  @Override
+  public <T extends Comparable<T>> Void visit(Lt<T> pred) {
+    validateColumnFilterPredicate(pred);
+    return null;
+  }
+
+  @Override
+  public <T extends Comparable<T>> Void visit(LtEq<T> pred) {
+    validateColumnFilterPredicate(pred);
+    return null;
+  }
+
+  @Override
+  public <T extends Comparable<T>> Void visit(Gt<T> pred) {
+    validateColumnFilterPredicate(pred);
+    return null;
+  }
+
+  @Override
+  public <T extends Comparable<T>> Void visit(GtEq<T> pred) {
+    validateColumnFilterPredicate(pred);
+    return null;
+  }
+
+  @Override
+  public Void visit(And and) {
+    and.getLeft().accept(this);
+    and.getRight().accept(this);
+    return null;
+  }
+
+  @Override
+  public Void visit(Or or) {
+    or.getLeft().accept(this);
+    or.getRight().accept(this);
+    return null;
+  }
+
+  @Override
+  public Void visit(Not not) {
+    not.getPredicate().accept(this);
+    return null;
+  }
+
+  @Override
+  public <T extends Comparable<T>, U extends UserDefinedPredicate<T>> Void visit(UserDefined<T, U> udp) {
+    validateColumn(udp.getColumn());
+    return null;
+  }
+
+  @Override
+  public <T extends Comparable<T>, U extends UserDefinedPredicate<T>> Void visit(LogicalNotUserDefined<T, U> udp) {
+    return udp.getUserDefined().accept(this);
+  }
+
+  private <T extends Comparable<T>> void validateColumnFilterPredicate(ColumnFilterPredicate<T> pred) {
+    validateColumn(pred.getColumn());
+  }
+
+  private <T extends Comparable<T>> void validateColumn(Column<T> column) {
+    ColumnPath path = column.getColumnPath();
+
+    Class<?> alreadySeen = columnTypesEncountered.get(path);
+    if (alreadySeen != null && !alreadySeen.equals(column.getColumnType())) {
+      throw new IllegalArgumentException("Column: "
+          + path.toDotString()
+          + " was provided with different types in the same predicate."
+          + " Found both: (" + alreadySeen + ", " + column.getColumnType() + ")");
+    }
+    columnTypesEncountered.put(path, column.getColumnType());
+
+    ColumnDescriptor descriptor = getColumnDescriptor(path);
+
+    if (descriptor.getMaxRepetitionLevel() > 0) {
+      throw new IllegalArgumentException("FilterPredicates do not currently support repeated columns. "
+          + "Column " + path.toDotString() + " is repeated.");
+    }
+
+    ValidTypeMap.assertTypeValid(column, descriptor.getType(), originalTypes.get(path));
+  }
+
+  private ColumnDescriptor getColumnDescriptor(ColumnPath columnPath) {
+    ColumnDescriptor cd = columnsAccordingToSchema.get(columnPath);
+    checkArgument(cd != null, "Column " + columnPath + " was not found in schema!");
+    return cd;
+  }
+}
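
A sketch of the kind of mismatch this validator rejects (schema and column are hypothetical, FilterApi statically imported):

  MessageType schema = MessageTypeParser.parseMessageType(
      "message Doc { optional int64 count; }");

  // the predicate declares count as an int, but the file stores it as an int64
  FilterPredicate pred = eq(intColumn("count"), 10);

  // throws IllegalArgumentException: the declared type (Integer) does not match INT64 in the schema
  SchemaCompatibilityValidator.validate(pred, schema);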

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/filter2/predicate/Statistics.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/filter2/predicate/Statistics.java b/parquet-column/src/main/java/parquet/filter2/predicate/Statistics.java
new file mode 100644
index 0000000..408bc54
--- /dev/null
+++ b/parquet-column/src/main/java/parquet/filter2/predicate/Statistics.java
@@ -0,0 +1,24 @@
+package parquet.filter2.predicate;
+
+import static parquet.Preconditions.checkNotNull;
+
+/**
+ * Contains statistics about a group of records
+ */
+public class Statistics<T> {
+  private final T min;
+  private final T max;
+
+  public Statistics(T min, T max) {
+    this.min = checkNotNull(min, "min");
+    this.max = checkNotNull(max, "max");
+  }
+
+  public T getMin() {
+    return min;
+  }
+
+  public T getMax() {
+    return max;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/filter2/predicate/UserDefinedPredicate.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/filter2/predicate/UserDefinedPredicate.java b/parquet-column/src/main/java/parquet/filter2/predicate/UserDefinedPredicate.java
new file mode 100644
index 0000000..99f6c76
--- /dev/null
+++ b/parquet-column/src/main/java/parquet/filter2/predicate/UserDefinedPredicate.java
@@ -0,0 +1,90 @@
+package parquet.filter2.predicate;
+
+/**
+ * A UserDefinedPredicate decides whether a record should be kept or dropped, first by
+ * inspecting meta data about a group of records to see if the entire group can be dropped,
+ * then by inspecting actual values of a single column. These predicates can be combined into
+ * a complex boolean expression via the {@link FilterApi}.
+ *
+ * @param <T> The type of the column this predicate is applied to.
+ */
+// TODO: consider avoiding autoboxing and adding the specialized methods for each type
+// TODO: downside is that's fairly unwieldy for users
+public abstract class UserDefinedPredicate<T extends Comparable<T>> {
+
+  /**
+   * A udp must have a default constructor.
+   * The udp passed to {@link FilterApi} will not be serialized along with its state.
+   * Only its class name will be recorded; it will be instantiated reflectively via the default
+   * constructor.
+   */
+  public UserDefinedPredicate() { }
+
+  /**
+   * Return true to keep the record with this value, false to drop it.
+   */
+  public abstract boolean keep(T value);
+
+  /**
+   * Given information about a group of records (eg, the min and max value),
+   * return true to drop all the records in this group, false to keep them for further
+   * inspection. Returning false here will cause the records to be loaded and each value
+   * will be passed to {@link #keep} to make the final decision.
+   *
+   * It is safe to always return false here, if you simply want to visit each record via the {@link #keep} method,
+   * though it is much more efficient to drop entire chunks of records here if you can.
+   */
+  public abstract boolean canDrop(Statistics<T> statistics);
+
+  /**
+   * Same as {@link #canDrop} except this method describes the logical inverse
+   * behavior of this predicate. If this predicate is passed to the not() operator, then
+   * {@link #inverseCanDrop} will be called instead of {@link #canDrop}
+   *
+   * It is safe to always return false here, if you simply want to visit each record via the {@link #keep} method,
+   * though it is much more efficient to drop entire chunks of records here if you can.
+   *
+   * It may be valid to simply return !canDrop(statistics) but that is not always the case.
+   * To illustrate, look at this re-implementation of a UDP that checks for values greater than 7:
+   *
+   * {@code 
+   * 
+   * // This is just an example, you should use the built in {@link FilterApi#gt} operator instead of
+   * // implementing your own like this.
+   *  
+   * public class IntGreaterThan7UDP extends UserDefinedPredicate<Integer> {
+   *   @Override
+   *   public boolean keep(Integer value) {
+   *     // here we just check if the value is greater than 7.
+   *     // here, parquet knows that if the predicate not(columnX, IntGreaterThan7UDP) is being evaluated,
+   *     // it is safe to simply use !IntGreaterThan7UDP.keep(value)
+   *     return value > 7;
+   *   }
+   * 
+   *   @Override
+   *   public boolean canDrop(Statistics<Integer> statistics) {
+   *     // here we drop a group of records if they are all less than or equal to 7,
+   *     // (there can't possibly be any values greater than 7 in this group of records)
+   *     return statistics.getMax() <= 7;
+   *   }
+   * 
+   *   @Override
+   *   public boolean inverseCanDrop(Statistics<Integer> statistics) {
+   *     // here the predicate not(columnX, IntGreaterThan7UDP) is being evaluated, which means we want
+   *     // to keep all records whose value is not greater than 7, or, rephrased, whose value is less than or equal to 7.
+   *     // notice what would happen if parquet just tried to evaluate !IntGreaterThan7UDP.canDrop():
+   *     // !IntGreaterThan7UDP.canDrop(stats) == !(stats.getMax() <= 7) == (stats.getMax() > 7)
+   *     // it would drop the following group of records: [100, 1, 2, 3], even though this group of records contains values
+   *     // less than or equal to 7.
+   * 
+   *     // what we actually want to do is drop groups of records where the *min* is greater than 7, (not the max)
+   *     // for example: the group of records: [100, 8, 9, 10] has a min of 8, so there's no way there are going
+   *     // to be records with a value
+   *     // less than or equal to 7 in this group.
+   *     return statistics.getMin() > 7;
+   *   }
+   * }
+   * }
+   */
+  public abstract boolean inverseCanDrop(Statistics<T> statistics);
+}
\ No newline at end of file
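
To use a predicate like the IntGreaterThan7UDP above, only its class is handed to the FilterApi; it is instantiated via its default constructor as described in the javadoc. A sketch, assuming a FilterApi.userDefined(column, udpClass) factory matching the UserDefined operator added in this change:

  IntColumn x = intColumn("x");
  FilterPredicate pred = userDefined(x, IntGreaterThan7UDP.class);
  // equivalent in meaning to gt(x, 7), but evaluated through keep()/canDrop()/inverseCanDrop()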

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/filter2/predicate/ValidTypeMap.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/filter2/predicate/ValidTypeMap.java b/parquet-column/src/main/java/parquet/filter2/predicate/ValidTypeMap.java
new file mode 100644
index 0000000..6d216c3
--- /dev/null
+++ b/parquet-column/src/main/java/parquet/filter2/predicate/ValidTypeMap.java
@@ -0,0 +1,160 @@
+package parquet.filter2.predicate;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import parquet.common.schema.ColumnPath;
+import parquet.filter2.predicate.Operators.Column;
+import parquet.io.api.Binary;
+import parquet.schema.OriginalType;
+import parquet.schema.PrimitiveType.PrimitiveTypeName;
+
+/**
+ * Contains all valid mappings from class -> parquet type (and vice versa) for use in
+ * {@link FilterPredicate}s
+ *
+ * This is a bit ugly, but it allows us to provide good error messages at runtime
+ * when there are type mismatches.
+ *
+ * TODO: this has some overlap with {@link PrimitiveTypeName#javaType}
+ * TODO: (https://issues.apache.org/jira/browse/PARQUET-30)
+ */
+public class ValidTypeMap {
+  private ValidTypeMap() { }
+
+  // classToParquetType and parquetTypeToClass are used as a bi-directional map
+  private static final Map<Class<?>, Set<FullTypeDescriptor>> classToParquetType = new HashMap<Class<?>, Set<FullTypeDescriptor>>();
+  private static final Map<FullTypeDescriptor, Set<Class<?>>> parquetTypeToClass = new HashMap<FullTypeDescriptor, Set<Class<?>>>();
+
+  // set up the mapping in both directions
+  private static void add(Class<?> c, FullTypeDescriptor f) {
+    Set<FullTypeDescriptor> descriptors = classToParquetType.get(c);
+    if (descriptors == null) {
+      descriptors = new HashSet<FullTypeDescriptor>();
+      classToParquetType.put(c, descriptors);
+    }
+    descriptors.add(f);
+
+    Set<Class<?>> classes = parquetTypeToClass.get(f);
+    if (classes == null) {
+      classes = new HashSet<Class<?>>();
+      parquetTypeToClass.put(f, classes);
+    }
+    classes.add(c);
+  }
+
+  static {
+    // basic primitive columns
+    add(Integer.class, new FullTypeDescriptor(PrimitiveTypeName.INT32, null));
+    add(Long.class, new FullTypeDescriptor(PrimitiveTypeName.INT64, null));
+    add(Float.class, new FullTypeDescriptor(PrimitiveTypeName.FLOAT, null));
+    add(Double.class, new FullTypeDescriptor(PrimitiveTypeName.DOUBLE, null));
+    add(Boolean.class, new FullTypeDescriptor(PrimitiveTypeName.BOOLEAN, null));
+
+    // Both of these binary types are valid
+    add(Binary.class, new FullTypeDescriptor(PrimitiveTypeName.BINARY, null));
+    add(Binary.class, new FullTypeDescriptor(PrimitiveTypeName.FIXED_LEN_BYTE_ARRAY, null));
+
+    add(Binary.class, new FullTypeDescriptor(PrimitiveTypeName.BINARY, OriginalType.UTF8));
+    add(Binary.class, new FullTypeDescriptor(PrimitiveTypeName.FIXED_LEN_BYTE_ARRAY, OriginalType.UTF8));
+  }
+
+  /**
+   * Asserts that foundColumn was declared as a type that is compatible with the type for this column found
+   * in the schema of the parquet file.
+   *
+   * @throws java.lang.IllegalArgumentException if the types do not align
+   *
+   * @param foundColumn the column as declared by the user
+   * @param primitiveType the primitive type according to the schema
+   * @param originalType the original type according to the schema
+   */
+  public static <T extends Comparable<T>> void assertTypeValid(Column<T> foundColumn, PrimitiveTypeName primitiveType, OriginalType originalType) {
+    Class<T> foundColumnType = foundColumn.getColumnType();
+    ColumnPath columnPath = foundColumn.getColumnPath();
+
+    Set<FullTypeDescriptor> validTypeDescriptors = classToParquetType.get(foundColumnType);
+    FullTypeDescriptor typeInFileMetaData = new FullTypeDescriptor(primitiveType, originalType);
+
+    if (validTypeDescriptors == null) {
+      StringBuilder message = new StringBuilder();
+      message
+          .append("Column ")
+          .append(columnPath.toDotString())
+          .append(" was declared as type: ")
+          .append(foundColumnType.getName())
+          .append(" which is not supported in FilterPredicates.");
+
+      Set<Class<?>> supportedTypes = parquetTypeToClass.get(typeInFileMetaData);
+      if (supportedTypes != null) {
+        message
+          .append(" Supported types for this column are: ")
+          .append(supportedTypes);
+      } else {
+        message.append(" There are no supported types for columns of " + typeInFileMetaData);
+      }
+      throw new IllegalArgumentException(message.toString());
+    }
+
+    if (!validTypeDescriptors.contains(typeInFileMetaData)) {
+      StringBuilder message = new StringBuilder();
+      message
+          .append("FilterPredicate column: ")
+          .append(columnPath.toDotString())
+          .append("'s declared type (")
+          .append(foundColumnType.getName())
+          .append(") does not match the schema found in file metadata. Column ")
+          .append(columnPath.toDotString())
+          .append(" is of type: ")
+          .append(typeInFileMetaData)
+          .append("\nValid types for this column are: ")
+          .append(parquetTypeToClass.get(typeInFileMetaData));
+      throw new IllegalArgumentException(message.toString());
+    }
+  }
+
+  private static final class FullTypeDescriptor {
+    private final PrimitiveTypeName primitiveType;
+    private final OriginalType originalType;
+
+    private FullTypeDescriptor(PrimitiveTypeName primitiveType, OriginalType originalType) {
+      this.primitiveType = primitiveType;
+      this.originalType = originalType;
+    }
+
+    public PrimitiveTypeName getPrimitiveType() {
+      return primitiveType;
+    }
+
+    public OriginalType getOriginalType() {
+      return originalType;
+    }
+
+    @Override
+    public String toString() {
+      return "FullTypeDescriptor(" + "PrimitiveType: " + primitiveType + ", OriginalType: " + originalType + ')';
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (o == null || getClass() != o.getClass()) return false;
+
+      FullTypeDescriptor that = (FullTypeDescriptor) o;
+
+      if (originalType != that.originalType) return false;
+      if (primitiveType != that.primitiveType) return false;
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      int result = primitiveType != null ? primitiveType.hashCode() : 0;
+      result = 31 * result + (originalType != null ? originalType.hashCode() : 0);
+      return result;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/filter2/recordlevel/FilteringGroupConverter.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/filter2/recordlevel/FilteringGroupConverter.java b/parquet-column/src/main/java/parquet/filter2/recordlevel/FilteringGroupConverter.java
new file mode 100644
index 0000000..4720854
--- /dev/null
+++ b/parquet-column/src/main/java/parquet/filter2/recordlevel/FilteringGroupConverter.java
@@ -0,0 +1,97 @@
+package parquet.filter2.recordlevel;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import parquet.common.schema.ColumnPath;
+import parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicate.ValueInspector;
+import parquet.io.PrimitiveColumnIO;
+import parquet.io.api.Converter;
+import parquet.io.api.GroupConverter;
+
+import static parquet.Preconditions.checkArgument;
+import static parquet.Preconditions.checkNotNull;
+
+/**
+ * See {@link FilteringRecordMaterializer}
+ */
+public class FilteringGroupConverter extends GroupConverter {
+  // the real converter
+  private final GroupConverter delegate;
+
+  // the path, from the root of the schema, to this converter
+  // used ultimately by the primitive converter proxy to figure
+  // out which column it represents.
+  private final List<Integer> indexFieldPath;
+
+  // for a given column, which nodes in the filter expression need
+  // to be notified of this column's value
+  private final Map<ColumnPath, List<ValueInspector>> valueInspectorsByColumn;
+
+  // used to go from our indexFieldPath to the PrimitiveColumnIO for that column
+  private final Map<List<Integer>, PrimitiveColumnIO> columnIOsByIndexFieldPath;
+
+  public FilteringGroupConverter(
+      GroupConverter delegate,
+      List<Integer> indexFieldPath,
+      Map<ColumnPath, List<ValueInspector>> valueInspectorsByColumn, Map<List<Integer>,
+      PrimitiveColumnIO> columnIOsByIndexFieldPath) {
+
+    this.delegate = checkNotNull(delegate, "delegate");
+    this.indexFieldPath = checkNotNull(indexFieldPath, "indexFieldPath");
+    this.columnIOsByIndexFieldPath = checkNotNull(columnIOsByIndexFieldPath, "columnIOsByIndexFieldPath");
+    this.valueInspectorsByColumn = checkNotNull(valueInspectorsByColumn, "valueInspectorsByColumn");
+  }
+
+  // When a converter is asked for, we get the real one from the delegate, then wrap it
+  // in a filtering pass-through proxy.
+  // TODO: making the assumption that getConverter(i) is only called once, is that valid?
+  @Override
+  public Converter getConverter(int fieldIndex) {
+
+    // get the real converter from the delegate
+    Converter delegateConverter = checkNotNull(delegate.getConverter(fieldIndex), "delegate converter");
+
+    // determine the indexFieldPath for the converter proxy we're about to make, which is
+    // this converter's path + the requested fieldIndex
+    List<Integer> newIndexFieldPath = new ArrayList<Integer>(indexFieldPath.size() + 1);
+    newIndexFieldPath.addAll(indexFieldPath);
+    newIndexFieldPath.add(fieldIndex);
+
+    if (delegateConverter.isPrimitive()) {
+      PrimitiveColumnIO columnIO = getColumnIO(newIndexFieldPath);
+      ColumnPath columnPath = ColumnPath.get(columnIO.getColumnDescriptor().getPath());
+      ValueInspector[] valueInspectors = getValueInspectors(columnPath);
+      return new FilteringPrimitiveConverter(delegateConverter.asPrimitiveConverter(), valueInspectors);
+    } else {
+      return new FilteringGroupConverter(delegateConverter.asGroupConverter(), newIndexFieldPath, valueInspectorsByColumn, columnIOsByIndexFieldPath);
+    }
+
+  }
+
+  private PrimitiveColumnIO getColumnIO(List<Integer> indexFieldPath) {
+    PrimitiveColumnIO found = columnIOsByIndexFieldPath.get(indexFieldPath);
+    checkArgument(found != null, "Did not find PrimitiveColumnIO for index field path " + indexFieldPath);
+    return found;
+  }
+
+  private ValueInspector[] getValueInspectors(ColumnPath columnPath) {
+    List<ValueInspector> inspectorsList = valueInspectorsByColumn.get(columnPath);
+    if (inspectorsList == null) {
+      return new ValueInspector[] {};
+    } else {
+      return inspectorsList.toArray(new ValueInspector[inspectorsList.size()]);
+    }
+  }
+
+  @Override
+  public void start() {
+    delegate.start();
+  }
+
+  @Override
+  public void end() {
+    delegate.end();
+  }
+}


[3/4] Add a unified and optionally more constrained API for expressing filters on columns

Posted by ju...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/filter2/recordlevel/FilteringPrimitiveConverter.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/filter2/recordlevel/FilteringPrimitiveConverter.java b/parquet-column/src/main/java/parquet/filter2/recordlevel/FilteringPrimitiveConverter.java
new file mode 100644
index 0000000..4cdedf2
--- /dev/null
+++ b/parquet-column/src/main/java/parquet/filter2/recordlevel/FilteringPrimitiveConverter.java
@@ -0,0 +1,91 @@
+package parquet.filter2.recordlevel;
+
+import parquet.column.Dictionary;
+import parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicate.ValueInspector;
+import parquet.io.api.Binary;
+import parquet.io.api.PrimitiveConverter;
+
+import static parquet.Preconditions.checkNotNull;
+
+/**
+ * see {@link FilteringRecordMaterializer}
+ *
+ * This pass-through proxy for a delegate {@link PrimitiveConverter} also
+ * updates the {@link ValueInspector}s of a {@link IncrementallyUpdatedFilterPredicate}
+ */
+public class FilteringPrimitiveConverter extends PrimitiveConverter {
+  private final PrimitiveConverter delegate;
+  private final ValueInspector[] valueInspectors;
+
+  public FilteringPrimitiveConverter(PrimitiveConverter delegate, ValueInspector[] valueInspectors) {
+    this.delegate = checkNotNull(delegate, "delegate");
+    this.valueInspectors = checkNotNull(valueInspectors, "valueInspectors");
+  }
+
+  // TODO: this works, but
+  // TODO: essentially turns off the benefits of dictionary support
+  // TODO: even if the underlying delegate supports it.
+  // TODO: we should support it here. (https://issues.apache.org/jira/browse/PARQUET-36)
+  @Override
+  public boolean hasDictionarySupport() {
+    return false;
+  }
+
+  @Override
+  public void setDictionary(Dictionary dictionary) {
+    throw new UnsupportedOperationException("FilteringPrimitiveConverter doesn't have dictionary support");
+  }
+
+  @Override
+  public void addValueFromDictionary(int dictionaryId) {
+    throw new UnsupportedOperationException("FilteringPrimitiveConverter doesn't have dictionary support");
+  }
+
+  @Override
+  public void addBinary(Binary value) {
+    for (ValueInspector valueInspector : valueInspectors) {
+      valueInspector.update(value);
+    }
+    delegate.addBinary(value);
+  }
+
+  @Override
+  public void addBoolean(boolean value) {
+    for (ValueInspector valueInspector : valueInspectors) {
+      valueInspector.update(value);
+    }
+    delegate.addBoolean(value);
+  }
+
+  @Override
+  public void addDouble(double value) {
+    for (ValueInspector valueInspector : valueInspectors) {
+      valueInspector.update(value);
+    }
+    delegate.addDouble(value);
+  }
+
+  @Override
+  public void addFloat(float value) {
+    for (ValueInspector valueInspector : valueInspectors) {
+      valueInspector.update(value);
+    }
+    delegate.addFloat(value);
+  }
+
+  @Override
+  public void addInt(int value) {
+    for (ValueInspector valueInspector : valueInspectors) {
+      valueInspector.update(value);
+    }
+    delegate.addInt(value);
+  }
+
+  @Override
+  public void addLong(long value) {
+    for (ValueInspector valueInspector : valueInspectors) {
+      valueInspector.update(value);
+    }
+    delegate.addLong(value);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/filter2/recordlevel/FilteringRecordMaterializer.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/filter2/recordlevel/FilteringRecordMaterializer.java b/parquet-column/src/main/java/parquet/filter2/recordlevel/FilteringRecordMaterializer.java
new file mode 100644
index 0000000..41dd5d3
--- /dev/null
+++ b/parquet-column/src/main/java/parquet/filter2/recordlevel/FilteringRecordMaterializer.java
@@ -0,0 +1,97 @@
+package parquet.filter2.recordlevel;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import parquet.common.schema.ColumnPath;
+import parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicate.ValueInspector;
+import parquet.io.PrimitiveColumnIO;
+import parquet.io.api.GroupConverter;
+import parquet.io.api.RecordMaterializer;
+
+import static parquet.Preconditions.checkNotNull;
+
+/**
+ * A pass-through proxy for a {@link RecordMaterializer} that updates a {@link IncrementallyUpdatedFilterPredicate}
+ * as it receives concrete values for the current record. If, after the record assembly signals that
+ * there are no more values, the predicate indicates that this record should be dropped, {@link #getCurrentRecord()}
+ * returns null to signal that this record is being skipped.
+ * Otherwise, the record is retrieved from the delegate.
+ */
+public class FilteringRecordMaterializer<T> extends RecordMaterializer<T> {
+  // the real record materializer
+  private final RecordMaterializer<T> delegate;
+
+  // the proxied root converter
+  private final FilteringGroupConverter rootConverter;
+
+  // the predicate
+  private final IncrementallyUpdatedFilterPredicate filterPredicate;
+
+  public FilteringRecordMaterializer(
+      RecordMaterializer<T> delegate,
+      List<PrimitiveColumnIO> columnIOs,
+      Map<ColumnPath, List<ValueInspector>> valueInspectorsByColumn,
+      IncrementallyUpdatedFilterPredicate filterPredicate) {
+
+    checkNotNull(columnIOs, "columnIOs");
+    checkNotNull(valueInspectorsByColumn, "valueInspectorsByColumn");
+    this.filterPredicate = checkNotNull(filterPredicate, "filterPredicate");
+    this.delegate = checkNotNull(delegate, "delegate");
+
+    // keep track of which path of indices leads to which primitive column
+    Map<List<Integer>, PrimitiveColumnIO> columnIOsByIndexFieldPath = new HashMap<List<Integer>, PrimitiveColumnIO>();
+
+    for (PrimitiveColumnIO c : columnIOs) {
+      columnIOsByIndexFieldPath.put(getIndexFieldPathList(c), c);
+    }
+
+    // create a proxy for the delegate's root converter
+    this.rootConverter = new FilteringGroupConverter(
+        delegate.getRootConverter(), Collections.<Integer>emptyList(), valueInspectorsByColumn, columnIOsByIndexFieldPath);
+  }
+
+  public static List<Integer> getIndexFieldPathList(PrimitiveColumnIO c) {
+    return intArrayToList(c.getIndexFieldPath());
+  }
+
+  public static List<Integer> intArrayToList(int[] arr) {
+    List<Integer> list = new ArrayList<Integer>(arr.length);
+    for (int i : arr) {
+      list.add(i);
+    }
+    return list;
+  }
+
+
+
+  @Override
+  public T getCurrentRecord() {
+
+    // find out if the predicate thinks we should keep this record
+    boolean keep = IncrementallyUpdatedFilterPredicateEvaluator.evaluate(filterPredicate);
+
+    // reset the stateful predicate no matter what
+    IncrementallyUpdatedFilterPredicateResetter.reset(filterPredicate);
+
+    if (keep) {
+      return delegate.getCurrentRecord();
+    } else {
+      // signals a skip
+      return null;
+    }
+  }
+
+  @Override
+  public void skipCurrentRecord() {
+    delegate.skipCurrentRecord();
+  }
+
+  @Override
+  public GroupConverter getRootConverter() {
+    return rootConverter;
+  }
+}
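
A sketch of how a caller is expected to interpret the null return (the surrounding read loop is hypothetical):

  T record = materializer.getCurrentRecord();
  if (record == null) {
    // the predicate dropped this record; advance and assemble the next one
  } else {
    // the record passed the filter and can be emitted
  }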

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicate.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicate.java b/parquet-column/src/main/java/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicate.java
new file mode 100644
index 0000000..457f0c9
--- /dev/null
+++ b/parquet-column/src/main/java/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicate.java
@@ -0,0 +1,139 @@
+package parquet.filter2.recordlevel;
+
+import parquet.io.api.Binary;
+
+import static parquet.Preconditions.checkNotNull;
+
+/**
+ * A rewritten version of a {@link parquet.filter2.predicate.FilterPredicate} which receives
+ * the values for a record's columns one by one and internally tracks whether the predicate is
+ * satisfied, unsatisfied, or unknown.
+ *
+ * This is used to apply a predicate during record assembly, without assembling a second copy of
+ * a record, and without building a stack of update events.
+ *
+ * IncrementallyUpdatedFilterPredicate is implemented via the visitor pattern, as is
+ * {@link parquet.filter2.predicate.FilterPredicate}
+ */
+public interface IncrementallyUpdatedFilterPredicate {
+
+  /**
+   * A Visitor for an {@link IncrementallyUpdatedFilterPredicate}, per the visitor pattern.
+   */
+  public static interface Visitor {
+    boolean visit(ValueInspector p);
+    boolean visit(And and);
+    boolean visit(Or or);
+  }
+
+  /**
+   * A {@link IncrementallyUpdatedFilterPredicate} must accept a {@link Visitor}, per the visitor pattern.
+   */
+  boolean accept(Visitor visitor);
+
+  /**
+   * This is the leaf node of a filter predicate. It receives the value for the primitive column it represents,
+   * and decides whether or not the predicate represented by this node is satisfied.
+   *
+   * It is stateful, and needs to be reset after use.
+   */
+  public static abstract class ValueInspector implements IncrementallyUpdatedFilterPredicate {
+    // package private constructor
+    ValueInspector() { }
+
+    private boolean result = false;
+    private boolean isKnown = false;
+
+    // these methods signal what the value is
+    public void updateNull() { throw new UnsupportedOperationException(); }
+    public void update(int value) { throw new UnsupportedOperationException(); }
+    public void update(long value) { throw new UnsupportedOperationException(); }
+    public void update(double value) { throw new UnsupportedOperationException(); }
+    public void update(float value) { throw new UnsupportedOperationException(); }
+    public void update(boolean value) { throw new UnsupportedOperationException(); }
+    public void update(Binary value) { throw new UnsupportedOperationException(); }
+
+    /**
+     * Reset to clear state and begin evaluating the next record.
+     */
+    public final void reset() {
+      isKnown = false;
+      result = false;
+    }
+
+    /**
+     * Subclasses should call this method to signal that the result of this predicate is known.
+     */
+    protected final void setResult(boolean result) {
+      if (isKnown) {
+        throw new IllegalStateException("setResult() called on a ValueInspector whose result is already known!"
+          + " Did you forget to call reset()?");
+      }
+      this.result = result;
+      this.isKnown = true;
+    }
+
+    /**
+     * Should only be called if {@link #isKnown} returns true.
+     */
+    public final boolean getResult() {
+      if (!isKnown) {
+        throw new IllegalStateException("getResult() called on a ValueInspector whose result is not yet known!");
+      }
+      return result;
+    }
+
+    /**
+     * Return true if this inspector has received a value for the current record, false otherwise.
+     */
+    public final boolean isKnown() {
+      return isKnown;
+    }
+
+    @Override
+    public boolean accept(Visitor visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  // base class for and / or
+  static abstract class BinaryLogical implements IncrementallyUpdatedFilterPredicate {
+    private final IncrementallyUpdatedFilterPredicate left;
+    private final IncrementallyUpdatedFilterPredicate right;
+
+    BinaryLogical(IncrementallyUpdatedFilterPredicate left, IncrementallyUpdatedFilterPredicate right) {
+      this.left = checkNotNull(left, "left");
+      this.right = checkNotNull(right, "right");
+    }
+
+    public final IncrementallyUpdatedFilterPredicate getLeft() {
+      return left;
+    }
+
+    public final IncrementallyUpdatedFilterPredicate getRight() {
+      return right;
+    }
+  }
+
+  public static final class Or extends BinaryLogical {
+    Or(IncrementallyUpdatedFilterPredicate left, IncrementallyUpdatedFilterPredicate right) {
+      super(left, right);
+    }
+
+    @Override
+    public boolean accept(Visitor visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  public static final class And extends BinaryLogical {
+    And(IncrementallyUpdatedFilterPredicate left, IncrementallyUpdatedFilterPredicate right) {
+      super(left, right);
+    }
+
+    @Override
+    public boolean accept(Visitor visitor) {
+      return visitor.visit(this);
+    }
+  }
+}
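
As a rough illustration of the contract above (not part of this patch; the real inspectors are code-generated by the builder described next, and the sketch assumes it lives in the same package since the ValueInspector constructor is package-private), a leaf that checks eq(foo, 50) on an int column could be written and driven like this:

    ValueInspector eqFoo50 = new ValueInspector() {
      @Override
      public void updateNull() {
        setResult(false); // a null value can never equal 50
      }

      @Override
      public void update(int value) {
        setResult(value == 50);
      }
    };

    // per-record cycle: clear state, feed the column's value, read the verdict
    eqFoo50.reset();
    eqFoo50.update(50);
    boolean satisfied = eqFoo50.isKnown() && eqFoo50.getResult(); // true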

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateBuilderBase.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateBuilderBase.java b/parquet-column/src/main/java/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateBuilderBase.java
new file mode 100644
index 0000000..9481738
--- /dev/null
+++ b/parquet-column/src/main/java/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateBuilderBase.java
@@ -0,0 +1,79 @@
+package parquet.filter2.recordlevel;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import parquet.common.schema.ColumnPath;
+import parquet.filter2.predicate.FilterPredicate;
+import parquet.filter2.predicate.FilterPredicate.Visitor;
+import parquet.filter2.predicate.Operators.And;
+import parquet.filter2.predicate.Operators.Not;
+import parquet.filter2.predicate.Operators.Or;
+import parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicate.ValueInspector;
+
+import static parquet.Preconditions.checkArgument;
+
+/**
+ * The implementation of this abstract class is auto-generated by
+ * {@link parquet.filter2.IncrementallyUpdatedFilterPredicateGenerator}
+ *
+ * Constructs an {@link IncrementallyUpdatedFilterPredicate} from a {@link parquet.filter2.predicate.FilterPredicate}.
+ * This is how records are filtered during record assembly. The implementation is generated in order to avoid autoboxing.
+ *
+ * Note: the supplied predicate must not contain any instances of the not() operator, as this is not
+ * supported by this filter.
+ *
+ * The supplied predicate should first be run through {@link parquet.filter2.predicate.LogicalInverseRewriter} to rewrite it
+ * into a form that doesn't make use of the not() operator.
+ *
+ * The supplied predicate should also have already been run through
+ * {@link parquet.filter2.predicate.SchemaCompatibilityValidator}
+ * to make sure it is compatible with the schema of this file.
+ *
+ * TODO: UserDefinedPredicates still autobox, however.
+ */
+public abstract class IncrementallyUpdatedFilterPredicateBuilderBase implements Visitor<IncrementallyUpdatedFilterPredicate> {
+  private boolean built = false;
+  private final Map<ColumnPath, List<ValueInspector>> valueInspectorsByColumn = new HashMap<ColumnPath, List<ValueInspector>>();
+
+  public IncrementallyUpdatedFilterPredicateBuilderBase() { }
+
+  public final IncrementallyUpdatedFilterPredicate build(FilterPredicate pred) {
+    checkArgument(!built, "This builder has already been used");
+    IncrementallyUpdatedFilterPredicate incremental = pred.accept(this);
+    built = true;
+    return incremental;
+  }
+
+  protected final void addValueInspector(ColumnPath columnPath, ValueInspector valueInspector) {
+    List<ValueInspector> valueInspectors = valueInspectorsByColumn.get(columnPath);
+    if (valueInspectors == null) {
+      valueInspectors = new ArrayList<ValueInspector>();
+      valueInspectorsByColumn.put(columnPath, valueInspectors);
+    }
+    valueInspectors.add(valueInspector);
+  }
+
+  public Map<ColumnPath, List<ValueInspector>> getValueInspectorsByColumn() {
+    return valueInspectorsByColumn;
+  }
+
+  @Override
+  public final IncrementallyUpdatedFilterPredicate visit(And and) {
+    return new IncrementallyUpdatedFilterPredicate.And(and.getLeft().accept(this), and.getRight().accept(this));
+  }
+
+  @Override
+  public final IncrementallyUpdatedFilterPredicate visit(Or or) {
+    return new IncrementallyUpdatedFilterPredicate.Or(or.getLeft().accept(this), or.getRight().accept(this));
+  }
+
+  @Override
+  public final IncrementallyUpdatedFilterPredicate visit(Not not) {
+    throw new IllegalArgumentException(
+        "This predicate contains a not! Did you forget to run this predicate through LogicalInverseRewriter? " + not);
+  }
+
+}
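
The generated subclass fills in the leaf visits; for an int eq() they follow roughly this shape (a hand-written sketch, assuming FilterPredicate.Visitor exposes a generic visit(Eq<T>) method; the actual generated code covers every primitive type and operator, and keeps the comparison unboxed rather than going through Object as done here):

    @Override
    public <T extends Comparable<T>> IncrementallyUpdatedFilterPredicate visit(Eq<T> eq) {
      ColumnPath columnPath = eq.getColumn().getColumnPath();
      final Object target = eq.getValue(); // eq(column, null) means "is null"

      ValueInspector valueInspector = new ValueInspector() {
        @Override
        public void updateNull() {
          setResult(target == null);
        }

        @Override
        public void update(int value) {
          setResult(target != null && ((Integer) target) == value);
        }
      };

      addValueInspector(columnPath, valueInspector);
      return valueInspector;
    }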

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateEvaluator.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateEvaluator.java b/parquet-column/src/main/java/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateEvaluator.java
new file mode 100644
index 0000000..7536d8e
--- /dev/null
+++ b/parquet-column/src/main/java/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateEvaluator.java
@@ -0,0 +1,45 @@
+package parquet.filter2.recordlevel;
+
+import parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicate.And;
+import parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicate.Or;
+import parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicate.ValueInspector;
+import parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicate.Visitor;
+
+import static parquet.Preconditions.checkNotNull;
+
+/**
+ * Determines whether an {@link IncrementallyUpdatedFilterPredicate} is satisfied or not.
+ * This implementation makes the assumption that all {@link ValueInspector}s in an unknown state
+ * represent columns with a null value, and updates them accordingly.
+ *
+ * TODO: We could also build an evaluator that detects if enough values are known to determine the outcome
+ * TODO: of the predicate and quit the record assembly early. (https://issues.apache.org/jira/browse/PARQUET-37)
+ */
+public class IncrementallyUpdatedFilterPredicateEvaluator implements Visitor {
+  private static final IncrementallyUpdatedFilterPredicateEvaluator INSTANCE = new IncrementallyUpdatedFilterPredicateEvaluator();
+
+  public static boolean evaluate(IncrementallyUpdatedFilterPredicate pred) {
+    checkNotNull(pred, "pred");
+    return pred.accept(INSTANCE);
+  }
+
+  private IncrementallyUpdatedFilterPredicateEvaluator() {}
+
+  @Override
+  public boolean visit(ValueInspector p) {
+    if (!p.isKnown()) {
+      p.updateNull();
+    }
+    return p.getResult();
+  }
+
+  @Override
+  public boolean visit(And and) {
+    return and.getLeft().accept(this) && and.getRight().accept(this);
+  }
+
+  @Override
+  public boolean visit(Or or) {
+    return or.getLeft().accept(this) || or.getRight().accept(this);
+  }
+}
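
End to end, one record's worth of filtering with the pieces above looks roughly like this (a usage sketch; it assumes static imports of FilterApi.eq and FilterApi.intColumn, and that the generated IncrementallyUpdatedFilterPredicateBuilder is on the classpath):

    IncrementallyUpdatedFilterPredicateBuilder builder = new IncrementallyUpdatedFilterPredicateBuilder();
    IncrementallyUpdatedFilterPredicate pred = builder.build(eq(intColumn("foo"), 50));

    // during assembly, converters push values only for columns present in this record;
    // inspectors that were never updated are treated as nulls by the evaluator
    for (ValueInspector v : builder.getValueInspectorsByColumn().get(ColumnPath.get("foo"))) {
      v.update(50);
    }

    boolean keep = IncrementallyUpdatedFilterPredicateEvaluator.evaluate(pred); // true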

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateResetter.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateResetter.java b/parquet-column/src/main/java/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateResetter.java
new file mode 100644
index 0000000..c75ef45
--- /dev/null
+++ b/parquet-column/src/main/java/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateResetter.java
@@ -0,0 +1,42 @@
+package parquet.filter2.recordlevel;
+
+import parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicate.And;
+import parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicate.Or;
+import parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicate.ValueInspector;
+import parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicate.Visitor;
+
+import static parquet.Preconditions.checkNotNull;
+
+/**
+ * Resets all the {@link ValueInspector}s in an {@link IncrementallyUpdatedFilterPredicate}.
+ */
+public final class IncrementallyUpdatedFilterPredicateResetter implements Visitor {
+  private static final IncrementallyUpdatedFilterPredicateResetter INSTANCE = new IncrementallyUpdatedFilterPredicateResetter();
+
+  public static void reset(IncrementallyUpdatedFilterPredicate pred) {
+    checkNotNull(pred, "pred");
+    pred.accept(INSTANCE);
+  }
+
+  private IncrementallyUpdatedFilterPredicateResetter() { }
+
+  @Override
+  public boolean visit(ValueInspector p) {
+    p.reset();
+    return false;
+  }
+
+  @Override
+  public boolean visit(And and) {
+    and.getLeft().accept(this);
+    and.getRight().accept(this);
+    return false;
+  }
+
+  @Override
+  public boolean visit(Or or) {
+    or.getLeft().accept(this);
+    or.getRight().accept(this);
+    return false;
+  }
+}
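
The same predicate tree is reused across records, so the leaves are cleared before each new record, e.g.:

    // before assembling the next record
    IncrementallyUpdatedFilterPredicateResetter.reset(pred); // every ValueInspector returns to the unknown state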

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/io/FilteredRecordReader.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/io/FilteredRecordReader.java b/parquet-column/src/main/java/parquet/io/FilteredRecordReader.java
index b6239cb..a1a51c2 100644
--- a/parquet-column/src/main/java/parquet/io/FilteredRecordReader.java
+++ b/parquet-column/src/main/java/parquet/io/FilteredRecordReader.java
@@ -62,6 +62,12 @@ class FilteredRecordReader<T> extends RecordReaderImplementation<T> {
     return super.read();
   }
 
+  // FilteredRecordReader skips forward itself; it never asks the layer above to do the skipping for it.
+  // This is different from how filtering is handled in the filter2 API.
+  @Override
+  public boolean shouldSkipCurrentRecord() {
+    return false;
+  }
 
   /**
    * Skips forwards until the filter finds the first match. Returns false

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/io/MessageColumnIO.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/io/MessageColumnIO.java b/parquet-column/src/main/java/parquet/io/MessageColumnIO.java
index c1ffbe6..bc048b0 100644
--- a/parquet-column/src/main/java/parquet/io/MessageColumnIO.java
+++ b/parquet-column/src/main/java/parquet/io/MessageColumnIO.java
@@ -25,11 +25,23 @@ import parquet.column.ColumnWriter;
 import parquet.column.impl.ColumnReadStoreImpl;
 import parquet.column.page.PageReadStore;
 import parquet.filter.UnboundRecordFilter;
+import parquet.filter2.compat.FilterCompat;
+import parquet.filter2.compat.FilterCompat.Filter;
+import parquet.filter2.compat.FilterCompat.FilterPredicateCompat;
+import parquet.filter2.compat.FilterCompat.NoOpFilter;
+import parquet.filter2.compat.FilterCompat.UnboundRecordFilterCompat;
+import parquet.filter2.compat.FilterCompat.Visitor;
+import parquet.filter2.predicate.FilterPredicate;
+import parquet.filter2.recordlevel.FilteringRecordMaterializer;
+import parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicate;
+import parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicateBuilder;
 import parquet.io.api.Binary;
 import parquet.io.api.RecordConsumer;
 import parquet.io.api.RecordMaterializer;
 import parquet.schema.MessageType;
 
+import static parquet.Preconditions.checkNotNull;
+
 /**
  * Message level of the IO structure
  *
@@ -55,32 +67,74 @@ public class MessageColumnIO extends GroupColumnIO {
     return super.getColumnNames();
   }
 
-  public <T> RecordReader<T> getRecordReader(PageReadStore columns, RecordMaterializer<T> recordMaterializer) {
-    if (leaves.size() > 0) {
-      return new RecordReaderImplementation<T>(
-        this,
-        recordMaterializer,
-        validating,
-        new ColumnReadStoreImpl(columns, recordMaterializer.getRootConverter(), getType())
-      );
-    } else {
+  public <T> RecordReader<T> getRecordReader(PageReadStore columns,
+                                             RecordMaterializer<T> recordMaterializer) {
+    return getRecordReader(columns, recordMaterializer, FilterCompat.NOOP);
+  }
+
+  /**
+   * @deprecated use {@link #getRecordReader(PageReadStore, RecordMaterializer, Filter)}
+   */
+  @Deprecated
+  public <T> RecordReader<T> getRecordReader(PageReadStore columns,
+                                             RecordMaterializer<T> recordMaterializer,
+                                             UnboundRecordFilter filter) {
+    return getRecordReader(columns, recordMaterializer, FilterCompat.get(filter));
+  }
+
+  public <T> RecordReader<T> getRecordReader(final PageReadStore columns,
+                                             final RecordMaterializer<T> recordMaterializer,
+                                             final Filter filter) {
+    checkNotNull(columns, "columns");
+    checkNotNull(recordMaterializer, "recordMaterializer");
+    checkNotNull(filter, "filter");
+
+    if (leaves.isEmpty()) {
       return new EmptyRecordReader<T>(recordMaterializer);
     }
-  }
 
-  public <T> RecordReader<T> getRecordReader(PageReadStore columns, RecordMaterializer<T> recordMaterializer,
-                                             UnboundRecordFilter unboundFilter) {
-
-    return (unboundFilter == null)
-      ? getRecordReader(columns, recordMaterializer)
-      : new FilteredRecordReader<T>(
-        this,
-        recordMaterializer,
-        validating,
-        new ColumnReadStoreImpl(columns, recordMaterializer.getRootConverter(), getType()),
-        unboundFilter,
-        columns.getRowCount()
-    );
+    return filter.accept(new Visitor<RecordReader<T>>() {
+      @Override
+      public RecordReader<T> visit(FilterPredicateCompat filterPredicateCompat) {
+
+        FilterPredicate predicate = filterPredicateCompat.getFilterPredicate();
+        IncrementallyUpdatedFilterPredicateBuilder builder = new IncrementallyUpdatedFilterPredicateBuilder();
+        IncrementallyUpdatedFilterPredicate streamingPredicate = builder.build(predicate);
+        RecordMaterializer<T> filteringRecordMaterializer = new FilteringRecordMaterializer<T>(
+            recordMaterializer,
+            leaves,
+            builder.getValueInspectorsByColumn(),
+            streamingPredicate);
+
+        return new RecordReaderImplementation<T>(
+            MessageColumnIO.this,
+            filteringRecordMaterializer,
+            validating,
+            new ColumnReadStoreImpl(columns, filteringRecordMaterializer.getRootConverter(), getType()));
+      }
+
+      @Override
+      public RecordReader<T> visit(UnboundRecordFilterCompat unboundRecordFilterCompat) {
+        return new FilteredRecordReader<T>(
+            MessageColumnIO.this,
+            recordMaterializer,
+            validating,
+            new ColumnReadStoreImpl(columns, recordMaterializer.getRootConverter(), getType()),
+            unboundRecordFilterCompat.getUnboundRecordFilter(),
+            columns.getRowCount()
+        );
+
+      }
+
+      @Override
+      public RecordReader<T> visit(NoOpFilter noOpFilter) {
+        return new RecordReaderImplementation<T>(
+            MessageColumnIO.this,
+            recordMaterializer,
+            validating,
+            new ColumnReadStoreImpl(columns, recordMaterializer.getRootConverter(), getType()));
+      }
+    });
   }
 
   private class MessageColumnIORecordConsumer extends RecordConsumer {
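
From the caller's point of view, the new overload takes any of the three Filter variants; a minimal sketch (assuming the caller already has a PageReadStore and a RecordMaterializer for the file's schema, and statically imports FilterApi.intColumn and FilterApi.notEq):

    static <T> RecordReader<T> openFilteredReader(MessageType schema,
                                                  PageReadStore pages,
                                                  RecordMaterializer<T> materializer) {
      MessageColumnIO columnIO = new ColumnIOFactory().getColumnIO(schema);
      IntColumn foo = intColumn("foo");                  // hypothetical column in the file's schema
      Filter filter = FilterCompat.get(notEq(foo, null)); // keep only records where foo is non-null
      return columnIO.getRecordReader(pages, materializer, filter);
    }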

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/io/RecordReader.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/io/RecordReader.java b/parquet-column/src/main/java/parquet/io/RecordReader.java
index f01b02a..e0cfeb6 100644
--- a/parquet-column/src/main/java/parquet/io/RecordReader.java
+++ b/parquet-column/src/main/java/parquet/io/RecordReader.java
@@ -25,9 +25,16 @@ package parquet.io;
 public abstract class RecordReader<T> {
 
   /**
-   * reads one record and returns it
+   * Reads one record and returns it.
    * @return the materialized record
    */
   public abstract T read();
 
+  /**
+   * Returns whether the current record should be skipped (dropped).
+   * Will be called *after* read().
+   */
+  public boolean shouldSkipCurrentRecord() {
+    return false;
+  }
 }
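
Callers drive the reader with a check after each read, along these lines (a sketch; rowCount comes from the PageReadStore, and process() is a hypothetical stand-in for whatever the caller does with a record):

    <T> void drainRowGroup(RecordReader<T> reader, long rowCount) {
      for (long i = 0; i < rowCount; i++) {
        T record = reader.read();
        if (reader.shouldSkipCurrentRecord()) {
          continue; // assembled but filtered out; not an end-of-input signal
        }
        process(record); // hypothetical downstream handling
      }
    }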

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/io/RecordReaderImplementation.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/io/RecordReaderImplementation.java b/parquet-column/src/main/java/parquet/io/RecordReaderImplementation.java
index c5d7da7..247900e 100644
--- a/parquet-column/src/main/java/parquet/io/RecordReaderImplementation.java
+++ b/parquet-column/src/main/java/parquet/io/RecordReaderImplementation.java
@@ -234,6 +234,8 @@ class RecordReaderImplementation<T> extends RecordReader<T> {
   private State[] states;
   private ColumnReader[] columnReaders;
 
+  private boolean shouldSkipCurrentRecord = false;
+
   /**
    * @param root the root of the schema
    * @param recordMaterializer responsible of materializing the records
@@ -411,7 +413,17 @@ class RecordReaderImplementation<T> extends RecordReader<T> {
       currentState = currentState.nextState[nextR];
     } while (currentState != null);
     recordRootConverter.end();
-    return recordMaterializer.getCurrentRecord();
+    T record = recordMaterializer.getCurrentRecord();
+    shouldSkipCurrentRecord = record == null;
+    if (shouldSkipCurrentRecord) {
+      recordMaterializer.skipCurrentRecord();
+    }
+    return record;
+  }
+
+  @Override
+  public boolean shouldSkipCurrentRecord() {
+    return shouldSkipCurrentRecord;
   }
 
   private static void log(String string) {

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/io/api/Binary.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/io/api/Binary.java b/parquet-column/src/main/java/parquet/io/api/Binary.java
index 1ef23fb..432f075 100644
--- a/parquet-column/src/main/java/parquet/io/api/Binary.java
+++ b/parquet-column/src/main/java/parquet/io/api/Binary.java
@@ -15,11 +15,11 @@
  */
 package parquet.io.api;
 
-import static parquet.bytes.BytesUtils.UTF8;
-
 import java.io.DataOutput;
 import java.io.IOException;
+import java.io.ObjectStreamException;
 import java.io.OutputStream;
+import java.io.Serializable;
 import java.io.UnsupportedEncodingException;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
@@ -27,230 +27,325 @@ import java.util.Arrays;
 import parquet.bytes.BytesUtils;
 import parquet.io.ParquetEncodingException;
 
-abstract public class Binary {
+import static parquet.bytes.BytesUtils.UTF8;
+
+abstract public class Binary implements Comparable<Binary>, Serializable {
+
+  // this isn't really something others should extend
+  private Binary() { }
 
   public static final Binary EMPTY = fromByteArray(new byte[0]);
 
-  public static Binary fromByteArray(
-      final byte[] value,
-      final int offset,
-      final int length) {
-
-    return new Binary() {
-      @Override
-      public String toStringUsingUTF8() {
-        return UTF8.decode(ByteBuffer.wrap(value, offset, length)).toString();
-        // TODO: figure out why the following line was much slower
-        // rdb: new String(...) is slower because it instantiates a new Decoder,
-        //      while Charset#decode uses a thread-local decoder cache
-        // return new String(value, offset, length, BytesUtils.UTF8);
-      }
+  abstract public String toStringUsingUTF8();
 
-      @Override
-      public int length() {
-        return length;
-      }
+  abstract public int length();
 
-      @Override
-      public void writeTo(OutputStream out) throws IOException {
-        out.write(value, offset, length);
-      }
+  abstract public void writeTo(OutputStream out) throws IOException;
 
-      @Override
-      public byte[] getBytes() {
-        return Arrays.copyOfRange(value, offset, offset + length);
-      }
+  abstract public void writeTo(DataOutput out) throws IOException;
 
-      @Override
-      public int hashCode() {
-        return Binary.hashCode(value, offset, length);
-      }
+  abstract public byte[] getBytes();
 
-      @Override
-      boolean equals(Binary other) {
-        return other.equals(value, offset, length);
-      }
+  abstract boolean equals(byte[] bytes, int offset, int length);
 
-      @Override
-      boolean equals(byte[] other, int otherOffset, int otherLength) {
-        return Binary.equals(value, offset, length, other, otherOffset, otherLength);
-      }
+  abstract boolean equals(Binary other);
 
-      @Override
-      public int compareTo(Binary other) {
-        return other.compareTo(value, offset, length);
-      }
+  abstract public int compareTo(Binary other);
 
-      @Override
-      int compareTo(byte[] other, int otherOffset, int otherLength) {
-        return Binary.compareTwoByteArrays(value, offset, length, other, otherOffset, otherLength);
-      }
+  abstract int compareTo(byte[] bytes, int offset, int length);
 
-      @Override
-      public ByteBuffer toByteBuffer() {
-        return ByteBuffer.wrap(value, offset, length);
-      }
+  abstract public ByteBuffer toByteBuffer();
 
-      @Override
-      public void writeTo(DataOutput out) throws IOException {
-        out.write(value, offset, length);
-      }
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == null) {
+      return false;
+    }
+    if (obj instanceof Binary) {
+      return equals((Binary)obj);
+    }
+    return false;
+  }
 
-    };
+  @Override
+  public String toString() {
+    return "Binary{" + length() + " bytes, " + Arrays.toString(getBytes()) + "}";
   }
 
-  public static Binary fromByteArray(final byte[] value) {
-    return new Binary() {
-      @Override
-      public String toStringUsingUTF8() {
-        return new String(value, BytesUtils.UTF8);
-      }
+  private static class ByteArraySliceBackedBinary extends Binary {
+    private final byte[] value;
+    private final int offset;
+    private final int length;
 
-      @Override
-      public int length() {
-        return value.length;
-      }
+    public ByteArraySliceBackedBinary(byte[] value, int offset, int length) {
+      this.value = value;
+      this.offset = offset;
+      this.length = length;
+    }
 
-      @Override
-      public void writeTo(OutputStream out) throws IOException {
-        out.write(value);
-      }
+    @Override
+    public String toStringUsingUTF8() {
+      return UTF8.decode(ByteBuffer.wrap(value, offset, length)).toString();
+      // TODO: figure out why the following line was much slower
+      // rdb: new String(...) is slower because it instantiates a new Decoder,
+      //      while Charset#decode uses a thread-local decoder cache
+      // return new String(value, offset, length, BytesUtils.UTF8);
+    }
 
-      @Override
-      public byte[] getBytes() {
-        return value;
-      }
+    @Override
+    public int length() {
+      return length;
+    }
 
-      @Override
-      public int hashCode() {
-        return Binary.hashCode(value, 0, value.length);
-      }
+    @Override
+    public void writeTo(OutputStream out) throws IOException {
+      out.write(value, offset, length);
+    }
 
-      @Override
-      boolean equals(Binary other) {
-        return other.equals(value, 0, value.length);
-      }
+    @Override
+    public byte[] getBytes() {
+      return Arrays.copyOfRange(value, offset, offset + length);
+    }
 
-      @Override
-      boolean equals(byte[] other, int otherOffset, int otherLength) {
-        return Binary.equals(value, 0, value.length, other, otherOffset, otherLength);
-      }
+    @Override
+    public int hashCode() {
+      return Binary.hashCode(value, offset, length);
+    }
 
-      @Override
-      public int compareTo(Binary other) {
-        return other.compareTo(value, 0, value.length);
-      }
+    @Override
+    boolean equals(Binary other) {
+      return other.equals(value, offset, length);
+    }
 
-      @Override
-      int compareTo(byte[] other, int otherOffset, int otherLength) {
-        return Binary.compareTwoByteArrays(value, 0, value.length, other, otherOffset, otherLength);
-      }
+    @Override
+    boolean equals(byte[] other, int otherOffset, int otherLength) {
+      return Binary.equals(value, offset, length, other, otherOffset, otherLength);
+    }
 
-      @Override
-      public ByteBuffer toByteBuffer() {
-        return ByteBuffer.wrap(value);
-      }
+    @Override
+    public int compareTo(Binary other) {
+      return other.compareTo(value, offset, length);
+    }
+
+    @Override
+    int compareTo(byte[] other, int otherOffset, int otherLength) {
+      return Binary.compareTwoByteArrays(value, offset, length, other, otherOffset, otherLength);
+    }
+
+    @Override
+    public ByteBuffer toByteBuffer() {
+      return ByteBuffer.wrap(value, offset, length);
+    }
+
+    @Override
+    public void writeTo(DataOutput out) throws IOException {
+      out.write(value, offset, length);
+    }
 
-      @Override
-      public void writeTo(DataOutput out) throws IOException {
-        out.write(value);
-      }
-    };
   }
 
-  public static Binary fromByteBuffer(final ByteBuffer value) {
-    return new Binary() {
-      @Override
-      public String toStringUsingUTF8() {
-        return new String(getBytes(), BytesUtils.UTF8);
-      }
+  private static class FromStringBinary extends ByteArrayBackedBinary {
+    public FromStringBinary(byte[] value) {
+      super(value);
+    }
 
-      @Override
-      public int length() {
-        return value.remaining();
-      }
+    @Override
+    public String toString() {
+      return "Binary{\"" + toStringUsingUTF8() + "\"}";
+    }
+  }
 
-      @Override
-      public void writeTo(OutputStream out) throws IOException {
-        // TODO: should not have to materialize those bytes
-        out.write(getBytes());
-      }
+  public static Binary fromByteArray(final byte[] value, final int offset, final int length) {
+    return new ByteArraySliceBackedBinary(value, offset, length);
+  }
 
-      @Override
-      public byte[] getBytes() {
-        byte[] bytes = new byte[value.remaining()];
+  private static class ByteArrayBackedBinary extends Binary {
+    private final byte[] value;
 
-        value.mark();
-        value.get(bytes).reset();
-        return bytes;
-      }
+    public ByteArrayBackedBinary(byte[] value) {
+      this.value = value;
+    }
 
-      @Override
-      public int hashCode() {
-        if (value.hasArray()) {
-          return Binary.hashCode(value.array(), value.arrayOffset() + value.position(),
-              value.arrayOffset() + value.remaining());
-        }
-        byte[] bytes = getBytes();
-        return Binary.hashCode(bytes, 0, bytes.length);
-      }
+    @Override
+    public String toStringUsingUTF8() {
+      return new String(value, BytesUtils.UTF8);
+    }
 
-      @Override
-      boolean equals(Binary other) {
-        if (value.hasArray()) {
-          return other.equals(value.array(), value.arrayOffset() + value.position(),
-              value.arrayOffset() + value.remaining());
-        }
-        byte[] bytes = getBytes();
-        return other.equals(bytes, 0, bytes.length);
-      }
+    @Override
+    public int length() {
+      return value.length;
+    }
+
+    @Override
+    public void writeTo(OutputStream out) throws IOException {
+      out.write(value);
+    }
+
+    @Override
+    public byte[] getBytes() {
+      return value;
+    }
 
-      @Override
-      boolean equals(byte[] other, int otherOffset, int otherLength) {
-        if (value.hasArray()) {
-          return Binary.equals(value.array(), value.arrayOffset() + value.position(),
-              value.arrayOffset() + value.remaining(), other, otherOffset, otherLength);
-        }
-        byte[] bytes = getBytes();
-        return Binary.equals(bytes, 0, bytes.length, other, otherOffset, otherLength);
+    @Override
+    public int hashCode() {
+      return Binary.hashCode(value, 0, value.length);
+    }
+
+    @Override
+    boolean equals(Binary other) {
+      return other.equals(value, 0, value.length);
+    }
+
+    @Override
+    boolean equals(byte[] other, int otherOffset, int otherLength) {
+      return Binary.equals(value, 0, value.length, other, otherOffset, otherLength);
+    }
+
+    @Override
+    public int compareTo(Binary other) {
+      return other.compareTo(value, 0, value.length);
+    }
+
+    @Override
+    int compareTo(byte[] other, int otherOffset, int otherLength) {
+      return Binary.compareTwoByteArrays(value, 0, value.length, other, otherOffset, otherLength);
+    }
+
+    @Override
+    public ByteBuffer toByteBuffer() {
+      return ByteBuffer.wrap(value);
+    }
+
+    @Override
+    public void writeTo(DataOutput out) throws IOException {
+      out.write(value);
+    }
+
+  }
+
+  public static Binary fromByteArray(final byte[] value) {
+    return new ByteArrayBackedBinary(value);
+  }
+
+  private static class ByteBufferBackedBinary extends Binary {
+    private transient ByteBuffer value;
+
+    public ByteBufferBackedBinary(ByteBuffer value) {
+      this.value = value;
+    }
+
+    @Override
+    public String toStringUsingUTF8() {
+      return new String(getBytes(), BytesUtils.UTF8);
+    }
+
+    @Override
+    public int length() {
+      return value.remaining();
+    }
+
+    @Override
+    public void writeTo(OutputStream out) throws IOException {
+      // TODO: should not have to materialize those bytes
+      out.write(getBytes());
+    }
+
+    @Override
+    public byte[] getBytes() {
+      byte[] bytes = new byte[value.remaining()];
+
+      value.mark();
+      value.get(bytes).reset();
+      return bytes;
+    }
+
+    @Override
+    public int hashCode() {
+      if (value.hasArray()) {
+        return Binary.hashCode(value.array(), value.arrayOffset() + value.position(),
+            value.arrayOffset() + value.remaining());
       }
+      byte[] bytes = getBytes();
+      return Binary.hashCode(bytes, 0, bytes.length);
+    }
 
-      @Override
-      public int compareTo(Binary other) {
-        if (value.hasArray()) {
-          return other.compareTo(value.array(), value.arrayOffset() + value.position(),
-              value.arrayOffset() + value.remaining());
-        }
-        byte[] bytes = getBytes();
-        return other.compareTo(bytes, 0, bytes.length);
+    @Override
+    boolean equals(Binary other) {
+      if (value.hasArray()) {
+        return other.equals(value.array(), value.arrayOffset() + value.position(),
+            value.arrayOffset() + value.remaining());
       }
+      byte[] bytes = getBytes();
+      return other.equals(bytes, 0, bytes.length);
+    }
 
-      @Override
-      int compareTo(byte[] other, int otherOffset, int otherLength) {
-        if (value.hasArray()) {
-          return Binary.compareTwoByteArrays(value.array(), value.arrayOffset() + value.position(),
-              value.arrayOffset() + value.remaining(), other, otherOffset, otherLength);
-        }
-        byte[] bytes = getBytes();
-        return Binary.compareTwoByteArrays(bytes, 0, bytes.length, other, otherOffset, otherLength);
+    @Override
+    boolean equals(byte[] other, int otherOffset, int otherLength) {
+      if (value.hasArray()) {
+        return Binary.equals(value.array(), value.arrayOffset() + value.position(),
+            value.arrayOffset() + value.remaining(), other, otherOffset, otherLength);
       }
+      byte[] bytes = getBytes();
+      return Binary.equals(bytes, 0, bytes.length, other, otherOffset, otherLength);
+    }
 
-      @Override
-      public ByteBuffer toByteBuffer() {
-        return value;
+    @Override
+    public int compareTo(Binary other) {
+      if (value.hasArray()) {
+        return other.compareTo(value.array(), value.arrayOffset() + value.position(),
+            value.arrayOffset() + value.remaining());
       }
+      byte[] bytes = getBytes();
+      return other.compareTo(bytes, 0, bytes.length);
+    }
 
-      @Override
-      public void writeTo(DataOutput out) throws IOException {
-        // TODO: should not have to materialize those bytes
-        out.write(getBytes());
+    @Override
+    int compareTo(byte[] other, int otherOffset, int otherLength) {
+      if (value.hasArray()) {
+        return Binary.compareTwoByteArrays(value.array(), value.arrayOffset() + value.position(),
+            value.arrayOffset() + value.remaining(), other, otherOffset, otherLength);
       }
-    };
+      byte[] bytes = getBytes();
+      return Binary.compareTwoByteArrays(bytes, 0, bytes.length, other, otherOffset, otherLength);
+    }
+
+    @Override
+    public ByteBuffer toByteBuffer() {
+      return value;
+    }
+
+    @Override
+    public void writeTo(DataOutput out) throws IOException {
+      // TODO: should not have to materialize those bytes
+      out.write(getBytes());
+    }
+
+    private void writeObject(java.io.ObjectOutputStream out) throws IOException {
+      byte[] bytes = getBytes();
+      out.writeInt(bytes.length);
+      out.write(bytes);
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws IOException, ClassNotFoundException {
+      int length = in.readInt();
+      byte[] bytes = new byte[length];
+      in.readFully(bytes, 0, length);
+      this.value = ByteBuffer.wrap(bytes);
+    }
+
+    private void readObjectNoData() throws ObjectStreamException {
+      this.value = ByteBuffer.wrap(new byte[0]);
+    }
+
+  }
+
+  public static Binary fromByteBuffer(final ByteBuffer value) {
+    return new ByteBufferBackedBinary(value);
   }
 
   public static Binary fromString(final String value) {
     try {
-      return fromByteArray(value.getBytes("UTF-8"));
+      return new FromStringBinary(value.getBytes("UTF-8"));
     } catch (UnsupportedEncodingException e) {
       throw new ParquetEncodingException("UTF-8 not supported.", e);
     }
@@ -313,39 +408,4 @@ abstract public class Binary {
     else if (length1 < length2) { return 1;}
     else { return -1; }
   }
-
-  abstract public String toStringUsingUTF8();
-
-  abstract public int length();
-
-  abstract public void writeTo(OutputStream out) throws IOException;
-
-  abstract public void writeTo(DataOutput out) throws IOException;
-
-  abstract public byte[] getBytes();
-
-  abstract boolean equals(byte[] bytes, int offset, int length);
-
-  abstract boolean equals(Binary other);
-
-  abstract public int compareTo(Binary other);
-
-  abstract int compareTo(byte[] bytes, int offset, int length);
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == null) {
-      return false;
-    }
-    if (obj instanceof Binary) {
-      return equals((Binary)obj);
-    }
-    return false;
-  }
-
-  abstract public ByteBuffer toByteBuffer();
-
-  public String toString() {
-    return "Binary{" + length() + " bytes, " + Arrays.toString(getBytes()) + "}";
-  };
 }
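
Functionally the factory methods behave as before; the refactor names the formerly anonymous implementations, adds Comparable and Serializable, and gives string-backed instances a readable toString(). A quick illustration:

    Binary a = Binary.fromString("foobarbaz");
    Binary b = Binary.fromString("foobarbaz");

    boolean equal = a.equals(b) && a.compareTo(b) == 0 && a.hashCode() == b.hashCode(); // true
    String text  = a.toStringUsingUTF8(); // "foobarbaz"
    String shown = a.toString();          // Binary{"foobarbaz"} -- FromStringBinary overrides toString()

Because Binary is now Serializable, predicates that hold Binary values can round-trip through Java serialization, as exercised in TestFilterApiMethods below.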

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/main/java/parquet/io/api/RecordMaterializer.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/main/java/parquet/io/api/RecordMaterializer.java b/parquet-column/src/main/java/parquet/io/api/RecordMaterializer.java
index 7d90c6a..7ff0f1c 100644
--- a/parquet-column/src/main/java/parquet/io/api/RecordMaterializer.java
+++ b/parquet-column/src/main/java/parquet/io/api/RecordMaterializer.java
@@ -34,6 +34,11 @@ abstract public class RecordMaterializer<T> {
   abstract public T getCurrentRecord();
 
   /**
+   * Called if {@link #getCurrentRecord()} isn't going to be called.
+   */
+  public void skipCurrentRecord() { }
+
+  /**
    * @return the root converter for this tree
    */
   abstract public GroupConverter getRootConverter();

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/test/java/parquet/filter2/predicate/DummyUdp.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/test/java/parquet/filter2/predicate/DummyUdp.java b/parquet-column/src/test/java/parquet/filter2/predicate/DummyUdp.java
new file mode 100644
index 0000000..277fa43
--- /dev/null
+++ b/parquet-column/src/test/java/parquet/filter2/predicate/DummyUdp.java
@@ -0,0 +1,19 @@
+package parquet.filter2.predicate;
+
+public class DummyUdp extends UserDefinedPredicate<Integer> {
+
+  @Override
+  public boolean keep(Integer value) {
+    return false;
+  }
+
+  @Override
+  public boolean canDrop(Statistics<Integer> statistics) {
+    return false;
+  }
+
+  @Override
+  public boolean inverseCanDrop(Statistics<Integer> statistics) {
+    return false;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/test/java/parquet/filter2/predicate/TestFilterApiMethods.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/test/java/parquet/filter2/predicate/TestFilterApiMethods.java b/parquet-column/src/test/java/parquet/filter2/predicate/TestFilterApiMethods.java
new file mode 100644
index 0000000..dafd7fd
--- /dev/null
+++ b/parquet-column/src/test/java/parquet/filter2/predicate/TestFilterApiMethods.java
@@ -0,0 +1,103 @@
+package parquet.filter2.predicate;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+
+import org.junit.Test;
+
+import parquet.common.schema.ColumnPath;
+import parquet.filter2.predicate.Operators.And;
+import parquet.filter2.predicate.Operators.BinaryColumn;
+import parquet.filter2.predicate.Operators.DoubleColumn;
+import parquet.filter2.predicate.Operators.Eq;
+import parquet.filter2.predicate.Operators.Gt;
+import parquet.filter2.predicate.Operators.IntColumn;
+import parquet.filter2.predicate.Operators.Not;
+import parquet.filter2.predicate.Operators.Or;
+import parquet.filter2.predicate.Operators.UserDefined;
+import parquet.io.api.Binary;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static parquet.filter2.predicate.FilterApi.and;
+import static parquet.filter2.predicate.FilterApi.binaryColumn;
+import static parquet.filter2.predicate.FilterApi.doubleColumn;
+import static parquet.filter2.predicate.FilterApi.eq;
+import static parquet.filter2.predicate.FilterApi.gt;
+import static parquet.filter2.predicate.FilterApi.intColumn;
+import static parquet.filter2.predicate.FilterApi.not;
+import static parquet.filter2.predicate.FilterApi.notEq;
+import static parquet.filter2.predicate.FilterApi.or;
+import static parquet.filter2.predicate.FilterApi.userDefined;
+import static parquet.filter2.predicate.Operators.NotEq;
+
+public class TestFilterApiMethods {
+
+  private static final IntColumn intColumn = intColumn("a.b.c");
+  private static final DoubleColumn doubleColumn = doubleColumn("x.y.z");
+  private static final BinaryColumn binColumn = binaryColumn("a.string.column");
+
+  private static final FilterPredicate predicate =
+      and(not(or(eq(intColumn, 7), notEq(intColumn, 17))), gt(doubleColumn, 100.0));
+
+  @Test
+  public void testFilterPredicateCreation() {
+    FilterPredicate outerAnd = predicate;
+
+    assertTrue(outerAnd instanceof And);
+
+    FilterPredicate not = ((And) outerAnd).getLeft();
+    FilterPredicate gt = ((And) outerAnd).getRight();
+    assertTrue(not instanceof Not);
+
+    FilterPredicate or = ((Not) not).getPredicate();
+    assertTrue(or instanceof Or);
+
+    FilterPredicate leftEq = ((Or) or).getLeft();
+    FilterPredicate rightNotEq = ((Or) or).getRight();
+    assertTrue(leftEq instanceof Eq);
+    assertTrue(rightNotEq instanceof NotEq);
+    assertEquals(7, ((Eq) leftEq).getValue());
+    assertEquals(17, ((NotEq) rightNotEq).getValue());
+    assertEquals(ColumnPath.get("a", "b", "c"), ((Eq) leftEq).getColumn().getColumnPath());
+    assertEquals(ColumnPath.get("a", "b", "c"), ((NotEq) rightNotEq).getColumn().getColumnPath());
+
+    assertTrue(gt instanceof Gt);
+    assertEquals(100.0, ((Gt) gt).getValue());
+    assertEquals(ColumnPath.get("x", "y", "z"), ((Gt) gt).getColumn().getColumnPath());
+  }
+
+  @Test
+  public void testToString() {
+    FilterPredicate pred = or(predicate, notEq(binColumn, Binary.fromString("foobarbaz")));
+    assertEquals("or(and(not(or(eq(a.b.c, 7), noteq(a.b.c, 17))), gt(x.y.z, 100.0)), "
+        + "noteq(a.string.column, Binary{\"foobarbaz\"}))",
+        pred.toString());
+  }
+
+  @Test
+  public void testUdp() {
+    FilterPredicate predicate = or(eq(doubleColumn, 12.0), userDefined(intColumn, DummyUdp.class));
+    assertTrue(predicate instanceof Or);
+    FilterPredicate ud = ((Or) predicate).getRight();
+    assertTrue(ud instanceof UserDefined);
+    assertEquals(DummyUdp.class, ((UserDefined) ud).getUserDefinedPredicateClass());
+    assertTrue(((UserDefined) ud).getUserDefinedPredicate() instanceof DummyUdp);
+  }
+
+  @Test
+  public void testSerializable() throws Exception {
+    BinaryColumn binary = binaryColumn("foo");
+    FilterPredicate p = or(and(userDefined(intColumn, DummyUdp.class), predicate), eq(binary, Binary.fromString("hi")));
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    ObjectOutputStream oos = new ObjectOutputStream(baos);
+    oos.writeObject(p);
+    oos.close();
+
+    ObjectInputStream is = new ObjectInputStream(new ByteArrayInputStream(baos.toByteArray()));
+    FilterPredicate read = (FilterPredicate) is.readObject();
+    assertEquals(p, read);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/test/java/parquet/filter2/predicate/TestLogicalInverseRewriter.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/test/java/parquet/filter2/predicate/TestLogicalInverseRewriter.java b/parquet-column/src/test/java/parquet/filter2/predicate/TestLogicalInverseRewriter.java
new file mode 100644
index 0000000..0aa360b
--- /dev/null
+++ b/parquet-column/src/test/java/parquet/filter2/predicate/TestLogicalInverseRewriter.java
@@ -0,0 +1,85 @@
+package parquet.filter2.predicate;
+
+import org.junit.Test;
+
+import parquet.filter2.predicate.Operators.DoubleColumn;
+import parquet.filter2.predicate.Operators.IntColumn;
+import parquet.filter2.predicate.Operators.LogicalNotUserDefined;
+import parquet.filter2.predicate.Operators.UserDefined;
+
+import static org.junit.Assert.assertEquals;
+import static parquet.filter2.predicate.FilterApi.and;
+import static parquet.filter2.predicate.FilterApi.doubleColumn;
+import static parquet.filter2.predicate.FilterApi.eq;
+import static parquet.filter2.predicate.FilterApi.gt;
+import static parquet.filter2.predicate.FilterApi.gtEq;
+import static parquet.filter2.predicate.FilterApi.intColumn;
+import static parquet.filter2.predicate.FilterApi.lt;
+import static parquet.filter2.predicate.FilterApi.ltEq;
+import static parquet.filter2.predicate.FilterApi.not;
+import static parquet.filter2.predicate.FilterApi.notEq;
+import static parquet.filter2.predicate.FilterApi.or;
+import static parquet.filter2.predicate.FilterApi.userDefined;
+import static parquet.filter2.predicate.LogicalInverseRewriter.rewrite;
+
+public class TestLogicalInverseRewriter {
+  private static final IntColumn intColumn = intColumn("a.b.c");
+  private static final DoubleColumn doubleColumn = doubleColumn("a.b.c");
+
+  private static final FilterPredicate complex =
+      and(
+          not(
+              or(ltEq(doubleColumn, 12.0),
+                  and(
+                      not(or(eq(intColumn, 7), notEq(intColumn, 17))),
+                      userDefined(intColumn, DummyUdp.class)))),
+          or(gt(doubleColumn, 100.0), not(gtEq(intColumn, 77))));
+
+  private static final FilterPredicate complexCollapsed =
+      and(
+          and(gt(doubleColumn, 12.0),
+              or(
+                  or(eq(intColumn, 7), notEq(intColumn, 17)),
+                  new LogicalNotUserDefined<Integer, DummyUdp>(userDefined(intColumn, DummyUdp.class)))),
+          or(gt(doubleColumn, 100.0), lt(intColumn, 77)));
+
+  private static void assertNoOp(FilterPredicate p) {
+    assertEquals(p, rewrite(p));
+  }
+
+  @Test
+  public void testBaseCases() {
+    UserDefined<Integer, DummyUdp> ud = userDefined(intColumn, DummyUdp.class);
+
+    assertNoOp(eq(intColumn, 17));
+    assertNoOp(notEq(intColumn, 17));
+    assertNoOp(lt(intColumn, 17));
+    assertNoOp(ltEq(intColumn, 17));
+    assertNoOp(gt(intColumn, 17));
+    assertNoOp(gtEq(intColumn, 17));
+    assertNoOp(and(eq(intColumn, 17), eq(doubleColumn, 12.0)));
+    assertNoOp(or(eq(intColumn, 17), eq(doubleColumn, 12.0)));
+    assertNoOp(ud);
+
+    assertEquals(notEq(intColumn, 17), rewrite(not(eq(intColumn, 17))));
+    assertEquals(eq(intColumn, 17), rewrite(not(notEq(intColumn, 17))));
+    assertEquals(gtEq(intColumn, 17), rewrite(not(lt(intColumn, 17))));
+    assertEquals(gt(intColumn, 17), rewrite(not(ltEq(intColumn, 17))));
+    assertEquals(ltEq(intColumn, 17), rewrite(not(gt(intColumn, 17))));
+    assertEquals(lt(intColumn, 17), rewrite(not(gtEq(intColumn, 17))));
+    assertEquals(new LogicalNotUserDefined<Integer, DummyUdp>(ud), rewrite(not(ud)));
+
+    FilterPredicate notedAnd = not(and(eq(intColumn, 17), eq(doubleColumn, 12.0)));
+    FilterPredicate distributedAnd = or(notEq(intColumn, 17), notEq(doubleColumn, 12.0));
+    assertEquals(distributedAnd, rewrite(notedAnd));
+
+    FilterPredicate andWithNots = and(not(gtEq(intColumn, 17)), lt(intColumn, 7));
+    FilterPredicate andWithoutNots = and(lt(intColumn, 17), lt(intColumn, 7));
+    assertEquals(andWithoutNots, rewrite(andWithNots));
+  }
+
+  @Test
+  public void testComplex() {
+    assertEquals(complexCollapsed, rewrite(complex));
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/test/java/parquet/filter2/predicate/TestLogicalInverter.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/test/java/parquet/filter2/predicate/TestLogicalInverter.java b/parquet-column/src/test/java/parquet/filter2/predicate/TestLogicalInverter.java
new file mode 100644
index 0000000..19e6b68
--- /dev/null
+++ b/parquet-column/src/test/java/parquet/filter2/predicate/TestLogicalInverter.java
@@ -0,0 +1,76 @@
+package parquet.filter2.predicate;
+
+import org.junit.Test;
+
+import parquet.filter2.predicate.Operators.DoubleColumn;
+import parquet.filter2.predicate.Operators.IntColumn;
+import parquet.filter2.predicate.Operators.LogicalNotUserDefined;
+import parquet.filter2.predicate.Operators.UserDefined;
+
+import static org.junit.Assert.assertEquals;
+import static parquet.filter2.predicate.FilterApi.and;
+import static parquet.filter2.predicate.FilterApi.doubleColumn;
+import static parquet.filter2.predicate.FilterApi.eq;
+import static parquet.filter2.predicate.FilterApi.gt;
+import static parquet.filter2.predicate.FilterApi.gtEq;
+import static parquet.filter2.predicate.FilterApi.intColumn;
+import static parquet.filter2.predicate.FilterApi.lt;
+import static parquet.filter2.predicate.FilterApi.ltEq;
+import static parquet.filter2.predicate.FilterApi.not;
+import static parquet.filter2.predicate.FilterApi.notEq;
+import static parquet.filter2.predicate.FilterApi.or;
+import static parquet.filter2.predicate.FilterApi.userDefined;
+import static parquet.filter2.predicate.LogicalInverter.invert;
+
+public class TestLogicalInverter {
+  private static final IntColumn intColumn = intColumn("a.b.c");
+  private static final DoubleColumn doubleColumn = doubleColumn("a.b.c");
+
+  private  static  final UserDefined<Integer, DummyUdp> ud = userDefined(intColumn, DummyUdp.class);
+
+  private static final FilterPredicate complex =
+      and(
+          or(ltEq(doubleColumn, 12.0),
+              and(
+                  not(or(eq(intColumn, 7), notEq(intColumn, 17))),
+                  userDefined(intColumn, DummyUdp.class))),
+          or(gt(doubleColumn, 100.0), notEq(intColumn, 77)));
+
+  private static final FilterPredicate complexInverse =
+      or(
+          and(gt(doubleColumn, 12.0),
+              or(
+                  or(eq(intColumn, 7), notEq(intColumn, 17)),
+                  new LogicalNotUserDefined<Integer, DummyUdp>(userDefined(intColumn, DummyUdp.class)))),
+          and(ltEq(doubleColumn, 100.0), eq(intColumn, 77)));
+
+  @Test
+  public void testBaseCases() {
+    assertEquals(notEq(intColumn, 17), invert(eq(intColumn, 17)));
+    assertEquals(eq(intColumn, 17), invert(notEq(intColumn, 17)));
+    assertEquals(gtEq(intColumn, 17), invert(lt(intColumn, 17)));
+    assertEquals(gt(intColumn, 17), invert(ltEq(intColumn, 17)));
+    assertEquals(ltEq(intColumn, 17), invert(gt(intColumn, 17)));
+    assertEquals(lt(intColumn, 17), invert(gtEq(intColumn, 17)));
+
+    FilterPredicate andPos = and(eq(intColumn, 17), eq(doubleColumn, 12.0));
+    FilterPredicate andInv = or(notEq(intColumn, 17), notEq(doubleColumn, 12.0));
+    assertEquals(andInv, invert(andPos));
+
+    FilterPredicate orPos = or(eq(intColumn, 17), eq(doubleColumn, 12.0));
+    FilterPredicate orInv = and(notEq(intColumn, 17), notEq(doubleColumn, 12.0));
+    assertEquals(orPos, invert(orInv));
+
+    assertEquals(eq(intColumn, 17), invert(not(eq(intColumn, 17))));
+
+    UserDefined<Integer, DummyUdp> ud = userDefined(intColumn, DummyUdp.class);
+    assertEquals(new LogicalNotUserDefined<Integer, DummyUdp>(ud), invert(ud));
+    assertEquals(ud, invert(not(ud)));
+    assertEquals(ud, invert(new LogicalNotUserDefined<Integer, DummyUdp>(ud)));
+  }
+
+  @Test
+  public void testComplex() {
+    assertEquals(complexInverse, invert(complex));
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/test/java/parquet/filter2/predicate/TestSchemaCompatibilityValidator.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/test/java/parquet/filter2/predicate/TestSchemaCompatibilityValidator.java b/parquet-column/src/test/java/parquet/filter2/predicate/TestSchemaCompatibilityValidator.java
new file mode 100644
index 0000000..e9e745f
--- /dev/null
+++ b/parquet-column/src/test/java/parquet/filter2/predicate/TestSchemaCompatibilityValidator.java
@@ -0,0 +1,124 @@
+package parquet.filter2.predicate;
+
+import org.junit.Test;
+
+import parquet.filter2.predicate.Operators.BinaryColumn;
+import parquet.filter2.predicate.Operators.IntColumn;
+import parquet.filter2.predicate.Operators.LongColumn;
+import parquet.io.api.Binary;
+import parquet.schema.MessageType;
+import parquet.schema.MessageTypeParser;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+import static parquet.filter2.predicate.FilterApi.and;
+import static parquet.filter2.predicate.FilterApi.binaryColumn;
+import static parquet.filter2.predicate.FilterApi.eq;
+import static parquet.filter2.predicate.FilterApi.gt;
+import static parquet.filter2.predicate.FilterApi.intColumn;
+import static parquet.filter2.predicate.FilterApi.longColumn;
+import static parquet.filter2.predicate.FilterApi.ltEq;
+import static parquet.filter2.predicate.FilterApi.not;
+import static parquet.filter2.predicate.FilterApi.notEq;
+import static parquet.filter2.predicate.FilterApi.or;
+import static parquet.filter2.predicate.FilterApi.userDefined;
+import static parquet.filter2.predicate.SchemaCompatibilityValidator.validate;
+
+public class TestSchemaCompatibilityValidator {
+  private static final BinaryColumn stringC = binaryColumn("c");
+  private static final LongColumn longBar = longColumn("x.bar");
+  private static final IntColumn intBar = intColumn("x.bar");
+  private static final LongColumn lotsOfLongs = longColumn("lotsOfLongs");
+
+  private static final String schemaString =
+      "message Document {\n"
+          + "  required int32 a;\n"
+          + "  required binary b;\n"
+          + "  required binary c (UTF8);\n"
+          + "  required group x { required int32 bar; }\n"
+          + "  repeated int64 lotsOfLongs;\n"
+          + "}\n";
+
+  private static final MessageType schema = MessageTypeParser.parseMessageType(schemaString);
+
+  private static final FilterPredicate complexValid =
+      and(
+          or(ltEq(stringC, Binary.fromString("foo")),
+              and(
+                  not(or(eq(intBar, 17), notEq(intBar, 17))),
+                  userDefined(intBar, DummyUdp.class))),
+          or(gt(stringC, Binary.fromString("bar")), notEq(stringC, Binary.fromString("baz"))));
+
+  static class LongDummyUdp extends UserDefinedPredicate<Long> {
+    @Override
+    public boolean keep(Long value) {
+      return false;
+    }
+
+    @Override
+    public boolean canDrop(Statistics<Long> statistics) {
+      return false;
+    }
+
+    @Override
+    public boolean inverseCanDrop(Statistics<Long> statistics) {
+      return false;
+    }
+  }
+
+  private static final FilterPredicate complexWrongType =
+      and(
+          or(ltEq(stringC, Binary.fromString("foo")),
+              and(
+                  not(or(eq(longBar, 17L), notEq(longBar, 17L))),
+                  userDefined(longBar, LongDummyUdp.class))),
+          or(gt(stringC, Binary.fromString("bar")), notEq(stringC, Binary.fromString("baz"))));
+
+  private static final FilterPredicate complexMixedType =
+      and(
+          or(ltEq(stringC, Binary.fromString("foo")),
+              and(
+                  not(or(eq(intBar, 17), notEq(longBar, 17L))),
+                  userDefined(longBar, LongDummyUdp.class))),
+          or(gt(stringC, Binary.fromString("bar")), notEq(stringC, Binary.fromString("baz"))));
+
+  @Test
+  public void testValidType() {
+    validate(complexValid, schema);
+  }
+
+  @Test
+  public void testFindsInvalidTypes() {
+    try {
+      validate(complexWrongType, schema);
+      fail("this should throw");
+    } catch (IllegalArgumentException e) {
+      assertEquals("FilterPredicate column: x.bar's declared type (java.lang.Long) does not match the schema found in file metadata. "
+          + "Column x.bar is of type: FullTypeDescriptor(PrimitiveType: INT32, OriginalType: null)\n"
+          + "Valid types for this column are: [class java.lang.Integer]", e.getMessage());
+    }
+  }
+
+  @Test
+  public void testTwiceDeclaredColumn() {
+    validate(eq(stringC, Binary.fromString("larry")), schema);
+
+    try {
+      validate(complexMixedType, schema);
+      fail("this should throw");
+    } catch (IllegalArgumentException e) {
+      assertEquals("Column: x.bar was provided with different types in the same predicate. Found both: (class java.lang.Integer, class java.lang.Long)", e.getMessage());
+    }
+
+  }
+
+  @Test
+  public void testRepeatedNotSupported() {
+    try {
+      validate(eq(lotsOfLongs, 10l), schema);
+      fail("this should throw");
+    } catch (IllegalArgumentException e) {
+      assertEquals("FilterPredicates do not currently support repeated columns. Column lotsOfLongs is repeated.", e.getMessage());
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/test/java/parquet/filter2/predicate/TestValidTypeMap.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/test/java/parquet/filter2/predicate/TestValidTypeMap.java b/parquet-column/src/test/java/parquet/filter2/predicate/TestValidTypeMap.java
new file mode 100644
index 0000000..07f2597
--- /dev/null
+++ b/parquet-column/src/test/java/parquet/filter2/predicate/TestValidTypeMap.java
@@ -0,0 +1,93 @@
+package parquet.filter2.predicate;
+
+import org.junit.Test;
+
+import parquet.common.schema.ColumnPath;
+import parquet.filter2.predicate.Operators.BinaryColumn;
+import parquet.filter2.predicate.Operators.BooleanColumn;
+import parquet.filter2.predicate.Operators.Column;
+import parquet.filter2.predicate.Operators.DoubleColumn;
+import parquet.filter2.predicate.Operators.FloatColumn;
+import parquet.filter2.predicate.Operators.IntColumn;
+import parquet.filter2.predicate.Operators.LongColumn;
+import parquet.schema.OriginalType;
+import parquet.schema.PrimitiveType.PrimitiveTypeName;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+import static parquet.filter2.predicate.FilterApi.binaryColumn;
+import static parquet.filter2.predicate.FilterApi.booleanColumn;
+import static parquet.filter2.predicate.FilterApi.doubleColumn;
+import static parquet.filter2.predicate.FilterApi.floatColumn;
+import static parquet.filter2.predicate.FilterApi.intColumn;
+import static parquet.filter2.predicate.FilterApi.longColumn;
+import static parquet.filter2.predicate.ValidTypeMap.assertTypeValid;
+
+public class TestValidTypeMap {
+  public static IntColumn intColumn = intColumn("int.column");
+  public static LongColumn longColumn = longColumn("long.column");
+  public static FloatColumn floatColumn = floatColumn("float.column");
+  public static DoubleColumn doubleColumn = doubleColumn("double.column");
+  public static BooleanColumn booleanColumn = booleanColumn("boolean.column");
+  public static BinaryColumn binaryColumn = binaryColumn("binary.column");
+
+  private static class InvalidColumnType implements Comparable<InvalidColumnType> {
+    @Override
+    public int compareTo(InvalidColumnType o) {
+      return 0;
+    }
+  }
+
+  public static Column<InvalidColumnType> invalidColumn =
+      new Column<InvalidColumnType>(ColumnPath.get("invalid.column"), InvalidColumnType.class) { };
+
+  @Test
+  public void testValidTypes() {
+    assertTypeValid(intColumn, PrimitiveTypeName.INT32, null);
+    assertTypeValid(longColumn, PrimitiveTypeName.INT64, null);
+    assertTypeValid(floatColumn, PrimitiveTypeName.FLOAT, null);
+    assertTypeValid(doubleColumn, PrimitiveTypeName.DOUBLE, null);
+    assertTypeValid(booleanColumn, PrimitiveTypeName.BOOLEAN, null);
+    assertTypeValid(binaryColumn, PrimitiveTypeName.BINARY, null);
+    assertTypeValid(binaryColumn, PrimitiveTypeName.FIXED_LEN_BYTE_ARRAY, null);
+    assertTypeValid(binaryColumn, PrimitiveTypeName.BINARY, OriginalType.UTF8);
+    assertTypeValid(binaryColumn, PrimitiveTypeName.FIXED_LEN_BYTE_ARRAY, OriginalType.UTF8);
+  }
+
+  @Test
+  public void testMismatchedTypes() {
+    try {
+      assertTypeValid(intColumn, PrimitiveTypeName.DOUBLE, null);
+      fail("This should throw!");
+    } catch (IllegalArgumentException e) {
+      assertEquals("FilterPredicate column: int.column's declared type (java.lang.Integer) does not match the "
+          + "schema found in file metadata. Column int.column is of type: "
+          + "FullTypeDescriptor(PrimitiveType: DOUBLE, OriginalType: null)\n"
+          + "Valid types for this column are: [class java.lang.Double]", e.getMessage());
+    }
+  }
+
+  @Test
+  public void testUnsupportedType() {
+    try {
+      assertTypeValid(invalidColumn, PrimitiveTypeName.INT32, null);
+      fail("This should throw!");
+    } catch (IllegalArgumentException e) {
+      assertEquals("Column invalid.column was declared as type: "
+          + "parquet.filter2.predicate.TestValidTypeMap$InvalidColumnType which is not supported "
+          + "in FilterPredicates. Supported types for this column are: [class java.lang.Integer]", e.getMessage());
+    }
+
+    try {
+      assertTypeValid(invalidColumn, PrimitiveTypeName.INT32, OriginalType.UTF8);
+      fail("This should throw!");
+    } catch (IllegalArgumentException e) {
+      assertEquals("Column invalid.column was declared as type: "
+          + "parquet.filter2.predicate.TestValidTypeMap$InvalidColumnType which is not supported "
+          + "in FilterPredicates. There are no supported types for columns of FullTypeDescriptor(PrimitiveType: INT32, OriginalType: UTF8)",
+          e.getMessage());
+    }
+
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/test/java/parquet/filter2/recordlevel/TestIncrementallyUpdatedFilterPredicateEvaluator.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/test/java/parquet/filter2/recordlevel/TestIncrementallyUpdatedFilterPredicateEvaluator.java b/parquet-column/src/test/java/parquet/filter2/recordlevel/TestIncrementallyUpdatedFilterPredicateEvaluator.java
new file mode 100644
index 0000000..08b7a04
--- /dev/null
+++ b/parquet-column/src/test/java/parquet/filter2/recordlevel/TestIncrementallyUpdatedFilterPredicateEvaluator.java
@@ -0,0 +1,191 @@
+package parquet.filter2.recordlevel;
+
+import org.junit.Test;
+
+import parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicate.And;
+import parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicate.Or;
+import parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicate.ValueInspector;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicateEvaluator.evaluate;
+
+public class TestIncrementallyUpdatedFilterPredicateEvaluator {
+
+  public static class ShortCircuitException extends RuntimeException {
+    public ShortCircuitException() {
+      super("this was supposed to short circuit and never get here!");
+    }
+  }
+
+  public static ValueInspector intIsNull() {
+    return new ValueInspector() {
+      @Override
+      public void updateNull() {
+        setResult(true);
+      }
+
+      @Override
+      public void update(int value) {
+        setResult(false);
+      }
+    };
+  }
+
+  public static ValueInspector intIsEven() {
+    return new ValueInspector() {
+      @Override
+      public void updateNull() {
+        setResult(false);
+      }
+
+      @Override
+      public void update(int value) {
+        setResult(value % 2 == 0);
+      }
+    };
+  }
+
+  public static ValueInspector doubleMoreThan10() {
+    return new ValueInspector() {
+      @Override
+      public void updateNull() {
+        setResult(false);
+      }
+
+      @Override
+      public void update(double value) {
+        setResult(value > 10.0);
+      }
+    };
+  }
+
+  @Test
+  public void testValueInspector() {
+    // known, and set to false criteria, null considered false
+    ValueInspector v = intIsEven();
+    v.update(11);
+    assertFalse(evaluate(v));
+    v.reset();
+
+    // known and set to true criteria, null considered false
+    v.update(12);
+    assertTrue(evaluate(v));
+    v.reset();
+
+    // known and set to null, null considered false
+    v.updateNull();
+    assertFalse(evaluate(v));
+    v.reset();
+
+    // known, and set to false criteria, null considered true
+    ValueInspector intIsNull = intIsNull();
+    intIsNull.update(10);
+    assertFalse(evaluate(intIsNull));
+    intIsNull.reset();
+
+    // known, and set to true criteria, null considered true
+    intIsNull.updateNull();
+    assertTrue(evaluate(intIsNull));
+    intIsNull.reset();
+
+    // unknown, null considered false
+    v.reset();
+    assertFalse(evaluate(v));
+
+    // unknown, null considered true
+    intIsNull.reset();
+    assertTrue(evaluate(intIsNull));
+  }
+
+  private void doOrTest(ValueInspector v1, ValueInspector v2, int v1Value, int v2Value, boolean expected) {
+    v1.update(v1Value);
+    v2.update(v2Value);
+    IncrementallyUpdatedFilterPredicate or = new Or(v1, v2);
+    assertEquals(expected, evaluate(or));
+    v1.reset();
+    v2.reset();
+  }
+
+  private void doAndTest(ValueInspector v1, ValueInspector v2, int v1Value, int v2Value, boolean expected) {
+    v1.update(v1Value);
+    v2.update(v2Value);
+    IncrementallyUpdatedFilterPredicate and = new And(v1, v2);
+    assertEquals(expected, evaluate(and));
+    v1.reset();
+    v2.reset();
+  }
+
+
+  @Test
+  public void testOr() {
+    ValueInspector v1 = intIsEven();
+    ValueInspector v2 = intIsEven();
+
+    int F = 11;
+    int T = 12;
+
+    // F || F == F
+    doOrTest(v1, v2, F, F, false);
+    // F || T == T
+    doOrTest(v1, v2, F, T, true);
+    // T || F == T
+    doOrTest(v1, v2, T, F, true);
+    // T || T == T
+    doOrTest(v1, v2, T, T, true);
+
+  }
+
+  @Test
+  public void testAnd() {
+    ValueInspector v1 = intIsEven();
+    ValueInspector v2 = intIsEven();
+
+    int F = 11;
+    int T = 12;
+
+    // F && F == F
+    doAndTest(v1, v2, F, F, false);
+    // F && T == F
+    doAndTest(v1, v2, F, T, false);
+    // T && F == F
+    doAndTest(v1, v2, T, F, false);
+    // T && T == T
+    doAndTest(v1, v2, T, T, true);
+
+  }
+
+  @Test
+  public void testShortCircuit() {
+    ValueInspector neverCalled = new ValueInspector() {
+      @Override
+      public boolean accept(Visitor visitor) {
+        throw new ShortCircuitException();
+      }
+    };
+
+    try {
+      evaluate(neverCalled);
+      fail("this should throw");
+    } catch (ShortCircuitException e) {
+      //
+    }
+
+    // T || X should evaluate to true without inspecting X
+    ValueInspector v = intIsEven();
+    v.update(10);
+    IncrementallyUpdatedFilterPredicate or = new Or(v, neverCalled);
+    assertTrue(evaluate(or));
+    v.reset();
+
+    // F && X should evaluate to false without inspecting X
+    v.update(11);
+    IncrementallyUpdatedFilterPredicate and = new And(v, neverCalled);
+    assertFalse(evaluate(and));
+    v.reset();
+
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/test/java/parquet/filter2/recordlevel/TestIncrementallyUpdatedFilterPredicateResetter.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/test/java/parquet/filter2/recordlevel/TestIncrementallyUpdatedFilterPredicateResetter.java b/parquet-column/src/test/java/parquet/filter2/recordlevel/TestIncrementallyUpdatedFilterPredicateResetter.java
new file mode 100644
index 0000000..974d6e7
--- /dev/null
+++ b/parquet-column/src/test/java/parquet/filter2/recordlevel/TestIncrementallyUpdatedFilterPredicateResetter.java
@@ -0,0 +1,51 @@
+package parquet.filter2.recordlevel;
+
+
+import org.junit.Test;
+
+import parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicate.And;
+import parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicate.Or;
+import parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicate.ValueInspector;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static parquet.filter2.recordlevel.TestIncrementallyUpdatedFilterPredicateEvaluator.doubleMoreThan10;
+import static parquet.filter2.recordlevel.TestIncrementallyUpdatedFilterPredicateEvaluator.intIsEven;
+import static parquet.filter2.recordlevel.TestIncrementallyUpdatedFilterPredicateEvaluator.intIsNull;
+
+public class TestIncrementallyUpdatedFilterPredicateResetter {
+  @Test
+  public void testReset() {
+
+    ValueInspector intIsNull = intIsNull();
+    ValueInspector intIsEven = intIsEven();
+    ValueInspector doubleMoreThan10 = doubleMoreThan10();
+
+    IncrementallyUpdatedFilterPredicate pred = new Or(intIsNull, new And(intIsEven, doubleMoreThan10));
+
+    intIsNull.updateNull();
+    intIsEven.update(11);
+    doubleMoreThan10.update(20.0D);
+
+    assertTrue(intIsNull.isKnown());
+    assertTrue(intIsEven.isKnown());
+    assertTrue(doubleMoreThan10.isKnown());
+
+    IncrementallyUpdatedFilterPredicateResetter.reset(pred);
+
+    assertFalse(intIsNull.isKnown());
+    assertFalse(intIsEven.isKnown());
+    assertFalse(doubleMoreThan10.isKnown());
+
+    intIsNull.updateNull();
+    assertTrue(intIsNull.isKnown());
+    assertFalse(intIsEven.isKnown());
+    assertFalse(doubleMoreThan10.isKnown());
+
+    IncrementallyUpdatedFilterPredicateResetter.reset(pred);
+    assertFalse(intIsNull.isKnown());
+    assertFalse(intIsEven.isKnown());
+    assertFalse(doubleMoreThan10.isKnown());
+
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/test/java/parquet/filter2/recordlevel/TestValueInspector.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/test/java/parquet/filter2/recordlevel/TestValueInspector.java b/parquet-column/src/test/java/parquet/filter2/recordlevel/TestValueInspector.java
new file mode 100644
index 0000000..fc2e587
--- /dev/null
+++ b/parquet-column/src/test/java/parquet/filter2/recordlevel/TestValueInspector.java
@@ -0,0 +1,79 @@
+package parquet.filter2.recordlevel;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.junit.Test;
+
+import parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicate.ValueInspector;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static parquet.filter2.recordlevel.TestIncrementallyUpdatedFilterPredicateEvaluator.intIsEven;
+
+public class TestValueInspector {
+
+  @Test
+  public void testLifeCycle() {
+    ValueInspector v = intIsEven();
+
+    // begins in unknown state
+    assertFalse(v.isKnown());
+    // calling getResult in unknown state throws
+    try {
+      v.getResult();
+      fail("this should throw");
+    } catch (IllegalStateException e) {
+      assertEquals("getResult() called on a ValueInspector whose result is not yet known!", e.getMessage());
+    }
+
+    // update state to known
+    v.update(10);
+
+    // v was updated with value 10, so result is known and should be true
+    assertTrue(v.isKnown());
+    assertTrue(v.getResult());
+
+    // calling update w/o resetting should throw
+    try {
+      v.update(11);
+      fail("this should throw");
+    } catch (IllegalStateException e) {
+      assertEquals("setResult() called on a ValueInspector whose result is already known!"
+          + " Did you forget to call reset()?", e.getMessage());
+    }
+
+    // back to unknown state
+    v.reset();
+
+    assertFalse(v.isKnown());
+    // calling getResult in unknown state throws
+    try {
+      v.getResult();
+      fail("this should throw");
+    } catch (IllegalStateException e) {
+      assertEquals("getResult() called on a ValueInspector whose result is not yet known!", e.getMessage());
+    }
+
+    // v was updated with value 11, so result is known and should be false
+    v.update(11);
+    assertTrue(v.isKnown());
+    assertFalse(v.getResult());
+
+  }
+
+  @Test
+  public void testReusable() {
+    List<Integer> values = Arrays.asList(2, 4, 7, 3, 8, 8, 11, 200);
+    ValueInspector v = intIsEven();
+
+    for (Integer x : values) {
+      v.update(x);
+      assertEquals(x % 2 == 0, v.getResult());
+      v.reset();
+    }
+
+  }
+}


[2/4] Add a unified and optionally more constrained API for expressing filters on columns

Posted by ju...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-column/src/test/java/parquet/io/TestFiltered.java
----------------------------------------------------------------------
diff --git a/parquet-column/src/test/java/parquet/io/TestFiltered.java b/parquet-column/src/test/java/parquet/io/TestFiltered.java
index 66fe6a0..0107b36 100644
--- a/parquet-column/src/test/java/parquet/io/TestFiltered.java
+++ b/parquet-column/src/test/java/parquet/io/TestFiltered.java
@@ -15,19 +15,6 @@
  */
 package parquet.io;
 
-import static org.junit.Assert.assertEquals;
-import static parquet.example.Paper.r1;
-import static parquet.example.Paper.r2;
-import static parquet.example.Paper.schema;
-import static parquet.filter.AndRecordFilter.and;
-import static parquet.filter.ColumnPredicates.applyFunctionToLong;
-import static parquet.filter.ColumnPredicates.applyFunctionToString;
-import static parquet.filter.ColumnPredicates.equalTo;
-import static parquet.filter.ColumnRecordFilter.column;
-import static parquet.filter.NotRecordFilter.not;
-import static parquet.filter.OrRecordFilter.or;
-import static parquet.filter.PagedRecordFilter.page;
-
 import java.util.ArrayList;
 import java.util.List;
 
@@ -41,8 +28,22 @@ import parquet.example.data.GroupWriter;
 import parquet.example.data.simple.convert.GroupRecordConverter;
 import parquet.filter.ColumnPredicates.LongPredicateFunction;
 import parquet.filter.ColumnPredicates.PredicateFunction;
+import parquet.filter2.compat.FilterCompat;
 import parquet.io.api.RecordMaterializer;
 
+import static org.junit.Assert.assertEquals;
+import static parquet.example.Paper.r1;
+import static parquet.example.Paper.r2;
+import static parquet.example.Paper.schema;
+import static parquet.filter.AndRecordFilter.and;
+import static parquet.filter.ColumnPredicates.applyFunctionToLong;
+import static parquet.filter.ColumnPredicates.applyFunctionToString;
+import static parquet.filter.ColumnPredicates.equalTo;
+import static parquet.filter.ColumnRecordFilter.column;
+import static parquet.filter.NotRecordFilter.not;
+import static parquet.filter.OrRecordFilter.or;
+import static parquet.filter.PagedRecordFilter.page;
+
 public class TestFiltered {
 
   /* Class that implements applyFunction filter for long. Checks for long greater than 15. */
@@ -84,15 +85,14 @@ public class TestFiltered {
     // Get first record
     RecordMaterializer<Group> recordConverter = new GroupRecordConverter(schema);
     RecordReaderImplementation<Group> recordReader = (RecordReaderImplementation<Group>)
-        columnIO.getRecordReader(memPageStore, recordConverter,
-            column("DocId", equalTo(10l)));
+        columnIO.getRecordReader(memPageStore, recordConverter, FilterCompat.get(column("DocId", equalTo(10l))));
 
     readOne(recordReader, "r2 filtered out", r1);
 
     // Get second record
     recordReader = (RecordReaderImplementation<Group>)
         columnIO.getRecordReader(memPageStore, recordConverter,
-            column("DocId", equalTo(20l)));
+            FilterCompat.get(column("DocId", equalTo(20l))));
 
     readOne(recordReader, "r1 filtered out", r2);
 
@@ -107,14 +107,14 @@ public class TestFiltered {
     RecordMaterializer<Group> recordConverter = new GroupRecordConverter(schema);
     RecordReaderImplementation<Group> recordReader = (RecordReaderImplementation<Group>)
         columnIO.getRecordReader(memPageStore, recordConverter,
-            column("DocId", equalTo(10l)));
+            FilterCompat.get(column("DocId", equalTo(10l))));
 
     readOne(recordReader, "r2 filtered out", r1);
 
     // Get second record
     recordReader = (RecordReaderImplementation<Group>)
         columnIO.getRecordReader(memPageStore, recordConverter,
-            column("DocId", applyFunctionToLong (new LongGreaterThan15Predicate())));
+            FilterCompat.get(column("DocId", applyFunctionToLong(new LongGreaterThan15Predicate()))));
 
     readOne(recordReader, "r1 filtered out", r2);
   }
@@ -128,7 +128,7 @@ public class TestFiltered {
     RecordMaterializer<Group> recordConverter = new GroupRecordConverter(schema);
     RecordReaderImplementation<Group> recordReader = (RecordReaderImplementation<Group>)
         columnIO.getRecordReader(memPageStore, recordConverter,
-            column("Name.Url", equalTo("http://A")));
+            FilterCompat.get(column("Name.Url", equalTo("http://A"))));
 
     readOne(recordReader, "r2 filtered out", r1);
 
@@ -136,7 +136,7 @@ public class TestFiltered {
     // against the first instance of a
     recordReader = (RecordReaderImplementation<Group>)
         columnIO.getRecordReader(memPageStore, recordConverter,
-            column("Name.Url", equalTo("http://B")));
+            FilterCompat.get(column("Name.Url", equalTo("http://B"))));
 
     List<Group> all = readAll(recordReader);
     assertEquals("There should be no matching records: " + all , 0, all.size());
@@ -144,7 +144,7 @@ public class TestFiltered {
     // Finally try matching against the C url in record 2
     recordReader = (RecordReaderImplementation<Group>)
         columnIO.getRecordReader(memPageStore, recordConverter,
-            column("Name.Url", equalTo("http://C")));
+            FilterCompat.get(column("Name.Url", equalTo("http://C"))));
 
     readOne(recordReader, "r1 filtered out", r2);
 
@@ -159,7 +159,7 @@ public class TestFiltered {
     RecordMaterializer<Group> recordConverter = new GroupRecordConverter(schema);
     RecordReaderImplementation<Group> recordReader = (RecordReaderImplementation<Group>)
         columnIO.getRecordReader(memPageStore, recordConverter,
-            column("Name.Url", applyFunctionToString (new StringEndsWithAPredicate ())));
+            FilterCompat.get(column("Name.Url", applyFunctionToString(new StringEndsWithAPredicate()))));
 
     readOne(recordReader, "r2 filtered out", r1);
 
@@ -167,7 +167,7 @@ public class TestFiltered {
     // against the first instance of a
     recordReader = (RecordReaderImplementation<Group>)
         columnIO.getRecordReader(memPageStore, recordConverter,
-            column("Name.Url", equalTo("http://B")));
+            FilterCompat.get(column("Name.Url", equalTo("http://B"))));
 
     List<Group> all = readAll(recordReader);
     assertEquals("There should be no matching records: " + all , 0, all.size());
@@ -175,7 +175,7 @@ public class TestFiltered {
     // Finally try matching against the C url in record 2
     recordReader = (RecordReaderImplementation<Group>)
         columnIO.getRecordReader(memPageStore, recordConverter,
-            column("Name.Url", equalTo("http://C")));
+            FilterCompat.get(column("Name.Url", equalTo("http://C"))));
 
     readOne(recordReader, "r1 filtered out", r2);
 
@@ -189,7 +189,7 @@ public class TestFiltered {
     RecordMaterializer<Group> recordConverter = new GroupRecordConverter(schema);
     RecordReaderImplementation<Group> recordReader = (RecordReaderImplementation<Group>)
         columnIO.getRecordReader(memPageStore, recordConverter,
-                                 page(4, 4));
+            FilterCompat.get(page(4, 4)));
 
     List<Group> all = readAll(recordReader);
     assertEquals("expecting records " + all, 4, all.size());
@@ -206,7 +206,7 @@ public class TestFiltered {
     RecordMaterializer<Group> recordConverter = new GroupRecordConverter(schema);
     RecordReaderImplementation<Group> recordReader = (RecordReaderImplementation<Group>)
         columnIO.getRecordReader(memPageStore, recordConverter,
-            and(column("DocId", equalTo(10l)), page(2, 4)));
+            FilterCompat.get(and(column("DocId", equalTo(10l)), page(2, 4))));
 
     List<Group> all = readAll(recordReader);
     assertEquals("expecting 4 records " + all, 4, all.size());
@@ -224,8 +224,8 @@ public class TestFiltered {
     RecordMaterializer<Group> recordConverter = new GroupRecordConverter(schema);
     RecordReaderImplementation<Group> recordReader = (RecordReaderImplementation<Group>)
         columnIO.getRecordReader(memPageStore, recordConverter,
-            or(column("DocId", equalTo(10l)),
-                column("DocId", equalTo(20l))));
+            FilterCompat.get(or(column("DocId", equalTo(10l)),
+                column("DocId", equalTo(20l)))));
 
     List<Group> all = readAll(recordReader);
     assertEquals("expecting 8 records " + all, 16, all.size());
@@ -243,7 +243,7 @@ public class TestFiltered {
     RecordMaterializer<Group> recordConverter = new GroupRecordConverter(schema);
     RecordReaderImplementation<Group> recordReader = (RecordReaderImplementation<Group>)
         columnIO.getRecordReader(memPageStore, recordConverter,
-            not(column("DocId", equalTo(10l))));
+            FilterCompat.get(not(column("DocId", equalTo(10l)))));
 
     List<Group> all = readAll(recordReader);
     assertEquals("expecting 8 records " + all, 8, all.size());

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-common/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-common/pom.xml b/parquet-common/pom.xml
index 9abc8af..02abcad 100644
--- a/parquet-common/pom.xml
+++ b/parquet-common/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-common/src/main/java/parquet/Closeables.java
----------------------------------------------------------------------
diff --git a/parquet-common/src/main/java/parquet/Closeables.java b/parquet-common/src/main/java/parquet/Closeables.java
new file mode 100644
index 0000000..9d4c213
--- /dev/null
+++ b/parquet-common/src/main/java/parquet/Closeables.java
@@ -0,0 +1,37 @@
+package parquet;
+
+import java.io.Closeable;
+import java.io.IOException;
+
+/**
+ * Utility for working with {@link java.io.Closeable}s
+ */
+public final class Closeables {
+  private Closeables() { }
+
+  private static final Log LOG = Log.getLog(Closeables.class);
+
+  /**
+   * Closes a (potentially null) closeable.
+   * @param c can be null
+   * @throws IOException if c.close() throws an IOException.
+   */
+  public static void close(Closeable c) throws IOException {
+    if (c == null) { return; }
+    c.close();
+  }
+
+  /**
+   * Closes a (potentially null) closeable, swallowing any IOExceptions thrown by
+   * c.close(). The exception will be logged.
+   * @param c can be null
+   */
+  public static void closeAndSwallowIOExceptions(Closeable c) {
+    if (c == null) { return; }
+    try {
+      c.close();
+    } catch (IOException e) {
+      LOG.warn("Encountered exception closing closeable", e);
+    }
+  }
+}
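
For illustration, a minimal usage sketch of the Closeables helper above; the stream and path are placeholders, not part of this patch. closeAndSwallowIOExceptions is intended for cleanup paths where a failure to close should be logged rather than allowed to mask an exception already in flight.

    import java.io.FileInputStream;
    import java.io.IOException;

    import parquet.Closeables;

    public class CloseablesSketch {
      public static void printFirstByte(String path) throws IOException {
        FileInputStream in = null;
        try {
          in = new FileInputStream(path);
          System.out.println("first byte: " + in.read());
        } finally {
          // logs and swallows any IOException from close(), so it cannot hide
          // an exception already propagating out of the try block
          Closeables.closeAndSwallowIOExceptions(in);
        }
      }
    }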

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-common/src/main/java/parquet/common/internal/Canonicalizer.java
----------------------------------------------------------------------
diff --git a/parquet-common/src/main/java/parquet/common/internal/Canonicalizer.java b/parquet-common/src/main/java/parquet/common/internal/Canonicalizer.java
new file mode 100644
index 0000000..3cea532
--- /dev/null
+++ b/parquet-common/src/main/java/parquet/common/internal/Canonicalizer.java
@@ -0,0 +1,59 @@
+/**
+ * Copyright 2014 Twitter, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package parquet.common.internal;
+
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * Returns a canonical representation of objects (similar to String.intern()) to save memory:
+ * if a.equals(b), then canonicalize(a) == canonicalize(b).
+ * This class is thread safe.
+ * @author Julien Le Dem
+ *
+ * @param <T>
+ */
+public class Canonicalizer<T> {
+
+  private ConcurrentHashMap<T, T> canonicals = new ConcurrentHashMap<T, T>();
+
+  /**
+   * @param value the value to canonicalize
+   * @return the corresponding canonical value
+   */
+  final public T canonicalize(T value) {
+    T canonical = canonicals.get(value);
+    if (canonical == null) {
+      value = toCanonical(value);
+      T existing = canonicals.putIfAbsent(value, value);
+      // putIfAbsent is atomic, making sure we always return the same canonical representation of the value
+      if (existing == null) {
+        canonical = value;
+      } else {
+        canonical = existing;
+      }
+    }
+    return canonical;
+  }
+
+  /**
+   * @param value the value to canonicalize if needed
+   * @return the canonicalized value
+   */
+  protected T toCanonical(T value) {
+    return value;
+  }
+}
+
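
As a rough sketch of the intern-like behaviour described in the javadoc above (illustrative only), equal values canonicalize to a single shared instance:

    import parquet.common.internal.Canonicalizer;

    public class CanonicalizerSketch {
      public static void main(String[] args) {
        Canonicalizer<String> strings = new Canonicalizer<String>();

        String a = new String("foo.bar");
        String b = new String("foo.bar");

        // a.equals(b), so both calls return the same canonical instance,
        // letting long-lived structures share one copy of the value
        System.out.println(strings.canonicalize(a) == strings.canonicalize(b)); // true
      }
    }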

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-common/src/main/java/parquet/common/schema/ColumnPath.java
----------------------------------------------------------------------
diff --git a/parquet-common/src/main/java/parquet/common/schema/ColumnPath.java b/parquet-common/src/main/java/parquet/common/schema/ColumnPath.java
new file mode 100644
index 0000000..f3ded9c
--- /dev/null
+++ b/parquet-common/src/main/java/parquet/common/schema/ColumnPath.java
@@ -0,0 +1,96 @@
+/**
+ * Copyright 2012 Twitter, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package parquet.common.schema;
+
+import java.io.Serializable;
+import java.util.Arrays;
+import java.util.Iterator;
+
+import parquet.common.internal.Canonicalizer;
+
+import static parquet.Preconditions.checkNotNull;
+
+public final class ColumnPath implements Iterable<String>, Serializable {
+
+  private static Canonicalizer<ColumnPath> paths = new Canonicalizer<ColumnPath>() {
+    @Override
+    protected ColumnPath toCanonical(ColumnPath value) {
+      String[] path = new String[value.p.length];
+      for (int i = 0; i < value.p.length; i++) {
+        path[i] = value.p[i].intern();
+      }
+      return new ColumnPath(path);
+    }
+  };
+
+  public static ColumnPath fromDotString(String path) {
+    checkNotNull(path, "path");
+    return get(path.split("\\."));
+  }
+
+  public static ColumnPath get(String... path){
+    return paths.canonicalize(new ColumnPath(path));
+  }
+
+  private final String[] p;
+
+  private ColumnPath(String[] path) {
+    this.p = path;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (obj instanceof ColumnPath) {
+      return Arrays.equals(p, ((ColumnPath)obj).p);
+    }
+    return false;
+  }
+
+  @Override
+  public int hashCode() {
+    return Arrays.hashCode(p);
+  }
+
+  public String toDotString() {
+    Iterator<String> iter = Arrays.asList(p).iterator();
+    StringBuilder sb = new StringBuilder();
+    while (iter.hasNext()) {
+      sb.append(iter.next());
+      if (iter.hasNext()) {
+        sb.append('.');
+      }
+    }
+    return sb.toString();
+  }
+
+  @Override
+  public String toString() {
+    return Arrays.toString(p);
+  }
+
+  @Override
+  public Iterator<String> iterator() {
+    return Arrays.asList(p).iterator();
+  }
+
+  public int size() {
+    return p.length;
+  }
+
+  public String[] toArray() {
+    return p;
+  }
+}
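
A small sketch of the ColumnPath API above (illustrative): because paths are canonicalized, equal paths compare reference-equal.

    import parquet.common.schema.ColumnPath;

    public class ColumnPathSketch {
      public static void main(String[] args) {
        ColumnPath fromParts = ColumnPath.get("x", "bar");
        ColumnPath fromDots = ColumnPath.fromDotString("x.bar");

        System.out.println(fromParts == fromDots);   // true: canonicalized to one instance
        System.out.println(fromDots.toDotString());  // x.bar
        System.out.println(fromDots.size());         // 2
      }
    }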

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-encoding/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-encoding/pom.xml b/parquet-encoding/pom.xml
index 6da2ea6..5840ca5 100644
--- a/parquet-encoding/pom.xml
+++ b/parquet-encoding/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-generator/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-generator/pom.xml b/parquet-generator/pom.xml
index 7afc41b..1ec4a50 100644
--- a/parquet-generator/pom.xml
+++ b/parquet-generator/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-generator/src/main/java/parquet/encoding/Generator.java
----------------------------------------------------------------------
diff --git a/parquet-generator/src/main/java/parquet/encoding/Generator.java b/parquet-generator/src/main/java/parquet/encoding/Generator.java
index 58d9382..b6aa596 100644
--- a/parquet-generator/src/main/java/parquet/encoding/Generator.java
+++ b/parquet-generator/src/main/java/parquet/encoding/Generator.java
@@ -19,7 +19,7 @@ import parquet.encoding.bitpacking.ByteBasedBitPackingGenerator;
 import parquet.encoding.bitpacking.IntBasedBitPackingGenerator;
 
 /**
- * main class for code generation hook in build
+ * Main class for the code generation hook in the build that generates the encodings.
  *
  * @author Julien Le Dem
  *

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-generator/src/main/java/parquet/filter2/Generator.java
----------------------------------------------------------------------
diff --git a/parquet-generator/src/main/java/parquet/filter2/Generator.java b/parquet-generator/src/main/java/parquet/filter2/Generator.java
new file mode 100644
index 0000000..9818218
--- /dev/null
+++ b/parquet-generator/src/main/java/parquet/filter2/Generator.java
@@ -0,0 +1,10 @@
+package parquet.filter2;
+
+/**
+ * Main class for the code generation hook in the build that generates the filter2 classes.
+ */
+public class Generator {
+  public static void main(String[] args) throws Exception {
+    IncrementallyUpdatedFilterPredicateGenerator.main(args);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-generator/src/main/java/parquet/filter2/IncrementallyUpdatedFilterPredicateGenerator.java
----------------------------------------------------------------------
diff --git a/parquet-generator/src/main/java/parquet/filter2/IncrementallyUpdatedFilterPredicateGenerator.java b/parquet-generator/src/main/java/parquet/filter2/IncrementallyUpdatedFilterPredicateGenerator.java
new file mode 100644
index 0000000..e0f08e4
--- /dev/null
+++ b/parquet-generator/src/main/java/parquet/filter2/IncrementallyUpdatedFilterPredicateGenerator.java
@@ -0,0 +1,251 @@
+package parquet.filter2;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+
+public class IncrementallyUpdatedFilterPredicateGenerator {
+
+  public static void main(String[] args) throws IOException {
+    File srcFile = new File(args[0] + "/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateBuilder.java");
+    srcFile = srcFile.getAbsoluteFile();
+    File parent = srcFile.getParentFile();
+    if (!parent.exists()) {
+      if (!parent.mkdirs()) {
+        throw new IOException("Couldn't mkdirs for " + parent);
+      }
+    }
+    new IncrementallyUpdatedFilterPredicateGenerator(srcFile).run();
+  }
+
+  private final FileWriter writer;
+
+  public IncrementallyUpdatedFilterPredicateGenerator(File file) throws IOException {
+    this.writer = new FileWriter(file);
+  }
+
+  private static class TypeInfo {
+    public final String className;
+    public final String primitiveName;
+    public final boolean useComparable;
+    public final boolean supportsInequality;
+
+    private TypeInfo(String className, String primitiveName, boolean useComparable, boolean supportsInequality) {
+      this.className = className;
+      this.primitiveName = primitiveName;
+      this.useComparable = useComparable;
+      this.supportsInequality = supportsInequality;
+    }
+  }
+
+  private static final TypeInfo[] TYPES = new TypeInfo[]{
+    new TypeInfo("Integer", "int", false, true),
+    new TypeInfo("Long", "long", false, true),
+    new TypeInfo("Boolean", "boolean", false, false),
+    new TypeInfo("Float", "float", false, true),
+    new TypeInfo("Double", "double", false, true),
+    new TypeInfo("Binary", "Binary", true, true),
+  };
+
+  public void run() throws IOException {
+    add("package parquet.filter2.recordlevel;\n" +
+        "\n" +
+        "import parquet.common.schema.ColumnPath;\n" +
+        "import parquet.filter2.predicate.Operators.Eq;\n" +
+        "import parquet.filter2.predicate.Operators.Gt;\n" +
+        "import parquet.filter2.predicate.Operators.GtEq;\n" +
+        "import parquet.filter2.predicate.Operators.LogicalNotUserDefined;\n" +
+        "import parquet.filter2.predicate.Operators.Lt;\n" +
+        "import parquet.filter2.predicate.Operators.LtEq;\n" +
+        "import parquet.filter2.predicate.Operators.NotEq;\n" +
+        "import parquet.filter2.predicate.Operators.UserDefined;\n" +
+        "import parquet.filter2.predicate.UserDefinedPredicate;\n" +
+        "import parquet.filter2.recordlevel.IncrementallyUpdatedFilterPredicate.ValueInspector;\n" +
+        "import parquet.io.api.Binary;\n\n" +
+        "/**\n" +
+        " * This class is auto-generated by {@link parquet.filter2.IncrementallyUpdatedFilterPredicateGenerator}\n" +
+        " * Do not manually edit!\n" +
+        " * See {@link IncrementallyUpdatedFilterPredicateBuilderBase}\n" +
+        " */\n");
+
+    add("public class IncrementallyUpdatedFilterPredicateBuilder extends IncrementallyUpdatedFilterPredicateBuilderBase {\n\n");
+
+    addVisitBegin("Eq");
+    for (TypeInfo info : TYPES) {
+      addEqNotEqCase(info, true);
+    }
+    addVisitEnd();
+
+    addVisitBegin("NotEq");
+    for (TypeInfo info : TYPES) {
+      addEqNotEqCase(info, false);
+    }
+    addVisitEnd();
+
+    addVisitBegin("Lt");
+    for (TypeInfo info : TYPES) {
+      addInequalityCase(info, "<");
+    }
+    addVisitEnd();
+
+    addVisitBegin("LtEq");
+    for (TypeInfo info : TYPES) {
+      addInequalityCase(info, "<=");
+    }
+    addVisitEnd();
+
+    addVisitBegin("Gt");
+    for (TypeInfo info : TYPES) {
+      addInequalityCase(info, ">");
+    }
+    addVisitEnd();
+
+    addVisitBegin("GtEq");
+    for (TypeInfo info : TYPES) {
+      addInequalityCase(info, ">=");
+    }
+    addVisitEnd();
+
+    add("  @Override\n" +
+        "  public <T extends Comparable<T>, U extends UserDefinedPredicate<T>> IncrementallyUpdatedFilterPredicate visit(UserDefined<T, U> pred) {\n");
+    addUdpBegin();
+    for (TypeInfo info : TYPES) {
+      addUdpCase(info, false);
+    }
+    addVisitEnd();
+
+    add("  @Override\n" +
+        "  public <T extends Comparable<T>, U extends UserDefinedPredicate<T>> IncrementallyUpdatedFilterPredicate visit(LogicalNotUserDefined<T, U> notPred) {\n" +
+        "    UserDefined<T, U> pred = notPred.getUserDefined();\n");
+    addUdpBegin();
+    for (TypeInfo info : TYPES) {
+      addUdpCase(info, true);
+    }
+    addVisitEnd();
+
+    add("}\n");
+    writer.close();
+  }
+
+  private void addVisitBegin(String inVar) throws IOException {
+    add("  @Override\n" +
+        "  public <T extends Comparable<T>> IncrementallyUpdatedFilterPredicate visit(" + inVar + "<T> pred) {\n" +
+        "    ColumnPath columnPath = pred.getColumn().getColumnPath();\n" +
+        "    Class<T> clazz = pred.getColumn().getColumnType();\n" +
+        "\n" +
+        "    ValueInspector valueInspector = null;\n\n");
+  }
+
+  private void addVisitEnd() throws IOException {
+    add("    if (valueInspector == null) {\n" +
+        "      throw new IllegalArgumentException(\"Encountered unknown type \" + clazz);\n" +
+        "    }\n" +
+        "\n" +
+        "    addValueInspector(columnPath, valueInspector);\n" +
+        "    return valueInspector;\n" +
+        "  }\n\n");
+  }
+
+  private void addEqNotEqCase(TypeInfo info, boolean isEq) throws IOException {
+    add("    if (clazz.equals(" + info.className + ".class)) {\n" +
+        "      if (pred.getValue() == null) {\n" +
+        "        valueInspector = new ValueInspector() {\n" +
+        "          @Override\n" +
+        "          public void updateNull() {\n" +
+        "            setResult(" + isEq + ");\n" +
+        "          }\n" +
+        "\n" +
+        "          @Override\n" +
+        "          public void update(" + info.primitiveName + " value) {\n" +
+        "            setResult(" + !isEq + ");\n" +
+        "          }\n" +
+        "        };\n" +
+        "      } else {\n" +
+        "        final " + info.primitiveName + " target = (" + info.className + ") (Object) pred.getValue();\n" +
+        "\n" +
+        "        valueInspector = new ValueInspector() {\n" +
+        "          @Override\n" +
+        "          public void updateNull() {\n" +
+        "            setResult(" + !isEq +");\n" +
+        "          }\n" +
+        "\n" +
+        "          @Override\n" +
+        "          public void update(" + info.primitiveName + " value) {\n");
+
+    if (info.useComparable) {
+      add("            setResult(" + compareEquality("value", "target", isEq) + ");\n");
+    } else {
+      add("            setResult(" + (isEq ? "value == target" : "value != target" )  + ");\n");
+    }
+
+    add("          }\n" +
+        "        };\n" +
+        "      }\n" +
+        "    }\n\n");
+  }
+
+  private void addInequalityCase(TypeInfo info, String op) throws IOException {
+    if (!info.supportsInequality) {
+      add("    if (clazz.equals(" + info.className + ".class)) {\n");
+      add("      throw new IllegalArgumentException(\"Operator " + op + " not supported for " + info.className + "\");\n");
+      add("    }\n\n");
+      return;
+    }
+
+    add("    if (clazz.equals(" + info.className + ".class)) {\n" +
+        "      final " + info.primitiveName + " target = (" + info.className + ") (Object) pred.getValue();\n" +
+        "\n" +
+        "      valueInspector = new ValueInspector() {\n" +
+        "        @Override\n" +
+        "        public void updateNull() {\n" +
+        "          setResult(false);\n" +
+        "        }\n" +
+        "\n" +
+        "        @Override\n" +
+        "        public void update(" + info.primitiveName + " value) {\n");
+
+    if (info.useComparable) {
+      add("          setResult(value.compareTo(target) " + op + " 0);\n");
+    } else {
+      add("          setResult(value " + op + " target);\n");
+    }
+    add("        }\n" +
+        "      };\n" +
+        "    }\n\n");
+  }
+
+  private void addUdpBegin() throws IOException {
+    add("    ColumnPath columnPath = pred.getColumn().getColumnPath();\n" +
+        "    Class<T> clazz = pred.getColumn().getColumnType();\n" +
+        "\n" +
+        "    ValueInspector valueInspector = null;\n" +
+        "\n" +
+        "    final U udp = pred.getUserDefinedPredicate();\n" +
+        "\n");
+  }
+
+  private void addUdpCase(TypeInfo info, boolean invert)throws IOException {
+    add("    if (clazz.equals(" + info.className + ".class)) {\n" +
+        "      valueInspector = new ValueInspector() {\n" +
+        "        @Override\n" +
+        "        public void updateNull() {\n" +
+        "          setResult(" + (invert ? "!" : "") + "udp.keep(null));\n" +
+        "        }\n" +
+        "\n" +
+        "        @SuppressWarnings(\"unchecked\")\n" +
+        "        @Override\n" +
+        "        public void update(" + info.primitiveName + " value) {\n" +
+        "          setResult(" + (invert ? "!" : "") + "udp.keep((T) (Object) value));\n" +
+        "        }\n" +
+        "      };\n" +
+        "    }\n\n");
+  }
+
+  private String compareEquality(String var, String target, boolean eq) {
+    return var + ".compareTo(" + target + ")" + (eq ? " == 0 " : " != 0");
+  }
+
+  private void add(String s) throws IOException {
+    writer.write(s);
+  }
+}
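
For reference, a sketch of invoking the generator by hand; the output directory here is a placeholder, and in the actual build the hook is driven through parquet.filter2.Generator above.

    public class RunFilter2GeneratorSketch {
      public static void main(String[] args) throws Exception {
        // writes parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateBuilder.java
        // under the given source root (path is illustrative)
        parquet.filter2.IncrementallyUpdatedFilterPredicateGenerator.main(
            new String[] { "target/generated-sources/java" });
      }
    }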

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop-bundle/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-hadoop-bundle/pom.xml b/parquet-hadoop-bundle/pom.xml
index c3caa2f..ae89233 100644
--- a/parquet-hadoop-bundle/pom.xml
+++ b/parquet-hadoop-bundle/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/pom.xml
----------------------------------------------------------------------
diff --git a/parquet-hadoop/pom.xml b/parquet-hadoop/pom.xml
index c1b4f07..d074b0c 100644
--- a/parquet-hadoop/pom.xml
+++ b/parquet-hadoop/pom.xml
@@ -3,7 +3,7 @@
     <groupId>com.twitter</groupId>
     <artifactId>parquet</artifactId>
     <relativePath>../pom.xml</relativePath>
-    <version>1.5.1-SNAPSHOT</version>
+    <version>1.6.0-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>
@@ -78,9 +78,11 @@
 
   <build>
     <plugins>
+<!-- turned off temporarily, must be turned back on after 1.6.0 is released.
       <plugin>
         <artifactId>maven-enforcer-plugin</artifactId>
       </plugin>
+-->
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-jar-plugin</artifactId>

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/main/java/parquet/filter2/compat/RowGroupFilter.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/parquet/filter2/compat/RowGroupFilter.java b/parquet-hadoop/src/main/java/parquet/filter2/compat/RowGroupFilter.java
new file mode 100644
index 0000000..4da9821
--- /dev/null
+++ b/parquet-hadoop/src/main/java/parquet/filter2/compat/RowGroupFilter.java
@@ -0,0 +1,63 @@
+package parquet.filter2.compat;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import parquet.filter2.compat.FilterCompat.Filter;
+import parquet.filter2.compat.FilterCompat.NoOpFilter;
+import parquet.filter2.compat.FilterCompat.Visitor;
+import parquet.filter2.predicate.FilterPredicate;
+import parquet.filter2.predicate.SchemaCompatibilityValidator;
+import parquet.filter2.statisticslevel.StatisticsFilter;
+import parquet.hadoop.metadata.BlockMetaData;
+import parquet.schema.MessageType;
+
+import static parquet.Preconditions.checkNotNull;
+
+/**
+ * Given a {@link Filter}, applies it to a list of BlockMetaData (row groups).
+ * If the Filter is an {@link parquet.filter.UnboundRecordFilter} or the no-op filter,
+ * no filtering is performed.
+ */
+public class RowGroupFilter implements Visitor<List<BlockMetaData>> {
+  private final List<BlockMetaData> blocks;
+  private final MessageType schema;
+
+  public static List<BlockMetaData> filterRowGroups(Filter filter, List<BlockMetaData> blocks, MessageType schema) {
+    checkNotNull(filter, "filter");
+    return filter.accept(new RowGroupFilter(blocks, schema));
+  }
+
+  private RowGroupFilter(List<BlockMetaData> blocks, MessageType schema) {
+    this.blocks = checkNotNull(blocks, "blocks");
+    this.schema = checkNotNull(schema, "schema");
+  }
+
+  @Override
+  public List<BlockMetaData> visit(FilterCompat.FilterPredicateCompat filterPredicateCompat) {
+    FilterPredicate filterPredicate = filterPredicateCompat.getFilterPredicate();
+
+    // check that the schema of the filter matches the schema of the file
+    SchemaCompatibilityValidator.validate(filterPredicate, schema);
+
+    List<BlockMetaData> filteredBlocks = new ArrayList<BlockMetaData>();
+
+    for (BlockMetaData block : blocks) {
+      if (!StatisticsFilter.canDrop(filterPredicate, block.getColumns())) {
+        filteredBlocks.add(block);
+      }
+    }
+
+    return filteredBlocks;
+  }
+
+  @Override
+  public List<BlockMetaData> visit(FilterCompat.UnboundRecordFilterCompat unboundRecordFilterCompat) {
+    return blocks;
+  }
+
+  @Override
+  public List<BlockMetaData> visit(NoOpFilter noOpFilter) {
+    return blocks;
+  }
+}
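
A hedged sketch of the intended call pattern (the helper and its names are illustrative): wrap a FilterPredicate with FilterCompat.get and prune row groups before reading them.

    import java.util.List;

    import parquet.filter2.compat.FilterCompat;
    import parquet.filter2.compat.RowGroupFilter;
    import parquet.filter2.predicate.FilterPredicate;
    import parquet.hadoop.metadata.BlockMetaData;
    import parquet.schema.MessageType;

    public class RowGroupFilterSketch {
      // keeps only the row groups whose statistics do not prove the predicate
      // can never match; unbound record filters and the no-op filter pass all
      // row groups through unchanged
      public static List<BlockMetaData> prune(FilterPredicate pred,
                                              List<BlockMetaData> rowGroups,
                                              MessageType fileSchema) {
        return RowGroupFilter.filterRowGroups(FilterCompat.get(pred), rowGroups, fileSchema);
      }
    }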

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/main/java/parquet/filter2/statisticslevel/StatisticsFilter.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/parquet/filter2/statisticslevel/StatisticsFilter.java b/parquet-hadoop/src/main/java/parquet/filter2/statisticslevel/StatisticsFilter.java
new file mode 100644
index 0000000..4daed5a
--- /dev/null
+++ b/parquet-hadoop/src/main/java/parquet/filter2/statisticslevel/StatisticsFilter.java
@@ -0,0 +1,244 @@
+package parquet.filter2.statisticslevel;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import parquet.column.statistics.Statistics;
+import parquet.common.schema.ColumnPath;
+import parquet.filter2.predicate.FilterPredicate;
+import parquet.filter2.predicate.Operators.And;
+import parquet.filter2.predicate.Operators.Column;
+import parquet.filter2.predicate.Operators.Eq;
+import parquet.filter2.predicate.Operators.Gt;
+import parquet.filter2.predicate.Operators.GtEq;
+import parquet.filter2.predicate.Operators.LogicalNotUserDefined;
+import parquet.filter2.predicate.Operators.Lt;
+import parquet.filter2.predicate.Operators.LtEq;
+import parquet.filter2.predicate.Operators.Not;
+import parquet.filter2.predicate.Operators.NotEq;
+import parquet.filter2.predicate.Operators.Or;
+import parquet.filter2.predicate.Operators.UserDefined;
+import parquet.filter2.predicate.UserDefinedPredicate;
+import parquet.hadoop.metadata.ColumnChunkMetaData;
+
+import static parquet.Preconditions.checkArgument;
+import static parquet.Preconditions.checkNotNull;
+
+/**
+ * Applies a {@link parquet.filter2.predicate.FilterPredicate} to statistics about a group of
+ * records.
+ *
+ * Note: the supplied predicate must not contain any instances of the not() operator as this is not
+ * supported by this filter.
+ *
+ * The supplied predicate should first be run through {@link parquet.filter2.predicate.LogicalInverseRewriter}
+ * to rewrite it into a form that doesn't make use of the not() operator.
+ *
+ * The supplied predicate should also have already been run through
+ * {@link parquet.filter2.predicate.SchemaCompatibilityValidator}
+ * to make sure it is compatible with the schema of this file.
+ *
+ * Returns true if all the records represented by the statistics in the provided column metadata can be dropped.
+ *         false otherwise (including when it is not known, which is often the case).
+ */
+// TODO: this belongs in the parquet-column project, but some of the classes here need to be moved too
+// TODO: (https://issues.apache.org/jira/browse/PARQUET-38)
+public class StatisticsFilter implements FilterPredicate.Visitor<Boolean> {
+
+  public static boolean canDrop(FilterPredicate pred, List<ColumnChunkMetaData> columns) {
+    checkNotNull(pred, "pred");
+    checkNotNull(columns, "columns");
+    return pred.accept(new StatisticsFilter(columns));
+  }
+
+  private final Map<ColumnPath, ColumnChunkMetaData> columns = new HashMap<ColumnPath, ColumnChunkMetaData>();
+
+  private StatisticsFilter(List<ColumnChunkMetaData> columnsList) {
+    for (ColumnChunkMetaData chunk : columnsList) {
+      columns.put(chunk.getPath(), chunk);
+    }
+  }
+
+  private ColumnChunkMetaData getColumnChunk(ColumnPath columnPath) {
+    ColumnChunkMetaData c = columns.get(columnPath);
+    checkArgument(c != null, "Column " + columnPath.toDotString() + " not found in schema!");
+    return c;
+  }
+
+  // is this column chunk composed entirely of nulls?
+  private boolean isAllNulls(ColumnChunkMetaData column) {
+    return column.getStatistics().getNumNulls() == column.getValueCount();
+  }
+
+  // are there any nulls in this column chunk?
+  private boolean hasNulls(ColumnChunkMetaData column) {
+    return column.getStatistics().getNumNulls() > 0;
+  }
+
+  @Override
+  public <T extends Comparable<T>> Boolean visit(Eq<T> eq) {
+    Column<T> filterColumn = eq.getColumn();
+    T value = eq.getValue();
+    ColumnChunkMetaData columnChunk = getColumnChunk(filterColumn.getColumnPath());
+
+    if (value == null) {
+      // we are looking for records where v eq(null)
+      // so drop if there are no nulls in this chunk
+      return !hasNulls(columnChunk);
+    }
+
+    if (isAllNulls(columnChunk)) {
+      // we are looking for records where v eq(someNonNull)
+      // and this is a column of all nulls, so drop it
+      return true;
+    }
+
+    Statistics<T> stats = columnChunk.getStatistics();
+
+    // drop if value < min || value > max
+    return value.compareTo(stats.genericGetMin()) < 0 || value.compareTo(stats.genericGetMax()) > 0;
+  }
+
+  @Override
+  public <T extends Comparable<T>> Boolean visit(NotEq<T> notEq) {
+    Column<T> filterColumn = notEq.getColumn();
+    T value = notEq.getValue();
+    ColumnChunkMetaData columnChunk = getColumnChunk(filterColumn.getColumnPath());
+
+    if (value == null) {
+      // we are looking for records where v notEq(null)
+      // so, if this is a column of all nulls, we can drop it
+      return isAllNulls(columnChunk);
+    }
+
+    if (hasNulls(columnChunk)) {
+      // we are looking for records where v notEq(someNonNull)
+      // but this chunk contains nulls, we cannot drop it
+      return false;
+    }
+
+    Statistics<T> stats = columnChunk.getStatistics();
+
+    // drop if this is a column where min = max = value
+    return value.compareTo(stats.genericGetMin()) == 0 && value.compareTo(stats.genericGetMax()) == 0;
+  }
+
+  @Override
+  public <T extends Comparable<T>> Boolean visit(Lt<T> lt) {
+    Column<T> filterColumn = lt.getColumn();
+    T value = lt.getValue();
+    ColumnChunkMetaData columnChunk = getColumnChunk(filterColumn.getColumnPath());
+
+    if (isAllNulls(columnChunk)) {
+      // we are looking for records where v < someValue
+      // this chunk is all nulls, so we can drop it
+      return true;
+    }
+
+    Statistics<T> stats = columnChunk.getStatistics();
+
+    // drop if value <= min
+    return  value.compareTo(stats.genericGetMin()) <= 0;
+  }
+
+  @Override
+  public <T extends Comparable<T>> Boolean visit(LtEq<T> ltEq) {
+    Column<T> filterColumn = ltEq.getColumn();
+    T value = ltEq.getValue();
+    ColumnChunkMetaData columnChunk = getColumnChunk(filterColumn.getColumnPath());
+
+    if (isAllNulls(columnChunk)) {
+      // we are looking for records where v <= someValue
+      // this chunk is all nulls, so we can drop it
+      return true;
+    }
+
+    Statistics<T> stats = columnChunk.getStatistics();
+
+    // drop if value < min
+    return value.compareTo(stats.genericGetMin()) < 0;
+  }
+
+  @Override
+  public <T extends Comparable<T>> Boolean visit(Gt<T> gt) {
+    Column<T> filterColumn = gt.getColumn();
+    T value = gt.getValue();
+    ColumnChunkMetaData columnChunk = getColumnChunk(filterColumn.getColumnPath());
+
+    if (isAllNulls(columnChunk)) {
+      // we are looking for records where v > someValue
+      // this chunk is all nulls, so we can drop it
+      return true;
+    }
+
+    Statistics<T> stats = columnChunk.getStatistics();
+
+    // drop if value >= max
+    return value.compareTo(stats.genericGetMax()) >= 0;
+  }
+
+  @Override
+  public <T extends Comparable<T>> Boolean visit(GtEq<T> gtEq) {
+    Column<T> filterColumn = gtEq.getColumn();
+    T value = gtEq.getValue();
+    ColumnChunkMetaData columnChunk = getColumnChunk(filterColumn.getColumnPath());
+
+    if (isAllNulls(columnChunk)) {
+      // we are looking for records where v >= someValue
+      // this chunk is all nulls, so we can drop it
+      return true;
+    }
+
+    Statistics<T> stats = columnChunk.getStatistics();
+
+    // drop if value > max
+    return value.compareTo(stats.genericGetMax()) > 0;
+  }
+
+  @Override
+  public Boolean visit(And and) {
+    return and.getLeft().accept(this) && and.getRight().accept(this);
+  }
+
+  @Override
+  public Boolean visit(Or or) {
+    // it may seem unintuitive to use && rather than || here,
+    // but we can only drop a chunk of records if both the left and right
+    // predicates agree that, no matter what, we don't need this chunk.
+    return or.getLeft().accept(this) && or.getRight().accept(this);
+  }
+
+  @Override
+  public Boolean visit(Not not) {
+    throw new IllegalArgumentException(
+        "This predicate contains a not! Did you forget to run this predicate through LogicalInverseRewriter? " + not);
+  }
+
+  private <T extends Comparable<T>, U extends UserDefinedPredicate<T>> Boolean visit(UserDefined<T, U> ud, boolean inverted) {
+    Column<T> filterColumn = ud.getColumn();
+    ColumnChunkMetaData columnChunk = getColumnChunk(filterColumn.getColumnPath());
+    U udp = ud.getUserDefinedPredicate();
+    Statistics<T> stats = columnChunk.getStatistics();
+    parquet.filter2.predicate.Statistics<T> udpStats =
+        new parquet.filter2.predicate.Statistics<T>(stats.genericGetMin(), stats.genericGetMax());
+
+    if (inverted) {
+      return udp.inverseCanDrop(udpStats);
+    } else {
+      return udp.canDrop(udpStats);
+    }
+  }
+
+  @Override
+  public <T extends Comparable<T>, U extends UserDefinedPredicate<T>> Boolean visit(UserDefined<T, U> ud) {
+    return visit(ud, false);
+  }
+
+  @Override
+  public <T extends Comparable<T>, U extends UserDefinedPredicate<T>> Boolean visit(LogicalNotUserDefined<T, U> lnud) {
+    return visit(lnud.getUserDefined(), true);
+  }
+
+}
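
As a reviewer-facing aside, here is a minimal, self-contained sketch (not part of the patch; the chunk statistics min=10/max=100 are hypothetical) of the drop rules the visitor above applies, including why visit(Or) combines its children with && rather than ||:

  // Sketch only: mirrors the min/max drop rules of the statistics visitor above.
  public class DropRuleSketch {
    // hypothetical chunk statistics
    static final int MIN = 10, MAX = 100;

    static boolean dropLt(int value)   { return value <= MIN; } // no record v can satisfy v <  value
    static boolean dropLtEq(int value) { return value <  MIN; } // no record v can satisfy v <= value
    static boolean dropGt(int value)   { return value >= MAX; } // no record v can satisfy v >  value
    static boolean dropGtEq(int value) { return value >  MAX; } // no record v can satisfy v >= value

    public static void main(String[] args) {
      System.out.println(dropLt(10));    // true: nothing in [10, 100] is < 10
      System.out.println(dropGtEq(100)); // false: a record equal to max may satisfy v >= 100
      // For or(left, right), the chunk can be dropped only if BOTH sides can drop it;
      // if either side might match a record in this chunk, the chunk must be kept.
      boolean dropOr = dropLt(10) && dropGtEq(100); // false: keep the chunk
      System.out.println(dropOr);
    }
  }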

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/main/java/parquet/format/converter/ParquetMetadataConverter.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/parquet/format/converter/ParquetMetadataConverter.java b/parquet-hadoop/src/main/java/parquet/format/converter/ParquetMetadataConverter.java
index 81a39b8..5bd6869 100644
--- a/parquet-hadoop/src/main/java/parquet/format/converter/ParquetMetadataConverter.java
+++ b/parquet-hadoop/src/main/java/parquet/format/converter/ParquetMetadataConverter.java
@@ -15,9 +15,6 @@
  */
 package parquet.format.converter;
 
-import static parquet.format.Util.readFileMetaData;
-import static parquet.format.Util.writePageHeader;
-
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -33,8 +30,9 @@ import java.util.Map.Entry;
 import java.util.Set;
 
 import parquet.Log;
-import parquet.format.ConvertedType;
+import parquet.common.schema.ColumnPath;
 import parquet.format.ColumnChunk;
+import parquet.format.ConvertedType;
 import parquet.format.DataPageHeader;
 import parquet.format.DictionaryPageHeader;
 import parquet.format.Encoding;
@@ -49,11 +47,9 @@ import parquet.format.Statistics;
 import parquet.format.Type;
 import parquet.hadoop.metadata.BlockMetaData;
 import parquet.hadoop.metadata.ColumnChunkMetaData;
-import parquet.hadoop.metadata.ColumnPath;
 import parquet.hadoop.metadata.CompressionCodecName;
 import parquet.hadoop.metadata.ParquetMetadata;
 import parquet.io.ParquetDecodingException;
-import parquet.schema.Types;
 import parquet.schema.GroupType;
 import parquet.schema.MessageType;
 import parquet.schema.OriginalType;
@@ -61,6 +57,10 @@ import parquet.schema.PrimitiveType;
 import parquet.schema.PrimitiveType.PrimitiveTypeName;
 import parquet.schema.Type.Repetition;
 import parquet.schema.TypeVisitor;
+import parquet.schema.Types;
+
+import static parquet.format.Util.readFileMetaData;
+import static parquet.format.Util.writePageHeader;
 
 public class ParquetMetadataConverter {
   private static final Log LOG = Log.getLog(ParquetMetadataConverter.class);

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/main/java/parquet/hadoop/InternalParquetRecordReader.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/parquet/hadoop/InternalParquetRecordReader.java b/parquet-hadoop/src/main/java/parquet/hadoop/InternalParquetRecordReader.java
index f3aa81f..5a9b019 100644
--- a/parquet-hadoop/src/main/java/parquet/hadoop/InternalParquetRecordReader.java
+++ b/parquet-hadoop/src/main/java/parquet/hadoop/InternalParquetRecordReader.java
@@ -18,12 +18,16 @@ package parquet.hadoop;
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+
 import parquet.Log;
 import parquet.column.ColumnDescriptor;
 import parquet.column.page.PageReadStore;
 import parquet.filter.UnboundRecordFilter;
+import parquet.filter2.compat.FilterCompat;
+import parquet.filter2.compat.FilterCompat.Filter;
 import parquet.hadoop.api.ReadSupport;
 import parquet.hadoop.metadata.BlockMetaData;
 import parquet.hadoop.util.counters.BenchmarkCounter;
@@ -37,12 +41,14 @@ import parquet.schema.Type;
 
 import static java.lang.String.format;
 import static parquet.Log.DEBUG;
+import static parquet.Preconditions.checkNotNull;
 import static parquet.hadoop.ParquetInputFormat.STRICT_TYPE_CHECKING;
 
 class InternalParquetRecordReader<T> {
   private static final Log LOG = Log.getLog(InternalParquetRecordReader.class);
 
   private final ColumnIOFactory columnIOFactory = new ColumnIOFactory();
+  private final Filter filter;
 
   private MessageType requestedSchema;
   private MessageType fileSchema;
@@ -57,7 +63,6 @@ class InternalParquetRecordReader<T> {
   private int currentBlock = -1;
   private ParquetFileReader reader;
   private parquet.io.RecordReader<T> recordReader;
-  private UnboundRecordFilter recordFilter;
   private boolean strictTypeChecking;
 
   private long totalTimeSpentReadingBytes;
@@ -70,19 +75,28 @@ class InternalParquetRecordReader<T> {
 
   /**
    * @param readSupport Object which helps reads files of the given type, e.g. Thrift, Avro.
+   * @param filter for filtering individual records
+   */
+  public InternalParquetRecordReader(ReadSupport<T> readSupport, Filter filter) {
+    this.readSupport = readSupport;
+    this.filter = checkNotNull(filter, "filter");
+  }
+
+  /**
+   * @param readSupport Object which helps read files of the given type, e.g. Thrift, Avro.
    */
   public InternalParquetRecordReader(ReadSupport<T> readSupport) {
-    this(readSupport, null);
+    this(readSupport, FilterCompat.NOOP);
   }
 
   /**
    * @param readSupport Object which helps reads files of the given type, e.g. Thrift, Avro.
    * @param filter Optional filter for only returning matching records.
+   * @deprecated use {@link #InternalParquetRecordReader(ReadSupport, Filter)}
    */
-  public InternalParquetRecordReader(ReadSupport<T> readSupport, UnboundRecordFilter
-      filter) {
-    this.readSupport = readSupport;
-    this.recordFilter = filter;
+  @Deprecated
+  public InternalParquetRecordReader(ReadSupport<T> readSupport, UnboundRecordFilter filter) {
+    this(readSupport, FilterCompat.get(filter));
   }
 
   private void checkRead() throws IOException {
@@ -109,7 +123,7 @@ class InternalParquetRecordReader<T> {
       LOG.info("block read in memory in " + timeSpentReading + " ms. row count = " + pages.getRowCount());
       if (Log.DEBUG) LOG.debug("initializing Record assembly with requested schema " + requestedSchema);
       MessageColumnIO columnIO = columnIOFactory.getColumnIO(requestedSchema, fileSchema, strictTypeChecking);
-      recordReader = columnIO.getRecordReader(pages, recordConverter, recordFilter);
+      recordReader = columnIO.getRecordReader(pages, recordConverter, filter);
       startedAssemblingCurrentBlockAt = System.currentTimeMillis();
       totalCountLoadedSoFar += pages.getRowCount();
       ++ currentBlock;
@@ -169,27 +183,36 @@ class InternalParquetRecordReader<T> {
   }
 
   public boolean nextKeyValue() throws IOException, InterruptedException {
-    if (current < total) {
+    boolean recordFound = false;
+
+    while (!recordFound) {
+      // no more records left
+      if (current >= total) { return false; }
+
       try {
         checkRead();
         currentValue = recordReader.read();
         current ++;
-        while (currentValue == null) { // only happens with FilteredRecordReader at end of block
+        if (recordReader.shouldSkipCurrentRecord()) {
+          // this record is being filtered via the filter2 package
+          if (DEBUG) LOG.debug("skipping record");
+          continue;
+        }
+
+        if (currentValue == null) {
+          // only happens with FilteredRecordReader at end of block
           current = totalCountLoadedSoFar;
-          if (current < total) {
-            checkRead();
-            currentValue = recordReader.read();
-            current ++;
-            continue;
-          }
-          return false;
+          if (DEBUG) LOG.debug("filtered record reader reached end of block");
+          continue;
         }
+
+        recordFound = true;
+
         if (DEBUG) LOG.debug("read value: " + currentValue);
       } catch (RuntimeException e) {
         throw new ParquetDecodingException(format("Can not read value at %d in block %d in file %s", current, currentBlock, file), e);
       }
-      return true;
     }
-    return false;
+    return true;
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/main/java/parquet/hadoop/ParquetFileReader.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/parquet/hadoop/ParquetFileReader.java b/parquet-hadoop/src/main/java/parquet/hadoop/ParquetFileReader.java
index 2a2f054..e660c9f 100644
--- a/parquet-hadoop/src/main/java/parquet/hadoop/ParquetFileReader.java
+++ b/parquet-hadoop/src/main/java/parquet/hadoop/ParquetFileReader.java
@@ -15,11 +15,6 @@
  */
 package parquet.hadoop;
 
-import static parquet.Log.DEBUG;
-import static parquet.bytes.BytesUtils.readIntLittleEndian;
-import static parquet.hadoop.ParquetFileWriter.MAGIC;
-import static parquet.hadoop.ParquetFileWriter.PARQUET_METADATA_FILE;
-
 import java.io.ByteArrayInputStream;
 import java.io.Closeable;
 import java.io.IOException;
@@ -52,6 +47,7 @@ import parquet.column.ColumnDescriptor;
 import parquet.column.page.DictionaryPage;
 import parquet.column.page.Page;
 import parquet.column.page.PageReadStore;
+import parquet.common.schema.ColumnPath;
 import parquet.format.PageHeader;
 import parquet.format.Util;
 import parquet.format.converter.ParquetMetadataConverter;
@@ -59,11 +55,15 @@ import parquet.hadoop.CodecFactory.BytesDecompressor;
 import parquet.hadoop.ColumnChunkPageReadStore.ColumnChunkPageReader;
 import parquet.hadoop.metadata.BlockMetaData;
 import parquet.hadoop.metadata.ColumnChunkMetaData;
-import parquet.hadoop.metadata.ColumnPath;
 import parquet.hadoop.metadata.ParquetMetadata;
 import parquet.hadoop.util.counters.BenchmarkCounter;
 import parquet.io.ParquetDecodingException;
 
+import static parquet.Log.DEBUG;
+import static parquet.bytes.BytesUtils.readIntLittleEndian;
+import static parquet.hadoop.ParquetFileWriter.MAGIC;
+import static parquet.hadoop.ParquetFileWriter.PARQUET_METADATA_FILE;
+
 /**
  * Internal implementation of the Parquet file reader as a block container
  *

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/main/java/parquet/hadoop/ParquetFileWriter.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/parquet/hadoop/ParquetFileWriter.java b/parquet-hadoop/src/main/java/parquet/hadoop/ParquetFileWriter.java
index ff29179..f3ef61b 100644
--- a/parquet-hadoop/src/main/java/parquet/hadoop/ParquetFileWriter.java
+++ b/parquet-hadoop/src/main/java/parquet/hadoop/ParquetFileWriter.java
@@ -15,9 +15,6 @@
  */
 package parquet.hadoop;
 
-import static parquet.Log.DEBUG;
-import static parquet.format.Util.writeFileMetaData;
-
 import java.io.IOException;
 import java.nio.charset.Charset;
 import java.util.ArrayList;
@@ -40,10 +37,10 @@ import parquet.bytes.BytesUtils;
 import parquet.column.ColumnDescriptor;
 import parquet.column.page.DictionaryPage;
 import parquet.column.statistics.Statistics;
+import parquet.common.schema.ColumnPath;
 import parquet.format.converter.ParquetMetadataConverter;
 import parquet.hadoop.metadata.BlockMetaData;
 import parquet.hadoop.metadata.ColumnChunkMetaData;
-import parquet.hadoop.metadata.ColumnPath;
 import parquet.hadoop.metadata.CompressionCodecName;
 import parquet.hadoop.metadata.FileMetaData;
 import parquet.hadoop.metadata.GlobalMetaData;
@@ -52,6 +49,9 @@ import parquet.io.ParquetEncodingException;
 import parquet.schema.MessageType;
 import parquet.schema.PrimitiveType.PrimitiveTypeName;
 
+import static parquet.Log.DEBUG;
+import static parquet.format.Util.writeFileMetaData;
+
 /**
  * Internal implementation of the Parquet file writer as a block container
  *

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/main/java/parquet/hadoop/ParquetInputFormat.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/parquet/hadoop/ParquetInputFormat.java b/parquet-hadoop/src/main/java/parquet/hadoop/ParquetInputFormat.java
index 882d2f7..0231ccd 100644
--- a/parquet-hadoop/src/main/java/parquet/hadoop/ParquetInputFormat.java
+++ b/parquet-hadoop/src/main/java/parquet/hadoop/ParquetInputFormat.java
@@ -43,6 +43,10 @@ import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 
 import parquet.Log;
 import parquet.filter.UnboundRecordFilter;
+import parquet.filter2.compat.FilterCompat;
+import parquet.filter2.compat.FilterCompat.Filter;
+import parquet.filter2.compat.RowGroupFilter;
+import parquet.filter2.predicate.FilterPredicate;
 import parquet.hadoop.api.InitContext;
 import parquet.hadoop.api.ReadSupport;
 import parquet.hadoop.api.ReadSupport.ReadContext;
@@ -53,10 +57,13 @@ import parquet.hadoop.metadata.GlobalMetaData;
 import parquet.hadoop.metadata.ParquetMetadata;
 import parquet.hadoop.util.ConfigurationUtil;
 import parquet.hadoop.util.ContextUtil;
+import parquet.hadoop.util.SerializationUtil;
 import parquet.io.ParquetDecodingException;
 import parquet.schema.MessageType;
 import parquet.schema.MessageTypeParser;
 
+import static parquet.Preconditions.checkArgument;
+
 /**
  * The input format to read a Parquet file.
  *
@@ -88,6 +95,11 @@ public class ParquetInputFormat<T> extends FileInputFormat<Void, T> {
    */
   public static final String STRICT_TYPE_CHECKING = "parquet.strict.typing";
 
+  /**
+   * key to configure the filter predicate
+   */
+  public static final String FILTER_PREDICATE = "parquet.private.read.filter.predicate";
+
   private static final int MIN_FOOTER_CACHE_SIZE = 100;
 
   private LruCache<FileStatusWrapper, FootersCacheValue> footersCache;
@@ -99,13 +111,40 @@ public class ParquetInputFormat<T> extends FileInputFormat<Void, T> {
   }
 
   public static void setUnboundRecordFilter(Job job, Class<? extends UnboundRecordFilter> filterClass) {
-    ContextUtil.getConfiguration(job).set(UNBOUND_RECORD_FILTER, filterClass.getName());
+    Configuration conf = ContextUtil.getConfiguration(job);
+    checkArgument(getFilterPredicate(conf) == null,
+        "You cannot provide an UnboundRecordFilter after providing a FilterPredicate");
+
+    conf.set(UNBOUND_RECORD_FILTER, filterClass.getName());
   }
 
+  /**
+   * @deprecated use {@link #getFilter(Configuration)}
+   */
+  @Deprecated
   public static Class<?> getUnboundRecordFilter(Configuration configuration) {
     return ConfigurationUtil.getClassFromConfig(configuration, UNBOUND_RECORD_FILTER, UnboundRecordFilter.class);
   }
 
+  private static UnboundRecordFilter getUnboundRecordFilterInstance(Configuration configuration) {
+    Class<?> clazz = ConfigurationUtil.getClassFromConfig(configuration, UNBOUND_RECORD_FILTER, UnboundRecordFilter.class);
+    if (clazz == null) { return null; }
+
+    try {
+      UnboundRecordFilter unboundRecordFilter = (UnboundRecordFilter) clazz.newInstance();
+
+      if (unboundRecordFilter instanceof Configurable) {
+        ((Configurable)unboundRecordFilter).setConf(configuration);
+      }
+
+      return unboundRecordFilter;
+    } catch (InstantiationException e) {
+      throw new BadConfigurationException("could not instantiate unbound record filter class", e);
+    } catch (IllegalAccessException e) {
+      throw new BadConfigurationException("could not instantiate unbound record filter class", e);
+    }
+  }
+
   public static void setReadSupportClass(JobConf conf, Class<?> readSupportClass) {
     conf.set(READ_SUPPORT_CLASS, readSupportClass.getName());
   }
@@ -114,6 +153,34 @@ public class ParquetInputFormat<T> extends FileInputFormat<Void, T> {
     return ConfigurationUtil.getClassFromConfig(configuration, READ_SUPPORT_CLASS, ReadSupport.class);
   }
 
+  public static void setFilterPredicate(Configuration configuration, FilterPredicate filterPredicate) {
+    checkArgument(getUnboundRecordFilter(configuration) == null,
+        "You cannot provide a FilterPredicate after providing an UnboundRecordFilter");
+
+    configuration.set(FILTER_PREDICATE + ".human.readable", filterPredicate.toString());
+    try {
+      SerializationUtil.writeObjectToConfAsBase64(FILTER_PREDICATE, filterPredicate, configuration);
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private static FilterPredicate getFilterPredicate(Configuration configuration) {
+    try {
+      return SerializationUtil.readObjectFromConfAsBase64(FILTER_PREDICATE, configuration);
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
+   * Returns a non-null Filter, which is a wrapper around either a
+   * FilterPredicate, an UnboundRecordFilter, or a no-op filter.
+   */
+  public static Filter getFilter(Configuration conf) {
+    return FilterCompat.get(getFilterPredicate(conf), getUnboundRecordFilterInstance(conf));
+  }
+
   /**
    * Hadoop will instantiate using this constructor
    */
@@ -135,24 +202,12 @@ public class ParquetInputFormat<T> extends FileInputFormat<Void, T> {
   public RecordReader<Void, T> createRecordReader(
       InputSplit inputSplit,
       TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
+
+    ReadSupport<T> readSupport = getReadSupport(ContextUtil.getConfiguration(taskAttemptContext));
+
     Configuration conf = ContextUtil.getConfiguration(taskAttemptContext);
-    ReadSupport<T> readSupport = getReadSupport(conf);
-    Class<?> unboundRecordFilterClass = getUnboundRecordFilter(conf);
-    if (unboundRecordFilterClass == null) {
-      return new ParquetRecordReader<T>(readSupport);
-    } else {
-      try {
-        UnboundRecordFilter filter = (UnboundRecordFilter)unboundRecordFilterClass.newInstance();
-        if (filter instanceof Configurable) {
-          ((Configurable)filter).setConf(conf);
-        }
-        return new ParquetRecordReader<T>(readSupport, filter);
-      } catch (InstantiationException e) {
-        throw new BadConfigurationException("could not instantiate unbound record filter class", e);
-      } catch (IllegalAccessException e) {
-        throw new BadConfigurationException("could not instantiate unbound record filter class", e);
-      }
-    }
+
+    return new ParquetRecordReader<T>(readSupport, getFilter(conf));
   }
 
   /**
@@ -381,6 +436,12 @@ public class ParquetInputFormat<T> extends FileInputFormat<Void, T> {
         configuration,
         globalMetaData.getKeyValueMetaData(),
         globalMetaData.getSchema()));
+
+    Filter filter = getFilter(configuration);
+
+    long rowGroupsDropped = 0;
+    long totalRowGroups = 0;
+
     for (Footer footer : footers) {
       final Path file = footer.getFile();
       LOG.debug(file);
@@ -388,10 +449,21 @@ public class ParquetInputFormat<T> extends FileInputFormat<Void, T> {
       FileStatus fileStatus = fs.getFileStatus(file);
       ParquetMetadata parquetMetaData = footer.getParquetMetadata();
       List<BlockMetaData> blocks = parquetMetaData.getBlocks();
+
+      List<BlockMetaData> filteredBlocks = blocks;
+
+      totalRowGroups += blocks.size();
+      filteredBlocks = RowGroupFilter.filterRowGroups(filter, blocks, parquetMetaData.getFileMetaData().getSchema());
+      rowGroupsDropped += blocks.size() - filteredBlocks.size();
+
+      if (filteredBlocks.isEmpty()) {
+        continue;
+      }
+
       BlockLocation[] fileBlockLocations = fs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());
       splits.addAll(
           generateSplits(
-              blocks,
+              filteredBlocks,
               fileBlockLocations,
               fileStatus,
               parquetMetaData.getFileMetaData(),
@@ -401,6 +473,14 @@ public class ParquetInputFormat<T> extends FileInputFormat<Void, T> {
               maxSplitSize)
           );
     }
+
+    if (rowGroupsDropped > 0 && totalRowGroups > 0) {
+      int percentDropped = (int) ((((double) rowGroupsDropped) / totalRowGroups) * 100);
+      LOG.info("Dropping " + rowGroupsDropped + " row groups that do not pass filter predicate! (" + percentDropped + "%)");
+    } else {
+      LOG.info("There were no row groups that could be dropped due to filter predicates");
+    }
+
     return splits;
   }
 

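To make the new configuration path concrete, here is a hedged usage sketch (the job setup and column name are hypothetical; it assumes the FilterApi helpers intColumn/gtEq/lt/and from the filter2 predicate package): a FilterPredicate is serialized into the Configuration by setFilterPredicate and later recovered as a non-null Filter wrapper by getFilter, which getSplits and createRecordReader above both rely on.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.mapreduce.Job;

  import parquet.filter2.compat.FilterCompat.Filter;
  import parquet.filter2.predicate.FilterPredicate;
  import parquet.hadoop.ParquetInputFormat;

  import static parquet.filter2.predicate.FilterApi.and;
  import static parquet.filter2.predicate.FilterApi.gtEq;
  import static parquet.filter2.predicate.FilterApi.intColumn;
  import static parquet.filter2.predicate.FilterApi.lt;

  public class FilterPredicateJobSetup {
    public static void main(String[] args) throws Exception {
      Job job = Job.getInstance();                 // hypothetical job setup
      Configuration conf = job.getConfiguration();

      // keep only records with 10 <= id < 100
      FilterPredicate pred = and(gtEq(intColumn("id"), 10), lt(intColumn("id"), 100));
      ParquetInputFormat.setFilterPredicate(conf, pred);

      // at read time the framework recovers a non-null Filter wrapper:
      Filter filter = ParquetInputFormat.getFilter(conf);
      System.out.println(filter);
    }
  }
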
http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/main/java/parquet/hadoop/ParquetInputSplit.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/parquet/hadoop/ParquetInputSplit.java b/parquet-hadoop/src/main/java/parquet/hadoop/ParquetInputSplit.java
index 9e9b4ff..da0c2ec 100644
--- a/parquet-hadoop/src/main/java/parquet/hadoop/ParquetInputSplit.java
+++ b/parquet-hadoop/src/main/java/parquet/hadoop/ParquetInputSplit.java
@@ -40,9 +40,9 @@ import org.apache.hadoop.mapreduce.lib.input.FileSplit;
 import parquet.Log;
 import parquet.column.Encoding;
 import parquet.column.statistics.IntStatistics;
+import parquet.common.schema.ColumnPath;
 import parquet.hadoop.metadata.BlockMetaData;
 import parquet.hadoop.metadata.ColumnChunkMetaData;
-import parquet.hadoop.metadata.ColumnPath;
 import parquet.hadoop.metadata.CompressionCodecName;
 import parquet.schema.PrimitiveType.PrimitiveTypeName;
 

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/main/java/parquet/hadoop/ParquetReader.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/parquet/hadoop/ParquetReader.java b/parquet-hadoop/src/main/java/parquet/hadoop/ParquetReader.java
index 3e85331..c56a402 100644
--- a/parquet-hadoop/src/main/java/parquet/hadoop/ParquetReader.java
+++ b/parquet-hadoop/src/main/java/parquet/hadoop/ParquetReader.java
@@ -17,7 +17,6 @@ package parquet.hadoop;
 
 import java.io.Closeable;
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Iterator;
 import java.util.List;
@@ -30,6 +29,9 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
 import parquet.filter.UnboundRecordFilter;
+import parquet.filter2.compat.FilterCompat;
+import parquet.filter2.compat.FilterCompat.Filter;
+import parquet.filter2.compat.RowGroupFilter;
 import parquet.hadoop.api.InitContext;
 import parquet.hadoop.api.ReadSupport;
 import parquet.hadoop.api.ReadSupport.ReadContext;
@@ -37,26 +39,32 @@ import parquet.hadoop.metadata.BlockMetaData;
 import parquet.hadoop.metadata.GlobalMetaData;
 import parquet.schema.MessageType;
 
+import static parquet.Preconditions.checkNotNull;
+
 /**
  * Read records from a Parquet file.
+ * TODO: too many constructors (https://issues.apache.org/jira/browse/PARQUET-39)
  */
 public class ParquetReader<T> implements Closeable {
 
-  private ReadSupport<T> readSupport;
-  private UnboundRecordFilter filter;
-  private Configuration conf;
-  private ReadContext readContext;
-  private Iterator<Footer> footersIterator;
+  private final ReadSupport<T> readSupport;
+  private final Configuration conf;
+  private final ReadContext readContext;
+  private final Iterator<Footer> footersIterator;
+  private final GlobalMetaData globalMetaData;
+  private final Filter filter;
+
   private InternalParquetRecordReader<T> reader;
-  private GlobalMetaData globalMetaData;
 
   /**
    * @param file the file to read
    * @param readSupport to materialize records
    * @throws IOException
+   * @deprecated use {@link #builder(ReadSupport, Path)}
    */
+  @Deprecated
   public ParquetReader(Path file, ReadSupport<T> readSupport) throws IOException {
-    this(file, readSupport, null);
+    this(new Configuration(), file, readSupport, FilterCompat.NOOP);
   }
 
   /**
@@ -64,31 +72,44 @@ public class ParquetReader<T> implements Closeable {
    * @param file the file to read
    * @param readSupport to materialize records
    * @throws IOException
+   * @deprecated use {@link #builder(ReadSupport, Path)}
    */
+  @Deprecated
   public ParquetReader(Configuration conf, Path file, ReadSupport<T> readSupport) throws IOException {
-    this(conf, file, readSupport, null);
+    this(conf, file, readSupport, FilterCompat.NOOP);
   }
 
   /**
    * @param file the file to read
    * @param readSupport to materialize records
-   * @param filter the filter to use to filter records
+   * @param unboundRecordFilter the filter to use to filter records
    * @throws IOException
+   * @deprecated use {@link #builder(ReadSupport, Path)}
    */
-  public ParquetReader(Path file, ReadSupport<T> readSupport, UnboundRecordFilter filter) throws IOException {
-    this(new Configuration(), file, readSupport, filter);
+  @Deprecated
+  public ParquetReader(Path file, ReadSupport<T> readSupport, UnboundRecordFilter unboundRecordFilter) throws IOException {
+    this(new Configuration(), file, readSupport, FilterCompat.get(unboundRecordFilter));
   }
 
   /**
    * @param conf the configuration
    * @param file the file to read
    * @param readSupport to materialize records
-   * @param filter the filter to use to filter records
+   * @param unboundRecordFilter the filter to use to filter records
    * @throws IOException
+   * @deprecated use {@link #builder(ReadSupport, Path)}
    */
-  public ParquetReader(Configuration conf, Path file, ReadSupport<T> readSupport, UnboundRecordFilter filter) throws IOException {
+  @Deprecated
+  public ParquetReader(Configuration conf, Path file, ReadSupport<T> readSupport, UnboundRecordFilter unboundRecordFilter) throws IOException {
+    this(conf, file, readSupport, FilterCompat.get(unboundRecordFilter));
+  }
+
+  private ParquetReader(Configuration conf,
+                       Path file,
+                       ReadSupport<T> readSupport,
+                       Filter filter) throws IOException {
     this.readSupport = readSupport;
-    this.filter = filter;
+    this.filter = checkNotNull(filter, "filter");
     this.conf = conf;
 
     FileSystem fs = file.getFileSystem(conf);
@@ -96,12 +117,6 @@ public class ParquetReader<T> implements Closeable {
     List<Footer> footers = ParquetFileReader.readAllFootersInParallelUsingSummaryFiles(conf, statuses);
     this.footersIterator = footers.iterator();
     globalMetaData = ParquetFileWriter.getGlobalMetaData(footers);
-
-    List<BlockMetaData> blocks = new ArrayList<BlockMetaData>();
-    for (Footer footer : footers) {
-      blocks.addAll(footer.getParquetMetadata().getBlocks());
-    }
-
     MessageType schema = globalMetaData.getSchema();
     Map<String, Set<String>> extraMetadata = globalMetaData.getKeyValueMetaData();
     readContext = readSupport.init(new InitContext(conf, extraMetadata, schema));
@@ -131,10 +146,15 @@ public class ParquetReader<T> implements Closeable {
     }
     if (footersIterator.hasNext()) {
       Footer footer = footersIterator.next();
+
+      List<BlockMetaData> blocks = footer.getParquetMetadata().getBlocks();
+
+      List<BlockMetaData> filteredBlocks = RowGroupFilter.filterRowGroups(filter, blocks, footer.getParquetMetadata().getFileMetaData().getSchema());
+
       reader = new InternalParquetRecordReader<T>(readSupport, filter);
       reader.initialize(
           readContext.getRequestedSchema(), globalMetaData.getSchema(), footer.getParquetMetadata().getFileMetaData().getKeyValueMetaData(),
-          readContext.getReadSupportMetadata(), footer.getFile(), footer.getParquetMetadata().getBlocks(), conf);
+          readContext.getReadSupportMetadata(), footer.getFile(), filteredBlocks, conf);
     }
   }
 
@@ -144,4 +164,36 @@ public class ParquetReader<T> implements Closeable {
       reader.close();
     }
   }
+
+  public static <T> Builder<T> builder(ReadSupport<T> readSupport, Path path) {
+    return new Builder<T>(readSupport, path);
+  }
+
+  public static class Builder<T> {
+    private final ReadSupport<T> readSupport;
+    private final Path file;
+    private Configuration conf;
+    private Filter filter;
+
+    private Builder(ReadSupport<T> readSupport, Path path) {
+      this.readSupport = checkNotNull(readSupport, "readSupport");
+      this.file = checkNotNull(path, "path");
+      this.conf = new Configuration();
+      this.filter = FilterCompat.NOOP;
+    }
+
+    public Builder<T> withConf(Configuration conf) {
+      this.conf = checkNotNull(conf, "conf");
+      return this;
+    }
+
+    public Builder<T> withFilter(Filter filter) {
+      this.filter = checkNotNull(filter, "filter");
+      return this;
+    }
+
+    public ParquetReader<T> build() throws IOException {
+      return new ParquetReader<T>(conf, file, readSupport, filter);
+    }
+  }
 }
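
For illustration, a hedged sketch of the new builder in use (the GroupReadSupport/Group example classes and the file path are assumptions, not part of this patch); row groups are pruned via RowGroupFilter and individual records via the Filter handed to the internal reader:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;

  import parquet.example.data.Group;
  import parquet.filter2.compat.FilterCompat;
  import parquet.hadoop.ParquetReader;
  import parquet.hadoop.example.GroupReadSupport;

  import static parquet.filter2.predicate.FilterApi.eq;
  import static parquet.filter2.predicate.FilterApi.intColumn;

  public class ParquetReaderBuilderSketch {
    public static void main(String[] args) throws Exception {
      ParquetReader<Group> reader = ParquetReader.builder(new GroupReadSupport(), new Path("/tmp/data.parquet"))
          .withConf(new Configuration())
          .withFilter(FilterCompat.get(eq(intColumn("id"), 50)))
          .build();
      try {
        Group record;
        while ((record = reader.read()) != null) {
          // only records that pass the filter (and survive row group pruning) show up here
          System.out.println(record);
        }
      } finally {
        reader.close();
      }
    }
  }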

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/main/java/parquet/hadoop/ParquetRecordReader.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/parquet/hadoop/ParquetRecordReader.java b/parquet-hadoop/src/main/java/parquet/hadoop/ParquetRecordReader.java
index f6f4815..67c7dd7 100644
--- a/parquet-hadoop/src/main/java/parquet/hadoop/ParquetRecordReader.java
+++ b/parquet-hadoop/src/main/java/parquet/hadoop/ParquetRecordReader.java
@@ -16,17 +16,21 @@
 package parquet.hadoop;
 
 import java.io.IOException;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.RecordReader;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.TaskInputOutputContext;
+
 import parquet.Log;
 import parquet.filter.UnboundRecordFilter;
+import parquet.filter2.compat.FilterCompat;
+import parquet.filter2.compat.FilterCompat.Filter;
 import parquet.hadoop.api.ReadSupport;
-import parquet.hadoop.util.counters.BenchmarkCounter;
 import parquet.hadoop.util.ContextUtil;
+import parquet.hadoop.util.counters.BenchmarkCounter;
 import parquet.schema.MessageTypeParser;
 
 /**
@@ -41,24 +45,34 @@ import parquet.schema.MessageTypeParser;
 public class ParquetRecordReader<T> extends RecordReader<Void, T> {
 
   private static final Log LOG= Log.getLog(ParquetRecordReader.class);
-  private InternalParquetRecordReader<T> internalReader;
+  private final InternalParquetRecordReader<T> internalReader;
 
   /**
    * @param readSupport Object which helps reads files of the given type, e.g. Thrift, Avro.
    */
   public ParquetRecordReader(ReadSupport<T> readSupport) {
-    this(readSupport, null);
+    this(readSupport, FilterCompat.NOOP);
   }
 
   /**
    * @param readSupport Object which helps reads files of the given type, e.g. Thrift, Avro.
-   * @param filter Optional filter for only returning matching records.
+   * @param filter for filtering individual records
    */
-  public ParquetRecordReader(ReadSupport<T> readSupport, UnboundRecordFilter filter) {
+  public ParquetRecordReader(ReadSupport<T> readSupport, Filter filter) {
     internalReader = new InternalParquetRecordReader<T>(readSupport, filter);
   }
 
   /**
+   * @param readSupport Object which helps read files of the given type, e.g. Thrift, Avro.
+   * @param filter for filtering individual records
+   * @deprecated use {@link #ParquetRecordReader(ReadSupport, Filter)}
+   */
+  @Deprecated
+  public ParquetRecordReader(ReadSupport<T> readSupport, UnboundRecordFilter filter) {
+    this(readSupport, FilterCompat.get(filter));
+  }
+
+  /**
    * {@inheritDoc}
    */
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/main/java/parquet/hadoop/ParquetWriter.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/parquet/hadoop/ParquetWriter.java b/parquet-hadoop/src/main/java/parquet/hadoop/ParquetWriter.java
index 9c24475..41f27ed 100644
--- a/parquet-hadoop/src/main/java/parquet/hadoop/ParquetWriter.java
+++ b/parquet-hadoop/src/main/java/parquet/hadoop/ParquetWriter.java
@@ -204,6 +204,19 @@ public class ParquetWriter<T> implements Closeable {
     this(file, writeSupport, DEFAULT_COMPRESSION_CODEC_NAME, DEFAULT_BLOCK_SIZE, DEFAULT_PAGE_SIZE);
   }
 
+  public ParquetWriter(Path file, Configuration conf, WriteSupport<T> writeSupport) throws IOException {
+    this(file,
+        writeSupport,
+        DEFAULT_COMPRESSION_CODEC_NAME,
+        DEFAULT_BLOCK_SIZE,
+        DEFAULT_PAGE_SIZE,
+        DEFAULT_PAGE_SIZE,
+        DEFAULT_IS_DICTIONARY_ENABLED,
+        DEFAULT_IS_VALIDATING_ENABLED,
+        DEFAULT_WRITER_VERSION,
+        conf);
+  }
+
   public void write(T object) throws IOException {
     try {
       writer.write(object);

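A brief, hedged sketch of the new convenience constructor (the example Group write support and schema-in-conf wiring are assumptions used only for illustration):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;

  import parquet.example.data.Group;
  import parquet.hadoop.ParquetWriter;
  import parquet.hadoop.example.GroupWriteSupport;
  import parquet.schema.MessageType;
  import parquet.schema.MessageTypeParser;

  public class ParquetWriterConfSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      MessageType schema = MessageTypeParser.parseMessageType("message Example { required int32 id; }");
      GroupWriteSupport.setSchema(schema, conf); // GroupWriteSupport picks its schema up from the conf

      // the new constructor keeps all the defaults but threads the conf through
      ParquetWriter<Group> writer =
          new ParquetWriter<Group>(new Path("/tmp/out.parquet"), conf, new GroupWriteSupport());
      writer.close();
    }
  }
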
http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/main/java/parquet/hadoop/mapred/DeprecatedParquetInputFormat.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/parquet/hadoop/mapred/DeprecatedParquetInputFormat.java b/parquet-hadoop/src/main/java/parquet/hadoop/mapred/DeprecatedParquetInputFormat.java
index d0b723b..9544865 100644
--- a/parquet-hadoop/src/main/java/parquet/hadoop/mapred/DeprecatedParquetInputFormat.java
+++ b/parquet-hadoop/src/main/java/parquet/hadoop/mapred/DeprecatedParquetInputFormat.java
@@ -18,23 +18,18 @@ package parquet.hadoop.mapred;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-import java.util.List;
 import java.util.Arrays;
+import java.util.List;
 
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.mapred.Counters;
-import org.apache.hadoop.mapred.FileSplit;
 import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.RecordReader;
 import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapreduce.InputFormat;
 
+import parquet.hadoop.Footer;
 import parquet.hadoop.ParquetInputFormat;
 import parquet.hadoop.ParquetInputSplit;
 import parquet.hadoop.ParquetRecordReader;
-import parquet.hadoop.Footer;
 
 @SuppressWarnings("deprecation")
 public class DeprecatedParquetInputFormat<V> extends org.apache.hadoop.mapred.FileInputFormat<Void, Container<V>> {
@@ -87,7 +82,9 @@ public class DeprecatedParquetInputFormat<V> extends org.apache.hadoop.mapred.Fi
       splitLen = oldSplit.getLength();
 
       try {
-        realReader = new ParquetRecordReader<V>(newInputFormat.getReadSupport(oldJobConf));
+        realReader = new ParquetRecordReader<V>(newInputFormat.getReadSupport(oldJobConf),
+            ParquetInputFormat.getFilter(oldJobConf));
+
         realReader.initialize(((ParquetInputSplitWrapper)oldSplit).realSplit, oldJobConf, reporter);
 
         // read once to gain access to key and value objects

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/main/java/parquet/hadoop/metadata/Canonicalizer.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/parquet/hadoop/metadata/Canonicalizer.java b/parquet-hadoop/src/main/java/parquet/hadoop/metadata/Canonicalizer.java
deleted file mode 100644
index ece6e63..0000000
--- a/parquet-hadoop/src/main/java/parquet/hadoop/metadata/Canonicalizer.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Copyright 2014 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package parquet.hadoop.metadata;
-
-import java.util.concurrent.ConcurrentHashMap;
-
-/**
- * returns canonical representation of objects (similar to String.intern()) to save memory
- * if a.equals(b) then canonicalize(a) == canonicalize(b)
- * this class is thread safe
- * @author Julien Le Dem
- *
- * @param <T>
- */
-public class Canonicalizer<T> {
-
-  private ConcurrentHashMap<T, T> canonicals = new ConcurrentHashMap<T, T>();
-
-  /**
-   * @param value the value to canonicalize
-   * @return the corresponding canonical value
-   */
-  final public T canonicalize(T value) {
-    T canonical = canonicals.get(value);
-    if (canonical == null) {
-      value = toCanonical(value);
-      T existing = canonicals.putIfAbsent(value, value);
-      // putIfAbsent is atomic, making sure we always return the same canonical representation of the value
-      if (existing == null) {
-        canonical = value;
-      } else {
-        canonical = existing;
-      }
-    }
-    return canonical;
-  }
-
-  /**
-   * @param value the value to canonicalize if needed
-   * @return the canonicalized value
-   */
-  protected T toCanonical(T value) {
-    return value;
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/main/java/parquet/hadoop/metadata/ColumnChunkMetaData.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/parquet/hadoop/metadata/ColumnChunkMetaData.java b/parquet-hadoop/src/main/java/parquet/hadoop/metadata/ColumnChunkMetaData.java
index 98de367..45af78a 100644
--- a/parquet-hadoop/src/main/java/parquet/hadoop/metadata/ColumnChunkMetaData.java
+++ b/parquet-hadoop/src/main/java/parquet/hadoop/metadata/ColumnChunkMetaData.java
@@ -18,8 +18,9 @@ package parquet.hadoop.metadata;
 import java.util.Set;
 
 import parquet.column.Encoding;
-import parquet.column.statistics.Statistics;
 import parquet.column.statistics.BooleanStatistics;
+import parquet.column.statistics.Statistics;
+import parquet.common.schema.ColumnPath;
 import parquet.schema.PrimitiveType.PrimitiveTypeName;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/main/java/parquet/hadoop/metadata/ColumnChunkProperties.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/parquet/hadoop/metadata/ColumnChunkProperties.java b/parquet-hadoop/src/main/java/parquet/hadoop/metadata/ColumnChunkProperties.java
index 074a900..9b9a7a8 100644
--- a/parquet-hadoop/src/main/java/parquet/hadoop/metadata/ColumnChunkProperties.java
+++ b/parquet-hadoop/src/main/java/parquet/hadoop/metadata/ColumnChunkProperties.java
@@ -19,6 +19,8 @@ import java.util.Arrays;
 import java.util.Set;
 
 import parquet.column.Encoding;
+import parquet.common.internal.Canonicalizer;
+import parquet.common.schema.ColumnPath;
 import parquet.schema.PrimitiveType.PrimitiveTypeName;
 
 public class ColumnChunkProperties {

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/main/java/parquet/hadoop/metadata/ColumnPath.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/parquet/hadoop/metadata/ColumnPath.java b/parquet-hadoop/src/main/java/parquet/hadoop/metadata/ColumnPath.java
deleted file mode 100644
index b179ae3..0000000
--- a/parquet-hadoop/src/main/java/parquet/hadoop/metadata/ColumnPath.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Copyright 2012 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package parquet.hadoop.metadata;
-
-import java.util.Arrays;
-import java.util.Iterator;
-
-public final class ColumnPath implements Iterable<String> {
-
-  private static Canonicalizer<ColumnPath> paths = new Canonicalizer<ColumnPath>() {
-    protected ColumnPath toCanonical(ColumnPath value) {
-      String[] path = new String[value.p.length];
-      for (int i = 0; i < value.p.length; i++) {
-        path[i] = value.p[i].intern();
-      }
-      return new ColumnPath(path);
-    }
-  };
-
-  public static ColumnPath get(String... path){
-    return paths.canonicalize(new ColumnPath(path));
-  }
-
-  private final String[] p;
-
-  private ColumnPath(String[] path) {
-    this.p = path;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj instanceof ColumnPath) {
-      return Arrays.equals(p, ((ColumnPath)obj).p);
-    }
-    return false;
-  }
-
-  @Override
-  public int hashCode() {
-    return Arrays.hashCode(p);
-  }
-
-  @Override
-  public String toString() {
-    return Arrays.toString(p);
-  }
-
-  @Override
-  public Iterator<String> iterator() {
-    return Arrays.asList(p).iterator();
-  }
-
-  public int size() {
-    return p.length;
-  }
-
-  public String[] toArray() {
-    return p;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/main/java/parquet/hadoop/metadata/EncodingList.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/parquet/hadoop/metadata/EncodingList.java b/parquet-hadoop/src/main/java/parquet/hadoop/metadata/EncodingList.java
index 790b601..6121111 100644
--- a/parquet-hadoop/src/main/java/parquet/hadoop/metadata/EncodingList.java
+++ b/parquet-hadoop/src/main/java/parquet/hadoop/metadata/EncodingList.java
@@ -20,6 +20,7 @@ import java.util.Iterator;
 import java.util.List;
 
 import parquet.column.Encoding;
+import parquet.common.internal.Canonicalizer;
 
 public class EncodingList implements Iterable<Encoding> {
 

http://git-wip-us.apache.org/repos/asf/incubator-parquet-mr/blob/ad32bf0f/parquet-hadoop/src/main/java/parquet/hadoop/util/SerializationUtil.java
----------------------------------------------------------------------
diff --git a/parquet-hadoop/src/main/java/parquet/hadoop/util/SerializationUtil.java b/parquet-hadoop/src/main/java/parquet/hadoop/util/SerializationUtil.java
new file mode 100644
index 0000000..0cd5df5
--- /dev/null
+++ b/parquet-hadoop/src/main/java/parquet/hadoop/util/SerializationUtil.java
@@ -0,0 +1,93 @@
+package parquet.hadoop.util;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.util.zip.GZIPInputStream;
+import java.util.zip.GZIPOutputStream;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.conf.Configuration;
+
+import parquet.Closeables;
+import parquet.Log;
+
+/**
+ * Serialization utils copied from:
+ * https://github.com/kevinweil/elephant-bird/blob/master/core/src/main/java/com/twitter/elephantbird/util/HadoopUtils.java
+ *
+ * TODO: Refactor elephant-bird so that we can depend on utils like this without extra baggage.
+ */
+public final class SerializationUtil {
+  private static final Log LOG = Log.getLog(SerializationUtil.class);
+
+  private SerializationUtil() { }
+
+  /**
+   * Writes an object to a configuration as a java-serialized, gzipped,
+   * base64-encoded string, so that it can later be read back with
+   * {@link #readObjectFromConfAsBase64}.
+   *
+   * @param key for the configuration
+   * @param obj the object to write
+   * @param conf to write to
+   * @throws IOException
+   */
+  public static void writeObjectToConfAsBase64(String key, Object obj, Configuration conf) throws IOException {
+    ByteArrayOutputStream baos = null;
+    GZIPOutputStream gos = null;
+    ObjectOutputStream oos = null;
+
+    try {
+      baos = new ByteArrayOutputStream();
+      gos = new GZIPOutputStream(baos);
+      oos = new ObjectOutputStream(gos);
+      oos.writeObject(obj);
+    } finally {
+      Closeables.close(oos);
+      Closeables.close(gos);
+      Closeables.close(baos);
+    }
+
+    conf.set(key, new String(Base64.encodeBase64(baos.toByteArray()), "UTF-8"));
+  }
+
+  /**
+   * Reads an object (that was written using
+   * {@link #writeObjectToConfAsBase64}) from a configuration
+   *
+   * @param key for the configuration
+   * @param conf to read from
+   * @return the read object, or null if key is not present in conf
+   * @throws IOException
+   */
+  @SuppressWarnings("unchecked")
+  public static <T> T readObjectFromConfAsBase64(String key, Configuration conf) throws IOException {
+    String b64 = conf.get(key);
+    if (b64 == null) {
+      return null;
+    }
+
+    byte[] bytes = Base64.decodeBase64(b64.getBytes("UTF-8"));
+
+    ByteArrayInputStream bais = null;
+    GZIPInputStream gis = null;
+    ObjectInputStream ois = null;
+
+    try {
+      bais = new ByteArrayInputStream(bytes);
+      gis = new GZIPInputStream(bais);
+      ois = new ObjectInputStream(gis);
+      return (T) ois.readObject();
+    } catch (ClassNotFoundException e) {
+      throw new IOException("Could not read object from config with key " + key, e);
+    } catch (ClassCastException e) {
+      throw new IOException("Couldn't cast object read from config with key " + key, e);
+    } finally {
+      Closeables.close(ois);
+      Closeables.close(gis);
+      Closeables.close(bais);
+    }
+  }
+}
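
Finally, a minimal round-trip sketch of the new utility (the key name and payload are arbitrary; any java.io.Serializable value works, which is how the FilterPredicate above is carried through the Configuration):

  import java.util.HashMap;

  import org.apache.hadoop.conf.Configuration;

  import parquet.hadoop.util.SerializationUtil;

  public class SerializationUtilSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();

      HashMap<String, Integer> payload = new HashMap<String, Integer>();
      payload.put("id", 42);

      // java-serialized, gzipped, then base64-encoded into the conf under the given key
      SerializationUtil.writeObjectToConfAsBase64("example.key", payload, conf);

      // reading back yields the deserialized object, or null if the key is absent
      HashMap<String, Integer> roundTripped =
          SerializationUtil.readObjectFromConfAsBase64("example.key", conf);
      System.out.println(roundTripped); // {id=42}
    }
  }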