Posted to commits@parquet.apache.org by nk...@apache.org on 2019/09/05 08:07:28 UTC

[parquet-mr] branch master updated: PARQUET-1445: Remove Files.java (#584)

This is an automated email from the ASF dual-hosted git repository.

nkollar pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/parquet-mr.git


The following commit(s) were added to refs/heads/master by this push:
     new 14c1e81  PARQUET-1445: Remove Files.java (#584)
14c1e81 is described below

commit 14c1e815dfcac7f2aa7bcebf24a7fa9c14cbf961
Author: belugabehr <12...@users.noreply.github.com>
AuthorDate: Thu Sep 5 03:07:23 2019 -0500

    PARQUET-1445: Remove Files.java (#584)
---
 .../src/main/java/org/apache/parquet/Files.java           |  5 ++++-
 .../parquet/hadoop/TestInputOutputFormatWithPadding.java  |  9 +++------
 .../parquet/hadoop/example/TestInputOutputFormat.java     | 15 +++++++++------
 .../apache/parquet/thrift/TestThriftRecordConverter.java  |  8 ++++----
 4 files changed, 20 insertions(+), 17 deletions(-)

diff --git a/parquet-common/src/main/java/org/apache/parquet/Files.java b/parquet-common/src/main/java/org/apache/parquet/Files.java
index 1d2b506..66a6e73 100644
--- a/parquet-common/src/main/java/org/apache/parquet/Files.java
+++ b/parquet-common/src/main/java/org/apache/parquet/Files.java
@@ -28,7 +28,10 @@ import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.List;
 
-//TODO: Use java.nio.file.Files when Parquet is updated to Java 7
+/**
+ * Use java.nio.file.Files facilities as a replacement.
+ */
+@Deprecated
 public final class Files {
   private Files() { }
 
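The deprecation notice above points callers at the JDK's java.nio.file.Files. A minimal sketch of the replacement pattern the test changes below apply, using an illustrative file path that is not taken from this commit:

    import java.io.File;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.util.List;

    public class ReadLinesExample {
      public static void main(String[] args) throws Exception {
        // Illustrative path; the tests in this commit read their own output files.
        File dataFile = new File("target/out/part-m-00000");

        // Before (deprecated): org.apache.parquet.Files.readAllLines(dataFile, Charset.forName("UTF-8"))
        // After: the JDK equivalent, converting the File to a Path first.
        List<String> lines = Files.readAllLines(dataFile.toPath(), StandardCharsets.UTF_8);
        lines.forEach(System.out::println);
      }
    }
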
diff --git a/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/TestInputOutputFormatWithPadding.java b/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/TestInputOutputFormatWithPadding.java
index 8e3e6c7..5066553 100644
--- a/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/TestInputOutputFormatWithPadding.java
+++ b/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/TestInputOutputFormatWithPadding.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
 import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-import org.apache.parquet.Files;
 import org.apache.parquet.example.data.Group;
 import org.apache.parquet.example.data.simple.SimpleGroupFactory;
 import org.apache.parquet.format.converter.ParquetMetadataConverter;
@@ -36,7 +35,6 @@ import org.apache.parquet.hadoop.example.GroupWriteSupport;
 import org.apache.parquet.hadoop.metadata.BlockMetaData;
 import org.apache.parquet.hadoop.metadata.ParquetMetadata;
 import org.apache.parquet.hadoop.util.HadoopOutputFile;
-import org.apache.parquet.io.OutputFile;
 import org.apache.parquet.io.api.Binary;
 import org.apache.parquet.schema.MessageType;
 import org.apache.parquet.schema.Types;
@@ -48,7 +46,8 @@ import org.junit.rules.TemporaryFolder;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
-import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
 import java.util.UUID;
 
 import static java.lang.Thread.sleep;
@@ -66,8 +65,6 @@ public class TestInputOutputFormatWithPadding {
       .required(BINARY).as(UTF8).named("char")
       .named("FormatTestObject");
 
-  private static final Charset UTF_8 = Charset.forName("UTF-8");
-
   /**
    * ParquetInputFormat that will not split the input file (easier validation)
    */
@@ -181,7 +178,7 @@ public class TestInputOutputFormatWithPadding {
     Assert.assertNotNull("Should find a data file", dataFile);
 
     StringBuilder contentBuilder = new StringBuilder();
-    for (String line : Files.readAllLines(dataFile, UTF_8)) {
+    for (String line : Files.readAllLines(dataFile.toPath(), StandardCharsets.UTF_8)) {
       contentBuilder.append(line);
     }
     String reconstructed = contentBuilder.toString();
diff --git a/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/example/TestInputOutputFormat.java b/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/example/TestInputOutputFormat.java
index c829dc1..0a38303 100644
--- a/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/example/TestInputOutputFormat.java
+++ b/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/example/TestInputOutputFormat.java
@@ -28,7 +28,8 @@ import java.io.File;
 import java.io.FileReader;
 import java.io.IOException;
 import java.lang.reflect.Method;
-import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -47,7 +48,6 @@ import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
 import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-import org.apache.parquet.Files;
 import org.apache.parquet.Strings;
 import org.apache.parquet.filter2.predicate.FilterApi;
 import org.junit.Before;
@@ -69,7 +69,7 @@ import org.slf4j.LoggerFactory;
 
 public class TestInputOutputFormat {
   private static final Logger LOG = LoggerFactory.getLogger(TestInputOutputFormat.class);
-  private static final Charset UTF_8 = Charset.forName("UTF-8");
+
   final Path parquetPath = new Path("target/test/example/TestInputOutputFormat/parquet");
   final Path inputPath = new Path("src/test/java/org/apache/parquet/hadoop/example/TestInputOutputFormat.java");
   final Path outputPath = new Path("target/test/example/TestInputOutputFormat/out");
@@ -260,7 +260,8 @@ public class TestInputOutputFormat {
       put(ParquetInputFormat.FILTER_PREDICATE, fpString);
     }});
 
-    List<String> lines = Files.readAllLines(new File(outputPath.toString(), "part-m-00000"), UTF_8);
+    File file = new File(outputPath.toString(), "part-m-00000");
+    List<String> lines = Files.readAllLines(file.toPath(), StandardCharsets.UTF_8);
     assertTrue(lines.isEmpty());
   }
 
@@ -278,7 +279,8 @@ public class TestInputOutputFormat {
       put(ParquetInputFormat.FILTER_PREDICATE, fpString);
     }});
 
-    List<String> expected = Files.readAllLines(new File(inputPath.toString()), UTF_8);
+    File file = new File(inputPath.toString());
+    List<String> expected = Files.readAllLines(file.toPath(), StandardCharsets.UTF_8);
 
     // grab the lines that contain the first 500 characters (including the rest of the line past 500 characters)
     int size = 0;
@@ -295,7 +297,8 @@ public class TestInputOutputFormat {
     }
 
     // put the output back into its original format (remove the character counts / tabs)
-    List<String> found = Files.readAllLines(new File(outputPath.toString(), "part-m-00000"), UTF_8);
+    File file2 = new File(outputPath.toString(), "part-m-00000");
+    List<String> found = Files.readAllLines(file2.toPath(), StandardCharsets.UTF_8);
     StringBuilder sbFound = new StringBuilder();
     for (String line : found) {
       sbFound.append(line.split("\t", -1)[1]);
diff --git a/parquet-thrift/src/test/java/org/apache/parquet/thrift/TestThriftRecordConverter.java b/parquet-thrift/src/test/java/org/apache/parquet/thrift/TestThriftRecordConverter.java
index 1619dd5..255515a 100644
--- a/parquet-thrift/src/test/java/org/apache/parquet/thrift/TestThriftRecordConverter.java
+++ b/parquet-thrift/src/test/java/org/apache/parquet/thrift/TestThriftRecordConverter.java
@@ -19,11 +19,11 @@
 package org.apache.parquet.thrift;
 
 import java.io.File;
-import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
 import java.util.ArrayList;
 import java.util.Arrays;
 
-import org.apache.parquet.Files;
 import org.apache.parquet.Strings;
 import org.apache.parquet.io.ParquetDecodingException;
 import org.apache.parquet.io.api.Binary;
@@ -80,8 +80,8 @@ public class TestThriftRecordConverter {
   public void constructorDoesNotRequireStructOrUnionTypeMeta() throws Exception {
     String jsonWithNoStructOrUnionMeta = Strings.join(
         Files.readAllLines(
-            new File("src/test/resources/org/apache/parquet/thrift/StructWithUnionV1NoStructOrUnionMeta.json"),
-            Charset.forName("UTF-8")), "\n");
+            new File("src/test/resources/org/apache/parquet/thrift/StructWithUnionV1NoStructOrUnionMeta.json").toPath(),
+            StandardCharsets.UTF_8), "\n");
 
     StructType noStructOrUnionMeta  = (StructType) ThriftType.fromJSON(jsonWithNoStructOrUnionMeta);