You are viewing a plain text version of this content. The canonical link for it is here.
Posted to issues@flink.apache.org by GitBox <gi...@apache.org> on 2018/11/28 11:10:24 UTC

[GitHub] igalshilman commented on a change in pull request #7161: [FLINK-10963][fs-connector, s3] Cleanup tmp S3 objects uploaded as backups of in-progress files.

igalshilman commented on a change in pull request #7161: [FLINK-10963][fs-connector, s3] Cleanup tmp S3 objects uploaded as backups of in-progress files.
URL: https://github.com/apache/flink/pull/7161#discussion_r237021819
 
 

 ##########
 File path: flink-filesystems/flink-s3-fs-hadoop/src/main/java/org/apache/flink/fs/s3hadoop/HadoopS3AccessHelper.java
 ##########
 @@ -61,25 +64,50 @@ public HadoopS3MultiPartUploader(S3AFileSystem s3a, Configuration conf) {
 
 	@Override
 	public String startMultiPartUpload(String key) throws IOException {
-		return s3uploader.initiateMultiPartUpload(key);
+		return s3accessHelper.initiateMultiPartUpload(key);
 	}
 
 	@Override
 	public UploadPartResult uploadPart(String key, String uploadId, int partNumber, InputStream inputStream, long length) throws IOException {
-		final UploadPartRequest uploadRequest = s3uploader.newUploadPartRequest(
+		final UploadPartRequest uploadRequest = s3accessHelper.newUploadPartRequest(
 				key, uploadId, partNumber, MathUtils.checkedDownCast(length), inputStream, null, 0L);
-		return s3uploader.uploadPart(uploadRequest);
+		return s3accessHelper.uploadPart(uploadRequest);
 	}
 
 	@Override
-	public PutObjectResult uploadIncompletePart(String key, InputStream inputStream, long length) throws IOException {
-		final PutObjectRequest putRequest = s3uploader.createPutObjectRequest(key, inputStream, length);
-		return s3uploader.putObject(putRequest);
+	public PutObjectResult putObject(String key, InputStream inputStream, long length) throws IOException {
+		final PutObjectRequest putRequest = s3accessHelper.createPutObjectRequest(key, inputStream, length);
+		return s3accessHelper.putObject(putRequest);
 	}
 
 	@Override
 	public CompleteMultipartUploadResult commitMultiPartUpload(String destKey, String uploadId, List<PartETag> partETags, long length, AtomicInteger errorCount) throws IOException {
-		return s3uploader.completeMPUwithRetries(destKey, uploadId, partETags, length, errorCount);
+		return s3accessHelper.completeMPUwithRetries(destKey, uploadId, partETags, length, errorCount);
+	}
+
+	@Override
+	public boolean deleteObject(String key) throws IOException {
+		return s3a.delete(new org.apache.hadoop.fs.Path('/' + key), false);
+	}
+
+	@Override
+	public long getObject(String key, File targetLocation) throws IOException {
+		long numBytes = 0L;
+		try (
+				final OutputStream outStream = new FileOutputStream(targetLocation);
+				final org.apache.hadoop.fs.FSDataInputStream inStream =
+						s3a.open(new org.apache.hadoop.fs.Path('/' + key))
+		) {
+			final byte[] buffer = new byte[32 * 1024];
+
+			int numRead;
+			while ((numRead = inStream.read(buffer)) > 0) {
 
 Review comment:
   suggestion: `while ((..) != -1)`

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services