Posted to notifications@libcloud.apache.org by to...@apache.org on 2013/09/04 12:28:17 UTC

[10/14] git commit: LIBCLOUD-378: S3 uploads fail on small iterators

LIBCLOUD-378: S3 uploads fail on small iterators


Project: http://git-wip-us.apache.org/repos/asf/libcloud/repo
Commit: http://git-wip-us.apache.org/repos/asf/libcloud/commit/db4cfbdc
Tree: http://git-wip-us.apache.org/repos/asf/libcloud/tree/db4cfbdc
Diff: http://git-wip-us.apache.org/repos/asf/libcloud/diff/db4cfbdc

Branch: refs/heads/0.13.1
Commit: db4cfbdccd1358450ce513d64eb872c637b273e5
Parents: a8055c1
Author: Mahendra M <ma...@apache.org>
Authored: Tue Sep 3 16:50:37 2013 +0530
Committer: Tomaz Muraus <to...@apache.org>
Committed: Wed Sep 4 12:05:28 2013 +0200

----------------------------------------------------------------------
 CHANGES                        | 4 ++++
 libcloud/storage/drivers/s3.py | 3 ++-
 2 files changed, 6 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/libcloud/blob/db4cfbdc/CHANGES
----------------------------------------------------------------------
diff --git a/CHANGES b/CHANGES
index 08db5b6..663fa23 100644
--- a/CHANGES
+++ b/CHANGES
@@ -25,6 +25,10 @@ Changes with Apache Libcloud in development
       Reported by Ben Meng (LIBCLOUD-366)
       [Tomaz Muraus]
 
+    - Ensure that AWS S3 multipart upload works for small iterators.
+      (LIBCLOUD-378)
+      [Mahendra M]
+
 Changes with Apache Libcloud 0.13.0:
 
  *) General

http://git-wip-us.apache.org/repos/asf/libcloud/blob/db4cfbdc/libcloud/storage/drivers/s3.py
----------------------------------------------------------------------
diff --git a/libcloud/storage/drivers/s3.py b/libcloud/storage/drivers/s3.py
index 38bd723..5b20035 100644
--- a/libcloud/storage/drivers/s3.py
+++ b/libcloud/storage/drivers/s3.py
@@ -491,7 +491,8 @@ class S3StorageDriver(StorageDriver):
         params = {'uploadId': upload_id}
 
         # Read the input data in chunk sizes suitable for AWS
-        for data in read_in_chunks(iterator, chunk_size=CHUNK_SIZE):
+        for data in read_in_chunks(iterator, chunk_size=CHUNK_SIZE,
+                                   fill_size=True):
             bytes_transferred += len(data)
 
             if calculate_hash:
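----------------------------------------------------------------------

For context, the effect of fill_size=True can be shown with a short,
hypothetical sketch. This is not libcloud's actual implementation; the
real helper is read_in_chunks (assumed to live in libcloud.utils.files),
and the 5 MB figure is AWS's documented minimum size for a non-final
multipart part, which the driver's CHUNK_SIZE constant is assumed to
match. Without fill_size=True, an iterator that yields many small
pieces produces one undersized upload part per piece, which S3 rejects
(every part other than the last must be at least 5 MB); with
fill_size=True the pieces are buffered until a full chunk is available.

CHUNK_SIZE = 5 * 1024 * 1024  # assumed AWS S3 minimum for a non-final part


def read_in_chunks_sketch(iterator, chunk_size=CHUNK_SIZE, fill_size=False):
    # Simplified, hypothetical stand-in for libcloud's read_in_chunks.
    # With fill_size=False every piece from the iterator is yielded as-is;
    # with fill_size=True pieces are accumulated until chunk_size bytes
    # are available, so only the final yielded part can be undersized.
    buf = b''
    for piece in iterator:
        if not fill_size:
            yield piece
            continue
        buf += piece
        while len(buf) >= chunk_size:
            yield buf[:chunk_size]
            buf = buf[chunk_size:]
    if fill_size and buf:
        yield buf  # final, possibly short, part


# An iterator producing many small pieces (64 KB each, ~12.5 MB total).
small_pieces = (b'x' * 65536 for _ in range(200))

# With fill_size=True this prints two full 5 MB parts and one smaller
# final part, instead of 200 undersized parts.
for part in read_in_chunks_sketch(small_pieces, fill_size=True):
    print(len(part))

In terms of the public API, the failing scenario corresponds to calling
upload_object_via_stream() with such a small-piece iterator; with this
change every multipart part sent to S3 is CHUNK_SIZE bytes, apart from
the final one.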