Posted to commits@cloudstack.apache.org by ah...@apache.org on 2013/01/10 23:47:20 UTC

[8/25] removed ComponentLocator.inject() lookups in favor of @Inject annotations

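The change is the same for every DAO field in this file: the hand-wired ComponentLocator lookup is dropped and the field is left to the dependency-injection container to populate. A representative before/after, copied from the S3Engine fields in the hunk below (the remaining fields change identically; the surrounding "..." stands for the rest of the class):

    // before: explicit lookup of the implementation class at field initialization
    protected final SHostDao shostDao = ComponentLocator.inject(SHostDaoImpl.class);

    // after: javax.inject annotation; the implementation is supplied by the container
    import javax.inject.Inject;
    ...
    @Inject SHostDao shostDao;
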
http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/f40e7b75/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java
----------------------------------------------------------------------
diff --git a/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java b/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java
index 916c51d..2ce9e33 100644
--- a/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java
+++ b/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java
@@ -33,13 +33,14 @@ import java.util.Set;
 import java.util.TimeZone;
 import java.util.UUID;
 
+import javax.inject.Inject;
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.log4j.Logger;
 import org.json.simple.parser.ParseException;
 
-import com.cloud.bridge.io.S3FileSystemBucketAdapter;
 import com.cloud.bridge.io.S3CAStorBucketAdapter;
+import com.cloud.bridge.io.S3FileSystemBucketAdapter;
 import com.cloud.bridge.model.BucketPolicyVO;
 import com.cloud.bridge.model.MHostMountVO;
 import com.cloud.bridge.model.MHostVO;
@@ -50,27 +51,18 @@ import com.cloud.bridge.model.SBucketVO;
 import com.cloud.bridge.model.SHost;
 import com.cloud.bridge.model.SHostVO;
 import com.cloud.bridge.model.SMetaVO;
-import com.cloud.bridge.model.SObjectVO;
 import com.cloud.bridge.model.SObjectItemVO;
+import com.cloud.bridge.model.SObjectVO;
 import com.cloud.bridge.persist.dao.BucketPolicyDao;
-import com.cloud.bridge.persist.dao.BucketPolicyDaoImpl;
 import com.cloud.bridge.persist.dao.MHostDao;
-import com.cloud.bridge.persist.dao.MHostDaoImpl;
 import com.cloud.bridge.persist.dao.MHostMountDao;
-import com.cloud.bridge.persist.dao.MHostMountDaoImpl;
 import com.cloud.bridge.persist.dao.MultipartLoadDao;
 import com.cloud.bridge.persist.dao.SAclDao;
-import com.cloud.bridge.persist.dao.SAclDaoImpl;
 import com.cloud.bridge.persist.dao.SBucketDao;
-import com.cloud.bridge.persist.dao.SBucketDaoImpl;
 import com.cloud.bridge.persist.dao.SHostDao;
-import com.cloud.bridge.persist.dao.SHostDaoImpl;
 import com.cloud.bridge.persist.dao.SMetaDao;
-import com.cloud.bridge.persist.dao.SMetaDaoImpl;
 import com.cloud.bridge.persist.dao.SObjectDao;
-import com.cloud.bridge.persist.dao.SObjectDaoImpl;
 import com.cloud.bridge.persist.dao.SObjectItemDao;
-import com.cloud.bridge.persist.dao.SObjectItemDaoImpl;
 import com.cloud.bridge.service.UserContext;
 import com.cloud.bridge.service.controller.s3.ServiceProvider;
 import com.cloud.bridge.service.core.s3.S3BucketPolicy.PolicyAccess;
@@ -86,11 +78,10 @@ import com.cloud.bridge.service.exception.OutOfServiceException;
 import com.cloud.bridge.service.exception.OutOfStorageException;
 import com.cloud.bridge.service.exception.PermissionDeniedException;
 import com.cloud.bridge.util.DateHelper;
+import com.cloud.bridge.util.OrderedPair;
 import com.cloud.bridge.util.PolicyParser;
 import com.cloud.bridge.util.StringHelper;
-import com.cloud.bridge.util.OrderedPair;
 import com.cloud.bridge.util.Triple;
-import com.cloud.utils.component.ComponentLocator;
 import com.cloud.utils.db.DB;
 import com.cloud.utils.db.Transaction;
 
@@ -99,105 +90,105 @@ import com.cloud.utils.db.Transaction;
  */
 public class S3Engine {
     protected final static Logger logger = Logger.getLogger(S3Engine.class);
-    protected final SHostDao shostDao = ComponentLocator.inject(SHostDaoImpl.class);
-    protected final MHostDao mhostDao = ComponentLocator.inject(MHostDaoImpl.class);
-    protected final static BucketPolicyDao bPolicy = ComponentLocator.inject(BucketPolicyDaoImpl.class);
-    protected final BucketPolicyDao bPolicyDao = ComponentLocator.inject(BucketPolicyDaoImpl.class);
-    protected final SBucketDao bucketDao = ComponentLocator.inject(SBucketDaoImpl.class);  
-    protected final SAclDao aclDao = ComponentLocator.inject(SAclDaoImpl.class);
-    protected final static SAclDao saclDao = ComponentLocator.inject(SAclDaoImpl.class);
-    protected final SObjectDao objectDao = ComponentLocator.inject(SObjectDaoImpl.class);
-    protected final SObjectItemDao itemDao = ComponentLocator.inject(SObjectItemDaoImpl.class);
-    protected final SMetaDao metaDao = ComponentLocator.inject(SMetaDaoImpl.class);
-    protected final MHostMountDao mountDao = ComponentLocator.inject(MHostMountDaoImpl.class);
+    @Inject SHostDao shostDao;
+    @Inject MHostDao mhostDao;
+    @Inject static BucketPolicyDao bPolicy;
+    @Inject BucketPolicyDao bPolicyDao;
+    @Inject SBucketDao bucketDao;  
+    @Inject SAclDao aclDao;
+    @Inject static SAclDao saclDao;
+    @Inject SObjectDao objectDao;
+    @Inject SObjectItemDao itemDao;
+    @Inject SMetaDao metaDao;
+    @Inject MHostMountDao mountDao;
     private final int LOCK_ACQUIRING_TIMEOUT_SECONDS = 10;		// ten seconds
 
     private final Map<Integer, S3BucketAdapter> bucketAdapters = new HashMap<Integer, S3BucketAdapter>();
-    
+
     public S3Engine() {
-    	bucketAdapters.put(SHost.STORAGE_HOST_TYPE_LOCAL, new S3FileSystemBucketAdapter());
+        bucketAdapters.put(SHost.STORAGE_HOST_TYPE_LOCAL, new S3FileSystemBucketAdapter());
         bucketAdapters.put(SHost.STORAGE_HOST_TYPE_CASTOR, new S3CAStorBucketAdapter());
     }
-    
-    
+
+
     /**
      * Return a S3CopyObjectResponse which represents an object being copied from source
      * to destination bucket.    
      * Called from S3ObjectAction when copying an object.
      * This can be treated as first a GET followed by a PUT of the object the user wants to copy.
      */
-    
-	public S3CopyObjectResponse handleRequest(S3CopyObjectRequest request) 
-	{
-		S3CopyObjectResponse response = new S3CopyObjectResponse();
-		
-		// [A] Get the object we want to copy
-		S3GetObjectRequest getRequest = new S3GetObjectRequest();
-		getRequest.setBucketName(request.getSourceBucketName());
-		getRequest.setKey(request.getSourceKey());
-		getRequest.setVersion(request.getVersion());
-		getRequest.setConditions( request.getConditions());
-
-		getRequest.setInlineData( true );
-		getRequest.setReturnData( true );
-		if ( MetadataDirective.COPY == request.getDirective()) 
-		     getRequest.setReturnMetadata( true );
-		else getRequest.setReturnMetadata( false );		
-			
-		//-> before we do anything verify the permissions on a copy basis
-		String  destinationBucketName = request.getDestinationBucketName();
-		String  destinationKeyName = request.getDestinationKey();
-		S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, destinationBucketName );
-		context.setKeyName( destinationKeyName );
-		context.setEvalParam( ConditionKeys.MetaData, request.getDirective().toString());
-		context.setEvalParam( ConditionKeys.CopySource, "/" + request.getSourceBucketName() + "/" + request.getSourceKey());
-		if (PolicyAccess.DENY == verifyPolicy( context )) 
+
+    public S3CopyObjectResponse handleRequest(S3CopyObjectRequest request) 
+    {
+        S3CopyObjectResponse response = new S3CopyObjectResponse();
+
+        // [A] Get the object we want to copy
+        S3GetObjectRequest getRequest = new S3GetObjectRequest();
+        getRequest.setBucketName(request.getSourceBucketName());
+        getRequest.setKey(request.getSourceKey());
+        getRequest.setVersion(request.getVersion());
+        getRequest.setConditions( request.getConditions());
+
+        getRequest.setInlineData( true );
+        getRequest.setReturnData( true );
+        if ( MetadataDirective.COPY == request.getDirective()) 
+            getRequest.setReturnMetadata( true );
+        else getRequest.setReturnMetadata( false );		
+
+        //-> before we do anything verify the permissions on a copy basis
+        String  destinationBucketName = request.getDestinationBucketName();
+        String  destinationKeyName = request.getDestinationKey();
+        S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, destinationBucketName );
+        context.setKeyName( destinationKeyName );
+        context.setEvalParam( ConditionKeys.MetaData, request.getDirective().toString());
+        context.setEvalParam( ConditionKeys.CopySource, "/" + request.getSourceBucketName() + "/" + request.getSourceKey());
+        if (PolicyAccess.DENY == verifyPolicy( context )) 
             throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" );
-			  		
-	    S3GetObjectResponse originalObject = handleRequest(getRequest); 	
-	    int resultCode = originalObject.getResultCode();
-	    if (200 != resultCode) {
-	    	response.setResultCode( resultCode );
-	    	response.setResultDescription( originalObject.getResultDescription());
-	    	return response;
-	    }
-	    	    
-	    response.setCopyVersion( originalObject.getVersion());
-
-	    
-	    // [B] Put the object into the destination bucket
-	    S3PutObjectInlineRequest putRequest = new S3PutObjectInlineRequest();
-	    putRequest.setBucketName(request.getDestinationBucketName()) ;
-	    putRequest.setKey(destinationKeyName);
-		if ( MetadataDirective.COPY == request.getDirective()) 
-			 putRequest.setMetaEntries(originalObject.getMetaEntries());
-		else putRequest.setMetaEntries(request.getMetaEntries());	
-	    putRequest.setAcl(request.getAcl());                    // -> if via a SOAP call
-	    putRequest.setCannedAccess(request.getCannedAccess());  // -> if via a REST call 
-	    putRequest.setContentLength(originalObject.getContentLength());
-	    putRequest.setData(originalObject.getData());
-
-	    S3PutObjectInlineResponse putResp = handleRequest(putRequest);  
-	    response.setResultCode( putResp.resultCode );
-	    response.setResultDescription( putResp.getResultDescription());
-		response.setETag( putResp.getETag());
-		response.setLastModified( putResp.getLastModified());
-		response.setPutVersion( putResp.getVersion());
-		return response;
-	}
+
+        S3GetObjectResponse originalObject = handleRequest(getRequest); 	
+        int resultCode = originalObject.getResultCode();
+        if (200 != resultCode) {
+            response.setResultCode( resultCode );
+            response.setResultDescription( originalObject.getResultDescription());
+            return response;
+        }
+
+        response.setCopyVersion( originalObject.getVersion());
+
+
+        // [B] Put the object into the destination bucket
+        S3PutObjectInlineRequest putRequest = new S3PutObjectInlineRequest();
+        putRequest.setBucketName(request.getDestinationBucketName()) ;
+        putRequest.setKey(destinationKeyName);
+        if ( MetadataDirective.COPY == request.getDirective()) 
+            putRequest.setMetaEntries(originalObject.getMetaEntries());
+        else putRequest.setMetaEntries(request.getMetaEntries());	
+        putRequest.setAcl(request.getAcl());                    // -> if via a SOAP call
+        putRequest.setCannedAccess(request.getCannedAccess());  // -> if via a REST call 
+        putRequest.setContentLength(originalObject.getContentLength());
+        putRequest.setData(originalObject.getData());
+
+        S3PutObjectInlineResponse putResp = handleRequest(putRequest);  
+        response.setResultCode( putResp.resultCode );
+        response.setResultDescription( putResp.getResultDescription());
+        response.setETag( putResp.getETag());
+        response.setLastModified( putResp.getLastModified());
+        response.setPutVersion( putResp.getVersion());
+        return response;
+    }
 
     public S3CreateBucketResponse handleRequest(S3CreateBucketRequest request) 
     {
-    	S3CreateBucketResponse response = new S3CreateBucketResponse();
-		String cannedAccessPolicy = request.getCannedAccess();
-    	String bucketName = request.getBucketName();
-    	response.setBucketName( bucketName );
-    	Transaction txn= null;
-		verifyBucketName( bucketName, false );
- 	
-		S3PolicyContext context = new S3PolicyContext( PolicyActions.CreateBucket,  bucketName );
-		context.setEvalParam( ConditionKeys.Acl, cannedAccessPolicy );
-		if (PolicyAccess.DENY == verifyPolicy( context )) 
+        S3CreateBucketResponse response = new S3CreateBucketResponse();
+        String cannedAccessPolicy = request.getCannedAccess();
+        String bucketName = request.getBucketName();
+        response.setBucketName( bucketName );
+        Transaction txn= null;
+        verifyBucketName( bucketName, false );
+
+        S3PolicyContext context = new S3PolicyContext( PolicyActions.CreateBucket,  bucketName );
+        context.setEvalParam( ConditionKeys.Acl, cannedAccessPolicy );
+        if (PolicyAccess.DENY == verifyPolicy( context )) 
             throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" );
         OrderedPair<SHostVO, String> shost_storagelocation_pair = null;
         boolean success = false;
@@ -211,7 +202,7 @@ public class S3Engine {
                     request.getBucketName(), null);
             SBucketVO sbucket = new SBucketVO(request.getBucketName(),
                     DateHelper.currentGMTTime(), UserContext.current()
-                            .getCanonicalUserId(),
+                    .getCanonicalUserId(),
                     shost_storagelocation_pair.getFirst());
 
             shost_storagelocation_pair.getFirst().getBuckets().add(sbucket);
@@ -239,29 +230,29 @@ public class S3Engine {
             txn.rollback();
             txn.close();
         }
-		return response;
+        return response;
     }
-    
+
     /**
      * Return a S3Response which represents the effect of an object being deleted from its bucket.    
      * Called from S3BucketAction when deleting an object.
      */
-    
+
     public S3Response handleRequest( S3DeleteBucketRequest request ) 
     {
-    		S3Response response  = new S3Response();
-		//
-		String bucketName = request.getBucketName();
-		SBucketVO sbucket   = bucketDao.getByName(bucketName);
-		
-		Transaction txn = null;
-		if ( sbucket != null ) 
-		{	
-		    txn = Transaction.open(Transaction.AWSAPI_DB);
-		    txn.start();
-		    S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteBucket, bucketName );
-		    switch( verifyPolicy( context ))
-		    {
+        S3Response response  = new S3Response();
+        //
+        String bucketName = request.getBucketName();
+        SBucketVO sbucket   = bucketDao.getByName(bucketName);
+
+        Transaction txn = null;
+        if ( sbucket != null ) 
+        {	
+            txn = Transaction.open(Transaction.AWSAPI_DB);
+            txn.start();
+            S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteBucket, bucketName );
+            switch( verifyPolicy( context ))
+            {
             case ALLOW:
                 // The bucket policy can give users permission to delete a
                 // bucket whereas ACLs cannot
@@ -282,110 +273,110 @@ public class S3Engine {
                 }
                 break;
             }
-		    
-			 // Delete the file from its storage location
-			 OrderedPair<SHostVO, String> host_storagelocation_pair = getBucketStorageHost(sbucket);
-			 S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
-			 bucketAdapter.deleteContainer(host_storagelocation_pair.getSecond(), request.getBucketName());
-			
-			 // Cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl, SMeta and policy objects.
-			 // To delete SMeta & SAcl objects: 
-			 // (1)Get all the objects in the bucket, 
-			 // (2)then all the items in each object, 
-			 // (3) then all meta & acl data for each item
-			 Set<SObjectVO> objectsInBucket = sbucket.getObjectsInBucket();
-			 Iterator<SObjectVO> it = objectsInBucket.iterator();
-			 while( it.hasNext()) 
-			 {
-			 	SObjectVO oneObject = (SObjectVO)it.next();
-				Set<SObjectItemVO> itemsInObject = oneObject.getItems();
-				Iterator<SObjectItemVO> is = itemsInObject.iterator();
-				while( is.hasNext()) 
-				{
-                    SObjectItemVO oneItem = (SObjectItemVO) is.next();
+
+            // Delete the file from its storage location
+            OrderedPair<SHostVO, String> host_storagelocation_pair = getBucketStorageHost(sbucket);
+            S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
+            bucketAdapter.deleteContainer(host_storagelocation_pair.getSecond(), request.getBucketName());
+
+            // Cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl, SMeta and policy objects.
+            // To delete SMeta & SAcl objects: 
+            // (1)Get all the objects in the bucket, 
+            // (2)then all the items in each object, 
+            // (3) then all meta & acl data for each item
+            Set<SObjectVO> objectsInBucket = sbucket.getObjectsInBucket();
+            Iterator<SObjectVO> it = objectsInBucket.iterator();
+            while( it.hasNext()) 
+            {
+                SObjectVO oneObject = it.next();
+                Set<SObjectItemVO> itemsInObject = oneObject.getItems();
+                Iterator<SObjectItemVO> is = itemsInObject.iterator();
+                while( is.hasNext()) 
+                {
+                    SObjectItemVO oneItem = is.next();
                     deleteMetaData(oneItem.getId());
                     deleteObjectAcls("SObjectItem", oneItem.getId());
-				}				
-			 }
-			 	
-			 // Delete all the policy state associated with the bucket
-			 try {
+                }				
+            }
+
+            // Delete all the policy state associated with the bucket
+            try {
                 ServiceProvider.getInstance().deleteBucketPolicy(bucketName);
                 bPolicyDao.deletePolicy(bucketName);
-			 } catch( Exception e ) {
-			     logger.error("When deleting a bucket we must try to delete its policy: ", e);
-			 }
-			 
-			 deleteBucketAcls( sbucket.getId());
-			 bucketDao.remove(sbucket.getId());
-			 
-	
-			 response.setResultCode(204);
-			 response.setResultDescription("OK");
-			 
-			 txn.close();
-		} 
-		else 
-		{    response.setResultCode(404);
-			 response.setResultDescription("Bucket does not exist");
-		}
-    	return response;
+            } catch( Exception e ) {
+                logger.error("When deleting a bucket we must try to delete its policy: ", e);
+            }
+
+            deleteBucketAcls( sbucket.getId());
+            bucketDao.remove(sbucket.getId());
+
+
+            response.setResultCode(204);
+            response.setResultDescription("OK");
+
+            txn.close();
+        } 
+        else 
+        {    response.setResultCode(404);
+        response.setResultDescription("Bucket does not exist");
+        }
+        return response;
     }
-    
+
     /**
      * Return a S3ListBucketResponse which represents a list of up to 1000 objects contained ins  the bucket.    
      * Called from S3BucketAction for GETting objects and for GETting object versions.
      */
-    
+
     public S3ListBucketResponse listBucketContents(S3ListBucketRequest request, boolean includeVersions) 
     {
-    	S3ListBucketResponse response = new S3ListBucketResponse();
-		String bucketName = request.getBucketName();
-		String prefix = request.getPrefix();
-		if (prefix == null) prefix = StringHelper.EMPTY_STRING;
-		String marker = request.getMarker();
-		if (marker == null)	marker = StringHelper.EMPTY_STRING;
-			
-		String delimiter = request.getDelimiter();
-		int maxKeys = request.getMaxKeys();
-		if(maxKeys <= 0) maxKeys = 1000;
-		
-		//
-		SBucketVO sbucket = bucketDao.getByName(bucketName);
-		if (sbucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
-
-		PolicyActions action = (includeVersions ? PolicyActions.ListBucketVersions : PolicyActions.ListBucket);
-		S3PolicyContext context = new S3PolicyContext( action, bucketName );
-		context.setEvalParam( ConditionKeys.MaxKeys, new String( "" + maxKeys ));
-		context.setEvalParam( ConditionKeys.Prefix, prefix );
-		context.setEvalParam( ConditionKeys.Delimiter, delimiter );
-		verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_READ ); 
-
-		
-		// Wen execting the query, request one more item so that we know how to set isTruncated flag 
-		List<SObjectVO> l = null;
-		
-		if ( includeVersions )
-		    l = objectDao.listAllBucketObjects( sbucket, prefix, marker, maxKeys+1 );
-		else l = objectDao.listBucketObjects( sbucket, prefix, marker, maxKeys+1 );
-		
-		response.setBucketName(bucketName);
-		response.setMarker(marker);
-		response.setMaxKeys(maxKeys);
-		response.setPrefix(prefix);
-		response.setDelimiter(delimiter);
-		if (null != l ) {
-    		response.setTruncated(l.size() > maxKeys);
-    		if(l.size() > maxKeys) {
-    			response.setNextMarker(l.get(l.size() - 1).getNameKey());
-    		}
-		}
-		// If needed - SOAP response does not support versioning
-		response.setContents( composeListBucketContentEntries(l, prefix, delimiter, maxKeys, includeVersions, request.getVersionIdMarker()));
-		response.setCommonPrefixes( composeListBucketPrefixEntries(l, prefix, delimiter, maxKeys));
-		return response;
+        S3ListBucketResponse response = new S3ListBucketResponse();
+        String bucketName = request.getBucketName();
+        String prefix = request.getPrefix();
+        if (prefix == null) prefix = StringHelper.EMPTY_STRING;
+        String marker = request.getMarker();
+        if (marker == null)	marker = StringHelper.EMPTY_STRING;
+
+        String delimiter = request.getDelimiter();
+        int maxKeys = request.getMaxKeys();
+        if(maxKeys <= 0) maxKeys = 1000;
+
+        //
+        SBucketVO sbucket = bucketDao.getByName(bucketName);
+        if (sbucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
+
+        PolicyActions action = (includeVersions ? PolicyActions.ListBucketVersions : PolicyActions.ListBucket);
+        S3PolicyContext context = new S3PolicyContext( action, bucketName );
+        context.setEvalParam( ConditionKeys.MaxKeys, new String( "" + maxKeys ));
+        context.setEvalParam( ConditionKeys.Prefix, prefix );
+        context.setEvalParam( ConditionKeys.Delimiter, delimiter );
+        verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_READ ); 
+
+
+        // Wen execting the query, request one more item so that we know how to set isTruncated flag 
+        List<SObjectVO> l = null;
+
+        if ( includeVersions )
+            l = objectDao.listAllBucketObjects( sbucket, prefix, marker, maxKeys+1 );
+        else l = objectDao.listBucketObjects( sbucket, prefix, marker, maxKeys+1 );
+
+        response.setBucketName(bucketName);
+        response.setMarker(marker);
+        response.setMaxKeys(maxKeys);
+        response.setPrefix(prefix);
+        response.setDelimiter(delimiter);
+        if (null != l ) {
+            response.setTruncated(l.size() > maxKeys);
+            if(l.size() > maxKeys) {
+                response.setNextMarker(l.get(l.size() - 1).getNameKey());
+            }
+        }
+        // If needed - SOAP response does not support versioning
+        response.setContents( composeListBucketContentEntries(l, prefix, delimiter, maxKeys, includeVersions, request.getVersionIdMarker()));
+        response.setCommonPrefixes( composeListBucketPrefixEntries(l, prefix, delimiter, maxKeys));
+        return response;
     }
-    
+
     /**
      * Return a S3ListAllMyBucketResponse which represents a list of all buckets owned by the requester.    
      * Called from S3BucketAction for GETting all buckets.
@@ -394,90 +385,90 @@ public class S3Engine {
      */
     public S3ListAllMyBucketsResponse handleRequest(S3ListAllMyBucketsRequest request) 
     {
-    	S3ListAllMyBucketsResponse response = new S3ListAllMyBucketsResponse();   	
-
-    	
-    	// "...you can only list buckets for which you are the owner."
-    	List<SBucketVO> buckets = bucketDao.listBuckets(UserContext.current().getCanonicalUserId());
-    	S3CanonicalUser owner = new S3CanonicalUser();
-    	owner.setID(UserContext.current().getCanonicalUserId());
-    	owner.setDisplayName("");
-    	response.setOwner(owner);
-    	
-    	if (buckets != null) 
-    	{
-    		S3ListAllMyBucketsEntry[] entries = new S3ListAllMyBucketsEntry[buckets.size()];
-    		int i = 0;
-    		for(SBucketVO bucket : buckets) 
-    		{	
-    			String bucketName = bucket.getName();
-    			S3PolicyContext context = new S3PolicyContext( PolicyActions.ListAllMyBuckets, bucketName );
-    			verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_PASS ); 
- 			
-    			entries[i] = new S3ListAllMyBucketsEntry();
-    			entries[i].setName(bucketName);
-    			entries[i].setCreationDate(DateHelper.toCalendar(bucket.getCreateTime()));
-    			i++;
-    		}   		
-    		response.setBuckets(entries);
-    	}   	
-    	return response;
+        S3ListAllMyBucketsResponse response = new S3ListAllMyBucketsResponse();   	
+
+
+        // "...you can only list buckets for which you are the owner."
+        List<SBucketVO> buckets = bucketDao.listBuckets(UserContext.current().getCanonicalUserId());
+        S3CanonicalUser owner = new S3CanonicalUser();
+        owner.setID(UserContext.current().getCanonicalUserId());
+        owner.setDisplayName("");
+        response.setOwner(owner);
+
+        if (buckets != null) 
+        {
+            S3ListAllMyBucketsEntry[] entries = new S3ListAllMyBucketsEntry[buckets.size()];
+            int i = 0;
+            for(SBucketVO bucket : buckets) 
+            {	
+                String bucketName = bucket.getName();
+                S3PolicyContext context = new S3PolicyContext( PolicyActions.ListAllMyBuckets, bucketName );
+                verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_PASS ); 
+
+                entries[i] = new S3ListAllMyBucketsEntry();
+                entries[i].setName(bucketName);
+                entries[i].setCreationDate(DateHelper.toCalendar(bucket.getCreateTime()));
+                i++;
+            }   		
+            response.setBuckets(entries);
+        }   	
+        return response;
     }
-    
+
     /**
      * Return an S3Response representing the result of PUTTING the ACL of a given bucket.
      * Called from S3BucketAction to PUT its ACL.
      */
-    
+
     public S3Response handleRequest(S3SetBucketAccessControlPolicyRequest request) 
     { 
-    	S3Response response = new S3Response();	
-    	String bucketName = request.getBucketName();
-    	SBucketVO sbucket = bucketDao.getByName(bucketName);
-    	if(sbucket == null) {
-    		response.setResultCode(404);
-    		response.setResultDescription("Bucket does not exist");
-    		return response;
-    	}
-    	
-	    S3PolicyContext context = new S3PolicyContext( PolicyActions.PutBucketAcl, bucketName );
-    	verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE_ACL ); 
-
-    	aclDao.save("SBucket", sbucket.getId(), request.getAcl());
-   	
-    	response.setResultCode(200);
-    	response.setResultDescription("OK");
-    	return response;
+        S3Response response = new S3Response();	
+        String bucketName = request.getBucketName();
+        SBucketVO sbucket = bucketDao.getByName(bucketName);
+        if(sbucket == null) {
+            response.setResultCode(404);
+            response.setResultDescription("Bucket does not exist");
+            return response;
+        }
+
+        S3PolicyContext context = new S3PolicyContext( PolicyActions.PutBucketAcl, bucketName );
+        verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE_ACL ); 
+
+        aclDao.save("SBucket", sbucket.getId(), request.getAcl());
+
+        response.setResultCode(200);
+        response.setResultDescription("OK");
+        return response;
     }
-    
-    
+
+
     /**
      * Return a S3AccessControlPolicy representing the ACL of a given bucket.
      * Called from S3BucketAction to GET its ACL.
      */
-    
+
     public S3AccessControlPolicy handleRequest(S3GetBucketAccessControlPolicyRequest request) 
     {
-    	S3AccessControlPolicy policy = new S3AccessControlPolicy();   	
-    	String bucketName = request.getBucketName();
-    	SBucketVO sbucket = bucketDao.getByName( bucketName );
-    	if (sbucket == null)
-    		throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
-    	
-    	S3CanonicalUser owner = new S3CanonicalUser();
-    	owner.setID(sbucket.getOwnerCanonicalId());
-    	owner.setDisplayName("");
-    	policy.setOwner(owner);
-    	
-	    S3PolicyContext context = new S3PolicyContext( PolicyActions.GetBucketAcl, bucketName );
-    	verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_READ_ACL ); 
-
-
-    	List<SAclVO> grants = aclDao.listGrants("SBucket", sbucket.getId());
-    	policy.setGrants(S3Grant.toGrants(grants));  	
-    	return policy;
+        S3AccessControlPolicy policy = new S3AccessControlPolicy();   	
+        String bucketName = request.getBucketName();
+        SBucketVO sbucket = bucketDao.getByName( bucketName );
+        if (sbucket == null)
+            throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
+
+        S3CanonicalUser owner = new S3CanonicalUser();
+        owner.setID(sbucket.getOwnerCanonicalId());
+        owner.setDisplayName("");
+        policy.setOwner(owner);
+
+        S3PolicyContext context = new S3PolicyContext( PolicyActions.GetBucketAcl, bucketName );
+        verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_READ_ACL ); 
+
+
+        List<SAclVO> grants = aclDao.listGrants("SBucket", sbucket.getId());
+        policy.setGrants(S3Grant.toGrants(grants));  	
+        return policy;
     }
-    
+
     /**
      * This method should be called if a multipart upload is aborted OR has completed successfully and
      * the individual parts have to be cleaned up.
@@ -487,67 +478,67 @@ public class S3Engine {
      * @param verifyPermissiod - If false then do not check the user's permission to clean up the state
      */
     public int freeUploadParts(String bucketName, int uploadId, boolean verifyPermission) {
-	
-	// -> we need to look up the final bucket to figure out which mount
-	// point to use to save the part in
-	// SBucketDao bucketDao = new SBucketDao();
-	SBucketVO bucket = bucketDao.getByName(bucketName);
-	if (bucket == null) {
-	    logger.error("initiateMultipartUpload failed since " + bucketName
-		    + " does not exist");
-	    return 404;
-	}
-
-	OrderedPair<SHostVO, String> host_storagelocation_pair = getBucketStorageHost(bucket);
-	S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
-
-	try {
-	    MultipartLoadDao uploadDao = new MultipartLoadDao();
-	    OrderedPair<String, String> exists = uploadDao.multipartExits(uploadId);
-	    
-	    if (null == exists) {
-		logger.error("initiateMultipartUpload failed since multipart upload"
-			+ uploadId + " does not exist");
-		return 404;
-	    }
-
-	    // -> the multipart initiator or bucket owner can do this action by
-	    // default
-	    if (verifyPermission) {
-		String initiator = uploadDao.getInitiator(uploadId);
-		if (null == initiator
-			|| !initiator.equals(UserContext.current()
-				.getAccessKey())) {
-		    // -> write permission on a bucket allows a PutObject /
-		    // DeleteObject action on any object in the bucket
-		    S3PolicyContext context = new S3PolicyContext(
-			    PolicyActions.AbortMultipartUpload, bucketName);
-		    context.setKeyName(exists.getSecond());
-		    verifyAccess(context, "SBucket", bucket.getId(),
-			    SAcl.PERMISSION_WRITE);
-		}
-	    }
-
-	    // -> first get a list of all the uploaded files and delete one by
-	    // one
-	    S3MultipartPart[] parts = uploadDao.getParts(uploadId, 10000, 0);
-	    for (int i = 0; i < parts.length; i++) {
-		bucketAdapter.deleteObject(host_storagelocation_pair.getSecond(), ServiceProvider.getInstance()
-			.getMultipartDir(), parts[i].getPath());
-	    }
-	    uploadDao.deleteUpload(uploadId);
-	    return 204;
-
-	} catch (PermissionDeniedException e) {
-	    logger.error("freeUploadParts failed due to [" + e.getMessage()
-		    + "]", e);
-	    throw e;
-	} catch (Exception e) {
-	    logger.error("freeUploadParts failed due to [" + e.getMessage()
-		    + "]", e);
-	    return 500;
-	}
-	}
+
+        // -> we need to look up the final bucket to figure out which mount
+        // point to use to save the part in
+        // SBucketDao bucketDao = new SBucketDao();
+        SBucketVO bucket = bucketDao.getByName(bucketName);
+        if (bucket == null) {
+            logger.error("initiateMultipartUpload failed since " + bucketName
+                    + " does not exist");
+            return 404;
+        }
+
+        OrderedPair<SHostVO, String> host_storagelocation_pair = getBucketStorageHost(bucket);
+        S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
+
+        try {
+            MultipartLoadDao uploadDao = new MultipartLoadDao();
+            OrderedPair<String, String> exists = uploadDao.multipartExits(uploadId);
+
+            if (null == exists) {
+                logger.error("initiateMultipartUpload failed since multipart upload"
+                        + uploadId + " does not exist");
+                return 404;
+            }
+
+            // -> the multipart initiator or bucket owner can do this action by
+            // default
+            if (verifyPermission) {
+                String initiator = uploadDao.getInitiator(uploadId);
+                if (null == initiator
+                        || !initiator.equals(UserContext.current()
+                                .getAccessKey())) {
+                    // -> write permission on a bucket allows a PutObject /
+                    // DeleteObject action on any object in the bucket
+                    S3PolicyContext context = new S3PolicyContext(
+                            PolicyActions.AbortMultipartUpload, bucketName);
+                    context.setKeyName(exists.getSecond());
+                    verifyAccess(context, "SBucket", bucket.getId(),
+                            SAcl.PERMISSION_WRITE);
+                }
+            }
+
+            // -> first get a list of all the uploaded files and delete one by
+            // one
+            S3MultipartPart[] parts = uploadDao.getParts(uploadId, 10000, 0);
+            for (int i = 0; i < parts.length; i++) {
+                bucketAdapter.deleteObject(host_storagelocation_pair.getSecond(), ServiceProvider.getInstance()
+                        .getMultipartDir(), parts[i].getPath());
+            }
+            uploadDao.deleteUpload(uploadId);
+            return 204;
+
+        } catch (PermissionDeniedException e) {
+            logger.error("freeUploadParts failed due to [" + e.getMessage()
+                    + "]", e);
+            throw e;
+        } catch (Exception e) {
+            logger.error("freeUploadParts failed due to [" + e.getMessage()
+                    + "]", e);
+            return 500;
+        }
+    }
 
     /**
      * The initiator must have permission to write to the bucket in question in order to initiate
@@ -557,33 +548,33 @@ public class S3Engine {
      */
     public S3PutObjectInlineResponse initiateMultipartUpload(S3PutObjectInlineRequest request)
     {
-    	S3PutObjectInlineResponse response = new S3PutObjectInlineResponse();	
-		String bucketName = request.getBucketName();
-		String nameKey = request.getKey();
-
-		// -> does the bucket exist and can we write to it?
-		SBucketVO bucket = bucketDao.getByName(bucketName);
-		if (bucket == null) {
-			logger.error( "initiateMultipartUpload failed since " + bucketName + " does not exist" );
-			response.setResultCode(404);
-		}
-	    
-		S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, bucketName );
-		context.setKeyName( nameKey );
-		context.setEvalParam( ConditionKeys.Acl, request.getCannedAccess());
-		verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE );
-
-		createUploadFolder( bucketName ); 
+        S3PutObjectInlineResponse response = new S3PutObjectInlineResponse();	
+        String bucketName = request.getBucketName();
+        String nameKey = request.getKey();
+
+        // -> does the bucket exist and can we write to it?
+        SBucketVO bucket = bucketDao.getByName(bucketName);
+        if (bucket == null) {
+            logger.error( "initiateMultipartUpload failed since " + bucketName + " does not exist" );
+            response.setResultCode(404);
+        }
+
+        S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, bucketName );
+        context.setKeyName( nameKey );
+        context.setEvalParam( ConditionKeys.Acl, request.getCannedAccess());
+        verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE );
+
+        createUploadFolder( bucketName ); 
 
         try {
-    	    MultipartLoadDao uploadDao = new MultipartLoadDao();
-    	    int uploadId = uploadDao.initiateUpload( UserContext.current().getAccessKey(), bucketName, nameKey, request.getCannedAccess(), request.getMetaEntries());
-    	    response.setUploadId( uploadId );
-    	    response.setResultCode(200);
-    	    
+            MultipartLoadDao uploadDao = new MultipartLoadDao();
+            int uploadId = uploadDao.initiateUpload( UserContext.current().getAccessKey(), bucketName, nameKey, request.getCannedAccess(), request.getMetaEntries());
+            response.setUploadId( uploadId );
+            response.setResultCode(200);
+
         } catch( Exception e ) {
             logger.error("initiateMultipartUpload exception: ", e);
-        	response.setResultCode(500);
+            response.setResultCode(500);
         }
 
         return response;
@@ -600,55 +591,55 @@ public class S3Engine {
      */
     public S3PutObjectInlineResponse saveUploadPart(S3PutObjectInlineRequest request, int uploadId, int partNumber) 
     {
-    	S3PutObjectInlineResponse response = new S3PutObjectInlineResponse();	
-		String bucketName = request.getBucketName();
-
-		// -> we need to look up the final bucket to figure out which mount point to use to save the part in
-		//SBucketDao bucketDao = new SBucketDao();
-		SBucketVO bucket = bucketDao.getByName(bucketName);
-		if (bucket == null) {
-			logger.error( "saveUploadedPart failed since " + bucketName + " does not exist" );
-			response.setResultCode(404);
-		}
-		S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, bucketName );
-		context.setKeyName( request.getKey());
-		verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE );
-		
-		OrderedPair<SHostVO, String> host_storagelocation_pair = getBucketStorageHost(bucket);		
-		S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
-		String itemFileName = new String( uploadId + "-" + partNumber );
-		InputStream is = null;
-
-		try {
-		    is = request.getDataInputStream();
-		    String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), ServiceProvider.getInstance().getMultipartDir(), itemFileName);
-		    response.setETag(md5Checksum);
-        	    MultipartLoadDao uploadDao = new MultipartLoadDao();
-        	    uploadDao.savePart(uploadId, partNumber, md5Checksum, itemFileName,(int) request.getContentLength());
-        	    response.setResultCode(200);
-        	    
-		} catch (IOException e) {
-			logger.error("UploadPart failed due to " + e.getMessage(), e);
-			response.setResultCode(500);
-		} catch (OutOfStorageException e) {
-			logger.error("UploadPart failed due to " + e.getMessage(), e);
-			response.setResultCode(500);
-		} catch (Exception e) {
-			logger.error("UploadPart failed due to " + e.getMessage(), e);	
-			response.setResultCode(500);
-		} finally {
-			if(is != null) {
-				try {
-					is.close();
-				} catch (IOException e) {
-					logger.error("UploadPart unable to close stream from data handler.", e);
-				}
-			}
-		}
-    	
-		return response;
+        S3PutObjectInlineResponse response = new S3PutObjectInlineResponse();	
+        String bucketName = request.getBucketName();
+
+        // -> we need to look up the final bucket to figure out which mount point to use to save the part in
+        //SBucketDao bucketDao = new SBucketDao();
+        SBucketVO bucket = bucketDao.getByName(bucketName);
+        if (bucket == null) {
+            logger.error( "saveUploadedPart failed since " + bucketName + " does not exist" );
+            response.setResultCode(404);
+        }
+        S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, bucketName );
+        context.setKeyName( request.getKey());
+        verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE );
+
+        OrderedPair<SHostVO, String> host_storagelocation_pair = getBucketStorageHost(bucket);		
+        S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
+        String itemFileName = new String( uploadId + "-" + partNumber );
+        InputStream is = null;
+
+        try {
+            is = request.getDataInputStream();
+            String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), ServiceProvider.getInstance().getMultipartDir(), itemFileName);
+            response.setETag(md5Checksum);
+            MultipartLoadDao uploadDao = new MultipartLoadDao();
+            uploadDao.savePart(uploadId, partNumber, md5Checksum, itemFileName,(int) request.getContentLength());
+            response.setResultCode(200);
+
+        } catch (IOException e) {
+            logger.error("UploadPart failed due to " + e.getMessage(), e);
+            response.setResultCode(500);
+        } catch (OutOfStorageException e) {
+            logger.error("UploadPart failed due to " + e.getMessage(), e);
+            response.setResultCode(500);
+        } catch (Exception e) {
+            logger.error("UploadPart failed due to " + e.getMessage(), e);	
+            response.setResultCode(500);
+        } finally {
+            if(is != null) {
+                try {
+                    is.close();
+                } catch (IOException e) {
+                    logger.error("UploadPart unable to close stream from data handler.", e);
+                }
+            }
+        }
+
+        return response;
     }
-    
+
     /**
      * Create the real object represented by all the parts of the multipart upload.       
      * Called from S3ObjectAction at completion of multipart upload.
@@ -659,55 +650,55 @@ public class S3Engine {
      * N.B. - This method can be long-lasting 
      * We are required to keep the connection alive by returning whitespace characters back periodically.
      */
-    
+
     public S3PutObjectInlineResponse concatentateMultipartUploads(HttpServletResponse httpResp, S3PutObjectInlineRequest request, S3MultipartPart[] parts, OutputStream outputStream) throws IOException
     {
-    	// [A] Set up and initial error checking
-	S3PutObjectInlineResponse response = new S3PutObjectInlineResponse();
-	String bucketName = request.getBucketName();
-	String key = request.getKey();
-	S3MetaDataEntry[] meta = request.getMetaEntries();
-
-	SBucketVO bucket = bucketDao.getByName(bucketName);
-	if (bucket == null) {
-	    logger.error("completeMultipartUpload( failed since " + bucketName
-		    + " does not exist");
-	    response.setResultCode(404);
-	}
-
-	// [B] Now we need to create the final re-assembled object
-	// -> the allocObjectItem checks for the bucket policy PutObject
-	// permissions
-	OrderedPair<SObjectVO, SObjectItemVO> object_objectitem_pair = allocObjectItem(
-		bucket, key, meta, null, request.getCannedAccess());
-	OrderedPair<SHostVO, String> host_storagelocation_pair = getBucketStorageHost(bucket);
-
-	S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair
-		.getFirst());
-	String itemFileName = object_objectitem_pair.getSecond()
-		.getStoredPath();
-
-	// -> Amazon defines that we must return a 200 response immediately to
-	// the client, but
-	// -> we don't know the version header until we hit here
-	httpResp.setStatus(200);
-	httpResp.setContentType("text/xml; charset=UTF-8");
-	String version = object_objectitem_pair.getSecond().getVersion();
-	if (null != version)
-	    httpResp.addHeader("x-amz-version-id", version);
-	httpResp.flushBuffer();
-	Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
-	// [C] Re-assemble the object from its uploaded file parts
-	try {
-	    // explicit transaction control to avoid holding transaction during
-	    // long file concatenation process
-	    txn.start();
-	    OrderedPair<String, Long> result = bucketAdapter
-	            .concatentateObjects(host_storagelocation_pair.getSecond(),
-	                    bucket.getName(), itemFileName, ServiceProvider
-	                    .getInstance().getMultipartDir(), parts,
-	                    outputStream);
-	    
+        // [A] Set up and initial error checking
+        S3PutObjectInlineResponse response = new S3PutObjectInlineResponse();
+        String bucketName = request.getBucketName();
+        String key = request.getKey();
+        S3MetaDataEntry[] meta = request.getMetaEntries();
+
+        SBucketVO bucket = bucketDao.getByName(bucketName);
+        if (bucket == null) {
+            logger.error("completeMultipartUpload( failed since " + bucketName
+                    + " does not exist");
+            response.setResultCode(404);
+        }
+
+        // [B] Now we need to create the final re-assembled object
+        // -> the allocObjectItem checks for the bucket policy PutObject
+        // permissions
+        OrderedPair<SObjectVO, SObjectItemVO> object_objectitem_pair = allocObjectItem(
+                bucket, key, meta, null, request.getCannedAccess());
+        OrderedPair<SHostVO, String> host_storagelocation_pair = getBucketStorageHost(bucket);
+
+        S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair
+                .getFirst());
+        String itemFileName = object_objectitem_pair.getSecond()
+                .getStoredPath();
+
+        // -> Amazon defines that we must return a 200 response immediately to
+        // the client, but
+        // -> we don't know the version header until we hit here
+        httpResp.setStatus(200);
+        httpResp.setContentType("text/xml; charset=UTF-8");
+        String version = object_objectitem_pair.getSecond().getVersion();
+        if (null != version)
+            httpResp.addHeader("x-amz-version-id", version);
+        httpResp.flushBuffer();
+        Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
+        // [C] Re-assemble the object from its uploaded file parts
+        try {
+            // explicit transaction control to avoid holding transaction during
+            // long file concatenation process
+            txn.start();
+            OrderedPair<String, Long> result = bucketAdapter
+                    .concatentateObjects(host_storagelocation_pair.getSecond(),
+                            bucket.getName(), itemFileName, ServiceProvider
+                            .getInstance().getMultipartDir(), parts,
+                            outputStream);
+
             response.setETag(result.getFirst());
             response.setLastModified(DateHelper.toCalendar(object_objectitem_pair.getSecond().getLastModifiedTime()));
             SObjectItemVO item = itemDao.findById(object_objectitem_pair
@@ -716,13 +707,13 @@ public class S3Engine {
             item.setStoredSize(result.getSecond().longValue());
             itemDao.update(item.getId(), item);
             response.setResultCode(200);
-	} catch (Exception e) {
-	    logger.error("completeMultipartUpload failed due to " + e.getMessage(),e);
-	    txn.close();
-	}
-	return response;
+        } catch (Exception e) {
+            logger.error("completeMultipartUpload failed due to " + e.getMessage(),e);
+            txn.close();
+        }
+        return response;
     }
-    
+
     /**
      * Return a S3PutObjectInlineResponse which represents an object being created into a bucket      
      * Called from S3ObjectAction when PUTting or POTing an object.
@@ -730,61 +721,61 @@ public class S3Engine {
     @DB
     public S3PutObjectInlineResponse handleRequest(S3PutObjectInlineRequest request) 
     {
-    	S3PutObjectInlineResponse response = new S3PutObjectInlineResponse();	
-		String bucketName = request.getBucketName();
-		String key = request.getKey();
-		long contentLength = request.getContentLength();
-		S3MetaDataEntry[] meta = request.getMetaEntries();
-		S3AccessControlList acl = request.getAcl();
-		
-		SBucketVO bucket = bucketDao.getByName(bucketName);
-		if (bucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
-		
-
-		// Is the caller allowed to write the object?
-		// The allocObjectItem checks for the bucket policy PutObject permissions
-		OrderedPair<SObjectVO, SObjectItemVO> object_objectitem_pair = allocObjectItem(bucket, key, meta, acl, request.getCannedAccess());
-		OrderedPair<SHostVO, String> host_storagelocation_pair = getBucketStorageHost(bucket);		
-		
-		S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
-		String itemFileName = object_objectitem_pair.getSecond().getStoredPath();
-		InputStream is = null;
-		Transaction txn = null;
-		try {
-			// explicit transaction control to avoid holding transaction during file-copy process
-			
-		    txn = Transaction.open(Transaction.AWSAPI_DB);
-		    txn.start();
-			is = request.getDataInputStream();
-			String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), bucket.getName(), itemFileName);
-			response.setETag(md5Checksum);
-			response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime()));
-	        response.setVersion( object_objectitem_pair.getSecond().getVersion());
-		
-			//SObjectItemDaoImpl itemDao = new SObjectItemDaoImpl();
-			SObjectItemVO item = itemDao.findById(object_objectitem_pair.getSecond().getId());
-			item.setMd5(md5Checksum);
-			item.setStoredSize(contentLength);
-			itemDao.update(item.getId(), item);
-			txn.commit();
-		} catch (IOException e) {
-			logger.error("PutObjectInline failed due to " + e.getMessage(), e);
-		} catch (OutOfStorageException e) {
-			logger.error("PutObjectInline failed due to " + e.getMessage(), e);
-		} finally {
-			if(is != null) {
-				try {
-					is.close();
-				} catch (IOException e) {
-					logger.error("PutObjectInline unable to close stream from data handler.", e);
-				}
-			}
-			txn.close();
-		}
-    	
-    	return response;
+        S3PutObjectInlineResponse response = new S3PutObjectInlineResponse();	
+        String bucketName = request.getBucketName();
+        String key = request.getKey();
+        long contentLength = request.getContentLength();
+        S3MetaDataEntry[] meta = request.getMetaEntries();
+        S3AccessControlList acl = request.getAcl();
+
+        SBucketVO bucket = bucketDao.getByName(bucketName);
+        if (bucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
+
+
+        // Is the caller allowed to write the object?
+        // The allocObjectItem checks for the bucket policy PutObject permissions
+        OrderedPair<SObjectVO, SObjectItemVO> object_objectitem_pair = allocObjectItem(bucket, key, meta, acl, request.getCannedAccess());
+        OrderedPair<SHostVO, String> host_storagelocation_pair = getBucketStorageHost(bucket);		
+
+        S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
+        String itemFileName = object_objectitem_pair.getSecond().getStoredPath();
+        InputStream is = null;
+        Transaction txn = null;
+        try {
+            // explicit transaction control to avoid holding transaction during file-copy process
+
+            txn = Transaction.open(Transaction.AWSAPI_DB);
+            txn.start();
+            is = request.getDataInputStream();
+            String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), bucket.getName(), itemFileName);
+            response.setETag(md5Checksum);
+            response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime()));
+            response.setVersion( object_objectitem_pair.getSecond().getVersion());
+
+            //SObjectItemDaoImpl itemDao = new SObjectItemDaoImpl();
+            SObjectItemVO item = itemDao.findById(object_objectitem_pair.getSecond().getId());
+            item.setMd5(md5Checksum);
+            item.setStoredSize(contentLength);
+            itemDao.update(item.getId(), item);
+            txn.commit();
+        } catch (IOException e) {
+            logger.error("PutObjectInline failed due to " + e.getMessage(), e);
+        } catch (OutOfStorageException e) {
+            logger.error("PutObjectInline failed due to " + e.getMessage(), e);
+        } finally {
+            if(is != null) {
+                try {
+                    is.close();
+                } catch (IOException e) {
+                    logger.error("PutObjectInline unable to close stream from data handler.", e);
+                }
+            }
+            txn.close();
+        }
+
+        return response;
     }
-    
+
     /**
      * Return a S3PutObjectResponse which represents an object being created into a bucket      
      * Called from S3RestServlet when processing a DIME request.
@@ -792,56 +783,56 @@ public class S3Engine {
 
     public S3PutObjectResponse handleRequest(S3PutObjectRequest request)  
     {
-    	S3PutObjectResponse response = new S3PutObjectResponse();	
-		String bucketName = request.getBucketName();
-		String key = request.getKey();
-		long contentLength = request.getContentLength();
-		S3MetaDataEntry[] meta = request.getMetaEntries();
-		S3AccessControlList acl = request.getAcl();
-		
-		SBucketVO bucket = bucketDao.getByName(bucketName);
-		if(bucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
-		
-		// Is the caller allowed to write the object?	
-		// The allocObjectItem checks for the bucket policy PutObject permissions
-		OrderedPair<SObjectVO, SObjectItemVO> object_objectitem_pair = allocObjectItem(bucket, key, meta, acl, null);
-		OrderedPair<SHostVO, String> host_storagelocation_pair = getBucketStorageHost(bucket);
-    	
-		S3BucketAdapter bucketAdapter =  getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
-		String itemFileName = object_objectitem_pair.getSecond().getStoredPath();
-		InputStream is = null;
-		Transaction txn = null;
-		try {
-			// explicit transaction control to avoid holding transaction during file-copy process
-		    
-	        txn = Transaction.open(Transaction.AWSAPI_DB);
-	        txn.start();
-			
-			is = request.getInputStream();
-			String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), bucket.getName(), itemFileName);
-			response.setETag(md5Checksum);
-			response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime()));
-			
-			SObjectItemVO item = itemDao.findById(object_objectitem_pair.getSecond().getId());
-			item.setMd5(md5Checksum);
-			item.setStoredSize(contentLength);
-			itemDao.update(item.getId(), item);
-			txn.commit();
-			
-		} catch (OutOfStorageException e) {
-			logger.error("PutObject failed due to " + e.getMessage(), e);
-		} finally {
-			if(is != null) {
-				try {
-					is.close();
-				} catch (IOException e) {
-					logger.error("Unable to close stream from data handler.", e);
-				}
-			}
-			txn.close();
-		}
-    	
-    	return response;
+        S3PutObjectResponse response = new S3PutObjectResponse();	
+        String bucketName = request.getBucketName();
+        String key = request.getKey();
+        long contentLength = request.getContentLength();
+        S3MetaDataEntry[] meta = request.getMetaEntries();
+        S3AccessControlList acl = request.getAcl();
+
+        SBucketVO bucket = bucketDao.getByName(bucketName);
+        if(bucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
+
+        // Is the caller allowed to write the object?	
+        // The allocObjectItem checks for the bucket policy PutObject permissions
+        OrderedPair<SObjectVO, SObjectItemVO> object_objectitem_pair = allocObjectItem(bucket, key, meta, acl, null);
+        OrderedPair<SHostVO, String> host_storagelocation_pair = getBucketStorageHost(bucket);
+
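+        // Resolve the storage host that backs this bucket and the bucket adapter used to write objects to it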
+        S3BucketAdapter bucketAdapter =  getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
+        String itemFileName = object_objectitem_pair.getSecond().getStoredPath();
+        InputStream is = null;
+        Transaction txn = null;
+        try {
+            // explicit transaction control to avoid holding transaction during file-copy process
+
+            txn = Transaction.open(Transaction.AWSAPI_DB);
+            txn.start();
+
+            is = request.getInputStream();
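+            // saveObject streams the request body to the backing store and returns an MD5 hex digest,
+            // which is recorded on the item and reported back to the client as the ETag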
+            String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), bucket.getName(), itemFileName);
+            response.setETag(md5Checksum);
+            response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime()));
+
+            SObjectItemVO item = itemDao.findById(object_objectitem_pair.getSecond().getId());
+            item.setMd5(md5Checksum);
+            item.setStoredSize(contentLength);
+            itemDao.update(item.getId(), item);
+            txn.commit();
+
+        } catch (OutOfStorageException e) {
+            logger.error("PutObject failed due to " + e.getMessage(), e);
+        } finally {
+            if(is != null) {
+                try {
+                    is.close();
+                } catch (IOException e) {
+                    logger.error("Unable to close stream from data handler.", e);
+                }
+            }
+            txn.close();
+        }
+
+        return response;
     }
 
     /**
@@ -849,795 +840,795 @@ public class S3Engine {
      * version of an object. To set the ACL of a different version, use the versionId subresource.
      * Called from S3ObjectAction to PUT an object's ACL.
      */
-    
+
     public S3Response handleRequest(S3SetObjectAccessControlPolicyRequest request) 
     {
-    	S3PolicyContext context = null;
-    	
-    	// [A] First find the object in the bucket
-    	S3Response response  = new S3Response(); 	
-    	String bucketName = request.getBucketName();
-    	SBucketVO sbucket = bucketDao.getByName( bucketName );
-    	if(sbucket == null) {
-    		response.setResultCode(404);
-    		response.setResultDescription("Bucket " + bucketName + "does not exist");
-    		return response;
-    	}
-    	
-    	String nameKey = request.getKey();
-    	SObjectVO sobject = objectDao.getByNameKey( sbucket, nameKey );   	
-    	if(sobject == null) {
-    		response.setResultCode(404);
-    		response.setResultDescription("Object " + request.getKey() + " in bucket " + bucketName + " does not exist");
-    		return response;
-    	}
-    	
-		String deletionMark = sobject.getDeletionMark();
-		if (null != deletionMark) {
-			response.setResultCode(404);
-			response.setResultDescription("Object " + request.getKey() + " has been deleted (1)");
-			return response;
-		}
-		
-
-		// [B] Versioning allow the client to ask for a specific version not just the latest
-		SObjectItemVO item = null;
+        S3PolicyContext context = null;
+
+        // [A] First find the object in the bucket
+        S3Response response  = new S3Response(); 	
+        String bucketName = request.getBucketName();
+        SBucketVO sbucket = bucketDao.getByName( bucketName );
+        if(sbucket == null) {
+            response.setResultCode(404);
+            response.setResultDescription("Bucket " + bucketName + "does not exist");
+            return response;
+        }
+
+        String nameKey = request.getKey();
+        SObjectVO sobject = objectDao.getByNameKey( sbucket, nameKey );   	
+        if(sobject == null) {
+            response.setResultCode(404);
+            response.setResultDescription("Object " + request.getKey() + " in bucket " + bucketName + " does not exist");
+            return response;
+        }
+
+        String deletionMark = sobject.getDeletionMark();
+        if (null != deletionMark) {
+            response.setResultCode(404);
+            response.setResultDescription("Object " + request.getKey() + " has been deleted (1)");
+            return response;
+        }
+
+
+        // [B] Versioning allows the client to ask for a specific version, not just the latest
+        SObjectItemVO item = null;
         int versioningStatus = sbucket.getVersioningStatus();
-		String wantVersion = request.getVersion();
-		if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion)
-			 item = sobject.getVersion( wantVersion );
-		else item = sobject.getLatestVersion(( SBucket.VERSIONING_ENABLED != versioningStatus ));    
-    	
-		if (item == null) {
-    		response.setResultCode(404);
-			response.setResultDescription("Object " + request.getKey() + " has been deleted (2)");
-			return response;  		
-    	}
-
-		if ( SBucket.VERSIONING_ENABLED == versioningStatus ) {
-			 context = new S3PolicyContext( PolicyActions.PutObjectAclVersion, bucketName );
-			 context.setEvalParam( ConditionKeys.VersionId, wantVersion );
-			 response.setVersion( item.getVersion());
-		}
-		else context = new S3PolicyContext( PolicyActions.PutObjectAcl, bucketName );		
-		context.setKeyName( nameKey );
-		verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_WRITE_ACL );		
-
-		// -> the acl always goes on the instance of the object
-    	aclDao.save("SObjectItem", item.getId(), request.getAcl());
-    	
-    	response.setResultCode(200);
-    	response.setResultDescription("OK");
-    	return response;
+        String wantVersion = request.getVersion();
+        if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion)
+            item = sobject.getVersion( wantVersion );
+        else item = sobject.getLatestVersion(( SBucket.VERSIONING_ENABLED != versioningStatus ));    
+
+        if (item == null) {
+            response.setResultCode(404);
+            response.setResultDescription("Object " + request.getKey() + " has been deleted (2)");
+            return response;  		
+        }
+
+        if ( SBucket.VERSIONING_ENABLED == versioningStatus ) {
+            context = new S3PolicyContext( PolicyActions.PutObjectAclVersion, bucketName );
+            context.setEvalParam( ConditionKeys.VersionId, wantVersion );
+            response.setVersion( item.getVersion());
+        }
+        else context = new S3PolicyContext( PolicyActions.PutObjectAcl, bucketName );		
+        context.setKeyName( nameKey );
+        verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_WRITE_ACL );		
+
+        // -> the ACL always goes on the specific instance (version) of the object
+        aclDao.save("SObjectItem", item.getId(), request.getAcl());
+
+        response.setResultCode(200);
+        response.setResultDescription("OK");
+        return response;
     }
-    
+
     /**
      * By default, GET returns ACL information about the latest version of an object. To return ACL 
      * information about a different version, use the versionId subresource
      * Called from S3ObjectAction to get an object's ACL.
      */
-    
+
     public S3AccessControlPolicy handleRequest(S3GetObjectAccessControlPolicyRequest request) 
     {
-    	S3PolicyContext context = null;
-
-    	// [A] Does the object exist that holds the ACL we are looking for?
-    	S3AccessControlPolicy policy = new S3AccessControlPolicy();	
-    	
-    	String bucketName = request.getBucketName();
-    	SBucketVO sbucket = bucketDao.getByName( bucketName );
-    	if (sbucket == null)
-    		throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
-    	
-    	//SObjectDaoImpl sobjectDao = new SObjectDaoImpl();
-    	String nameKey = request.getKey();
-    	SObjectVO sobject = objectDao.getByNameKey( sbucket, nameKey );
-    	if (sobject == null)
-    		throw new NoSuchObjectException("Object " + request.getKey() + " does not exist");
-    		
-		String deletionMark = sobject.getDeletionMark();
-		if (null != deletionMark) {
-			policy.setResultCode(404);
-			policy.setResultDescription("Object " + request.getKey() + " has been deleted (1)");
-			return policy;
-		}
-		
-
-		// [B] Versioning allow the client to ask for a specific version not just the latest
-		SObjectItemVO item = null;
+        S3PolicyContext context = null;
+
+        // [A] Does the object exist that holds the ACL we are looking for?
+        S3AccessControlPolicy policy = new S3AccessControlPolicy();	
+
+        String bucketName = request.getBucketName();
+        SBucketVO sbucket = bucketDao.getByName( bucketName );
+        if (sbucket == null)
+            throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
+
+        //SObjectDaoImpl sobjectDao = new SObjectDaoImpl();
+        String nameKey = request.getKey();
+        SObjectVO sobject = objectDao.getByNameKey( sbucket, nameKey );
+        if (sobject == null)
+            throw new NoSuchObjectException("Object " + request.getKey() + " does not exist");
+
+        String deletionMark = sobject.getDeletionMark();
+        if (null != deletionMark) {
+            policy.setResultCode(404);
+            policy.setResultDescription("Object " + request.getKey() + " has been deleted (1)");
+            return policy;
+        }
+
+
+        // [B] Versioning allows the client to ask for a specific version, not just the latest
+        SObjectItemVO item = null;
         int versioningStatus = sbucket.getVersioningStatus();
-		String wantVersion = request.getVersion();
-		if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion)
-			 item = sobject.getVersion( wantVersion );
-		else item = sobject.getLatestVersion(( SBucket.VERSIONING_ENABLED != versioningStatus ));    
-    	
-		if (item == null) {
-    		policy.setResultCode(404);
-			policy.setResultDescription("Object " + request.getKey() + " has been deleted (2)");
-			return policy;  		
-    	}
-
-		if ( SBucket.VERSIONING_ENABLED == versioningStatus ) {
-			 context = new S3PolicyContext( PolicyActions.GetObjectVersionAcl, bucketName );
-			 context.setEvalParam( ConditionKeys.VersionId, wantVersion );
-			 policy.setVersion( item.getVersion());
-		}
-		else context = new S3PolicyContext( PolicyActions.GetObjectAcl, bucketName );		
-		context.setKeyName( nameKey );
-		verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_READ_ACL );		
-
-    	
+        String wantVersion = request.getVersion();
+        if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion)
+            item = sobject.getVersion( wantVersion );
+        else item = sobject.getLatestVersion(( SBucket.VERSIONING_ENABLED != versioningStatus ));    
+
+        if (item == null) {
+            policy.setResultCode(404);
+            policy.setResultDescription("Object " + request.getKey() + " has been deleted (2)");
+            return policy;  		
+        }
+
+        if ( SBucket.VERSIONING_ENABLED == versioningStatus ) {
+            context = new S3PolicyContext( PolicyActions.GetObjectVersionAcl, bucketName );
+            context.setEvalParam( ConditionKeys.VersionId, wantVersion );
+            policy.setVersion( item.getVersion());
+        }
+        else context = new S3PolicyContext( PolicyActions.GetObjectAcl, bucketName );		
+        context.setKeyName( nameKey );
+        verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_READ_ACL );		
+
+
         // [C] ACLs are ALWAYS on an instance of the object
-    	S3CanonicalUser owner = new S3CanonicalUser();
-    	owner.setID(sobject.getOwnerCanonicalId());
-    	owner.setDisplayName("");
-    	policy.setOwner(owner);
-		policy.setResultCode(200);
-   	
-    	
-    	List<SAclVO> grants = aclDao.listGrants( "SObjectItem", item.getId());
-    	policy.setGrants(S3Grant.toGrants(grants));    
-    	return policy;
+        S3CanonicalUser owner = new S3CanonicalUser();
+        owner.setID(sobject.getOwnerCanonicalId());
+        owner.setDisplayName("");
+        policy.setOwner(owner);
+        policy.setResultCode(200);
+
+
+        List<SAclVO> grants = aclDao.listGrants( "SObjectItem", item.getId());
+        policy.setGrants(S3Grant.toGrants(grants));    
+        return policy;
     }
-    
+
     /**
      * Handle requests for GET object and HEAD "get object extended"
      * Called from S3ObjectAction for GET and HEAD of an object.
      */
-    
+
     public S3GetObjectResponse handleRequest(S3GetObjectRequest request) 
     {
-    	S3GetObjectResponse response = new S3GetObjectResponse();
-    	S3PolicyContext context = null;
-    	boolean ifRange = false;
-		long bytesStart = request.getByteRangeStart();
-		long bytesEnd   = request.getByteRangeEnd();
-    	int resultCode  = 200;
-
-    	// [A] Verify that the bucket and the object exist
-		
-		String bucketName = request.getBucketName();
-		SBucketVO sbucket = bucketDao.getByName(bucketName);
-		if (sbucket == null) {
-			response.setResultCode(404);
-			response.setResultDescription("Bucket " + request.getBucketName() + " does not exist");
-			return response;
-		}
-
-		String nameKey = request.getKey();
-		SObjectVO sobject = objectDao.getByNameKey( sbucket, nameKey );
-		if (sobject == null) {
-			response.setResultCode(404);
-			response.setResultDescription("Object " + request.getKey() + " does not exist in bucket " + request.getBucketName());
-			return response;
-		}
-		
-		String deletionMark = sobject.getDeletionMark();
-		if (null != deletionMark) {
-		    response.setDeleteMarker( deletionMark );
-			response.setResultCode(404);
-			response.setResultDescription("Object " + request.getKey() + " has been deleted (1)");
-			return response;
-		}
-		
-
-		// [B] Versioning allow the client to ask for a specific version not just the latest
-		SObjectItemVO item = null;
+        S3GetObjectResponse response = new S3GetObjectResponse();
+        S3PolicyContext context = null;
+        boolean ifRange = false;
+        long bytesStart = request.getByteRangeStart();
+        long bytesEnd   = request.getByteRangeEnd();
+        int resultCode  = 200;
+
+        // [A] Verify that the bucket and the object exist
+
+        String bucketName = request.getBucketName();
+        SBucketVO sbucket = bucketDao.getByName(bucketName);
+        if (sbucket == null) {
+            response.setResultCode(404);
+            response.setResultDescription("Bucket " + request.getBucketName() + " does not exist");
+            return response;
+        }
+
+        String nameKey = request.getKey();
+        SObjectVO sobject = objectDao.getByNameKey( sbucket, nameKey );
+        if (sobject == null) {
+            response.setResultCode(404);
+            response.setResultDescription("Object " + request.getKey() + " does not exist in bucket " + request.getBucketName());
+            return response;
+        }
+
+        String deletionMark = sobject.getDeletionMark();
+        if (null != deletionMark) {
+            response.setDeleteMarker( deletionMark );
+            response.setResultCode(404);
+            response.setResultDescription("Object " + request.getKey() + " has been deleted (1)");
+            return response;
+        }
+
+
+        // [B] Versioning allows the client to ask for a specific version, not just the latest
+        SObjectItemVO item = null;
         int versioningStatus = sbucket.getVersioningStatus();
-		String wantVersion = request.getVersion();
-		if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion) 			
-			 item = sobject.getVersion( wantVersion );
-		else item = sobject.getLatestVersion(( SBucket.VERSIONING_ENABLED != versioningStatus ));    
-    	
-		if (item == null) {
-    		response.setResultCode(404);
-			response.setResultDescription("Object " + request.getKey() + " has been deleted (2)");
-			return response;  		
-    	}
-		
-		if ( SBucket.VERSIONING_ENABLED == versioningStatus ) {
-			 context = new S3PolicyContext( PolicyActions.GetObjectVersion, bucketName );
-			 context.setEvalParam( ConditionKeys.VersionId, wantVersion );
-		}
-		else context = new S3PolicyContext( PolicyActions.GetObject, bucketName );		
- 		context.setKeyName( nameKey );
-		verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_READ );		
-		
-			
-	    // [C] Handle all the IFModifiedSince ... conditions, and access privileges
-		// -> http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.27 (HTTP If-Range header)
-		if (request.isReturnCompleteObjectOnConditionFailure() && (0 <= bytesStart && 0 <= bytesEnd)) ifRange = true;
-
-		resultCode = conditionPassed( request.getConditions(), item.getLastModifiedTime(), item.getMd5(), ifRange );
-	    if ( -1 == resultCode ) {
-	    	// -> If-Range implementation, we have to return the entire object
-	    	resultCode = 200;
-	    	bytesStart = -1;
-	    	bytesEnd = -1;
-	    }
-	    else if (200 != resultCode) {
-			response.setResultCode( resultCode );
-	    	response.setResultDescription( "Precondition Failed" );			
-			return response;
-		}
-
-
-		// [D] Return the contents of the object inline	
-		// -> extract the meta data that corresponds the specific versioned item 
-
-		List<SMetaVO> itemMetaData =   metaDao.getByTarget( "SObjectItem", item.getId());
-		if (null != itemMetaData) 
-		{
-			int i = 0;
-			S3MetaDataEntry[] metaEntries = new S3MetaDataEntry[ itemMetaData.size() ];
-		    ListIterator<SMetaVO> it = itemMetaData.listIterator();
-		    while( it.hasNext()) {
-		    	SMetaVO oneTag = (SMetaVO)it.next();
-		    	S3MetaDataEntry oneEntry = new S3MetaDataEntry();
-		    	oneEntry.setName( oneTag.getName());
-		    	oneEntry.setValue( oneTag.getValue());
-		    	metaEntries[i++] = oneEntry;
-		    }
-		    response.setMetaEntries( metaEntries );
-		}
-		    
-		//  -> support a single byte range
-		if ( 0 <= bytesStart && 0 <= bytesEnd ) {
-		     response.setContentLength( bytesEnd - bytesStart );	
-		     resultCode = 206;
-		}
-		else response.setContentLength( item.getStoredSize());
- 
-    	if(request.isReturnData()) 
-    	{
-    		response.setETag(item.getMd5());
-    		response.setLastModified(DateHelper.toCalendar( item.getLastModifiedTime()));
-    		response.setVersion( item.getVersion());
-    		if (request.isInlineData()) 
-    		{
-    			OrderedPair<SHostVO, String> tupleSHostInfo = getBucketStorageHost(sbucket);
-				S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleSHostInfo.getFirst());
-				
-				if ( 0 <= bytesStart && 0 <= bytesEnd )
-					 response.setData(bucketAdapter.loadObjectRange(tupleSHostInfo.getSecond(), 
-						   request.getBucketName(), item.getStoredPath(), bytesStart, bytesEnd ));
-				else response.setData(bucketAdapter.loadObject(tupleSHostInfo.getSecond(), request.getBucketName(), item.getStoredPath()));
-    		} 
-    	}
-    	
-    	response.setResultCode( resultCode );
-    	response.setResultDescription("OK");
-    	return response;
+        String wantVersion = request.getVersion();
+        if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion) 			
+            item = sobject.getVersion( wantVersion );
+        else item = sobject.getLatestVersion(( SBucket.VERSIONING_ENABLED != versioningStatus ));    
+
+        if (item == null) {
+            response.setResultCode(404);
+            response.setResultDescription("Object " + request.getKey() + " has been deleted (2)");
+            return response;  		
+        }
+
+        if ( SBucket.VERSIONING_ENABLED == versioningStatus ) {
+            context = new S3PolicyContext( PolicyActions.GetObjectVersion, bucketName );
+            context.setEvalParam( ConditionKeys.VersionId, wantVersion );
+        }
+        else context = new S3PolicyContext( PolicyActions.GetObject, bucketName );		
+        context.setKeyName( nameKey );
+        verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_READ );		
+
+
+        // [C] Handle all the If-Modified-Since ... conditions, and access privileges
+        // -> http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.27 (HTTP If-Range header)
+        if (request.isReturnCompleteObjectOnConditionFailure() && (0 <= bytesStart && 0 <= bytesEnd)) ifRange = true;
+
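+        // conditionPassed yields 200 when every precondition holds, -1 when only the If-Range condition
+        // fails (fall back to returning the whole object), or the HTTP status code to report otherwise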
+        resultCode = conditionPassed( request.getConditions(), item.getLastModifiedTime(), item.getMd5(), ifRange );
+        if ( -1 == resultCode ) {
+            // -> If-Range implementation, we have to return the entire object
+            resultCode = 200;
+            bytesStart = -1;
+            bytesEnd = -1;
+        }
+        else if (200 != resultCode) {
+            response.setResultCode( resultCode );
+            response.setResultDescription( "Precondition Failed" );			
+            return response;
+        }
+
+
+        // [D] Return the contents of the object inline	
+        // -> extract the metadata that corresponds to the specific versioned item
+
+        List<SMetaVO> itemMetaData =   metaDao.getByTarget( "SObjectItem", item.getId());
+        if (null != itemMetaData) 
+        {
+            int i = 0;
+            S3MetaDataEntry[] metaEntries = new S3MetaDataEntry[ itemMetaData.size() ];
+            ListIterator<SMetaVO> it = itemMetaData.listIterator();
+            while( it.hasNext()) {
+                SMetaVO oneTag = it.next();
+                S3MetaDataEntry oneEntry = new S3MetaDataEntry();
+                oneEntry.setName( oneTag.getName());
+                oneEntry.setValue( oneTag.getValue());
+                metaEntries[i++] = oneEntry;
+            }
+            response.setMetaEntries( metaEntries );
+        }
+
+        //  -> support a single byte range
+        if ( 0 <= bytesStart && 0 <= bytesEnd ) {
+            response.setContentLength( bytesEnd - bytesStart );	
+            resultCode = 206;
+        }
+        else response.setContentLength( item.getStoredSize());
+
+        if(request.isReturnData()) 
+        {
+            response.setETag(item.getMd5());
+            response.setLastModified(DateHelper.toCalendar( item.getLastModifiedTime()));
+            response.setVersion( item.getVersion());
+            if (request.isInlineData()) 
+            {
+                OrderedPair<SHostVO, String> tupleSHostInfo = getBucketStorageHost(sbucket);
+                S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleSHostInfo.getFirst());
+
+                if ( 0 <= bytesStart && 0 <= bytesEnd )
+                    response.setData(bucketAdapter.loadObjectRange(tupleSHostInfo.getSecond(), 
+                            request.getBucketName(), item.getStoredPath(), bytesStart, bytesEnd ));
+                else response.setData(bucketAdapter.loadObject(tupleSHostInfo.getSecond(), request.getBucketName(), item.getStoredPath()));
+            } 
+        }
+
+        response.setResultCode( resultCode );
+        response.setResultDescription("OK");
+        return response;
     }
-    
+
     /**
      * Handle object deletion requests, both versioning and non-versioning requirements.
      * Called from S3ObjectAction for deletion.
      */
-	public S3Response handleRequest(S3DeleteObjectRequest request) 
-	{		
-		// Verify that the bucket and object exist
-		S3Response response  = new S3Response();
-		
-		String bucketName = request.getBucketName();
-		SBucketVO sbucket = bucketDao.getByName( bucketName );
-		if (sbucket == null) {
-			response.setResultCode(404);
-			response.setResultDescription("<Code>Bucket dosen't exists</Code><Message>Bucket " + bucketName + " does not exist</Message>");
-			return response;
-		}
-		
-		
-		String nameKey = request.getKey();
-		SObjectVO sobject = objectDao.getByNameKey( sbucket, nameKey );
-		if (sobject == null) {
-			response.setResultCode(404);
-			response.setResultDescription("<Code>Not Found</Code><Message>No object with key " +  nameKey + " exists in bucket " + bucketName+"</Message>");
-			return response;
-		}
-		
-				
-		// Discover whether versioning is enabled.  If so versioning requires the setting of a deletion marker.
-		String storedPath = null;
-		SObjectItemVO item = null;
+    public S3Response handleRequest(S3DeleteObjectRequest request) 
+    {		
+        // Verify that the bucket and object exist
+        S3Response response  = new S3Response();
+
+        String bucketName = request.getBucketName();
+        SBucketVO sbucket = bucketDao.getByName( bucketName );
+        if (sbucket == null) {
+            response.setResultCode(404);
+            response.setResultDescription("<Code>Bucket dosen't exists</Code><Message>Bucket " + bucketName + " does not exist</Message>");
+            return response;
+        }
+
+
+        String nameKey = request.getKey();
+        SObjectVO sobject = objectDao.getByNameKey( sbucket, nameKey );
+        if (sobject == null) {
+            response.setResultCode(404);
+            response.setResultDescription("<Code>Not Found</Code><Message>No object with key " +  nameKey + " exists in bucket " + bucketName+"</Message>");
+            return response;
+        }
+
+
+        // Discover whether versioning is enabled.  If so, a delete without a versionId only writes a deletion marker.
+        String storedPath = null;
+        SObjectItemVO item = null;
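+        // storedPath stays null unless a backing file must also be removed from storage once the database updates are done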
         int versioningStatus = sbucket.getVersioningStatus();
-		if ( SBucket.VERSIONING_ENABLED == versioningStatus ) 
-	    {
-			 String wantVersion = request.getVersion();
-			 S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteObjectVersion, bucketName );
-			 context.setKeyName( nameKey );
-			 context.setEvalParam( ConditionKeys.VersionId, wantVersion );
-			 verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE );
-
-			 if (null == wantVersion) {
-				 // If versioning is on and no versionId is given then we just write a deletion marker
-				 sobject.setDeletionMark( UUID.randomUUID().toString());
-				 objectDao.update(sobject.getId(), sobject );
-				 response.setResultDescription("<DeleteMarker>true</DeleteMarker><DeleteMarkerVersionId>"+ sobject.getDeletionMark() +"</DeleteMarkerVersionId>");
-			 }
-			 else {	
-				  // Otherwise remove the deletion marker if this has been set
-				  String deletionMarker = sobject.getDeletionMark();
-				  if (null != deletionMarker && wantVersion.equalsIgnoreCase( deletionMarker )) {
-					  sobject.setDeletionMark( null );  
-			    	  objectDao.update(sobject.getId(), sobject );	
-			    	  response.setResultDescription("<VersionId>" + wantVersion +"</VersionId>");
-			    	  response.setResultDescription("<DeleteMarker>true</DeleteMarker><DeleteMarkerVersionId>"+ sobject.getDeletionMark() +"</DeleteMarkerVersionId>");
-			  		  response.setResultCode(204);
-					  return response;
-	              }
-				  
-				  // If versioning is on and the versionId is given (non-null) then delete the object matching that version
-			      if ( null == (item = sobject.getVersion( wantVersion ))) {
-			    	   response.setResultCode(404);
-			    	   return response;
-			      }
-			      else {
-			    	   // Providing versionId is non-null, then just delete the one item that matches the versionId from the database
-			    	   storedPath = item.getStoredPath();
-			    	   sobject.deleteItem( item.getId());
-			    	   objectDao.update(sobject.getId(), sobject );
-			    	   response.setResultDescription("<VersionId>" + wantVersion +"</VersionId>");
-			      }
-			 }
-	    }
-		else 
-		{	 // If versioning is off then we do delete the null object
-			 S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteObject, bucketName );
-			 context.setKeyName( nameKey );
-			 verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE );
-
-			 if ( null == (item = sobject.getLatestVersion( true ))) {
-		    	  response.setResultCode(404);
-		    	  response.setResultDescription("<Code>AccessDenied</Code><Message>Access Denied</Message>");
-		    	  return response;
-		     }
-		     else {
-		    	  // If there is no item with a null version then we are done
-		    	  if (null == item.getVersion()) {
-		    	      // Otherwiswe remove the entire object 
-		    	      // Cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl and SMeta objects.
-		    	      storedPath = item.getStoredPath();
-		    	      deleteMetaData( item.getId());
-		    	      deleteObjectAcls( "SObjectItem", item.getId());
-		    	      objectDao.remove(sobject.getId());
-		    	  }
-		     }
-		}
-		
-		// Delete the file holding the object
-		if (null != storedPath) 
-		{
-			 OrderedPair<SHostVO, String> host_storagelocation_pair = getBucketStorageHost( sbucket );
-			 S3BucketAdapter bucketAdapter =  getStorageHostBucketAdapter( host_storagelocation_pair.getFirst());		 
-			 bucketAdapter.deleteObject( host_storagelocation_pair.getSecond(), bucketName, storedPath );		
-		}
-		
-		response.setResultCode(204);
-		return response;
+        if ( SBucket.VERSIONING_ENABLED == versioningStatus ) 
+        {
+            String wantVersion = request.getVersion();
+            S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteObjectVersion, bucketName );
+            context.setKeyName( nameKey );
+            context.setEvalParam( ConditionKeys.VersionId, wantVersion );
+            verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE );
+
+            if (null == wantVersion) {
+                // If versioning is on and no versionId is given then we just write a deletion marker
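+                // The generated UUID is the delete marker's version id, echoed back below as DeleteMarkerVersionId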
+                sobject.setDeletionMark( UUID.randomUUID().toString());
+                objectDao.update(sobject.getId(), sobject );
+                response.setResultDescription("<DeleteMarker>true</DeleteMarker><DeleteMarkerVersionId>"+ sobject.getDeletionMark() +"</DeleteMarkerVersionId>");
+            }
+            else {	
+                // Otherwise remove the deletion marker if this has been set
+                String deletionMarker = sobject.getDeletionMark();
+                if (null != deletionMarker && wantVersion.equalsIgnoreCase( deletionMarker )) {
+                    sobject.setDeletionMark( null );  
+                    objectDao.update(sobject.getId(), sobject );	
+                    response.setResultDescription("<VersionId>" + wantVersion +"</VersionId>");
+                    response.setResultDescription("<DeleteMarker>true</DeleteMarker><DeleteMarkerVersionId>"+ sobject.getDeletionMark() +"</DeleteMarkerVersionId>");
+                    response.setResultCode(204);
+                    return response;
+                }
+
+                // If versioning is on and the versionId is given (non-null) then delete the object matching that version
+                if ( null == (item = sobject.getVersion( wantVersion ))) {
+                    response.setResultCode(404);
+                    return response;
+                }
+                else {
+                    // Since the versionId is non-null, delete just the one item matching that version from the database
+                    storedPath = item.getStoredPath();
+                    sobject.deleteItem( item.getId());
+                    objectDao.update(sobject.getId(), sobject );
+                    response.setResultDescription("<VersionId>" + wantVersion +"</VersionId>");
+                }
+            }
+        }
+        else 
+        {   // If versioning is off then we delete the null-version object
+            S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteObject, bucketName );
+            context.setKeyName( nameKey );
+            verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE );
+
+            if ( null == (item = sobject.getLatestVersion( true ))) {
+                response.setResultCode(404);
+                response.setResultDescription("<Code>AccessDenied</Code><Message>Access Denied</Message>");
+                return response;
+            }
+            else {
+                // If the latest item carries no version (the null version), remove the entire object;
+                // otherwise there is nothing left to delete and we are done.
+                if (null == item.getVersion()) {
+                    // Cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl and SMeta objects.
+                    storedPath = item.getStoredPath();
+                    deleteMetaData( item.getId());
+                    deleteObjectAcls( "SObjectItem", item.getId());
+                    objectDao.remove(sobject.getId());
+                }
+            }
+        }
+
+        // Delete the file holding the object

<TRUNCATED>