Posted to commits@cloudstack.apache.org by ah...@apache.org on 2012/09/05 00:12:52 UTC
[28/50] [abbrv] Moved Awsapi (EC2/S3) from the Hibernate framework to the CloudStack Generic DAO Framework
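Every conversion in this patch follows the same shape: DAOs are injected once through ComponentLocator instead of being constructed per call, Hibernate entities become *VO classes, and PersistContext session handling is replaced by explicit Transaction demarcation against the new AWSAPI_DB. A minimal sketch of that shape, assuming the com.cloud.bridge DAO/VO types and the com.cloud.utils.db.Transaction API used in the hunks below (ExampleDaoUsage and renameHost are illustrative, not part of the patch):

    import com.cloud.bridge.model.MHostVO;
    import com.cloud.bridge.persist.dao.MHostDao;
    import com.cloud.bridge.persist.dao.MHostDaoImpl;
    import com.cloud.utils.component.ComponentLocator;
    import com.cloud.utils.db.Transaction;

    public class ExampleDaoUsage {
        // Injected singleton replaces "new MHostDao()" at each call site.
        protected final MHostDao mhostDao = ComponentLocator.inject(MHostDaoImpl.class);

        public MHostVO renameHost(String hostKey, String host) {
            // Explicit transaction replaces PersistContext.commitTransaction()/closeSession().
            Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
            try {
                txn.start();
                MHostVO mhost = mhostDao.getByHostKey(hostKey); // VO replaces the Hibernate MHost entity
                mhost.setHost(host);
                mhostDao.update(mhost.getId(), mhost);          // update(id, vo) replaces update(entity)
                txn.commit();
                return mhost;
            } finally {
                txn.close();
            }
        }
    }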
http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/39aa7d86/awsapi/src/com/cloud/bridge/service/controller/s3/ServiceProvider.java
----------------------------------------------------------------------
diff --git a/awsapi/src/com/cloud/bridge/service/controller/s3/ServiceProvider.java b/awsapi/src/com/cloud/bridge/service/controller/s3/ServiceProvider.java
index c8ca2bd..2f1791e 100644
--- a/awsapi/src/com/cloud/bridge/service/controller/s3/ServiceProvider.java
+++ b/awsapi/src/com/cloud/bridge/service/controller/s3/ServiceProvider.java
@@ -35,18 +35,19 @@ import java.util.TimerTask;
import org.apache.axis2.AxisFault;
import org.apache.log4j.Logger;
import org.apache.log4j.xml.DOMConfigurator;
-import org.hibernate.SessionException;
import com.amazon.s3.AmazonS3SkeletonInterface;
import com.amazon.ec2.AmazonEC2SkeletonInterface;
-import com.cloud.bridge.model.MHost;
+import com.cloud.bridge.model.MHostVO;
import com.cloud.bridge.model.SHost;
-import com.cloud.bridge.model.UserCredentials;
-import com.cloud.bridge.persist.PersistContext;
-import com.cloud.bridge.persist.PersistException;
+import com.cloud.bridge.model.SHostVO;
+import com.cloud.bridge.model.UserCredentialsVO;
import com.cloud.bridge.persist.dao.MHostDao;
+import com.cloud.bridge.persist.dao.MHostDaoImpl;
import com.cloud.bridge.persist.dao.SHostDao;
+import com.cloud.bridge.persist.dao.SHostDaoImpl;
import com.cloud.bridge.persist.dao.UserCredentialsDao;
+import com.cloud.bridge.persist.dao.UserCredentialsDaoImpl;
import com.cloud.bridge.service.EC2SoapServiceImpl;
import com.cloud.bridge.service.UserInfo;
import com.cloud.bridge.service.core.ec2.EC2Engine;
@@ -57,17 +58,23 @@ import com.cloud.bridge.util.ConfigurationHelper;
import com.cloud.bridge.util.DateHelper;
import com.cloud.bridge.util.NetHelper;
import com.cloud.bridge.util.OrderedPair;
+import com.cloud.utils.component.ComponentLocator;
+import com.cloud.utils.db.DB;
+import com.cloud.utils.db.Transaction;
public class ServiceProvider {
protected final static Logger logger = Logger.getLogger(ServiceProvider.class);
-
+ protected final MHostDao mhostDao = ComponentLocator.inject(MHostDaoImpl.class);
+ protected final SHostDao shostDao = ComponentLocator.inject(SHostDaoImpl.class);
+ protected final UserCredentialsDao ucDao = ComponentLocator.inject(UserCredentialsDaoImpl.class);
+
public final static long HEARTBEAT_INTERVAL = 10000;
private static ServiceProvider instance;
private Map<Class<?>, Object> serviceMap = new HashMap<Class<?>, Object>();
private Timer timer = new Timer();
- private MHost mhost;
+ private MHostVO mhost;
private Properties properties;
private boolean useSubDomain = false; // use DNS sub domain for bucket name
private String serviceEndpoint = null;
@@ -81,6 +88,8 @@ public class ServiceProvider {
protected ServiceProvider() throws IOException {
// register service implementation object
+ Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
+ txn.close();
engine = new S3Engine();
EC2_engine = new EC2Engine();
serviceMap.put(AmazonS3SkeletonInterface.class, new S3SerializableServiceImplementation(engine));
@@ -93,11 +102,9 @@ public class ServiceProvider {
try {
instance = new ServiceProvider();
instance.initialize();
- PersistContext.commitTransaction();
} catch(Throwable e) {
logger.error("Unexpected exception " + e.getMessage(), e);
} finally {
- PersistContext.closeSession();
}
}
return instance;
@@ -172,27 +179,34 @@ public class ServiceProvider {
return properties;
}
- public UserInfo getUserInfo(String accessKey)
- throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException {
+ public UserInfo getUserInfo(String accessKey) {
UserInfo info = new UserInfo();
-
- UserCredentialsDao credentialDao = new UserCredentialsDao();
- UserCredentials cloudKeys = credentialDao.getByAccessKey( accessKey );
- if ( null == cloudKeys ) {
- logger.debug( accessKey + " is not defined in the S3 service - call SetUserKeys" );
- return null;
- } else {
- info.setAccessKey( accessKey );
- info.setSecretKey( cloudKeys.getSecretKey());
- info.setCanonicalUserId(accessKey);
- info.setDescription( "S3 REST request" );
- return info;
+ Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
+ try {
+ txn.start();
+ UserCredentialsVO cloudKeys = ucDao.getByAccessKey( accessKey );
+ if ( null == cloudKeys ) {
+ logger.debug( accessKey + " is not defined in the S3 service - call SetUserKeys" );
+ return null;
+ } else {
+ info.setAccessKey( accessKey );
+ info.setSecretKey( cloudKeys.getSecretKey());
+ info.setCanonicalUserId(accessKey);
+ info.setDescription( "S3 REST request" );
+ return info;
+ }
+ }finally {
+ txn.commit();
}
}
-
+
+ @DB
protected void initialize() {
if(logger.isInfoEnabled())
logger.info("Initializing ServiceProvider...");
+
+ Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
+ //txn.close();
File file = ConfigurationHelper.findConfigurationFile("log4j-cloud.xml");
if(file != null) {
@@ -226,14 +240,16 @@ public class ServiceProvider {
setupHost(hostKey, host);
// we will commit and start a new transaction to allow host info to be flushed to DB
- PersistContext.flush();
+ //PersistContext.flush();
String localStorageRoot = properties.getProperty("storage.root");
if (localStorageRoot != null) setupLocalStorage(localStorageRoot);
multipartDir = properties.getProperty("storage.multipartDir");
-
+
+ Transaction txn1 = Transaction.open(Transaction.AWSAPI_DB);
timer.schedule(getHeartbeatTask(), HEARTBEAT_INTERVAL, HEARTBEAT_INTERVAL);
+ txn1.close();
if(logger.isInfoEnabled())
logger.info("ServiceProvider initialized");
@@ -264,45 +280,41 @@ public class ServiceProvider {
@Override
public void run() {
try {
- MHostDao mhostDao = new MHostDao();
mhost.setLastHeartbeatTime(DateHelper.currentGMTTime());
- mhostDao.update(mhost);
- PersistContext.commitTransaction();
+ mhostDao.updateHeartBeat(mhost);
} catch(Throwable e){
logger.error("Unexpected exception " + e.getMessage(), e);
} finally {
- PersistContext.closeSession();
}
}
};
}
private void setupHost(String hostKey, String host) {
- MHostDao mhostDao = new MHostDao();
- mhost = mhostDao.getByHostKey(hostKey);
+
+ mhost = mhostDao.getByHostKey(hostKey);
if(mhost == null) {
- mhost = new MHost();
+ mhost = new MHostVO();
mhost.setHostKey(hostKey);
mhost.setHost(host);
mhost.setLastHeartbeatTime(DateHelper.currentGMTTime());
- mhostDao.save(mhost);
+ mhost = mhostDao.persist(mhost);
} else {
mhost.setHost(host);
- mhostDao.update(mhost);
+ mhostDao.update(mhost.getId(), mhost);
}
}
private void setupLocalStorage(String storageRoot) {
- SHostDao shostDao = new SHostDao();
- SHost shost = shostDao.getLocalStorageHost(mhost.getId(), storageRoot);
+ SHostVO shost = shostDao.getLocalStorageHost(mhost.getId(), storageRoot);
if(shost == null) {
- shost = new SHost();
+ shost = new SHostVO();
shost.setMhost(mhost);
- mhost.getLocalSHosts().add(shost);
+ shost.setMhostid(mhost.getId());
shost.setHostType(SHost.STORAGE_HOST_TYPE_LOCAL);
shost.setHost(NetHelper.getHostName());
shost.setExportRoot(storageRoot);
- PersistContext.getSession().save(shost);
+ shostDao.persist(shost);
}
}
@@ -318,35 +330,36 @@ public class ServiceProvider {
return (T) Proxy.newProxyInstance(serviceObject.getClass().getClassLoader(),
new Class[] { serviceInterface },
new InvocationHandler() {
- public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
- Object result = null;
- try {
- result = method.invoke(serviceObject, args);
- PersistContext.commitTransaction();
- PersistContext.commitTransaction(true);
- } catch (PersistException e) {
- } catch (SessionException e) {
- } catch(Throwable e) {
- // Rethrow the exception to Axis:
- // Check if the exception is an AxisFault or a RuntimeException
- // enveloped AxisFault and if so, pass it on as such. Otherwise
- // log to help debugging and throw as is.
- if (e.getCause() != null && e.getCause() instanceof AxisFault)
- throw e.getCause();
- else if (e.getCause() != null && e.getCause().getCause() != null
- && e.getCause().getCause() instanceof AxisFault)
- throw e.getCause().getCause();
- else {
- logger.warn("Unhandled exception " + e.getMessage(), e);
- throw e;
- }
- } finally {
- PersistContext.closeSession();
- PersistContext.closeSession(true);
- }
- return result;
- }
- });
+ public Object invoke(Object proxy, Method method,
+ Object[] args) throws Throwable {
+ Object result = null;
+ try {
+ result = method.invoke(serviceObject, args);
+ } catch (Throwable e) {
+ // Rethrow the exception to Axis:
+ // Check if the exception is an AxisFault or a
+ // RuntimeException
+ // enveloped AxisFault and if so, pass it on as
+ // such. Otherwise
+ // log to help debugging and throw as is.
+ if (e.getCause() != null
+ && e.getCause() instanceof AxisFault)
+ throw e.getCause();
+ else if (e.getCause() != null
+ && e.getCause().getCause() != null
+ && e.getCause().getCause() instanceof AxisFault)
+ throw e.getCause().getCause();
+ else {
+ logger.warn(
+ "Unhandled exception " + e.getMessage(),
+ e);
+ throw e;
+ }
+ } finally {
+ }
+ return result;
+ }
+ });
}
@SuppressWarnings("unchecked")
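Note on the ServiceProvider hunks above: with PersistContext gone, the dynamic service proxy no longer commits or closes a session around every invocation, and the heartbeat task delegates connection handling to mhostDao.updateHeartBeat(). Where a method does need explicit demarcation, the hand-rolled equivalent (touchHeartbeat is hypothetical, reusing only calls visible in this patch) would look like:

    // Hypothetical helper inside ServiceProvider; the actual patch lets
    // MHostDaoImpl.updateHeartBeat() manage the connection itself.
    private void touchHeartbeat(MHostVO mhost) {
        Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
        try {
            txn.start();
            mhost.setLastHeartbeatTime(DateHelper.currentGMTTime());
            mhostDao.updateHeartBeat(mhost);
            txn.commit();
        } finally {
            txn.close();
        }
    }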
http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/39aa7d86/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java
----------------------------------------------------------------------
diff --git a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java
index 2d40381..eb25249 100644
--- a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java
+++ b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java
@@ -22,6 +22,9 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.security.SignatureException;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
import java.sql.SQLException;
import java.text.ParseException;
import java.util.ArrayList;
@@ -32,12 +35,15 @@ import java.util.UUID;
import javax.xml.parsers.ParserConfigurationException;
import org.apache.log4j.Logger;
-import org.hibernate.ejb.criteria.expression.UnaryArithmeticOperation.Operation;
import org.xml.sax.SAXException;
-import com.cloud.bridge.persist.dao.CloudStackSvcOfferingDao;
+import com.cloud.bridge.model.CloudStackServiceOfferingVO;
import com.cloud.bridge.persist.dao.CloudStackAccountDao;
-import com.cloud.bridge.persist.dao.OfferingDao;
+import com.cloud.bridge.persist.dao.CloudStackAccountDaoImpl;
+import com.cloud.bridge.persist.dao.CloudStackSvcOfferingDao;
+import com.cloud.bridge.persist.dao.CloudStackSvcOfferingDaoImpl;
+import com.cloud.bridge.persist.dao.OfferingDaoImpl;
+import com.cloud.bridge.persist.dao.SObjectItemDaoImpl;
import com.cloud.bridge.service.UserContext;
import com.cloud.bridge.service.core.ec2.EC2ImageAttributes.ImageAttribute;
@@ -70,6 +76,8 @@ import com.cloud.stack.models.CloudStackUser;
import com.cloud.stack.models.CloudStackUserVm;
import com.cloud.stack.models.CloudStackVolume;
import com.cloud.stack.models.CloudStackZone;
+import com.cloud.utils.component.ComponentLocator;
+import com.cloud.utils.db.Transaction;
/**
* EC2Engine processes the EC2 commands and calls their CloudStack analogs
@@ -80,6 +88,9 @@ public class EC2Engine {
String managementServer = null;
String cloudAPIPort = null;
+ protected final CloudStackSvcOfferingDao scvoDao = ComponentLocator.inject(CloudStackSvcOfferingDaoImpl.class);
+ protected final OfferingDaoImpl ofDao = ComponentLocator.inject(OfferingDaoImpl.class);
+ CloudStackAccountDao accDao = ComponentLocator.inject(CloudStackAccountDaoImpl.class);
private CloudStackApi _eng = null;
private CloudStackAccount currentAccount = null;
@@ -110,7 +121,6 @@ public class EC2Engine {
managementServer = EC2Prop.getProperty( "managementServer" );
cloudAPIPort = EC2Prop.getProperty( "cloudAPIPort", null );
- OfferingDao ofDao = new OfferingDao();
try {
if(ofDao.getOfferingCount() == 0) {
String strValue = EC2Prop.getProperty("m1.small.serviceId");
@@ -1469,7 +1479,7 @@ public class EC2Engine {
if(request.getInstanceType() != null){
instanceType = request.getInstanceType();
}
- CloudStackServiceOffering svcOffering = getCSServiceOfferingId(instanceType);
+ CloudStackServiceOfferingVO svcOffering = getCSServiceOfferingId(instanceType);
if(svcOffering == null){
logger.info("No ServiceOffering found to be defined by name, please contact the administrator "+instanceType );
throw new EC2ServiceException(ClientError.Unsupported, "instanceType: [" + instanceType + "] not found!");
@@ -1779,12 +1789,11 @@ public class EC2Engine {
*
*/
- private CloudStackServiceOffering getCSServiceOfferingId(String instanceType){
+ private CloudStackServiceOfferingVO getCSServiceOfferingId(String instanceType){
try {
- if (null == instanceType) instanceType = "m1.small";
+ if (null == instanceType) instanceType = "m1.small";
- CloudStackSvcOfferingDao dao = new CloudStackSvcOfferingDao();
- return dao.getSvcOfferingByName(instanceType);
+ return scvoDao.getSvcOfferingByName(instanceType);
} catch(Exception e) {
logger.error( "Error while retrieving ServiceOffering information by name - ", e);
@@ -1802,8 +1811,8 @@ public class EC2Engine {
*/
private String serviceOfferingIdToInstanceType( String serviceOfferingId ){
try{
- CloudStackSvcOfferingDao dao = new CloudStackSvcOfferingDao();
- CloudStackServiceOffering offering = dao.getSvcOfferingById(serviceOfferingId);
+
+ CloudStackServiceOfferingVO offering = scvoDao.getSvcOfferingById(serviceOfferingId); //dao.getSvcOfferingById(serviceOfferingId);
if(offering == null){
logger.warn( "No instanceType match for serviceOfferingId: [" + serviceOfferingId + "]" );
return "m1.small";
@@ -2260,9 +2269,7 @@ public class EC2Engine {
*/
private String getDefaultZoneId(String accountId) {
try {
- CloudStackAccountDao dao = new CloudStackAccountDao();
- CloudStackAccount account = dao.getdefaultZoneId(accountId);
- return account.getDefaultZoneId();
+ return accDao.getDefaultZoneId(accountId);
} catch(Exception e) {
logger.error( "Error while retrieving Account information by id - ", e);
throw new EC2ServiceException(ServerError.InternalError, e.getMessage());
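The EC2Engine hunks apply the same substitution: the injected scvoDao/ofDao/accDao singletons replace per-call `new ...Dao()` construction, and CloudStackServiceOfferingVO replaces the Hibernate entity. A condensed sketch of the converted instance-type lookup (offeringFor is a hypothetical wrapper around the exact calls shown above):

    // Hypothetical wrapper; scvoDao is the injected CloudStackSvcOfferingDaoImpl above.
    private CloudStackServiceOfferingVO offeringFor(String instanceType) {
        if (instanceType == null)
            instanceType = "m1.small";                      // default type, as in getCSServiceOfferingId()
        return scvoDao.getSvcOfferingByName(instanceType);  // returns null when no offering is defined
    }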
http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/39aa7d86/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java
----------------------------------------------------------------------
diff --git a/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java b/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java
index e8b73a4..a117d13 100644
--- a/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java
+++ b/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java
@@ -36,31 +36,40 @@ import java.util.UUID;
import javax.servlet.http.HttpServletResponse;
import org.apache.log4j.Logger;
-import org.hibernate.LockMode;
-import org.hibernate.Session;
import org.json.simple.parser.ParseException;
import com.cloud.bridge.io.S3FileSystemBucketAdapter;
-import com.cloud.bridge.model.MHost;
-import com.cloud.bridge.model.MHostMount;
+import com.cloud.bridge.model.BucketPolicyVO;
+import com.cloud.bridge.model.MHostMountVO;
+import com.cloud.bridge.model.MHostVO;
import com.cloud.bridge.model.SAcl;
+import com.cloud.bridge.model.SAclVO;
import com.cloud.bridge.model.SBucket;
+import com.cloud.bridge.model.SBucketVO;
import com.cloud.bridge.model.SHost;
-import com.cloud.bridge.model.SMeta;
-import com.cloud.bridge.model.SObject;
-import com.cloud.bridge.model.SObjectItem;
-import com.cloud.bridge.persist.PersistContext;
+import com.cloud.bridge.model.SHostVO;
+import com.cloud.bridge.model.SMetaVO;
+import com.cloud.bridge.model.SObjectVO;
+import com.cloud.bridge.model.SObjectItemVO;
import com.cloud.bridge.persist.dao.BucketPolicyDao;
+import com.cloud.bridge.persist.dao.BucketPolicyDaoImpl;
import com.cloud.bridge.persist.dao.MHostDao;
+import com.cloud.bridge.persist.dao.MHostDaoImpl;
import com.cloud.bridge.persist.dao.MHostMountDao;
+import com.cloud.bridge.persist.dao.MHostMountDaoImpl;
import com.cloud.bridge.persist.dao.MultipartLoadDao;
import com.cloud.bridge.persist.dao.SAclDao;
+import com.cloud.bridge.persist.dao.SAclDaoImpl;
import com.cloud.bridge.persist.dao.SBucketDao;
+import com.cloud.bridge.persist.dao.SBucketDaoImpl;
import com.cloud.bridge.persist.dao.SHostDao;
+import com.cloud.bridge.persist.dao.SHostDaoImpl;
import com.cloud.bridge.persist.dao.SMetaDao;
+import com.cloud.bridge.persist.dao.SMetaDaoImpl;
import com.cloud.bridge.persist.dao.SObjectDao;
+import com.cloud.bridge.persist.dao.SObjectDaoImpl;
import com.cloud.bridge.persist.dao.SObjectItemDao;
-import com.cloud.bridge.service.S3Constants;
+import com.cloud.bridge.persist.dao.SObjectItemDaoImpl;
import com.cloud.bridge.service.UserContext;
import com.cloud.bridge.service.controller.s3.ServiceProvider;
import com.cloud.bridge.service.core.s3.S3BucketPolicy.PolicyAccess;
@@ -75,19 +84,31 @@ import com.cloud.bridge.service.exception.ObjectAlreadyExistsException;
import com.cloud.bridge.service.exception.OutOfServiceException;
import com.cloud.bridge.service.exception.OutOfStorageException;
import com.cloud.bridge.service.exception.PermissionDeniedException;
-import com.cloud.bridge.service.exception.UnsupportedException;
import com.cloud.bridge.util.DateHelper;
import com.cloud.bridge.util.PolicyParser;
import com.cloud.bridge.util.StringHelper;
import com.cloud.bridge.util.OrderedPair;
import com.cloud.bridge.util.Triple;
+import com.cloud.utils.component.ComponentLocator;
+import com.cloud.utils.db.DB;
+import com.cloud.utils.db.Transaction;
/**
* The CRUD control actions to be invoked from S3BucketAction or S3ObjectAction.
*/
public class S3Engine {
protected final static Logger logger = Logger.getLogger(S3Engine.class);
-
+ protected final SHostDao shostDao = ComponentLocator.inject(SHostDaoImpl.class);
+ protected final MHostDao mhostDao = ComponentLocator.inject(MHostDaoImpl.class);
+ protected final static BucketPolicyDao bPolicy = ComponentLocator.inject(BucketPolicyDaoImpl.class);
+ protected final BucketPolicyDao bPolicyDao = ComponentLocator.inject(BucketPolicyDaoImpl.class);
+ protected final SBucketDao bucketDao = ComponentLocator.inject(SBucketDaoImpl.class);
+ protected final SAclDao aclDao = ComponentLocator.inject(SAclDaoImpl.class);
+ protected final static SAclDao saclDao = ComponentLocator.inject(SAclDaoImpl.class);
+ protected final SObjectDao objectDao = ComponentLocator.inject(SObjectDaoImpl.class);
+ protected final SObjectItemDao itemDao = ComponentLocator.inject(SObjectItemDaoImpl.class);
+ protected final SMetaDao metaDao = ComponentLocator.inject(SMetaDaoImpl.class);
+ protected final MHostMountDao mountDao = ComponentLocator.inject(MHostMountDaoImpl.class);
private final int LOCK_ACQUIRING_TIMEOUT_SECONDS = 10; // ten seconds
private final Map<Integer, S3BucketAdapter> bucketAdapters = new HashMap<Integer, S3BucketAdapter>();
@@ -169,61 +190,53 @@ public class S3Engine {
String cannedAccessPolicy = request.getCannedAccess();
String bucketName = request.getBucketName();
response.setBucketName( bucketName );
-
+ Transaction txn= null;
verifyBucketName( bucketName, false );
S3PolicyContext context = new S3PolicyContext( PolicyActions.CreateBucket, bucketName );
context.setEvalParam( ConditionKeys.Acl, cannedAccessPolicy );
if (PolicyAccess.DENY == verifyPolicy( context ))
throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" );
-
- if (PersistContext.acquireNamedLock("bucket.creation", LOCK_ACQUIRING_TIMEOUT_SECONDS))
- {
- OrderedPair<SHost, String> shost_storagelocation_pair = null;
- boolean success = false;
- try {
- SBucketDao bucketDao = new SBucketDao();
- SAclDao aclDao = new SAclDao();
-
- if (bucketDao.getByName(request.getBucketName()) != null)
- throw new ObjectAlreadyExistsException("Bucket already exists");
-
- shost_storagelocation_pair = allocBucketStorageHost(request.getBucketName(), null);
-
- SBucket sbucket = new SBucket();
- sbucket.setName(request.getBucketName());
- sbucket.setCreateTime(DateHelper.currentGMTTime());
- sbucket.setOwnerCanonicalId( UserContext.current().getCanonicalUserId());
- sbucket.setShost(shost_storagelocation_pair.getFirst());
- shost_storagelocation_pair.getFirst().getBuckets().add(sbucket);
- bucketDao.save(sbucket);
-
- S3AccessControlList acl = request.getAcl();
-
- if ( null != cannedAccessPolicy )
- setCannedAccessControls( cannedAccessPolicy, "SBucket", sbucket.getId(), sbucket );
- else if (null != acl)
- aclDao.save( "SBucket", sbucket.getId(), acl );
- else setSingleAcl( "SBucket", sbucket.getId(), SAcl.PERMISSION_FULL );
-
- // explicitly commit the transaction
- PersistContext.commitTransaction();
- success = true;
- }
- finally
- {
- if(!success && shost_storagelocation_pair != null) {
- S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(shost_storagelocation_pair.getFirst());
- bucketAdapter.deleteContainer(shost_storagelocation_pair.getSecond(), request.getBucketName());
- }
- PersistContext.rollbackTransaction(false);
- PersistContext.releaseNamedLock("bucket.creation");
- }
-
- } else {
- throw new OutOfServiceException("Unable to acquire synchronization lock");
- }
-
+ OrderedPair<SHostVO, String> shost_storagelocation_pair = null;
+ boolean success = false;
+ try {
+ txn = Transaction.open(Transaction.AWSAPI_DB);
+
+ if (bucketDao.getByName(request.getBucketName()) != null)
+ throw new ObjectAlreadyExistsException("Bucket already exists");
+
+ shost_storagelocation_pair = allocBucketStorageHost(
+ request.getBucketName(), null);
+ SBucketVO sbucket = new SBucketVO(request.getBucketName(),
+ DateHelper.currentGMTTime(), UserContext.current()
+ .getCanonicalUserId(),
+ shost_storagelocation_pair.getFirst());
+
+ shost_storagelocation_pair.getFirst().getBuckets().add(sbucket);
+ // bucketDao.save(sbucket);
+ sbucket = bucketDao.persist(sbucket);
+ S3AccessControlList acl = request.getAcl();
+
+ if (null != cannedAccessPolicy)
+ setCannedAccessControls(cannedAccessPolicy, "SBucket",
+ sbucket.getId(), sbucket);
+ else if (null != acl)
+ aclDao.save("SBucket", sbucket.getId(), acl);
+ else
+ setSingleAcl("SBucket", sbucket.getId(), SAcl.PERMISSION_FULL);
+
+ success = true;
+ } finally {
+ if (!success && shost_storagelocation_pair != null) {
+ S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(shost_storagelocation_pair
+ .getFirst());
+ bucketAdapter.deleteContainer(
+ shost_storagelocation_pair.getSecond(),
+ request.getBucketName());
+ }
+ txn.rollback();
+ txn.close();
+ }
return response;
}
@@ -234,37 +247,43 @@ public class S3Engine {
public S3Response handleRequest( S3DeleteBucketRequest request )
{
- S3Response response = new S3Response();
- SBucketDao bucketDao = new SBucketDao();
+ S3Response response = new S3Response();
+ //
String bucketName = request.getBucketName();
- SBucket sbucket = bucketDao.getByName( bucketName );
+ SBucketVO sbucket = bucketDao.getByName(bucketName);
+ Transaction txn = null;
if ( sbucket != null )
- {
- S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteBucket, bucketName );
- switch( verifyPolicy( context ))
- {
- case ALLOW:
- // The bucket policy can give users permission to delete a bucket whereas ACLs cannot
- break;
-
- case DENY:
- throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" );
-
- case DEFAULT_DENY:
- default:
- // Irrespective of what the ACLs say, only the owner can delete a bucket
- String client = UserContext.current().getCanonicalUserId();
- if (!client.equals( sbucket.getOwnerCanonicalId())) {
- throw new PermissionDeniedException( "Access Denied - only the owner can delete a bucket" );
- }
- break;
- }
+ {
+ txn = Transaction.open(Transaction.AWSAPI_DB);
+ txn.start();
+ S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteBucket, bucketName );
+ switch( verifyPolicy( context ))
+ {
+ case ALLOW:
+ // The bucket policy can give users permission to delete a
+ // bucket whereas ACLs cannot
+ break;
-
+ case DENY:
+ throw new PermissionDeniedException(
+ "Access Denied - bucket policy DENY result");
+
+ case DEFAULT_DENY:
+ default:
+ // Irrespective of what the ACLs say, only the owner can delete
+ // a bucket
+ String client = UserContext.current().getCanonicalUserId();
+ if (!client.equals(sbucket.getOwnerCanonicalId())) {
+ throw new PermissionDeniedException(
+ "Access Denied - only the owner can delete a bucket");
+ }
+ break;
+ }
+
// Delete the file from its storage location
- OrderedPair<SHost, String> host_storagelocation_pair = getBucketStorageHost(sbucket);
- S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
+ OrderedPair<SHostVO, String> host_storagelocation_pair = getBucketStorageHost(sbucket);
+ S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
bucketAdapter.deleteContainer(host_storagelocation_pair.getSecond(), request.getBucketName());
// Cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl, SMeta and policy objects.
@@ -272,35 +291,37 @@ public class S3Engine {
// (1)Get all the objects in the bucket,
// (2)then all the items in each object,
// (3) then all meta & acl data for each item
- Set<SObject> objectsInBucket = sbucket.getObjectsInBucket();
- Iterator<SObject> it = objectsInBucket.iterator();
+ Set<SObjectVO> objectsInBucket = sbucket.getObjectsInBucket();
+ Iterator<SObjectVO> it = objectsInBucket.iterator();
while( it.hasNext())
{
- SObject oneObject = (SObject)it.next();
- Set<SObjectItem> itemsInObject = oneObject.getItems();
- Iterator<SObjectItem> is = itemsInObject.iterator();
+ SObjectVO oneObject = (SObjectVO)it.next();
+ Set<SObjectItemVO> itemsInObject = oneObject.getItems();
+ Iterator<SObjectItemVO> is = itemsInObject.iterator();
while( is.hasNext())
{
- SObjectItem oneItem = (SObjectItem)is.next();
- deleteMetaData( oneItem.getId());
- deleteObjectAcls( "SObjectItem", oneItem.getId());
+ SObjectItemVO oneItem = (SObjectItemVO) is.next();
+ deleteMetaData(oneItem.getId());
+ deleteObjectAcls("SObjectItem", oneItem.getId());
}
}
// Delete all the policy state associated with the bucket
try {
- ServiceProvider.getInstance().deleteBucketPolicy( bucketName );
- BucketPolicyDao policyDao = new BucketPolicyDao();
- policyDao.deletePolicy( bucketName );
- }
- catch( Exception e ) {
- logger.error("When deleting a bucket we must try to delete its policy: ", e);
+ ServiceProvider.getInstance().deleteBucketPolicy(bucketName);
+ bPolicyDao.deletePolicy(bucketName);
+ } catch( Exception e ) {
+ logger.error("When deleting a bucket we must try to delete its policy: ", e);
}
deleteBucketAcls( sbucket.getId());
- bucketDao.delete( sbucket );
+ bucketDao.remove(sbucket.getId());
+
+
response.setResultCode(204);
response.setResultDescription("OK");
+
+ txn.close();
}
else
{ response.setResultCode(404);
@@ -327,10 +348,10 @@ public class S3Engine {
int maxKeys = request.getMaxKeys();
if(maxKeys <= 0) maxKeys = 1000;
- SBucketDao bucketDao = new SBucketDao();
- SBucket sbucket = bucketDao.getByName(bucketName);
+ //
+ SBucketVO sbucket = bucketDao.getByName(bucketName);
if (sbucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
-
+
PolicyActions action = (includeVersions ? PolicyActions.ListBucketVersions : PolicyActions.ListBucket);
S3PolicyContext context = new S3PolicyContext( action, bucketName );
context.setEvalParam( ConditionKeys.MaxKeys, new String( "" + maxKeys ));
@@ -340,23 +361,23 @@ public class S3Engine {
// When executing the query, request one more item so that we know how to set the isTruncated flag
- SObjectDao sobjectDao = new SObjectDao();
- List<SObject> l = null;
+ List<SObjectVO> l = null;
if ( includeVersions )
- l = sobjectDao.listAllBucketObjects( sbucket, prefix, marker, maxKeys+1 );
- else l = sobjectDao.listBucketObjects( sbucket, prefix, marker, maxKeys+1 );
+ l = objectDao.listAllBucketObjects( sbucket, prefix, marker, maxKeys+1 );
+ else l = objectDao.listBucketObjects( sbucket, prefix, marker, maxKeys+1 );
response.setBucketName(bucketName);
response.setMarker(marker);
response.setMaxKeys(maxKeys);
response.setPrefix(prefix);
response.setDelimiter(delimiter);
- response.setTruncated(l.size() > maxKeys);
- if(l.size() > maxKeys) {
- response.setNextMarker(l.get(l.size() - 1).getNameKey());
+ if (null != l ) {
+ response.setTruncated(l.size() > maxKeys);
+ if(l.size() > maxKeys) {
+ response.setNextMarker(l.get(l.size() - 1).getNameKey());
+ }
}
-
// If needed - SOAP response does not support versioning
response.setContents( composeListBucketContentEntries(l, prefix, delimiter, maxKeys, includeVersions, request.getVersionIdMarker()));
response.setCommonPrefixes( composeListBucketPrefixEntries(l, prefix, delimiter, maxKeys));
@@ -372,10 +393,10 @@ public class S3Engine {
public S3ListAllMyBucketsResponse handleRequest(S3ListAllMyBucketsRequest request)
{
S3ListAllMyBucketsResponse response = new S3ListAllMyBucketsResponse();
- SBucketDao bucketDao = new SBucketDao();
+
// "...you can only list buckets for which you are the owner."
- List<SBucket> buckets = bucketDao.listBuckets(UserContext.current().getCanonicalUserId());
+ List<SBucketVO> buckets = bucketDao.listBuckets(UserContext.current().getCanonicalUserId());
S3CanonicalUser owner = new S3CanonicalUser();
owner.setID(UserContext.current().getCanonicalUserId());
owner.setDisplayName("");
@@ -385,7 +406,7 @@ public class S3Engine {
{
S3ListAllMyBucketsEntry[] entries = new S3ListAllMyBucketsEntry[buckets.size()];
int i = 0;
- for(SBucket bucket : buckets)
+ for(SBucketVO bucket : buckets)
{
String bucketName = bucket.getName();
S3PolicyContext context = new S3PolicyContext( PolicyActions.ListAllMyBuckets, bucketName );
@@ -409,9 +430,8 @@ public class S3Engine {
public S3Response handleRequest(S3SetBucketAccessControlPolicyRequest request)
{
S3Response response = new S3Response();
- SBucketDao bucketDao = new SBucketDao();
String bucketName = request.getBucketName();
- SBucket sbucket = bucketDao.getByName(bucketName);
+ SBucketVO sbucket = bucketDao.getByName(bucketName);
if(sbucket == null) {
response.setResultCode(404);
response.setResultDescription("Bucket does not exist");
@@ -421,7 +441,6 @@ public class S3Engine {
S3PolicyContext context = new S3PolicyContext( PolicyActions.PutBucketAcl, bucketName );
verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE_ACL );
- SAclDao aclDao = new SAclDao();
aclDao.save("SBucket", sbucket.getId(), request.getAcl());
response.setResultCode(200);
@@ -438,9 +457,8 @@ public class S3Engine {
public S3AccessControlPolicy handleRequest(S3GetBucketAccessControlPolicyRequest request)
{
S3AccessControlPolicy policy = new S3AccessControlPolicy();
- SBucketDao bucketDao = new SBucketDao();
String bucketName = request.getBucketName();
- SBucket sbucket = bucketDao.getByName( bucketName );
+ SBucketVO sbucket = bucketDao.getByName( bucketName );
if (sbucket == null)
throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
@@ -452,8 +470,8 @@ public class S3Engine {
S3PolicyContext context = new S3PolicyContext( PolicyActions.GetBucketAcl, bucketName );
verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_READ_ACL );
- SAclDao aclDao = new SAclDao();
- List<SAcl> grants = aclDao.listGrants("SBucket", sbucket.getId());
+
+ List<SAclVO> grants = aclDao.listGrants("SBucket", sbucket.getId());
policy.setGrants(S3Grant.toGrants(grants));
return policy;
}
@@ -464,61 +482,69 @@ public class S3Engine {
* Called from S3ObjectAction when executing at completion or when aborting multipart upload.
* @param bucketName
* @param uploadId
* @param verifyPermission - If false then do not check the user's permission to clean up the state
*/
- public int freeUploadParts(String bucketName, int uploadId, boolean verifyPermission)
- {
- // -> we need to look up the final bucket to figure out which mount point to use to save the part in
- SBucketDao bucketDao = new SBucketDao();
- SBucket bucket = bucketDao.getByName(bucketName);
- if (bucket == null) {
- logger.error( "initiateMultipartUpload failed since " + bucketName + " does not exist" );
- return 404;
- }
+ public int freeUploadParts(String bucketName, int uploadId, boolean verifyPermission) {
- OrderedPair<SHost, String> host_storagelocation_pair = getBucketStorageHost(bucket);
- S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
+ // -> we need to look up the final bucket to figure out which mount
+ // point to use to save the part in
+ // SBucketDao bucketDao = new SBucketDao();
+ SBucketVO bucket = bucketDao.getByName(bucketName);
+ if (bucket == null) {
+ logger.error("initiateMultipartUpload failed since " + bucketName
+ + " does not exist");
+ return 404;
+ }
- try {
- MultipartLoadDao uploadDao = new MultipartLoadDao();
- OrderedPair<String,String> exists = uploadDao.multipartExits( uploadId );
- if (null == exists) {
- logger.error( "initiateMultipartUpload failed since multipart upload" + uploadId + " does not exist" );
- return 404;
- }
-
- // -> the multipart initiator or bucket owner can do this action by default
- if (verifyPermission)
- {
- String initiator = uploadDao.getInitiator( uploadId );
- if (null == initiator || !initiator.equals( UserContext.current().getAccessKey()))
- {
- // -> write permission on a bucket allows a PutObject / DeleteObject action on any object in the bucket
- S3PolicyContext context = new S3PolicyContext( PolicyActions.AbortMultipartUpload, bucketName );
- context.setKeyName( exists.getSecond());
- verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE );
- }
- }
-
- // -> first get a list of all the uploaded files and delete one by one
- S3MultipartPart[] parts = uploadDao.getParts( uploadId, 10000, 0 );
- for( int i=0; i < parts.length; i++ )
- {
- bucketAdapter.deleteObject( host_storagelocation_pair.getSecond(), ServiceProvider.getInstance().getMultipartDir(), parts[i].getPath());
- }
-
- uploadDao.deleteUpload( uploadId );
- return 204;
+ OrderedPair<SHostVO, String> host_storagelocation_pair = getBucketStorageHost(bucket);
+ S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
+ try {
+ MultipartLoadDao uploadDao = new MultipartLoadDao();
+ OrderedPair<String, String> exists = uploadDao.multipartExits(uploadId);
+
+ if (null == exists) {
+ logger.error("initiateMultipartUpload failed since multipart upload"
+ + uploadId + " does not exist");
+ return 404;
+ }
+
+ // -> the multipart initiator or bucket owner can do this action by
+ // default
+ if (verifyPermission) {
+ String initiator = uploadDao.getInitiator(uploadId);
+ if (null == initiator
+ || !initiator.equals(UserContext.current()
+ .getAccessKey())) {
+ // -> write permission on a bucket allows a PutObject /
+ // DeleteObject action on any object in the bucket
+ S3PolicyContext context = new S3PolicyContext(
+ PolicyActions.AbortMultipartUpload, bucketName);
+ context.setKeyName(exists.getSecond());
+ verifyAccess(context, "SBucket", bucket.getId(),
+ SAcl.PERMISSION_WRITE);
}
- catch( PermissionDeniedException e ) {
- logger.error("freeUploadParts failed due to [" + e.getMessage() + "]", e);
- throw e;
- }
- catch (Exception e) {
- logger.error("freeUploadParts failed due to [" + e.getMessage() + "]", e);
- return 500;
- }
+ }
+
+ // -> first get a list of all the uploaded files and delete one by
+ // one
+ S3MultipartPart[] parts = uploadDao.getParts(uploadId, 10000, 0);
+ for (int i = 0; i < parts.length; i++) {
+ bucketAdapter.deleteObject(host_storagelocation_pair.getSecond(), ServiceProvider.getInstance()
+ .getMultipartDir(), parts[i].getPath());
+ }
+ uploadDao.deleteUpload(uploadId);
+ return 204;
+
+ } catch (PermissionDeniedException e) {
+ logger.error("freeUploadParts failed due to [" + e.getMessage()
+ + "]", e);
+ throw e;
+ } catch (Exception e) {
+ logger.error("freeUploadParts failed due to [" + e.getMessage()
+ + "]", e);
+ return 500;
+ }
}
/**
@@ -534,8 +560,7 @@ public class S3Engine {
String nameKey = request.getKey();
// -> does the bucket exist and can we write to it?
- SBucketDao bucketDao = new SBucketDao();
- SBucket bucket = bucketDao.getByName(bucketName);
+ SBucketVO bucket = bucketDao.getByName(bucketName);
if (bucket == null) {
logger.error( "initiateMultipartUpload failed since " + bucketName + " does not exist" );
response.setResultCode(404);
@@ -577,8 +602,8 @@ public class S3Engine {
String bucketName = request.getBucketName();
// -> we need to look up the final bucket to figure out which mount point to use to save the part in
- SBucketDao bucketDao = new SBucketDao();
- SBucket bucket = bucketDao.getByName(bucketName);
+ //SBucketDao bucketDao = new SBucketDao();
+ SBucketVO bucket = bucketDao.getByName(bucketName);
if (bucket == null) {
logger.error( "saveUploadedPart failed since " + bucketName + " does not exist" );
response.setResultCode(404);
@@ -587,20 +612,19 @@ public class S3Engine {
context.setKeyName( request.getKey());
verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE );
- OrderedPair<SHost, String> host_storagelocation_pair = getBucketStorageHost(bucket);
+ OrderedPair<SHostVO, String> host_storagelocation_pair = getBucketStorageHost(bucket);
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
String itemFileName = new String( uploadId + "-" + partNumber );
InputStream is = null;
try {
- is = request.getDataInputStream();
- String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), ServiceProvider.getInstance().getMultipartDir(), itemFileName);
- response.setETag(md5Checksum);
-
- MultipartLoadDao uploadDao = new MultipartLoadDao();
- uploadDao.savePart( uploadId, partNumber, md5Checksum, itemFileName, (int)request.getContentLength());
- response.setResultCode(200);
-
+ is = request.getDataInputStream();
+ String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), ServiceProvider.getInstance().getMultipartDir(), itemFileName);
+ response.setETag(md5Checksum);
+ MultipartLoadDao uploadDao = new MultipartLoadDao();
+ uploadDao.savePart(uploadId, partNumber, md5Checksum, itemFileName,(int) request.getContentLength());
+ response.setResultCode(200);
+
} catch (IOException e) {
logger.error("UploadPart failed due to " + e.getMessage(), e);
response.setResultCode(500);
@@ -637,70 +661,71 @@ public class S3Engine {
public S3PutObjectInlineResponse concatentateMultipartUploads(HttpServletResponse httpResp, S3PutObjectInlineRequest request, S3MultipartPart[] parts, OutputStream outputStream) throws IOException
{
// [A] Set up and initial error checking
- S3PutObjectInlineResponse response = new S3PutObjectInlineResponse();
- String bucketName = request.getBucketName();
- String key = request.getKey();
- S3MetaDataEntry[] meta = request.getMetaEntries();
-
- SBucketDao bucketDao = new SBucketDao();
- SBucket bucket = bucketDao.getByName(bucketName);
- if (bucket == null) {
- logger.error( "completeMultipartUpload( failed since " + bucketName + " does not exist" );
- response.setResultCode(404);
- }
+ S3PutObjectInlineResponse response = new S3PutObjectInlineResponse();
+ String bucketName = request.getBucketName();
+ String key = request.getKey();
+ S3MetaDataEntry[] meta = request.getMetaEntries();
- // [B] Now we need to create the final re-assembled object
- // -> the allocObjectItem checks for the bucket policy PutObject permissions
- OrderedPair<SObject, SObjectItem> object_objectitem_pair = allocObjectItem(bucket, key, meta, null, request.getCannedAccess());
- OrderedPair<SHost, String> host_storagelocation_pair = getBucketStorageHost(bucket);
-
- S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
- String itemFileName = object_objectitem_pair.getSecond().getStoredPath();
-
- // -> Amazon defines that we must return a 200 response immediately to the client, but
- // -> we don't know the version header until we hit here
- httpResp.setStatus(200);
- httpResp.setContentType("text/xml; charset=UTF-8");
- String version = object_objectitem_pair.getSecond().getVersion();
- if (null != version) httpResp.addHeader( "x-amz-version-id", version );
- httpResp.flushBuffer();
-
+ SBucketVO bucket = bucketDao.getByName(bucketName);
+ if (bucket == null) {
+ logger.error("completeMultipartUpload( failed since " + bucketName
+ + " does not exist");
+ response.setResultCode(404);
+ }
- // [C] Re-assemble the object from its uploaded file parts
- try {
- // explicit transaction control to avoid holding transaction during long file concatenation process
- PersistContext.commitTransaction();
-
- OrderedPair<String, Long> result = bucketAdapter.
- concatentateObjects
- ( host_storagelocation_pair.getSecond(),
- bucket.getName(),
- itemFileName,
- ServiceProvider.getInstance().getMultipartDir(),
- parts,
- outputStream );
- response.setETag(result.getFirst());
- response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime()));
-
- SObjectItemDao itemDao = new SObjectItemDao();
- SObjectItem item = itemDao.get( object_objectitem_pair.getSecond().getId());
- item.setMd5(result.getFirst());
- item.setStoredSize(result.getSecond().longValue());
- response.setResultCode(200);
+ // [B] Now we need to create the final re-assembled object
+ // -> the allocObjectItem checks for the bucket policy PutObject
+ // permissions
+ OrderedPair<SObjectVO, SObjectItemVO> object_objectitem_pair = allocObjectItem(
+ bucket, key, meta, null, request.getCannedAccess());
+ OrderedPair<SHostVO, String> host_storagelocation_pair = getBucketStorageHost(bucket);
- PersistContext.getSession().save(item);
- }
- catch (Exception e) {
- logger.error("completeMultipartUpload failed due to " + e.getMessage(), e);
- }
- return response;
+ S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair
+ .getFirst());
+ String itemFileName = object_objectitem_pair.getSecond()
+ .getStoredPath();
+
+ // -> Amazon defines that we must return a 200 response immediately to
+ // the client, but
+ // -> we don't know the version header until we hit here
+ httpResp.setStatus(200);
+ httpResp.setContentType("text/xml; charset=UTF-8");
+ String version = object_objectitem_pair.getSecond().getVersion();
+ if (null != version)
+ httpResp.addHeader("x-amz-version-id", version);
+ httpResp.flushBuffer();
+ Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
+ // [C] Re-assemble the object from its uploaded file parts
+ try {
+ // explicit transaction control to avoid holding transaction during
+ // long file concatenation process
+ txn.start();
+ OrderedPair<String, Long> result = bucketAdapter
+ .concatentateObjects(host_storagelocation_pair.getSecond(),
+ bucket.getName(), itemFileName, ServiceProvider
+ .getInstance().getMultipartDir(), parts,
+ outputStream);
+
+ response.setETag(result.getFirst());
+ response.setLastModified(DateHelper.toCalendar(object_objectitem_pair.getSecond().getLastModifiedTime()));
+ SObjectItemVO item = itemDao.findById(object_objectitem_pair
+ .getSecond().getId());
+ item.setMd5(result.getFirst());
+ item.setStoredSize(result.getSecond().longValue());
+ itemDao.update(item.getId(), item);
+ response.setResultCode(200);
+ } catch (Exception e) {
+ logger.error("completeMultipartUpload failed due to " + e.getMessage(),e);
+ txn.close();
+ }
+ return response;
}
/**
* Return a S3PutObjectInlineResponse which represents an object being created into a bucket
* Called from S3ObjectAction when PUTting or POSTing an object.
*/
-
+ @DB
public S3PutObjectInlineResponse handleRequest(S3PutObjectInlineRequest request)
{
S3PutObjectInlineResponse response = new S3PutObjectInlineResponse();
@@ -710,36 +735,36 @@ public class S3Engine {
S3MetaDataEntry[] meta = request.getMetaEntries();
S3AccessControlList acl = request.getAcl();
- SBucketDao bucketDao = new SBucketDao();
- SBucket bucket = bucketDao.getByName(bucketName);
+ SBucketVO bucket = bucketDao.getByName(bucketName);
if (bucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
// Is the caller allowed to write the object?
// The allocObjectItem checks for the bucket policy PutObject permissions
- OrderedPair<SObject, SObjectItem> object_objectitem_pair = allocObjectItem(bucket, key, meta, acl, request.getCannedAccess());
- OrderedPair<SHost, String> host_storagelocation_pair = getBucketStorageHost(bucket);
+ OrderedPair<SObjectVO, SObjectItemVO> object_objectitem_pair = allocObjectItem(bucket, key, meta, acl, request.getCannedAccess());
+ OrderedPair<SHostVO, String> host_storagelocation_pair = getBucketStorageHost(bucket);
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
String itemFileName = object_objectitem_pair.getSecond().getStoredPath();
InputStream is = null;
-
+ Transaction txn = null;
try {
// explicit transaction control to avoid holding transaction during file-copy process
- PersistContext.commitTransaction();
+ txn = Transaction.open(Transaction.AWSAPI_DB);
+ txn.start();
is = request.getDataInputStream();
String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), bucket.getName(), itemFileName);
response.setETag(md5Checksum);
response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime()));
response.setVersion( object_objectitem_pair.getSecond().getVersion());
- SObjectItemDao itemDao = new SObjectItemDao();
- SObjectItem item = itemDao.get( object_objectitem_pair.getSecond().getId());
+ //SObjectItemDaoImpl itemDao = new SObjectItemDaoImpl();
+ SObjectItemVO item = itemDao.findById(object_objectitem_pair.getSecond().getId());
item.setMd5(md5Checksum);
item.setStoredSize(contentLength);
- PersistContext.getSession().save(item);
-
+ itemDao.update(item.getId(), item);
+ txn.commit();
} catch (IOException e) {
logger.error("PutObjectInline failed due to " + e.getMessage(), e);
} catch (OutOfStorageException e) {
@@ -752,6 +777,7 @@ public class S3Engine {
logger.error("PutObjectInline unable to close stream from data handler.", e);
}
}
+ txn.close();
}
return response;
@@ -771,32 +797,34 @@ public class S3Engine {
S3MetaDataEntry[] meta = request.getMetaEntries();
S3AccessControlList acl = request.getAcl();
- SBucketDao bucketDao = new SBucketDao();
- SBucket bucket = bucketDao.getByName(bucketName);
+ SBucketVO bucket = bucketDao.getByName(bucketName);
if(bucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
// Is the caller allowed to write the object?
// The allocObjectItem checks for the bucket policy PutObject permissions
- OrderedPair<SObject, SObjectItem> object_objectitem_pair = allocObjectItem(bucket, key, meta, acl, null);
- OrderedPair<SHost, String> host_storagelocation_pair = getBucketStorageHost(bucket);
+ OrderedPair<SObjectVO, SObjectItemVO> object_objectitem_pair = allocObjectItem(bucket, key, meta, acl, null);
+ OrderedPair<SHostVO, String> host_storagelocation_pair = getBucketStorageHost(bucket);
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
String itemFileName = object_objectitem_pair.getSecond().getStoredPath();
InputStream is = null;
+ Transaction txn = null;
try {
// explicit transaction control to avoid holding transaction during file-copy process
- PersistContext.commitTransaction();
+
+ txn = Transaction.open(Transaction.AWSAPI_DB);
+ txn.start();
is = request.getInputStream();
String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), bucket.getName(), itemFileName);
response.setETag(md5Checksum);
response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime()));
- SObjectItemDao itemDao = new SObjectItemDao();
- SObjectItem item = itemDao.get( object_objectitem_pair.getSecond().getId());
+ SObjectItemVO item = itemDao.findById(object_objectitem_pair.getSecond().getId());
item.setMd5(md5Checksum);
item.setStoredSize(contentLength);
- PersistContext.getSession().save(item);
+ itemDao.update(item.getId(), item);
+ txn.commit();
} catch (OutOfStorageException e) {
logger.error("PutObject failed due to " + e.getMessage(), e);
@@ -808,6 +836,7 @@ public class S3Engine {
logger.error("Unable to close stream from data handler.", e);
}
}
+ txn.close();
}
return response;
@@ -825,18 +854,16 @@ public class S3Engine {
// [A] First find the object in the bucket
S3Response response = new S3Response();
- SBucketDao bucketDao = new SBucketDao();
String bucketName = request.getBucketName();
- SBucket sbucket = bucketDao.getByName( bucketName );
+ SBucketVO sbucket = bucketDao.getByName( bucketName );
if(sbucket == null) {
response.setResultCode(404);
response.setResultDescription("Bucket " + bucketName + "does not exist");
return response;
}
- SObjectDao sobjectDao = new SObjectDao();
String nameKey = request.getKey();
- SObject sobject = sobjectDao.getByNameKey( sbucket, nameKey );
+ SObjectVO sobject = objectDao.getByNameKey( sbucket, nameKey );
if(sobject == null) {
response.setResultCode(404);
response.setResultDescription("Object " + request.getKey() + " in bucket " + bucketName + " does not exist");
@@ -852,7 +879,7 @@ public class S3Engine {
// [B] Versioning allow the client to ask for a specific version not just the latest
- SObjectItem item = null;
+ SObjectItemVO item = null;
int versioningStatus = sbucket.getVersioningStatus();
String wantVersion = request.getVersion();
if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion)
@@ -875,7 +902,6 @@ public class S3Engine {
verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_WRITE_ACL );
// -> the acl always goes on the instance of the object
- SAclDao aclDao = new SAclDao();
aclDao.save("SObjectItem", item.getId(), request.getAcl());
response.setResultCode(200);
@@ -895,15 +921,15 @@ public class S3Engine {
// [A] Does the object exist that holds the ACL we are looking for?
S3AccessControlPolicy policy = new S3AccessControlPolicy();
- SBucketDao bucketDao = new SBucketDao();
+
String bucketName = request.getBucketName();
- SBucket sbucket = bucketDao.getByName( bucketName );
+ SBucketVO sbucket = bucketDao.getByName( bucketName );
if (sbucket == null)
throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
- SObjectDao sobjectDao = new SObjectDao();
+ //SObjectDaoImpl sobjectDao = new SObjectDaoImpl();
String nameKey = request.getKey();
- SObject sobject = sobjectDao.getByNameKey( sbucket, nameKey );
+ SObjectVO sobject = objectDao.getByNameKey( sbucket, nameKey );
if (sobject == null)
throw new NoSuchObjectException("Object " + request.getKey() + " does not exist");
@@ -916,7 +942,7 @@ public class S3Engine {
// [B] Versioning allow the client to ask for a specific version not just the latest
- SObjectItem item = null;
+ SObjectItemVO item = null;
int versioningStatus = sbucket.getVersioningStatus();
String wantVersion = request.getVersion();
if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion)
@@ -946,8 +972,8 @@ public class S3Engine {
policy.setOwner(owner);
policy.setResultCode(200);
- SAclDao aclDao = new SAclDao();
- List<SAcl> grants = aclDao.listGrants( "SObjectItem", item.getId());
+
+ List<SAclVO> grants = aclDao.listGrants( "SObjectItem", item.getId());
policy.setGrants(S3Grant.toGrants(grants));
return policy;
}
@@ -967,18 +993,17 @@ public class S3Engine {
int resultCode = 200;
// [A] Verify that the bucket and the object exist
- SBucketDao bucketDao = new SBucketDao();
+
String bucketName = request.getBucketName();
- SBucket sbucket = bucketDao.getByName(bucketName);
+ SBucketVO sbucket = bucketDao.getByName(bucketName);
if (sbucket == null) {
response.setResultCode(404);
response.setResultDescription("Bucket " + request.getBucketName() + " does not exist");
return response;
}
- SObjectDao objectDao = new SObjectDao();
String nameKey = request.getKey();
- SObject sobject = objectDao.getByNameKey( sbucket, nameKey );
+ SObjectVO sobject = objectDao.getByNameKey( sbucket, nameKey );
if (sobject == null) {
response.setResultCode(404);
response.setResultDescription("Object " + request.getKey() + " does not exist in bucket " + request.getBucketName());
@@ -995,7 +1020,7 @@ public class S3Engine {
// [B] Versioning allow the client to ask for a specific version not just the latest
- SObjectItem item = null;
+ SObjectItemVO item = null;
int versioningStatus = sbucket.getVersioningStatus();
String wantVersion = request.getVersion();
if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion)
@@ -1037,15 +1062,15 @@ public class S3Engine {
// [D] Return the contents of the object inline
// -> extract the meta data that corresponds the specific versioned item
- SMetaDao metaDao = new SMetaDao();
- List<SMeta> itemMetaData = metaDao.getByTarget( "SObjectItem", item.getId());
+
+ List<SMetaVO> itemMetaData = metaDao.getByTarget( "SObjectItem", item.getId());
if (null != itemMetaData)
{
int i = 0;
S3MetaDataEntry[] metaEntries = new S3MetaDataEntry[ itemMetaData.size() ];
- ListIterator<SMeta> it = itemMetaData.listIterator();
+ ListIterator<SMetaVO> it = itemMetaData.listIterator();
while( it.hasNext()) {
- SMeta oneTag = (SMeta)it.next();
+ SMetaVO oneTag = (SMetaVO)it.next();
S3MetaDataEntry oneEntry = new S3MetaDataEntry();
oneEntry.setName( oneTag.getName());
oneEntry.setValue( oneTag.getValue());
@@ -1068,7 +1093,7 @@ public class S3Engine {
response.setVersion( item.getVersion());
if (request.isInlineData())
{
- OrderedPair<SHost, String> tupleSHostInfo = getBucketStorageHost(sbucket);
+ OrderedPair<SHostVO, String> tupleSHostInfo = getBucketStorageHost(sbucket);
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleSHostInfo.getFirst());
if ( 0 <= bytesStart && 0 <= bytesEnd )
@@ -1091,18 +1116,18 @@ public class S3Engine {
{
// Verify that the bucket and object exist
S3Response response = new S3Response();
- SBucketDao bucketDao = new SBucketDao();
+
String bucketName = request.getBucketName();
- SBucket sbucket = bucketDao.getByName( bucketName );
+ SBucketVO sbucket = bucketDao.getByName( bucketName );
if (sbucket == null) {
response.setResultCode(404);
response.setResultDescription("<Code>Bucket dosen't exists</Code><Message>Bucket " + bucketName + " does not exist</Message>");
return response;
}
- SObjectDao objectDao = new SObjectDao();
+
String nameKey = request.getKey();
- SObject sobject = objectDao.getByNameKey( sbucket, nameKey );
+ SObjectVO sobject = objectDao.getByNameKey( sbucket, nameKey );
if (sobject == null) {
response.setResultCode(404);
response.setResultDescription("<Code>Not Found</Code><Message>No object with key " + nameKey + " exists in bucket " + bucketName+"</Message>");
@@ -1112,7 +1137,7 @@ public class S3Engine {
// Discover whether versioning is enabled. If so versioning requires the setting of a deletion marker.
String storedPath = null;
- SObjectItem item = null;
+ SObjectItemVO item = null;
int versioningStatus = sbucket.getVersioningStatus();
if ( SBucket.VERSIONING_ENABLED == versioningStatus )
{
@@ -1125,7 +1150,7 @@ public class S3Engine {
if (null == wantVersion) {
// If versioning is on and no versionId is given then we just write a deletion marker
sobject.setDeletionMark( UUID.randomUUID().toString());
- objectDao.update( sobject );
+ objectDao.update(sobject.getId(), sobject );
response.setResultDescription("<DeleteMarker>true</DeleteMarker><DeleteMarkerVersionId>"+ sobject.getDeletionMark() +"</DeleteMarkerVersionId>");
}
else {
@@ -1133,7 +1158,7 @@ public class S3Engine {
String deletionMarker = sobject.getDeletionMark();
if (null != deletionMarker && wantVersion.equalsIgnoreCase( deletionMarker )) {
sobject.setDeletionMark( null );
- objectDao.update( sobject );
+ objectDao.update(sobject.getId(), sobject );
response.setResultDescription("<VersionId>" + wantVersion +"</VersionId>");
response.setResultDescription("<DeleteMarker>true</DeleteMarker><DeleteMarkerVersionId>"+ sobject.getDeletionMark() +"</DeleteMarkerVersionId>");
response.setResultCode(204);
@@ -1149,7 +1174,7 @@ public class S3Engine {
// Providing versionId is non-null, then just delete the one item that matches the versionId from the database
storedPath = item.getStoredPath();
sobject.deleteItem( item.getId());
- objectDao.update( sobject );
+ objectDao.update(sobject.getId(), sobject );
response.setResultDescription("<VersionId>" + wantVersion +"</VersionId>");
}
}
@@ -1171,9 +1196,9 @@ public class S3Engine {
// Otherwise remove the entire object
// Cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl and SMeta objects.
storedPath = item.getStoredPath();
- deleteMetaData( item.getId());
- deleteObjectAcls( "SObjectItem", item.getId());
- objectDao.delete( sobject );
+ deleteMetaData( item.getId());
+ deleteObjectAcls( "SObjectItem", item.getId());
+ objectDao.remove(sobject.getId());
}
}
}
@@ -1181,7 +1206,7 @@ public class S3Engine {
// Delete the file holding the object
if (null != storedPath)
{
- OrderedPair<SHost, String> host_storagelocation_pair = getBucketStorageHost( sbucket );
+ OrderedPair<SHostVO, String> host_storagelocation_pair = getBucketStorageHost( sbucket );
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter( host_storagelocation_pair.getFirst());
bucketAdapter.deleteObject( host_storagelocation_pair.getSecond(), bucketName, storedPath );
}
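
The delete hunks above preserve S3's versioned-delete semantics while switching to the id-based objectDao.update(...) call. Condensed, the branching looks like the sketch below; this paraphrases the patch, where item is the SObjectItemVO resolved for wantVersion and sobject, objectDao, storedPath and versioningStatus are the names used above:

    // Condensed sketch of the versioned-delete branches above.
    if (SBucket.VERSIONING_ENABLED == versioningStatus) {
        String deletionMarker = sobject.getDeletionMark();
        if (wantVersion == null) {
            // No versionId given: write a deletion marker only.
            sobject.setDeletionMark(UUID.randomUUID().toString());
            objectDao.update(sobject.getId(), sobject);
        } else if (deletionMarker != null && wantVersion.equalsIgnoreCase(deletionMarker)) {
            // The versionId names the marker itself: clear the marker.
            sobject.setDeletionMark(null);
            objectDao.update(sobject.getId(), sobject);
        } else {
            // Delete exactly the one item matching the versionId.
            storedPath = item.getStoredPath();
            sobject.deleteItem(item.getId());
            objectDao.update(sobject.getId(), sobject);
        }
    }
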
@@ -1192,50 +1217,48 @@ public class S3Engine {
private void deleteMetaData( long itemId ) {
- SMetaDao metaDao = new SMetaDao();
- List<SMeta> itemMetaData = metaDao.getByTarget( "SObjectItem", itemId );
+ List<SMetaVO> itemMetaData = metaDao.getByTarget( "SObjectItem", itemId );
if (null != itemMetaData)
{
- ListIterator<SMeta> it = itemMetaData.listIterator();
+ ListIterator<SMetaVO> it = itemMetaData.listIterator();
while( it.hasNext()) {
- SMeta oneTag = (SMeta)it.next();
- metaDao.delete( oneTag );
+ SMetaVO oneTag = (SMetaVO)it.next();
+ metaDao.remove(oneTag.getId());
}
}
}
private void deleteObjectAcls( String target, long itemId ) {
- SAclDao aclDao = new SAclDao();
- List<SAcl> itemAclData = aclDao.listGrants( target, itemId );
+ List<SAclVO> itemAclData = aclDao.listGrants( target, itemId );
if (null != itemAclData)
{
- ListIterator<SAcl> it = itemAclData.listIterator();
+ ListIterator<SAclVO> it = itemAclData.listIterator();
while( it.hasNext()) {
- SAcl oneTag = (SAcl)it.next();
- aclDao.delete( oneTag );
+ SAclVO oneTag = (SAclVO)it.next();
+ aclDao.remove(oneTag.getId());
}
}
}
private void deleteBucketAcls( long bucketId ) {
- SAclDao aclDao = new SAclDao();
- List<SAcl> bucketAclData = aclDao.listGrants( "SBucket", bucketId );
+
+ List<SAclVO> bucketAclData = aclDao.listGrants( "SBucket", bucketId );
if (null != bucketAclData)
{
- ListIterator<SAcl> it = bucketAclData.listIterator();
+ ListIterator<SAclVO> it = bucketAclData.listIterator();
while( it.hasNext()) {
- SAcl oneTag = (SAcl)it.next();
- aclDao.delete( oneTag );
+ SAclVO oneTag = (SAclVO)it.next();
+ aclDao.remove(oneTag.getId());
}
}
}
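
All three helpers above follow the same conversion rule: the Hibernate DAOs deleted a loaded entity (aclDao.delete(oneTag)), while the Generic DAO framework removes rows by primary key. A minimal sketch of the resulting shape, using the listGrants/remove calls shown above with a for-each in place of the ListIterator:

    // Sketch: id-based removal with the Generic DAO framework,
    // equivalent to deleteObjectAcls above.
    private void deleteObjectAclsSketch(String target, long itemId) {
        List<SAclVO> itemAclData = aclDao.listGrants(target, itemId);
        if (itemAclData != null) {
            for (SAclVO oneTag : itemAclData) {
                aclDao.remove(oneTag.getId()); // remove by id, not by entity
            }
        }
    }
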
- private S3ListBucketPrefixEntry[] composeListBucketPrefixEntries(List<SObject> l, String prefix, String delimiter, int maxKeys)
+ private S3ListBucketPrefixEntry[] composeListBucketPrefixEntries(List<SObjectVO> l, String prefix, String delimiter, int maxKeys)
{
List<S3ListBucketPrefixEntry> entries = new ArrayList<S3ListBucketPrefixEntry>();
int count = 0;
- for(SObject sobject : l)
+ for(SObjectVO sobject : l)
{
if(delimiter != null && !delimiter.isEmpty())
{
@@ -1264,14 +1287,14 @@ public class S3Engine {
*
* TODO - how does the versionIdMarker work when there is a deletion marker in the object?
*/
- private S3ListBucketObjectEntry[] composeListBucketContentEntries(List<SObject> l, String prefix, String delimiter, int maxKeys, boolean enableVersion, String versionIdMarker)
+ private S3ListBucketObjectEntry[] composeListBucketContentEntries(List<SObjectVO> l, String prefix, String delimiter, int maxKeys, boolean enableVersion, String versionIdMarker)
{
List<S3ListBucketObjectEntry> entries = new ArrayList<S3ListBucketObjectEntry>();
- SObjectItem latest = null;
+ SObjectItemVO latest = null;
boolean hitIdMarker = false;
int count = 0;
- for( SObject sobject : l )
+ for( SObjectVO sobject : l )
{
if (delimiter != null && !delimiter.isEmpty())
{
@@ -1301,10 +1324,10 @@ public class S3Engine {
}
else latest = sobject.getLatestVersion( false );
- Iterator<SObjectItem> it = sobject.getItems().iterator();
+ Iterator<SObjectItemVO> it = sobject.getItems().iterator();
while( it.hasNext())
{
- SObjectItem item = (SObjectItem)it.next();
+ SObjectItemVO item = (SObjectItemVO)it.next();
if ( !hitIdMarker )
{
@@ -1318,13 +1341,13 @@ public class S3Engine {
}
else
{ // -> if there are multiple versions of an object then just return its latest version
- Iterator<SObjectItem> it = sobject.getItems().iterator();
- SObjectItem lastestItem = null;
+ Iterator<SObjectItemVO> it = sobject.getItems().iterator();
+ SObjectItemVO lastestItem = null;
int maxVersion = 0;
int version = 0;
while(it.hasNext())
{
- SObjectItem item = (SObjectItem)it.next();
+ SObjectItemVO item = (SObjectItemVO)it.next();
String versionStr = item.getVersion();
if ( null != versionStr )
@@ -1351,7 +1374,7 @@ public class S3Engine {
else return null;
}
- private static S3ListBucketObjectEntry toListEntry( SObject sobject, SObjectItem item, SObjectItem latest )
+ private static S3ListBucketObjectEntry toListEntry( SObjectVO sobject, SObjectItemVO item, SObjectItemVO latest )
{
S3ListBucketObjectEntry entry = new S3ListBucketObjectEntry();
entry.setKey(sobject.getNameKey());
@@ -1367,22 +1390,21 @@ public class S3Engine {
return entry;
}
- private OrderedPair<SHost, String> getBucketStorageHost(SBucket bucket)
+ private OrderedPair<SHostVO, String> getBucketStorageHost(SBucketVO bucket)
{
- MHostMountDao mountDao = new MHostMountDao();
- SHost shost = bucket.getShost();
+ SHostVO shost = shostDao.findById(bucket.getShostID());
if(shost.getHostType() == SHost.STORAGE_HOST_TYPE_LOCAL) {
- return new OrderedPair<SHost, String>(shost, shost.getExportRoot());
+ return new OrderedPair<SHostVO, String>(shost, shost.getExportRoot());
}
- MHostMount mount = mountDao.getHostMount(ServiceProvider.getInstance().getManagementHostId(), shost.getId());
+ MHostMountVO mount = mountDao.getHostMount(ServiceProvider.getInstance().getManagementHostId(), shost.getId());
if(mount != null) {
- return new OrderedPair<SHost, String>(shost, mount.getMountPath());
+ return new OrderedPair<SHostVO, String>(shost, mount.getMountPath());
}
// need to redirect request to other node
throw new HostNotMountedException("Storage host " + shost.getHost() + " is not locally mounted");
}
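
This hunk captures the structural difference between the two frameworks: the Hibernate entity navigated a mapped association (bucket.getShost()), while the VO carries only the foreign key, so the host row must be fetched explicitly. A sketch of that lookup, assuming the injected shostDao field used above; the null handling is illustrative and not part of the patch:

    // Sketch: explicit id lookup replacing Hibernate association navigation.
    private SHostVO storageHostOf(SBucketVO bucket) {
        SHostVO shost = shostDao.findById(bucket.getShostID());
        if (shost == null) {
            // The VO holds only an id, so a dangling foreign key must now be
            // handled by the caller (illustrative check, not in the patch).
            throw new InternalErrorException("No storage host row for bucket " + bucket.getName());
        }
        return shost;
    }
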
/**
@@ -1393,15 +1415,12 @@ public class S3Engine {
*/
private void createUploadFolder(String bucketName)
{
- if (PersistContext.acquireNamedLock("bucket.creation", LOCK_ACQUIRING_TIMEOUT_SECONDS))
- {
try {
allocBucketStorageHost(bucketName, ServiceProvider.getInstance().getMultipartDir());
- }
+ }
finally {
- PersistContext.releaseNamedLock("bucket.creation");
+
}
- }
}
/**
@@ -1413,40 +1432,39 @@ public class S3Engine {
* @param overrideName
* @return
*/
- private OrderedPair<SHost, String> allocBucketStorageHost(String bucketName, String overrideName)
+ private OrderedPair<SHostVO, String> allocBucketStorageHost(String bucketName, String overrideName)
{
- MHostDao mhostDao = new MHostDao();
- SHostDao shostDao = new SHostDao();
- MHost mhost = mhostDao.get(ServiceProvider.getInstance().getManagementHostId());
+ MHostVO mhost = mhostDao.findById(ServiceProvider.getInstance().getManagementHostId());
if(mhost == null)
throw new OutOfServiceException("Temporarily out of service");
if(mhost.getMounts().size() > 0) {
Random random = new Random();
- MHostMount[] mounts = (MHostMount[])mhost.getMounts().toArray();
- MHostMount mount = mounts[random.nextInt(mounts.length)];
+ MHostMountVO[] mounts = (MHostMountVO[])mhost.getMounts().toArray();
+ MHostMountVO mount = mounts[random.nextInt(mounts.length)];
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(mount.getShost());
bucketAdapter.createContainer(mount.getMountPath(), (null != overrideName ? overrideName : bucketName));
- return new OrderedPair<SHost, String>(mount.getShost(), mount.getMountPath());
+ return new OrderedPair<SHostVO, String>(mount.getShost(), mount.getMountPath());
}
// To keep things simple, allow only one locally mounted storage root. TODO - change in the future
String localStorageRoot = ServiceProvider.getInstance().getStartupProperties().getProperty("storage.root");
if(localStorageRoot != null) {
- SHost localSHost = shostDao.getLocalStorageHost(mhost.getId(), localStorageRoot);
+ SHostVO localSHost = shostDao.getLocalStorageHost(mhost.getId(), localStorageRoot);
if(localSHost == null)
throw new InternalErrorException("storage.root is configured but not initialized");
S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(localSHost);
bucketAdapter.createContainer(localSHost.getExportRoot(),(null != overrideName ? overrideName : bucketName));
- return new OrderedPair<SHost, String>(localSHost, localStorageRoot);
+ return new OrderedPair<SHostVO, String>(localSHost, localStorageRoot);
}
throw new OutOfStorageException("No storage host is available");
}
- public S3BucketAdapter getStorageHostBucketAdapter(SHost shost)
+ public S3BucketAdapter getStorageHostBucketAdapter(SHostVO shost)
{
S3BucketAdapter adapter = bucketAdapters.get(shost.getHostType());
if(adapter == null)
@@ -1464,17 +1482,13 @@ public class S3Engine {
* @throws IOException
*/
@SuppressWarnings("deprecation")
- public OrderedPair<SObject, SObjectItem> allocObjectItem(SBucket bucket, String nameKey, S3MetaDataEntry[] meta, S3AccessControlList acl, String cannedAccessPolicy)
+ public OrderedPair<SObjectVO, SObjectItemVO> allocObjectItem(SBucketVO bucket, String nameKey, S3MetaDataEntry[] meta, S3AccessControlList acl, String cannedAccessPolicy)
{
- SObjectDao objectDao = new SObjectDao();
- SObjectItemDao objectItemDao = new SObjectItemDao();
- SMetaDao metaDao = new SMetaDao();
- SAclDao aclDao = new SAclDao();
- SObjectItem item = null;
+ SObjectItemVO item = null;
int versionSeq = 1;
int versioningStatus = bucket.getVersioningStatus();
- Session session = PersistContext.getSession();
// [A] To write into a bucket the user must have write permission to that bucket
S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, bucket.getName());
@@ -1482,65 +1496,79 @@ public class S3Engine {
context.setEvalParam( ConditionKeys.Acl, cannedAccessPolicy);
verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); // TODO - check this validates plain POSTs
+ Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
+ txn.start();
// [B] If versioning is off then we overwrite a null object item
- SObject object = objectDao.getByNameKey(bucket, nameKey);
+ SObjectVO object = objectDao.getByNameKey(bucket, nameKey);
if ( object != null )
- {
+ {
// -> if versioning is on, create new object items
if ( SBucket.VERSIONING_ENABLED == versioningStatus )
{
- session.lock(object, LockMode.UPGRADE);
+
versionSeq = object.getNextSequence();
object.setNextSequence(versionSeq + 1);
- session.save(object);
+ objectDao.update(object.getId(), object);
- item = new SObjectItem();
+ item = new SObjectItemVO();
item.setTheObject(object);
object.getItems().add(item);
+ item.setsObjectID(object.getId());
item.setVersion(String.valueOf(versionSeq));
Date ts = DateHelper.currentGMTTime();
item.setCreateTime(ts);
item.setLastAccessTime(ts);
item.setLastModifiedTime(ts);
- session.save(item);
+ item = itemDao.persist(item);
+ txn.commit();
}
else
{ // -> find an object item with a null version, can be null
// if bucket started out with versioning enabled and was then suspended
- item = objectItemDao.getByObjectIdNullVersion( object.getId());
+ item = itemDao.getByObjectIdNullVersion( object.getId());
if (item == null)
{
- item = new SObjectItem();
+ item = new SObjectItemVO();
item.setTheObject(object);
+ item.setsObjectID(object.getId());
object.getItems().add(item);
Date ts = DateHelper.currentGMTTime();
item.setCreateTime(ts);
item.setLastAccessTime(ts);
item.setLastModifiedTime(ts);
- session.save(item);
+ item = itemDao.persist(item);
+ txn.commit();
}
}
}
else
- { // -> there is no object nor an object item
- object = new SObject();
- object.setBucket(bucket);
- object.setNameKey(nameKey);
- object.setNextSequence(2);
- object.setCreateTime(DateHelper.currentGMTTime());
- object.setOwnerCanonicalId(UserContext.current().getCanonicalUserId());
- session.save(object);
-
- item = new SObjectItem();
- item.setTheObject(object);
- object.getItems().add(item);
- if (SBucket.VERSIONING_ENABLED == versioningStatus) item.setVersion(String.valueOf(versionSeq));
- Date ts = DateHelper.currentGMTTime();
- item.setCreateTime(ts);
- item.setLastAccessTime(ts);
- item.setLastModifiedTime(ts);
- session.save(item);
+ {
+ Transaction txn1 = Transaction.open(Transaction.AWSAPI_DB);
+ txn1.start();
+ // -> there is neither an object nor an object item
+ object = new SObjectVO();
+ object.setBucket(bucket);
+ object.setNameKey(nameKey);
+ object.setNextSequence(2);
+ object.setBucketID(bucket.getId());
+ object.setCreateTime(DateHelper.currentGMTTime());
+ object.setOwnerCanonicalId(UserContext.current().getCanonicalUserId());
+ object = objectDao.persist(object);
+ item = new SObjectItemVO();
+ item.setTheObject(object);
+ item.setsObjectID(object.getId());
+ object.getItems().add(item);
+ if (SBucket.VERSIONING_ENABLED == versioningStatus) item.setVersion(String.valueOf(versionSeq));
+ Date ts = DateHelper.currentGMTTime();
+ item.setCreateTime(ts);
+ item.setLastAccessTime(ts);
+ item.setLastModifiedTime(ts);
+ item = itemDao.persist(item);
+ txn1.commit();
+ txn1.close();
+
}
@@ -1570,8 +1598,9 @@ public class S3Engine {
aclDao.save( "SObjectItem", item.getId(), acl );
}
- session.update(item);
- return new OrderedPair<SObject, SObjectItem>(object, item);
+ itemDao.update(item.getId(), item);
+ txn.close();
+ return new OrderedPair<SObjectVO, SObjectItemVO>(object, item);
}
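
allocObjectItem shows the transaction idiom that replaces the Hibernate session: open a named transaction, start it, persist and update through the DAOs, commit, and close. A minimal sketch of that lifecycle using only calls that appear in this patch (Transaction, itemDao.persist, DateHelper), with object standing for the SObjectVO being written; the try/finally is illustrative hardening, since close() after a missed commit rolls back:

    // Sketch: explicit transaction demarcation around Generic DAO calls.
    Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
    try {
        txn.start();
        SObjectItemVO item = new SObjectItemVO();
        item.setsObjectID(object.getId());
        Date ts = DateHelper.currentGMTTime();
        item.setCreateTime(ts);
        item.setLastAccessTime(ts);
        item.setLastModifiedTime(ts);
        item = itemDao.persist(item); // persist returns the VO with its id assigned
        txn.commit();
    } finally {
        txn.close(); // rolls back if commit was never reached
    }
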
@@ -1579,11 +1608,11 @@ public class S3Engine {
* Access controls that are specified via the "x-amz-acl:" headers in REST requests.
* Note that canned policies can be set when the object's contents are set
*/
- public void setCannedAccessControls( String cannedAccessPolicy, String target, long objectId, SBucket bucket )
+ public void setCannedAccessControls( String cannedAccessPolicy, String target, long objectId, SBucketVO bucket )
{
// Find the permission and symbol for the principal corresponding to the requested cannedAccessPolicy
Triple<Integer,Integer,String> permission_permission_symbol_triple =
- SAcl.getCannedAccessControls(cannedAccessPolicy, target, bucket.getOwnerCanonicalId());
+ SAclVO.getCannedAccessControls(cannedAccessPolicy, target, bucket.getOwnerCanonicalId());
if ( null == permission_permission_symbol_triple.getThird() )
setSingleAcl(target, objectId, permission_permission_symbol_triple.getFirst());
else
@@ -1599,7 +1628,6 @@ public class S3Engine {
private void setSingleAcl( String target, long targetId, int permission )
{
- SAclDao aclDao = new SAclDao();
S3AccessControlList defaultAcl = new S3AccessControlList();
// -> if an anonymous request, then do not rewrite the ACL
@@ -1626,7 +1654,6 @@ public class S3Engine {
*/
private void setDefaultAcls( String target, long objectId, int permission1, int permission2, String owner )
{
- SAclDao aclDao = new SAclDao();
S3AccessControlList defaultAcl = new S3AccessControlList();
// -> object owner
@@ -1712,20 +1739,18 @@ public class S3Engine {
{
if (SAcl.PERMISSION_PASS == requestedPermission) return;
- SAclDao aclDao = new SAclDao();
-
// If an anonymous request, then canonicalUserId is an empty string
String userId = UserContext.current().getCanonicalUserId();
if ( 0 == userId.length())
{
// Is an anonymous principal ACL set for this <target, targetId>?
- if (hasPermission( aclDao.listGrants( target, targetId, "A" ), requestedPermission )) return;
+ if (hasPermission( saclDao.listGrants( target, targetId, "A" ), requestedPermission )) return;
}
else
{
- if (hasPermission( aclDao.listGrants( target, targetId, userId ), requestedPermission )) return;
+ if (hasPermission( saclDao.listGrants( target, targetId, userId ), requestedPermission )) return;
// Or alternatively, is there any authenticated-principal ACL set for this <target, targetId>?
- if (hasPermission( aclDao.listGrants( target, targetId, "*" ), requestedPermission )) return;
+ if (hasPermission( saclDao.listGrants( target, targetId, "*" ), requestedPermission )) return;
}
// No privileges implies that no access is allowed in the case of an anonymous user
throw new PermissionDeniedException( "Access Denied - ACLs do not give user the required permission" );
@@ -1749,8 +1774,11 @@ public class S3Engine {
// -> do we have to load it from the database (any other value means there is no policy)?
if (-1 == result.getSecond().intValue())
{
- BucketPolicyDao policyDao = new BucketPolicyDao();
- String policyInJson = policyDao.getPolicy( context.getBucketName());
+ BucketPolicyVO policyvo = bPolicy.getByName(context.getBucketName());
+ String policyInJson = null;
+ if (null != policyvo)
+ policyInJson = policyvo.getPolicy();
+
// -> place in cache that no policy exists in the database
if (null == policyInJson) {
ServiceProvider.getInstance().setBucketPolicy(context.getBucketName(), null);
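
The hunk above is a read-through cache: a sentinel of -1 means the policy has not been looked up yet, so it is fetched by bucket name through the injected bucket-policy DAO, and even a miss is cached so later requests skip the database. Condensed, using the same calls as above:

    // Condensed sketch of the lazy policy load above.
    BucketPolicyVO policyvo = bPolicy.getByName(context.getBucketName());
    String policyInJson = (policyvo != null) ? policyvo.getPolicy() : null;
    if (policyInJson == null) {
        // Cache the miss so later requests do not re-query the database.
        ServiceProvider.getInstance().setBucketPolicy(context.getBucketName(), null);
    }
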
@@ -1835,13 +1863,13 @@ public class S3Engine {
}
}
- private static boolean hasPermission( List<SAcl> privileges, int requestedPermission )
+ private static boolean hasPermission( List<SAclVO> privileges, int requestedPermission )
{
- ListIterator<SAcl> it = privileges.listIterator();
+ ListIterator<SAclVO> it = privileges.listIterator();
while( it.hasNext())
{
// True provided the requested permission is contained in one of the granted rights for this user. False otherwise.
- SAcl rights = (SAcl)it.next();
+ SAclVO rights = (SAclVO)it.next();
int permission = rights.getPermission();
if (requestedPermission == (permission & requestedPermission)) return true;
}
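
hasPermission treats each grant's permission as a bit mask: the request is satisfied when every requested bit is present in a single grant. A self-contained worked example; the constant values below are invented for illustration, the real ones are defined on SAcl:

    // Standalone illustration of the bit-mask test in hasPermission.
    public class AclBitmaskDemo {
        // Assumed values for demonstration only; SAcl defines the real constants.
        static final int PERMISSION_READ = 0x1;
        static final int PERMISSION_WRITE = 0x2;

        public static void main(String[] args) {
            int granted = PERMISSION_READ | PERMISSION_WRITE; // rights held in one grant
            int requested = PERMISSION_READ;
            // True when all requested bits are contained in the granted rights.
            boolean allowed = (requested == (granted & requested));
            System.out.println(allowed); // prints: true
        }
    }
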
http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/39aa7d86/awsapi/src/com/cloud/bridge/service/core/s3/S3Grant.java
----------------------------------------------------------------------
diff --git a/awsapi/src/com/cloud/bridge/service/core/s3/S3Grant.java b/awsapi/src/com/cloud/bridge/service/core/s3/S3Grant.java
index b3c07be..28e30e2 100644
--- a/awsapi/src/com/cloud/bridge/service/core/s3/S3Grant.java
+++ b/awsapi/src/com/cloud/bridge/service/core/s3/S3Grant.java
@@ -19,6 +19,7 @@ package com.cloud.bridge.service.core.s3;
import java.util.List;
import com.cloud.bridge.model.SAcl;
+import com.cloud.bridge.model.SAclVO;
import com.cloud.bridge.model.SBucket;
import com.cloud.bridge.service.exception.UnsupportedException;
@@ -64,12 +65,12 @@ public class S3Grant {
/* Return an array of S3Grants holding the permissions of grantees by grantee type and their canonicalUserIds.
* Used by S3 engine to get ACL policy requests for buckets and objects.
*/
- public static S3Grant[] toGrants(List<SAcl> grants) {
+ public static S3Grant[] toGrants(List<SAclVO> grants) {
if(grants != null)
{
S3Grant[] entries = new S3Grant[grants.size()];
int i = 0;
- for(SAcl acl: grants) {
+ for(SAclVO acl: grants) {
entries[i] = new S3Grant();
entries[i].setGrantee(acl.getGranteeType());
entries[i].setCanonicalUserID(acl.getGranteeCanonicalId());
http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/39aa7d86/awsapi/src/com/cloud/bridge/util/CloudSessionFactory.java
----------------------------------------------------------------------
diff --git a/awsapi/src/com/cloud/bridge/util/CloudSessionFactory.java b/awsapi/src/com/cloud/bridge/util/CloudSessionFactory.java
deleted file mode 100644
index 404f463..0000000
--- a/awsapi/src/com/cloud/bridge/util/CloudSessionFactory.java
+++ /dev/null
@@ -1,106 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package com.cloud.bridge.util;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.Properties;
-
-import org.hibernate.Session;
-import org.hibernate.SessionFactory;
-import org.hibernate.cfg.Configuration;
-import org.jasypt.encryption.pbe.StandardPBEStringEncryptor;
-import org.jasypt.properties.EncryptableProperties;
-import org.apache.log4j.Logger;
-
-public class CloudSessionFactory {
- private static CloudSessionFactory instance;
- public static final Logger logger = Logger.getLogger(CloudSessionFactory.class);
-
- private SessionFactory factory;
-
- private CloudSessionFactory() {
- Configuration cfg = new Configuration();
- File file = ConfigurationHelper.findConfigurationFile("hibernate.cfg.xml");
-
- File propertiesFile = ConfigurationHelper.findConfigurationFile("db.properties");
- Properties dbProp = null;
- String dbName = null;
- String dbHost = null;
- String dbUser = null;
- String dbPassword = null;
- String dbPort = null;
-
- if (null != propertiesFile) {
-
- if(EncryptionSecretKeyCheckerUtil.useEncryption()){
- StandardPBEStringEncryptor encryptor = EncryptionSecretKeyCheckerUtil.getEncryptor();
- dbProp = new EncryptableProperties(encryptor);
- } else {
- dbProp = new Properties();
- }
-
- try {
- dbProp.load( new FileInputStream( propertiesFile ));
- } catch (FileNotFoundException e) {
- logger.warn("Unable to open properties file: " + propertiesFile.getAbsolutePath(), e);
- } catch (IOException e) {
- logger.warn("Unable to read properties file: " + propertiesFile.getAbsolutePath(), e);
- }
- }
-
-
- //
- // we are packaging hibernate mapping files along with the class files,
- // make sure class loader use the same class path when initializing hibernate mapping.
- // This is important when we are deploying and testing at different environment (Tomcat/JUnit test runner)
- //
- if(file != null && dbProp != null){
- Thread.currentThread().setContextClassLoader(this.getClass().getClassLoader());
- cfg.configure(file);
-
- dbHost = dbProp.getProperty( "db.cloud.host" );
- dbName = dbProp.getProperty( "db.awsapi.name" );
- dbUser = dbProp.getProperty( "db.cloud.username" );
- dbPassword = dbProp.getProperty( "db.cloud.password" );
- dbPort = dbProp.getProperty( "db.cloud.port" );
-
- cfg.setProperty("hibernate.connection.url", "jdbc:mysql://" + dbHost + ":" + dbPort + "/" + dbName);
- cfg.setProperty("hibernate.connection.username", dbUser);
- cfg.setProperty("hibernate.connection.password", dbPassword);
-
-
- factory = cfg.buildSessionFactory();
- }else{
- logger.warn("Unable to open load db configuration");
- throw new RuntimeException("nable to open load db configuration");
- }
- }
-
- public synchronized static CloudSessionFactory getInstance() {
- if(instance == null) {
- instance = new CloudSessionFactory();
- }
- return instance;
- }
-
- public Session openSession() {
- return factory.openSession();
- }
-}
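
With CloudSessionFactory deleted, nothing in awsapi builds a Hibernate SessionFactory from hibernate.cfg.xml any longer; the Generic DAO framework picks up the same db.properties keys the factory used to parse (db.cloud.host, db.cloud.port, db.awsapi.name, db.cloud.username, db.cloud.password), and callers obtain connectivity by opening a transaction against the awsapi database. A minimal post-migration sketch; the class and method are hypothetical, while Transaction and AWSAPI_DB are the ones this patch uses:

    // Hypothetical smoke test of post-migration database access:
    // no Hibernate configuration, no session factory.
    import com.cloud.utils.db.Transaction;

    public class AwsapiDbSmoke {
        public static void main(String[] args) {
            // JDBC settings are read from db.properties on the classpath.
            Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
            try {
                txn.start();
                // ... DAO calls (findById / persist / update / remove) go here ...
                txn.commit();
            } finally {
                txn.close(); // rolls back anything uncommitted
            }
        }
    }
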