You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@cloudstack.apache.org by nv...@apache.org on 2022/04/14 14:12:08 UTC
[cloudstack] branch main updated: StorPool storage plugin (#6007)
This is an automated email from the ASF dual-hosted git repository.
nvazquez pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/cloudstack.git
The following commit(s) were added to refs/heads/main by this push:
new 4004dfcfd8 StorPool storage plugin (#6007)
4004dfcfd8 is described below
commit 4004dfcfd8bf20645f9af6f486753d004c4cbb8d
Author: slavkap <51...@users.noreply.github.com>
AuthorDate: Thu Apr 14 17:12:01 2022 +0300
StorPool storage plugin (#6007)
* StorPool storage plugin
Adds volume storage plugin for StorPool SDS
* Added support for alternative endpoint
Added option to switch to alternative endpoint for SP primary storage
* renamed all classes from Storpool to StorPool
* Address review
* removed unnecessary else
* Removed check about the storage provider
We don't need this check; we'll determine whether the snapshot is on StorPool by
its name from the path
* Check that current plugin supports all functionality before upgrade CS
* Smoke tests for StorPool plug-in
* Fixed conflicts
* Fixed conflicts and added missed Apache license header
* Removed whitespaces in smoke tests
* Added StorPool plugin jar for Debian
the StorPool jar will be included into cloudstack-agent package for
Debian/Ubuntu
---
client/pom.xml | 12 +
.../cloudstack/storage/command/CopyCmdAnswer.java | 5 +
debian/rules | 1 +
.../api/storage/PrimaryDataStoreDriver.java | 29 +
.../api/storage/StorageStrategyFactory.java | 2 +-
.../subsystem/api/storage/VMSnapshotStrategy.java | 2 +-
.../vmsnapshot/DefaultVMSnapshotStrategy.java | 2 +-
packaging/centos7/cloud.spec | 1 +
packaging/centos8/cloud.spec | 1 +
packaging/suse15/cloud.spec | 1 +
plugins/pom.xml | 4 +-
.../driver/DateraPrimaryDataStoreDriver.java | 18 +
.../CloudStackPrimaryDataStoreDriverImpl.java | 18 +
.../driver/LinstorPrimaryDataStoreDriverImpl.java | 18 +
.../driver/NexentaPrimaryDataStoreDriver.java | 18 +
.../driver/SamplePrimaryDataStoreDriverImpl.java | 18 +
.../driver/ScaleIOPrimaryDataStoreDriver.java | 18 +
.../driver/SolidFirePrimaryDataStoreDriver.java | 18 +
plugins/storage/volume/storpool/README.md | 344 ++++
plugins/storage/volume/storpool/pom.xml | 68 +
.../api/storage/StorPoolBackupSnapshotCommand.java | 30 +
.../StorPoolBackupTemplateFromSnapshotCommand.java | 30 +
.../agent/api/storage/StorPoolCopyCommand.java | 60 +
.../StorPoolCopyVolumeToSecondaryCommand.java | 30 +
.../storage/StorPoolDownloadTemplateCommand.java | 23 +-
.../api/storage/StorPoolDownloadVolumeCommand.java | 30 +
.../storage/StorPoolMigrateWithVolumesCommand.java | 55 +
.../storage/StorPoolModifyStoragePoolAnswer.java | 94 +
.../storage/StorPoolModifyStoragePoolCommand.java | 36 +
.../api/storage/StorPoolResizeVolumeCommand.java | 39 +
.../StorPoolBackupSnapshotCommandWrapper.java | 109 +
...olBackupTemplateFromSnapshotCommandWrapper.java | 161 ++
...torPoolCopyVolumeToSecondaryCommandWrapper.java | 124 ++
.../StorPoolDownloadTemplateCommandWrapper.java | 134 ++
.../StorPoolDownloadVolumeCommandWrapper.java | 162 ++
.../StorPoolModifyStorageCommandWrapper.java | 146 ++
.../StorPoolResizeVolumeCommandWrapper.java | 98 +
.../kvm/storage/StorPoolStorageAdaptor.java | 388 ++++
.../kvm/storage/StorPoolStoragePool.java | 164 ++
.../collector/StorPoolAbandonObjectsCollector.java | 323 +++
.../driver/StorPoolPrimaryDataStoreDriver.java | 976 +++++++++
.../StorPoolPrimaryDataStoreLifeCycle.java | 321 +++
.../datastore/provider/StorPoolHostListener.java | 234 +++
.../provider/StorPoolPrimaryDataStoreProvider.java | 78 +
.../datastore/util/StorPoolFeaturesAndFixes.java | 40 +
.../storage/datastore/util/StorPoolHelper.java | 298 +++
.../storage/datastore/util/StorPoolUtil.java | 609 ++++++
.../storage/motion/StorPoolDataMotionStrategy.java | 575 ++++++
.../snapshot/StorPoolConfigurationManager.java | 46 +
.../storage/snapshot/StorPoolSnapshotStrategy.java | 289 +++
.../snapshot/StorPoolVMSnapshotStrategy.java | 387 ++++
.../storage-volume-storpool/module.properties | 18 +
.../spring-storage-volume-storpool-context.xml | 38 +
.../com/cloud/storage/VolumeApiServiceImpl.java | 15 +
.../com/cloud/tags/TaggedResourceManagerImpl.java | 32 +-
.../plugins/storpool/MigrateVolumeToStorPool.py | 439 ++++
.../plugins/storpool/TestStorPoolVolumes.py | 2153 ++++++++++++++++++++
.../plugins/storpool/TestTagsOnStorPool.py | 576 ++++++
.../plugins/storpool/TestVmSnapshots.py | 369 ++++
test/integration/plugins/storpool/sp_util.py | 748 +++++++
60 files changed, 11057 insertions(+), 18 deletions(-)
diff --git a/client/pom.xml b/client/pom.xml
index b9cbae91b7..aae0006174 100644
--- a/client/pom.xml
+++ b/client/pom.xml
@@ -97,6 +97,11 @@
<artifactId>cloud-plugin-storage-volume-linstor</artifactId>
<version>${project.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.apache.cloudstack</groupId>
+ <artifactId>cloud-plugin-storage-volume-storpool</artifactId>
+ <version>${project.version}</version>
+ </dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-server</artifactId>
@@ -755,6 +760,12 @@
<artifactId>bcpkix-jdk15on</artifactId>
<overWrite>false</overWrite>
<outputDirectory>${project.build.directory}/lib</outputDirectory>
+ </artifactItem>
+ <artifactItem>
+ <groupId>org.apache.cloudstack</groupId>
+ <artifactId>cloud-plugin-storage-volume-storpool</artifactId>
+ <overWrite>false</overWrite>
+ <outputDirectory>${project.build.directory}/lib</outputDirectory>
</artifactItem>
<artifactItem>
<groupId>org.bouncycastle</groupId>
@@ -799,6 +810,7 @@
<exclude>org.bouncycastle:bcpkix-jdk15on</exclude>
<exclude>org.bouncycastle:bctls-jdk15on</exclude>
<exclude>mysql:mysql-connector-java</exclude>
+ <exclude>org.apache.cloudstack:cloud-plugin-storage-volume-storpool</exclude>
</excludes>
</artifactSet>
<transformers>
diff --git a/core/src/main/java/org/apache/cloudstack/storage/command/CopyCmdAnswer.java b/core/src/main/java/org/apache/cloudstack/storage/command/CopyCmdAnswer.java
index 9b581decd6..fef272bfbd 100644
--- a/core/src/main/java/org/apache/cloudstack/storage/command/CopyCmdAnswer.java
+++ b/core/src/main/java/org/apache/cloudstack/storage/command/CopyCmdAnswer.java
@@ -20,6 +20,7 @@
package org.apache.cloudstack.storage.command;
import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.Command;
import com.cloud.agent.api.to.DataTO;
public class CopyCmdAnswer extends Answer {
@@ -37,4 +38,8 @@ public class CopyCmdAnswer extends Answer {
public CopyCmdAnswer(String errMsg) {
super(null, false, errMsg);
}
+
+ public CopyCmdAnswer(Command cmd, Exception e) {
+ super(cmd, e);
+ }
}
diff --git a/debian/rules b/debian/rules
index ed1559a46d..b590e5d40f 100755
--- a/debian/rules
+++ b/debian/rules
@@ -41,6 +41,7 @@ override_dh_auto_install:
mkdir $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib
install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-$(VERSION).jar $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/
install -D plugins/hypervisors/kvm/target/dependencies/* $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/
+ install -D plugins/storage/volume/storpool/target/cloud-plugin-storage-volume-storpool-$(VERSION).jar $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/
install -d -m0755 debian/$(PACKAGE)-agent/lib/systemd/system
install -m0644 packaging/systemd/$(PACKAGE)-agent.service debian/$(PACKAGE)-agent/lib/systemd/system/$(PACKAGE)-agent.service
diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java
index 622dda31f9..540d4f6673 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java
@@ -103,4 +103,33 @@ public interface PrimaryDataStoreDriver extends DataStoreDriver {
* returns true if the host can access the storage pool
*/
boolean canHostAccessStoragePool(Host host, StoragePool pool);
+
+ /**
+ * Used by storage pools which want to keep VMs' information
+ * @return true if additional VM info is needed (intended for storage pools).
+ */
+ boolean isVmInfoNeeded();
+
+ /**
+ * Provides additional info for a VM (intended for storage pools).
+ * E.g. the storage pool may want to keep/delete information if the volume is attached/detached to any VM.
+ * @param vmId The ID of the virtual machine
+ * @param volumeId the ID of the volume
+ */
+ void provideVmInfo(long vmId, long volumeId);
+
+ /**
+ * Returns true if the storage have to know about the VM's tags (intended for storage pools).
+ * @param tagKey The name of the tag
+ * @return true if the storage have to know about the VM's tags
+ */
+ boolean isVmTagsNeeded(String tagKey);
+
+ /**
+ * Provide VM's tags to storage (intended for storage pools).
+ * @param vmId The ID of the virtual machine
+ * @param volumeId The ID of the volume
+ * @param tagValue The value of the VM's tag
+ */
+ void provideVmTags(long vmId, long volumeId, String tagValue);
}
diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/StorageStrategyFactory.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/StorageStrategyFactory.java
index eac9a31327..e309b9842b 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/StorageStrategyFactory.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/StorageStrategyFactory.java
@@ -39,9 +39,9 @@ public interface StorageStrategyFactory {
/**
* Used only for KVM hypervisors when allocating a VM snapshot
* @param vmId the ID of the virtual machine
+ * @param rootPoolId volume pool ID
* @param snapshotMemory for VM snapshots with memory
* @return VMSnapshotStrategy
*/
VMSnapshotStrategy getVmSnapshotStrategy(Long vmId, Long rootPoolId, boolean snapshotMemory);
-
}
diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VMSnapshotStrategy.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VMSnapshotStrategy.java
index 223229b5ee..6372aa3bd9 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VMSnapshotStrategy.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VMSnapshotStrategy.java
@@ -30,7 +30,7 @@ public interface VMSnapshotStrategy {
StrategyPriority canHandle(VMSnapshot vmSnapshot);
/**
- * Used only for KVM hypervisors when allocating a VM snapshot
+ * Verifies if the strategy can handle the VM snapshot. This method is used only for KVM hypervisors when allocating a VM snapshot.
* @param vmId the ID of the virtual machine
* @param snapshotMemory for VM snapshots with memory
* @return StrategyPriority
diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/DefaultVMSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/DefaultVMSnapshotStrategy.java
index c647b11d22..90f6ccd1f9 100644
--- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/DefaultVMSnapshotStrategy.java
+++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/DefaultVMSnapshotStrategy.java
@@ -52,8 +52,8 @@ import com.cloud.host.dao.HostDao;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.GuestOSHypervisorVO;
import com.cloud.storage.GuestOSVO;
-import com.cloud.storage.VolumeVO;
import com.cloud.storage.Storage.ImageFormat;
+import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.GuestOSDao;
import com.cloud.storage.dao.GuestOSHypervisorDao;
diff --git a/packaging/centos7/cloud.spec b/packaging/centos7/cloud.spec
index d8ecf662c6..4288463d2e 100644
--- a/packaging/centos7/cloud.spec
+++ b/packaging/centos7/cloud.spec
@@ -351,6 +351,7 @@ install -D agent/target/transformed/cloudstack-agent-profile.sh ${RPM_BUILD_ROOT
install -D agent/target/transformed/cloudstack-agent.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-agent
install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-%{_maventag}.jar ${RPM_BUILD_ROOT}%{_datadir}/%name-agent/lib/cloud-plugin-hypervisor-kvm-%{_maventag}.jar
cp plugins/hypervisors/kvm/target/dependencies/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib
+cp plugins/storage/volume/storpool/target/*.jar ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib
# Usage server
mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/usage
diff --git a/packaging/centos8/cloud.spec b/packaging/centos8/cloud.spec
index e86a98781b..099ad59b3d 100644
--- a/packaging/centos8/cloud.spec
+++ b/packaging/centos8/cloud.spec
@@ -344,6 +344,7 @@ install -D agent/target/transformed/cloudstack-agent-profile.sh ${RPM_BUILD_ROOT
install -D agent/target/transformed/cloudstack-agent.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-agent
install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-%{_maventag}.jar ${RPM_BUILD_ROOT}%{_datadir}/%name-agent/lib/cloud-plugin-hypervisor-kvm-%{_maventag}.jar
cp plugins/hypervisors/kvm/target/dependencies/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib
+cp plugins/storage/volume/storpool/target/*.jar ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib
# Usage server
mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/usage
diff --git a/packaging/suse15/cloud.spec b/packaging/suse15/cloud.spec
index d6be798e09..bf6ef66710 100644
--- a/packaging/suse15/cloud.spec
+++ b/packaging/suse15/cloud.spec
@@ -346,6 +346,7 @@ install -D agent/target/transformed/cloudstack-agent-profile.sh ${RPM_BUILD_ROOT
install -D agent/target/transformed/cloudstack-agent.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-agent
install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-%{_maventag}.jar ${RPM_BUILD_ROOT}%{_datadir}/%name-agent/lib/cloud-plugin-hypervisor-kvm-%{_maventag}.jar
cp plugins/hypervisors/kvm/target/dependencies/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib
+cp plugins/storage/volume/storpool/target/*.jar ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib
# Usage server
mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/usage
diff --git a/plugins/pom.xml b/plugins/pom.xml
index 04c94a22f9..961884faac 100755
--- a/plugins/pom.xml
+++ b/plugins/pom.xml
@@ -123,6 +123,8 @@
<module>storage/volume/solidfire</module>
<module>storage/volume/scaleio</module>
<module>storage/volume/linstor</module>
+ <module>storage/volume/storpool</module>
+
<module>storage-allocators/random</module>
@@ -211,4 +213,4 @@
</modules>
</profile>
</profiles>
-</project>
\ No newline at end of file
+</project>
diff --git a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java
index 91e73aa442..0002dbe1c1 100644
--- a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java
+++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java
@@ -1860,4 +1860,22 @@ public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
public boolean canHostAccessStoragePool(Host host, StoragePool pool) {
return true;
}
+
+ @Override
+ public boolean isVmInfoNeeded() {
+ return false;
+ }
+
+ @Override
+ public void provideVmInfo(long vmId, long volumeId) {
+ }
+
+ @Override
+ public boolean isVmTagsNeeded(String tagKey) {
+ return false;
+ }
+
+ @Override
+ public void provideVmTags(long vmId, long volumeId, String tagValue) {
+ }
}
diff --git a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java
index 56d2c3b7b2..7c8b5fb22c 100644
--- a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java
+++ b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java
@@ -491,4 +491,22 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri
public boolean canHostAccessStoragePool(Host host, StoragePool pool) {
return true;
}
+
+ @Override
+ public boolean isVmInfoNeeded() {
+ return false;
+ }
+
+ @Override
+ public void provideVmInfo(long vmId, long volumeId) {
+ }
+
+ @Override
+ public boolean isVmTagsNeeded(String tagKey) {
+ return false;
+ }
+
+ @Override
+ public void provideVmTags(long vmId, long volumeId, String tagValue) {
+ }
}
diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java
index 320860380c..2a486a1103 100644
--- a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java
+++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java
@@ -765,4 +765,22 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
public boolean canHostAccessStoragePool(Host host, StoragePool pool) {
return true;
}
+
+ @Override
+ public boolean isVmInfoNeeded() {
+ return false;
+ }
+
+ @Override
+ public void provideVmInfo(long vmId, long volumeId) {
+ }
+
+ @Override
+ public boolean isVmTagsNeeded(String tagKey) {
+ return false;
+ }
+
+ @Override
+ public void provideVmTags(long vmId, long volumeId, String tagValue) {
+ }
}
diff --git a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/driver/NexentaPrimaryDataStoreDriver.java b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/driver/NexentaPrimaryDataStoreDriver.java
index 92f8938060..84051888c5 100644
--- a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/driver/NexentaPrimaryDataStoreDriver.java
+++ b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/driver/NexentaPrimaryDataStoreDriver.java
@@ -239,4 +239,22 @@ public class NexentaPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
public boolean canHostAccessStoragePool(Host host, StoragePool pool) {
return true;
}
+
+ @Override
+ public boolean isVmInfoNeeded() {
+ return false;
+ }
+
+ @Override
+ public void provideVmInfo(long vmId, long volumeId) {
+ }
+
+ @Override
+ public boolean isVmTagsNeeded(String tagKey) {
+ return false;
+ }
+
+ @Override
+ public void provideVmTags(long vmId, long volumeId, String tagValue) {
+ }
}
diff --git a/plugins/storage/volume/sample/src/main/java/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java b/plugins/storage/volume/sample/src/main/java/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java
index a416277234..732786047c 100644
--- a/plugins/storage/volume/sample/src/main/java/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java
+++ b/plugins/storage/volume/sample/src/main/java/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java
@@ -265,4 +265,22 @@ public class SamplePrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
public boolean canHostAccessStoragePool(Host host, StoragePool pool) {
return true;
}
+
+ @Override
+ public boolean isVmInfoNeeded() {
+ return false;
+ }
+
+ @Override
+ public void provideVmInfo(long vmId, long volumeId) {
+ }
+
+ @Override
+ public boolean isVmTagsNeeded(String tagKey) {
+ return false;
+ }
+
+ @Override
+ public void provideVmTags(long vmId, long volumeId, String tagValue) {
+ }
}
diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java
index 8b8c7e7e3e..3bb6e0d7e6 100644
--- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java
+++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java
@@ -947,4 +947,22 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
String msg = "SDC not connected on the host: " + host.getId() + ", reconnect the SDC to MDM";
alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC disconnected on host: " + host.getUuid(), msg);
}
+
+ @Override
+ public boolean isVmInfoNeeded() {
+ return false;
+ }
+
+ @Override
+ public void provideVmInfo(long vmId, long volumeId) {
+ }
+
+ @Override
+ public boolean isVmTagsNeeded(String tagKey) {
+ return false;
+ }
+
+ @Override
+ public void provideVmTags(long vmId, long volumeId, String tagValue) {
+ }
}
diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java
index 6bad1efbb7..702bdc3669 100644
--- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java
+++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java
@@ -1619,4 +1619,22 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
public boolean canHostAccessStoragePool(Host host, StoragePool pool) {
return true;
}
+
+ @Override
+ public boolean isVmInfoNeeded() {
+ return false;
+ }
+
+ @Override
+ public void provideVmInfo(long vmId, long volumeId) {
+ }
+
+ @Override
+ public boolean isVmTagsNeeded(String tagKey) {
+ return false;
+ }
+
+ @Override
+ public void provideVmTags(long vmId, long volumeId, String tagValue) {
+ }
}
diff --git a/plugins/storage/volume/storpool/README.md b/plugins/storage/volume/storpool/README.md
new file mode 100644
index 0000000000..7f710732e0
--- /dev/null
+++ b/plugins/storage/volume/storpool/README.md
@@ -0,0 +1,344 @@
+# StorPool CloudStack Integration
+
+## CloudStack Overview
+
+### Primary and Secondary storage
+
+Primary storage is associated with a cluster or zone, and it stores the virtual disks for all the VMs running on hosts in that cluster/zone.
+
+Secondary storage stores the following:
+* Templates — OS images that can be used to boot VMs and can include additional configuration information, such as installed applications
+* ISO images — disc images containing data or bootable media for operating systems
+* Disk volume snapshots — saved copies of VM data which can be used for data recovery or to create new templates
+
+
+### ROOT and DATA volumes
+
+ROOT volumes correspond to the boot disk of a VM. They are created automatically by CloudStack during VM creation.
+ROOT volumes are created based on a system disk offering, corresponding to the service offering the user VM
+is based on. We may change the ROOT volume disk offering but only to another system created disk offering.
+
+DATA volumes correspond to additional disks. These can be created by users and then attached/detached to VMs.
+DATA volumes are created based on a user-defined disk offering.
+
+
+## Plugin Organization
+
+The StorPool plugin consists of two parts:
+
+### KVM hypervisor plugin patch
+
+Source directory: ./apache-cloudstack-4.17-src/plugins/hypervisors/kvm
+
+### StorPool primary storage plugin
+
+Source directory: ./apache-cloudstack-4.17.0-src/plugins/storage/volume
+
+There is one plugin for both the CloudStack management and agents, in the hope that having all the source
+in one place will ease development and maintenance. The plugin itself though is separated into two mainly
+independent parts:
+
+ * ./src/com/... directory tree: agent related classes and commands sent from management to the agent
+ * ./src/org/... directory tree: management related classes
+
+The plugin is intended to be self contained and non-intrusive, thus ideally deploying it would consist of only
+dropping the jar file into the appropriate places. This is the reason why all StorPool related communication
+(ex. data copying, volume resize) is done with StorPool specific commands even when there is a CloudStack command
+that does pretty much the same.
+
+Note that for the present the StorPool plugin may only be used for a single primary storage cluster; support for
+multiple clusters is planned.
+
+
+## Build, Install, Setup
+
+### Build
+
+Go to the source directory and run:
+
+ mvn -Pdeveloper -DskipTests install
+
+The resulting jar file is located in the target/ subdirectory.
+
+Note: checkstyle errors: before compilation a code style check is performed; if this fails compilation is aborted.
+In short: no trailing whitespace, indent using 4 spaces, not tabs, comment-out or remove unused imports.
+
+Note: Need to build both the KVM plugin and the StorPool plugin proper.
+
+### Install
+
+#### StorPool primary storage plugin
+
+For each CloudStack management host:
+
+```bash
+scp ./target/cloud-plugin-storage-volume-storpool-{version}.jar {MGMT_HOST}:/usr/share/cloudstack-management/lib/
+```
+
+For each CloudStack agent host:
+
+```bash
+scp ./target/cloud-plugin-storage-volume-storpool-{version}.jar {AGENT_HOST}:/usr/share/cloudstack-agent/plugins/
+```
+
+Note: CloudStack managements/agents services must be restarted after adding the plugin to the respective directories
+
+Note: Agents should have access to the StorPool management API, since attach and detach operations happen on the agent.
+This is a CloudStack design issue, can't do much about it.
+
+### Setup
+
+#### Setting up StorPool
+
+Perform the StorPool installation following the StorPool Installation Guide.
+
+Create a template to be used by CloudStack. Must set *placeHead*, *placeAll*, *placeTail* and *replication*.
+No need to set default volume size because it is determined by the CloudStack disks and services offering.
+
+#### Setting up a StorPool PRIMARY storage pool in CloudStack
+
+From the WEB UI, go to Infrastructure -> Primary Storage -> Add Primary Storage
+
+Scope: select Zone-Wide
+Hypervisor: select KVM
+Zone: pick appropriate zone.
+Name: user specified name
+
+Protocol: select *SharedMountPoint*
+Path: enter */dev/storpool* (required argument, actually not needed in practice).
+
+Provider: select *StorPool*
+Managed: leave unchecked (currently ignored)
+Capacity Bytes: used for accounting purposes only. May be more or less than the actual StorPool template capacity.
+Capacity IOPS: currently not used (may use for max IOPS limitations on volumes from this pool).
+URL: enter SP_API_HTTP=address:port;SP_AUTH_TOKEN=token;SP_TEMPLATE=template_name. At present one template can be used for at most one Storage Pool.
+
+SP_API_HTTP - address of StorPool Api
+SP_AUTH_TOKEN - StorPool's token
+SP_TEMPLATE - name of StorPool's template
+
+Storage Tags: If left blank, the StorPool storage plugin will use the pool name to create a corresponding storage tag.
+This storage tag may be used later, when defining service or disk offerings.
+
+
+## Plugin Functionality
+
+<table cellpadding="5">
+<tr>
+ <th>Plugin Action</th>
+ <th>CloudStack Action</th>
+ <th>management/agent</th>
+ <th>impl. details</th>
+</tr>
+<tr>
+ <td>Create ROOT volume from ISO</td>
+ <td>create VM from ISO</td>
+ <td>management</td>
+ <td>createVolumeAsync</td>
+</tr>
+<tr>
+ <td>Create ROOT volume from Template</td>
+ <td>create VM from Template</td>
+ <td>management + agent</td>
+ <td>copyAsync (T => T, T => V)</td>
+</tr>
+<tr>
+ <td>Create DATA volume</td>
+ <td>create Volume</td>
+ <td>management</td>
+ <td>createVolumeAsync</td>
+</tr>
+<tr>
+ <td>Attach ROOT/DATA volume</td>
+ <td>start VM (+attach/detach Volume)</td>
+ <td>agent</td>
+ <td>connectPhysicalDisk</td>
+</tr>
+<tr>
+ <td>Detach ROOT/DATA volume</td>
+ <td>stop VM</td>
+ <td>agent</td>
+ <td>disconnectPhysicalDiskByPath</td>
+</tr>
+<tr>
+ <td> </td>
+ <td>Migrate VM</td>
+ <td>agent</td>
+ <td>attach + detach</td>
+</tr>
+<tr>
+ <td>Delete ROOT volume</td>
+ <td>destroy VM (expunge)</td>
+ <td>management</td>
+ <td>deleteAsync</td>
+</tr>
+<tr>
+ <td>Delete DATA volume</td>
+ <td>delete Volume (detached)</td>
+ <td>management</td>
+ <td>deleteAsync</td>
+</tr>
+<tr>
+ <td>Create ROOT/DATA volume snapshot</td>
+ <td>snapshot volume</td>
+ <td>management + agent</td>
+ <td>takeSnapshot + copyAsync (S => S)</td>
+</tr>
+<tr>
+ <td>Create volume from snapshot</td>
+ <td>create volume from snapshot</td>
+ <td>management + agent(?)</td>
+ <td>copyAsync (S => V)</td>
+</tr>
+<tr>
+ <td>Create TEMPLATE from ROOT volume</td>
+ <td>create template from volume</td>
+ <td>management + agent</td>
+ <td>copyAsync (V => T)</td>
+</tr>
+<tr>
+ <td>Create TEMPLATE from snapshot</td>
+ <td>create template from snapshot</td>
+ <td>SECONDARY STORAGE</td>
+ <td> </td>
+</tr>
+<tr>
+ <td>Download volume</td>
+ <td>download volume</td>
+ <td>management + agent</td>
+ <td>copyAsync (V => V)</td>
+</tr>
+<tr>
+ <td>Revert ROOT/DATA volume to snapshot</td>
+ <td>revert to snapshot</td>
+ <td>management</td>
+ <td>revertSnapshot</td>
+</tr>
+<tr>
+ <td>(Live) resize ROOT/DATA volume</td>
+ <td>resize volume</td>
+ <td>management + agent</td>
+ <td>resize + StorpoolResizeCmd</td>
+</tr>
+<tr>
+ <td>Delete SNAPSHOT (ROOT/DATA)</td>
+ <td>delete snapshot</td>
+ <td>management</td>
+ <td>StorpoolSnapshotStrategy</td>
+</tr>
+<tr>
+ <td>Delete TEMPLATE</td>
+ <td>delete template</td>
+ <td>agent</td>
+ <td>deletePhysicalDisk</td>
+</tr>
+<tr>
+ <td>migrate VM/volume</td>
+ <td>migrate VM/volume to another storage</td>
+ <td>management/management + agent</td>
+ <td>copyAsync (V => V)</td>
+</tr>
+<tr>
+ <td>VM snapshot</td>
+ <td>group snapshot of VM's disks</td>
+ <td>management</td>
+ <td>StorpoolVMSnapshotStrategy takeVMSnapshot</td>
+</tr>
+<tr>
+ <td>revert VM snapshot</td>
+ <td>revert group snapshot of VM's disks</td>
+ <td>management</td>
+ <td>StorpoolVMSnapshotStrategy revertVMSnapshot</td>
+</tr>
+<tr>
+ <td>delete VM snapshot</td>
+ <td>delete group snapshot of VM's disks</td>
+ <td>management</td>
+ <td>StorpoolVMSnapshotStrategy deleteVMSnapshot</td>
+</tr>
+<tr>
+ <td>VM vc_policy tag</td>
+ <td>vc_policy tag for all disks attached to VM</td>
+ <td>management</td>
+ <td>StorPoolCreateTagsCmd</td>
+</tr>
+<tr>
+ <td>delete VM vc_policy tag</td>
+ <td>remove vc_policy tag for all disks attached to VM</td>
+ <td>management</td>
+ <td>StorPoolDeleteTagsCmd</td>
+</tr>
+</table>
+
+>NOTE: When using multicluster for each CloudStack cluster in its settings set the value of StorPool's SP_CLUSTER_ID in "sp.cluster.id".
+>
+
+>NOTE: Secondary storage could be bypassed with Configuration setting "sp.bypass.secondary.storage" set to true. </br>
+In this case only snapshots won't be downloaded to secondary storage.
+>
+
+### Creating template from snapshot
+
+#### If bypass option is enabled
+
+The snapshot exists only on PRIMARY (StorPool) storage. From this snapshot a template will be created on SECONDARY.
+
+#### If bypass option is disabled
+
+TODO: Maybe we should not use CloudStack functionality, and to use that one when bypass option is enabled
+
+This is independent of StorPool as snapshots exist on secondary.
+
+### Creating ROOT volume from templates
+
+When creating the first volume based on the given template, if a snapshot of the template does not exist on StorPool, it will first be downloaded (cached) to PRIMARY storage.
+This is mapped to a StorPool snapshot, so creating successive volumes from the same template does not incur additional
+copying of data to PRIMARY storage.
+
+This cached snapshot is garbage collected when the original template is deleted from CloudStack. This cleanup is done
+by a background task in CloudStack.
+
+### Creating a ROOT volume from an ISO image
+
+We just need to create the volume. The ISO installation is handled by CloudStack.
+
+### Creating a DATA volume
+
+DATA volumes are created by CloudStack the first time they are attached to a VM.
+
+### Creating volume from snapshot
+
+We use the fact that the snapshot already exists on PRIMARY, so no data is copied. We will copy snapshots from SECONDARY to StorPool PRIMARY,
+when there is no corresponding StorPool snapshot.
+
+### Resizing volumes
+
+We need to send a resize cmd to agent, where the VM the volume is attached to is running, so that
+the resize is visible by the VM.
+
+### Creating snapshots
+
+The snapshot is first created on the PRIMARY storage (i.e. StorPool), then backed-up on SECONDARY storage
+(tested with NFS secondary) if bypass option is not enabled. The original StorPool snapshot is kept, so that creating volumes from the snapshot does not need to copy
+the data again to PRIMARY. When the snapshot is deleted from CloudStack so is the corresponding StorPool snapshot.
+
+Currently snapshots are taken in RAW format.
+
+### Reverting volume to snapshot
+
+It's handled by StorPool
+
+### Migrating volumes to other Storage pools
+
+Tested with storage pools on NFS only.
+
+### Virtual Machine Snapshot/Group Snapshot
+
+StorPool supports consistent snapshots of volumes attached to a virtual machine.
+
+### BW/IOPS limitations
+
+Max IOPS are kept in StorPool's volumes with the help of custom service offerings, by adding IOPS limits to the
+corresponding system disk offering.
+
+CloudStack has no way to specify max BW. It is unclear whether users need to specify max BW, or whether limiting max IOPS alone is sufficient.
diff --git a/plugins/storage/volume/storpool/pom.xml b/plugins/storage/volume/storpool/pom.xml
new file mode 100644
index 0000000000..af8d521133
--- /dev/null
+++ b/plugins/storage/volume/storpool/pom.xml
@@ -0,0 +1,68 @@
+<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor
+ license agreements. See the NOTICE file distributed with this work for additional
+ information regarding copyright ownership. The ASF licenses this file to
+ you under the Apache License, Version 2.0 (the "License"); you may not use
+ this file except in compliance with the License. You may obtain a copy of
+ the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
+ by applicable law or agreed to in writing, software distributed under the
+ License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+ OF ANY KIND, either express or implied. See the License for the specific
+ language governing permissions and limitations under the License. -->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <artifactId>cloud-plugin-storage-volume-storpool</artifactId>
+ <name>Apache CloudStack Plugin - Storage Volume StorPool provider</name>
+ <parent>
+ <groupId>org.apache.cloudstack</groupId>
+ <artifactId>cloudstack-plugins</artifactId>
+ <version>4.17.0.0-SNAPSHOT</version>
+ <relativePath>../../../pom.xml</relativePath>
+ </parent>
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.cloudstack</groupId>
+ <artifactId>cloud-engine-storage-volume</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.cloudstack</groupId>
+ <artifactId>cloud-engine-storage-snapshot</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.cloudstack</groupId>
+ <artifactId>cloud-plugin-hypervisor-kvm</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.cloudstack</groupId>
+ <artifactId>cloud-engine-orchestration</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.commons</groupId>
+ <artifactId>commons-collections4</artifactId>
+ <version>4.4</version>
+ </dependency>
+ </dependencies>
+ <build>
+ <plugins>
+ <plugin>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <skipTests>true</skipTests>
+ </configuration>
+ <executions>
+ <execution>
+ <phase>integration-test</phase>
+ <goals>
+ <goal>test</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolBackupSnapshotCommand.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolBackupSnapshotCommand.java
new file mode 100644
index 0000000000..afdd2c270a
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolBackupSnapshotCommand.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.cloud.agent.api.storage;
+
+import org.apache.cloudstack.storage.to.SnapshotObjectTO;
+
+import com.cloud.agent.api.to.DataTO;
+
+/**
+ * Copy command sent to the KVM agent to back up a StorPool snapshot to
+ * secondary storage; both the source and destination data objects are
+ * snapshots.
+ */
+public class StorPoolBackupSnapshotCommand extends StorPoolCopyCommand<SnapshotObjectTO, SnapshotObjectTO> {
+ public StorPoolBackupSnapshotCommand(final DataTO srcTO, final DataTO dstTO, final int timeout, final boolean executeInSequence) {
+ super(srcTO, dstTO, timeout, executeInSequence);
+ }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolBackupTemplateFromSnapshotCommand.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolBackupTemplateFromSnapshotCommand.java
new file mode 100644
index 0000000000..d15357e970
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolBackupTemplateFromSnapshotCommand.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.cloud.agent.api.storage;
+
+import org.apache.cloudstack.storage.to.TemplateObjectTO;
+
+import com.cloud.agent.api.to.DataTO;
+
+/**
+ * Copy command sent to the KVM agent to create a template on secondary
+ * storage from an existing StorPool object. The source is a generic DataTO
+ * (the agent-side wrapper accepts either a snapshot or a volume); the
+ * destination is always a template object.
+ */
+public class StorPoolBackupTemplateFromSnapshotCommand extends StorPoolCopyCommand<DataTO, TemplateObjectTO> {
+ public StorPoolBackupTemplateFromSnapshotCommand(final DataTO srcTO, final DataTO dstTO, final int timeout, final boolean executeInSequence) {
+ super(srcTO, dstTO, timeout, executeInSequence);
+ }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolCopyCommand.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolCopyCommand.java
new file mode 100644
index 0000000000..09fb1ca1dd
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolCopyCommand.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.cloud.agent.api.storage;
+
+import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
+
+import com.cloud.agent.api.to.DataTO;
+
+/**
+ * Base class for StorPool copy commands exchanged with the KVM agent.
+ * Carries a source and a destination data object plus a timeout and an
+ * execute-in-sequence flag.
+ *
+ * @param <S> concrete type of the source data object
+ * @param <D> concrete type of the destination data object
+ */
+public class StorPoolCopyCommand<S extends DataTO, D extends DataTO> extends StorageSubSystemCommand {
+ private S sourceTO;
+ private D destinationTO;
+ private boolean executeInSequence = false;
+
+ public StorPoolCopyCommand(final DataTO sourceTO, final DataTO destinationTO, final int timeout, final boolean executeInSequence) {
+ super();
+ // NOTE(review): unchecked casts — callers are expected to pass instances of S and D.
+ this.sourceTO = (S)sourceTO;
+ this.destinationTO = (D)destinationTO;
+ setWait(timeout);
+ this.executeInSequence = executeInSequence;
+ }
+
+ public S getSourceTO() {
+ return sourceTO;
+ }
+
+ public D getDestinationTO() {
+ return destinationTO;
+ }
+
+ // The inherited "wait" value is kept in seconds; agent-side callers need milliseconds.
+ public int getWaitInMillSeconds() {
+ return getWait() * 1000;
+ }
+
+ @Override
+ public boolean executeInSequence() {
+ return executeInSequence;
+ }
+
+ @Override
+ public void setExecuteInSequence(final boolean inSeq) {
+ executeInSequence = inSeq;
+ }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolCopyVolumeToSecondaryCommand.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolCopyVolumeToSecondaryCommand.java
new file mode 100644
index 0000000000..9bf8b2eefd
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolCopyVolumeToSecondaryCommand.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.cloud.agent.api.storage;
+
+import org.apache.cloudstack.storage.to.VolumeObjectTO;
+
+import com.cloud.agent.api.to.DataTO;
+
+/**
+ * Copy command sent to the KVM agent to copy a StorPool volume to secondary
+ * storage; both the source and destination data objects are volumes.
+ */
+public class StorPoolCopyVolumeToSecondaryCommand extends StorPoolCopyCommand<VolumeObjectTO, VolumeObjectTO> {
+ public StorPoolCopyVolumeToSecondaryCommand(final DataTO srcTO, final DataTO dstTO, final int timeout, final boolean executeInSequence) {
+ super(srcTO, dstTO, timeout, executeInSequence);
+ }
+}
diff --git a/core/src/main/java/org/apache/cloudstack/storage/command/CopyCmdAnswer.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolDownloadTemplateCommand.java
similarity index 60%
copy from core/src/main/java/org/apache/cloudstack/storage/command/CopyCmdAnswer.java
copy to plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolDownloadTemplateCommand.java
index 9b581decd6..6dc0db2071 100644
--- a/core/src/main/java/org/apache/cloudstack/storage/command/CopyCmdAnswer.java
+++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolDownloadTemplateCommand.java
@@ -17,24 +17,21 @@
// under the License.
//
-package org.apache.cloudstack.storage.command;
+package com.cloud.agent.api.storage;
-import com.cloud.agent.api.Answer;
import com.cloud.agent.api.to.DataTO;
-public class CopyCmdAnswer extends Answer {
- private DataTO newData;
-
- public CopyCmdAnswer(DataTO newData) {
- super(null);
- this.newData = newData;
+public class StorPoolDownloadTemplateCommand extends StorPoolCopyCommand<DataTO, DataTO> {
+ protected String objectType;
+ public StorPoolDownloadTemplateCommand(final DataTO srcTO, final DataTO dstTO, final int timeout, final boolean executeInSequence, String objectType) {
+ super(srcTO, dstTO, timeout, executeInSequence);
+ this.objectType = objectType;
}
- public DataTO getNewData() {
- return this.newData;
+ public String getObjectType() {
+ return objectType;
}
-
- public CopyCmdAnswer(String errMsg) {
- super(null, false, errMsg);
+ public void setObjectType(String objectType) {
+ this.objectType = objectType;
}
}
diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolDownloadVolumeCommand.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolDownloadVolumeCommand.java
new file mode 100644
index 0000000000..d670ff36fc
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolDownloadVolumeCommand.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.cloud.agent.api.storage;
+
+import org.apache.cloudstack.storage.to.VolumeObjectTO;
+
+import com.cloud.agent.api.to.DataTO;
+
+/**
+ * Copy command sent to the KVM agent to download a volume onto StorPool
+ * primary storage; both the source and destination data objects are volumes.
+ */
+public class StorPoolDownloadVolumeCommand extends StorPoolCopyCommand<VolumeObjectTO, VolumeObjectTO> {
+ public StorPoolDownloadVolumeCommand(final DataTO srcTO, final DataTO dstTO, final int timeout, final boolean executeInSequence) {
+ super(srcTO, dstTO, timeout, executeInSequence);
+ }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolMigrateWithVolumesCommand.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolMigrateWithVolumesCommand.java
new file mode 100644
index 0000000000..d002267a48
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolMigrateWithVolumesCommand.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.cloud.agent.api.storage;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import com.cloud.agent.api.MigrateCommand;
+import com.cloud.agent.api.to.VirtualMachineTO;
+
+/**
+ * Migration command for VMs whose disks live on StorPool-managed storage.
+ * Extends the generic MigrateCommand with per-disk migration details and
+ * fixes the managed-storage / non-shared-incremental flags for this plugin.
+ */
+public class StorPoolMigrateWithVolumesCommand extends MigrateCommand {
+ private List<MigrateDiskInfo> migrateDiskInfoList = new ArrayList<>();
+
+ public StorPoolMigrateWithVolumesCommand() {
+ super();
+ }
+
+ public StorPoolMigrateWithVolumesCommand(String vmName, String destIp, boolean isWindows, VirtualMachineTO vmTO,
+ boolean executeInSequence) {
+ super(vmName, destIp, isWindows, vmTO, executeInSequence);
+ }
+
+ public List<MigrateDiskInfo> getMigrateDiskInfoList() {
+ return migrateDiskInfoList;
+ }
+
+ public void setMigrateDiskInfoList(List<MigrateDiskInfo> migrateDiskInfoList) {
+ this.migrateDiskInfoList = migrateDiskInfoList;
+ }
+
+ // Storage is always managed for this command.
+ public boolean isMigrateStorageManaged() {
+ return true;
+ }
+
+ // Non-shared incremental migration is never used by this command.
+ public boolean isMigrateNonSharedInc() {
+ return false;
+ }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolModifyStoragePoolAnswer.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolModifyStoragePoolAnswer.java
new file mode 100644
index 0000000000..c27f3f8138
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolModifyStoragePoolAnswer.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.cloud.agent.api.storage;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.ModifyStoragePoolAnswer;
+import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.storage.template.TemplateProp;
+
+/**
+ * Answer to a {@link StorPoolModifyStoragePoolCommand}. On success it carries
+ * the pool's capacity/availability (as a StoragePoolInfo built from the
+ * command's pool), the template info found on the pool, and the StorPool
+ * cluster id reported by the agent.
+ */
+public class StorPoolModifyStoragePoolAnswer extends Answer{
+ private StoragePoolInfo poolInfo;
+ private Map<String, TemplateProp> templateInfo;
+ private String localDatastoreName;
+ private String poolType;
+ private List<ModifyStoragePoolAnswer> datastoreClusterChildren = new ArrayList<>();
+ // Identifier of the StorPool cluster, as supplied by the agent side.
+ private String clusterId;
+
+ public StorPoolModifyStoragePoolAnswer(StorPoolModifyStoragePoolCommand cmd, long capacityBytes, long availableBytes, Map<String, TemplateProp> tInfo, String clusterId) {
+ super(cmd);
+ result = true;
+ poolInfo = new StoragePoolInfo(null, cmd.getPool().getHost(), cmd.getPool().getPath(), cmd.getLocalPath(), cmd.getPool().getType(), capacityBytes, availableBytes);
+ templateInfo = tInfo;
+ this.clusterId = clusterId;
+ }
+
+ // Failure answer: only the error message is populated.
+ public StorPoolModifyStoragePoolAnswer(String errMsg) {
+ super(null, false, errMsg);
+ }
+
+ public void setPoolInfo(StoragePoolInfo poolInfo) {
+ this.poolInfo = poolInfo;
+ }
+
+ public StoragePoolInfo getPoolInfo() {
+ return poolInfo;
+ }
+
+ public void setTemplateInfo(Map<String, TemplateProp> templateInfo) {
+ this.templateInfo = templateInfo;
+ }
+
+ public Map<String, TemplateProp> getTemplateInfo() {
+ return templateInfo;
+ }
+
+ public void setLocalDatastoreName(String localDatastoreName) {
+ this.localDatastoreName = localDatastoreName;
+ }
+
+ public String getLocalDatastoreName() {
+ return localDatastoreName;
+ }
+
+ public String getPoolType() {
+ return poolType;
+ }
+
+ public void setPoolType(String poolType) {
+ this.poolType = poolType;
+ }
+
+ public List<ModifyStoragePoolAnswer> getDatastoreClusterChildren() {
+ return datastoreClusterChildren;
+ }
+
+ public void setDatastoreClusterChildren(List<ModifyStoragePoolAnswer> datastoreClusterChildren) {
+ this.datastoreClusterChildren = datastoreClusterChildren;
+ }
+
+ public String getClusterId() {
+ return clusterId;
+ }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolModifyStoragePoolCommand.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolModifyStoragePoolCommand.java
new file mode 100644
index 0000000000..a72a4620de
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolModifyStoragePoolCommand.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.cloud.agent.api.storage;
+
+import com.cloud.agent.api.ModifyStoragePoolCommand;
+import com.cloud.storage.StoragePool;
+
+/**
+ * ModifyStoragePoolCommand variant that additionally carries the name of a
+ * StorPool volume associated with the pool registration.
+ */
+public class StorPoolModifyStoragePoolCommand extends ModifyStoragePoolCommand {
+ private String volumeName;
+
+ public StorPoolModifyStoragePoolCommand(boolean add, StoragePool pool, String volumeName) {
+ super(add, pool);
+ this.volumeName = volumeName;
+ }
+
+ public String getVolumeName() {
+ return volumeName;
+ }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolResizeVolumeCommand.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolResizeVolumeCommand.java
new file mode 100644
index 0000000000..10b40bfeb0
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolResizeVolumeCommand.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.cloud.agent.api.storage;
+
+import com.cloud.agent.api.to.StorageFilerTO;
+
+
+/**
+ * ResizeVolumeCommand variant that also records whether the volume is
+ * currently attached to a VM, so the agent can make the new size visible to
+ * the running guest.
+ */
+public class StorPoolResizeVolumeCommand extends ResizeVolumeCommand {
+ // True when the volume is attached to a (running) VM at resize time.
+ protected boolean isAttached;
+ protected StorPoolResizeVolumeCommand() {
+ super();
+ }
+
+ public StorPoolResizeVolumeCommand(String path, StorageFilerTO pool, Long currentSize, Long newSize, boolean shrinkOk, String vmInstance, boolean isAttached) {
+ super(path, pool, currentSize, newSize, shrinkOk, vmInstance);
+ this.isAttached = isAttached;
+ }
+
+ public boolean isAttached() {
+ return isAttached;
+ }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupSnapshotCommandWrapper.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupSnapshotCommandWrapper.java
new file mode 100644
index 0000000000..f83a4292e2
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupSnapshotCommandWrapper.java
@@ -0,0 +1,109 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.hypervisor.kvm.resource.wrapper;
+
+import static com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor.SP_LOG;
+
+import java.io.File;
+
+import org.apache.cloudstack.storage.command.CopyCmdAnswer;
+import org.apache.cloudstack.storage.to.SnapshotObjectTO;
+import org.apache.cloudstack.utils.qemu.QemuImg;
+import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
+import org.apache.cloudstack.utils.qemu.QemuImgFile;
+import org.apache.commons.io.FileUtils;
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.api.storage.StorPoolBackupSnapshotCommand;
+import com.cloud.agent.api.to.DataStoreTO;
+import com.cloud.agent.api.to.NfsTO;
+import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
+import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
+import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
+import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
+import com.cloud.resource.CommandWrapper;
+import com.cloud.resource.ResourceWrapper;
+
+/**
+ * Agent-side handler for {@link StorPoolBackupSnapshotCommand}: attaches the
+ * StorPool snapshot as a local block device, converts it with qemu-img from
+ * RAW to QCOW2 onto NFS secondary storage, and returns the resulting path and
+ * physical size in a CopyCmdAnswer. Only NFS secondary storage is supported.
+ */
+@ResourceWrapper(handles = StorPoolBackupSnapshotCommand.class)
+public final class StorPoolBackupSnapshotCommandWrapper extends CommandWrapper<StorPoolBackupSnapshotCommand, CopyCmdAnswer, LibvirtComputingResource> {
+
+ private static final Logger s_logger = Logger.getLogger(StorPoolBackupSnapshotCommandWrapper.class);
+
+ @Override
+ public CopyCmdAnswer execute(final StorPoolBackupSnapshotCommand cmd, final LibvirtComputingResource libvirtComputingResource) {
+ String srcPath = null;
+ KVMStoragePool secondaryPool = null;
+
+ try {
+ final SnapshotObjectTO src = cmd.getSourceTO();
+ final SnapshotObjectTO dst = cmd.getDestinationTO();
+ final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
+
+ SP_LOG("StorpoolBackupSnapshotCommandWrapper.execute: src=" + src.getPath() + "dst=" + dst.getPath());
+ // Attach the StorPool snapshot so it is readable as a local block device.
+ StorPoolStorageAdaptor.attachOrDetachVolume("attach", "snapshot", src.getPath());
+ srcPath = src.getPath();
+
+ final QemuImgFile srcFile = new QemuImgFile(srcPath, PhysicalDiskFormat.RAW);
+
+ final DataStoreTO dstDataStore = dst.getDataStore();
+ if (!(dstDataStore instanceof NfsTO)) {
+ return new CopyCmdAnswer("Backup Storpool snapshot: Only NFS secondary supported at present!");
+ }
+
+ secondaryPool = storagePoolMgr.getStoragePoolByURI(dstDataStore.getUrl());
+
+ final String dstDir = secondaryPool.getLocalPath() + File.separator + dst.getPath();
+ FileUtils.forceMkdir(new File(dstDir));
+
+ final String dstPath = dstDir + File.separator + dst.getName();
+ final QemuImgFile dstFile = new QemuImgFile(dstPath, PhysicalDiskFormat.QCOW2);
+
+ // Convert the RAW snapshot data into a QCOW2 file on the secondary mount.
+ final QemuImg qemu = new QemuImg(cmd.getWaitInMillSeconds());
+ qemu.convert(srcFile, dstFile);
+
+ SP_LOG("StorpoolBackupSnapshotCommandWrapper srcFileFormat=%s, dstFileFormat=%s", srcFile.getFormat(), dstFile.getFormat());
+ final File snapFile = new File(dstPath);
+ final long size = snapFile.exists() ? snapFile.length() : 0;
+
+ final SnapshotObjectTO snapshot = new SnapshotObjectTO();
+ snapshot.setPath(dst.getPath() + File.separator + dst.getName());
+ snapshot.setPhysicalSize(size);
+
+ return new CopyCmdAnswer(snapshot);
+ } catch (final Exception e) {
+ final String error = String.format("Failed to backup snapshot with id [%s] with a pool %s, due to %s", cmd.getSourceTO().getId(), cmd.getSourceTO().getDataStore().getUuid(), e.getMessage());
+ SP_LOG(error);
+ s_logger.debug(error);
+ return new CopyCmdAnswer(cmd, e);
+ } finally {
+ // Always detach the snapshot and release the secondary storage pool.
+ if (srcPath != null) {
+ StorPoolStorageAdaptor.attachOrDetachVolume("detach", "snapshot", srcPath);
+ }
+
+ if (secondaryPool != null) {
+ try {
+ secondaryPool.delete();
+ } catch (final Exception e) {
+ s_logger.debug("Failed to delete secondary storage", e);
+ }
+ }
+ }
+ }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupTemplateFromSnapshotCommandWrapper.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupTemplateFromSnapshotCommandWrapper.java
new file mode 100644
index 0000000000..518cbb8d5e
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupTemplateFromSnapshotCommandWrapper.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.cloud.hypervisor.kvm.resource.wrapper;
+
+import static com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor.SP_LOG;
+
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileWriter;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.cloudstack.storage.command.CopyCmdAnswer;
+import org.apache.cloudstack.storage.to.SnapshotObjectTO;
+import org.apache.cloudstack.storage.to.TemplateObjectTO;
+import org.apache.cloudstack.storage.to.VolumeObjectTO;
+import org.apache.cloudstack.utils.qemu.QemuImg;
+import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
+import org.apache.cloudstack.utils.qemu.QemuImgFile;
+import org.apache.commons.io.FileUtils;
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.api.storage.StorPoolBackupTemplateFromSnapshotCommand;
+import com.cloud.agent.api.to.DataStoreTO;
+import com.cloud.agent.api.to.DataTO;
+import com.cloud.agent.api.to.NfsTO;
+import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
+import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
+import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
+import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
+import com.cloud.resource.CommandWrapper;
+import com.cloud.resource.ResourceWrapper;
+import com.cloud.storage.Storage.ImageFormat;
+import com.cloud.storage.StorageLayer;
+import com.cloud.storage.template.Processor;
+import com.cloud.storage.template.Processor.FormatInfo;
+import com.cloud.storage.template.QCOW2Processor;
+import com.cloud.storage.template.TemplateLocation;
+import com.cloud.storage.template.TemplateProp;
+
+/**
+ * Agent-side handler for {@link StorPoolBackupTemplateFromSnapshotCommand}:
+ * attaches the StorPool snapshot or volume, converts it with qemu-img from
+ * RAW to QCOW2 on NFS secondary storage, writes template.properties and the
+ * QCOW2Processor-generated metadata, and returns the new template's path,
+ * format and sizes in a CopyCmdAnswer. Only NFS secondary storage is
+ * supported.
+ */
+@ResourceWrapper(handles = StorPoolBackupTemplateFromSnapshotCommand.class)
+public class StorPoolBackupTemplateFromSnapshotCommandWrapper extends CommandWrapper<StorPoolBackupTemplateFromSnapshotCommand, CopyCmdAnswer, LibvirtComputingResource> {
+
+ private static final Logger s_logger = Logger.getLogger(StorPoolBackupTemplateFromSnapshotCommandWrapper.class);
+
+ @Override
+ public CopyCmdAnswer execute(final StorPoolBackupTemplateFromSnapshotCommand cmd, final LibvirtComputingResource libvirtComputingResource) {
+ String srcPath = null;
+ KVMStoragePool secondaryPool = null;
+ // "snapshot" or "volume" — used when attaching/detaching the StorPool object.
+ String objectType = cmd.getSourceTO().getObjectType().toString().toLowerCase();
+
+ try {
+ final DataTO src = cmd.getSourceTO();
+ final TemplateObjectTO dst = cmd.getDestinationTO();
+ String name = null;
+ String volumeFormatExtension = null;
+
+ // The source may be either a snapshot or a volume; anything else is rejected.
+ if (src instanceof SnapshotObjectTO) {
+ name = ((SnapshotObjectTO) src).getName();
+ volumeFormatExtension = ((SnapshotObjectTO) src).getVolume().getFormat().getFileExtension();
+ } else if (src instanceof VolumeObjectTO) {
+ name = ((VolumeObjectTO) src).getName();
+ volumeFormatExtension = ((VolumeObjectTO) src).getFormat().getFileExtension();
+ } else {
+ return new CopyCmdAnswer("Backup of a template is not supported for data object: " + src.getObjectType() );
+ }
+ final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
+ StorageLayer storage = libvirtComputingResource.getStorage();
+ Processor processor = new QCOW2Processor();
+ String _tmpltpp = "template.properties";
+
+ SP_LOG("StorpoolBackupTemplateFromSnapshotCommandWrapper.execute: src=" + src.getPath() + "dst=" + dst.getPath());
+ // Attach the StorPool object so it is readable as a local block device.
+ StorPoolStorageAdaptor.attachOrDetachVolume("attach", objectType, src.getPath());
+ srcPath = src.getPath();
+
+ final QemuImgFile srcFile = new QemuImgFile(srcPath, PhysicalDiskFormat.RAW);
+
+ final DataStoreTO dstDataStore = dst.getDataStore();
+ if (!(dstDataStore instanceof NfsTO)) {
+ return new CopyCmdAnswer("Backup Storpool snapshot: Only NFS secondary supported at present!");
+ }
+
+ secondaryPool = storagePoolMgr.getStoragePoolByURI(dstDataStore.getUrl());
+
+ final String dstDir = secondaryPool.getLocalPath() + File.separator + dst.getPath();
+ FileUtils.forceMkdir(new File(dstDir));
+
+ String nameWithExtension = name + "." + volumeFormatExtension;
+
+ final String dstPath = dstDir + File.separator + nameWithExtension;
+ final QemuImgFile dstFile = new QemuImgFile(dstPath, PhysicalDiskFormat.QCOW2);
+
+ // Convert the RAW source data into a QCOW2 template file.
+ final QemuImg qemu = new QemuImg(cmd.getWaitInMillSeconds());
+ qemu.convert(srcFile, dstFile);
+
+ // Write template.properties so the template is discoverable on secondary storage.
+ storage.create(dstDir, _tmpltpp);
+ String metaFileName = dstDir + File.separator + _tmpltpp;
+ File metaFile = new File(metaFileName);
+
+ try ( FileWriter writer = new FileWriter(metaFile);
+ BufferedWriter bufferWriter = new BufferedWriter(writer);) {
+ bufferWriter.write("uniquename=" + dst.getName());
+ bufferWriter.write("\n");
+ bufferWriter.write("filename=" + nameWithExtension);
+ }
+ Map<String, Object> params = new HashMap<String, Object>();
+ params.put(StorageLayer.InstanceConfigKey, storage);
+
+ processor.configure("template processor", params);
+
+ // Let the QCOW2 processor compute format info and persist the template location metadata.
+ FormatInfo info = processor.process(dstDir, null, name);
+ TemplateLocation loc = new TemplateLocation(storage, dstDir);
+ loc.create(1, true, dst.getName());
+ loc.addFormat(info);
+ loc.save();
+
+ TemplateProp prop = loc.getTemplateInfo();
+ final TemplateObjectTO template = new TemplateObjectTO();
+ template.setPath(dst.getPath() + File.separator + nameWithExtension);
+ template.setFormat(ImageFormat.QCOW2);
+ template.setSize(prop.getSize());
+ template.setPhysicalSize(prop.getPhysicalSize());
+
+ return new CopyCmdAnswer(template);
+ } catch (final Exception e) {
+ final String error = "failed to backup snapshot: " + e.getMessage();
+ SP_LOG(error);
+ s_logger.debug(error);
+ return new CopyCmdAnswer(cmd, e);
+ } finally {
+ // Always detach the StorPool object and release the secondary storage pool.
+ if (srcPath != null) {
+ StorPoolStorageAdaptor.attachOrDetachVolume("detach", objectType, srcPath);
+ }
+
+ if (secondaryPool != null) {
+ try {
+ secondaryPool.delete();
+ } catch (final Exception e) {
+ s_logger.debug("Failed to delete secondary storage", e);
+ }
+ }
+ }
+ }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolCopyVolumeToSecondaryCommandWrapper.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolCopyVolumeToSecondaryCommandWrapper.java
new file mode 100644
index 0000000000..29e8979bd8
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolCopyVolumeToSecondaryCommandWrapper.java
@@ -0,0 +1,124 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.hypervisor.kvm.resource.wrapper;
+
+import static com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor.SP_LOG;
+
+import java.io.File;
+
+import org.apache.cloudstack.storage.command.CopyCmdAnswer;
+import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
+import org.apache.cloudstack.storage.to.VolumeObjectTO;
+import org.apache.cloudstack.utils.qemu.QemuImg;
+import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
+import org.apache.cloudstack.utils.qemu.QemuImgFile;
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.api.storage.StorPoolCopyVolumeToSecondaryCommand;
+import com.cloud.agent.api.to.DataStoreTO;
+import com.cloud.agent.api.to.NfsTO;
+import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
+import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
+import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
+import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
+import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
+import com.cloud.resource.CommandWrapper;
+import com.cloud.resource.ResourceWrapper;
+
+/**
+ * Copies a StorPool volume (exposed locally by attaching its StorPool snapshot) to a
+ * destination data store: either an NFS secondary storage share or another primary
+ * storage pool. The actual byte copy is done by qemu-img converting from the raw
+ * StorPool block device into a freshly created destination disk.
+ */
+@ResourceWrapper(handles = StorPoolCopyVolumeToSecondaryCommand.class)
+public final class StorPoolCopyVolumeToSecondaryCommandWrapper extends CommandWrapper<StorPoolCopyVolumeToSecondaryCommand, CopyCmdAnswer, LibvirtComputingResource> {
+
+    private static final Logger s_logger = Logger.getLogger(StorPoolCopyVolumeToSecondaryCommandWrapper.class);
+
+    /**
+     * @param cmd source/destination volume TOs plus the qemu-img timeout
+     * @param libvirtComputingResource agent resource used to reach the storage pool manager
+     * @return a {@link CopyCmdAnswer} carrying the destination TO on success, or an error answer
+     */
+    @Override
+    public CopyCmdAnswer execute(final StorPoolCopyVolumeToSecondaryCommand cmd, final LibvirtComputingResource libvirtComputingResource) {
+        String srcPath = null;
+        KVMStoragePool secondaryPool = null;
+
+        try {
+            final VolumeObjectTO src = cmd.getSourceTO();
+            final VolumeObjectTO dst = cmd.getDestinationTO();
+            final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
+            final String destVolumePath = dst.getPath();
+
+            SP_LOG("StorpoolCopyVolumeToSecondaryCommandWrapper.execute: src=" + src.getPath() + "dst=" + dst.getPath());
+
+            // Expose the StorPool snapshot as a local block device so qemu-img can read it raw.
+            StorPoolStorageAdaptor.attachOrDetachVolume("attach", "snapshot", src.getPath());
+            srcPath = src.getPath();
+
+            final QemuImgFile srcFile = new QemuImgFile(srcPath, PhysicalDiskFormat.RAW);
+
+            final DataStoreTO dstDataStore = dst.getDataStore();
+
+            SP_LOG("StorpoolCopyVolumeToSecondaryCommandWrapper.execute: KVMStoragePoolManager " + storagePoolMgr);
+            KVMStoragePool destPool;
+            if( dstDataStore instanceof NfsTO ) {
+                // Mount the share once to create the destination folder, then re-mount scoped
+                // to that folder so the new disk is created in the right directory.
+                destPool = storagePoolMgr.getStoragePoolByURI(dstDataStore.getUrl());
+                destPool.createFolder(destVolumePath);
+                storagePoolMgr.deleteStoragePool(destPool.getType(), destPool.getUuid());
+                destPool = storagePoolMgr.getStoragePoolByURI(dstDataStore.getUrl() + File.separator + destVolumePath);
+                // Track the NFS mount for cleanup; previously secondaryPool was never assigned,
+                // so the finally block never unmounted the share (mount leak).
+                secondaryPool = destPool;
+                SP_LOG("StorpoolCopyVolumeToSecondaryCommandWrapper.execute: Nfs destPool=%s ",destPool);
+            } else if( dstDataStore instanceof PrimaryDataStoreTO ) {
+                PrimaryDataStoreTO primaryDst = (PrimaryDataStoreTO)dstDataStore;
+                destPool = storagePoolMgr.getStoragePool(primaryDst.getPoolType(), dstDataStore.getUuid());
+                SP_LOG("StorpoolCopyVolumeToSecondaryCommandWrapper.execute: not Nfs destPool=%s " ,destPool);
+            } else {
+                return new CopyCmdAnswer("Don't know how to copy to " + dstDataStore.getClass().getName() + ", " + dst.getPath() );
+            }
+            SP_LOG("StorpoolCopyVolumeToSecondaryCommandWrapper.execute: dstName=%s, dstProvisioningType=%s, srcSize=%s, dstUUID=%s, srcUUID=%s " ,dst.getName(), dst.getProvisioningType(), src.getSize(),dst.getUuid(), src.getUuid());
+
+            KVMPhysicalDisk newDisk = destPool.createPhysicalDisk(dst.getUuid(), dst.getProvisioningType(), src.getSize());
+            SP_LOG("NewDisk path=%s, uuid=%s ", newDisk.getPath(), dst.getUuid());
+            String destPath = newDisk.getPath();
+            newDisk.setPath(dst.getUuid());
+
+            PhysicalDiskFormat destFormat = newDisk.getFormat();
+            SP_LOG("StorpoolCopyVolumeToSecondaryCommandWrapper.execute: KVMPhysicalDisk name=%s, format=%s, path=%s, destinationPath=%s " , newDisk.getName(), newDisk.getFormat(), newDisk.getPath(), destPath);
+            QemuImgFile destFile = new QemuImgFile(destPath, destFormat);
+            QemuImg qemu = new QemuImg(cmd.getWaitInMillSeconds());
+            qemu.convert(srcFile, destFile);
+
+            // Report the on-disk (physical) size of the produced file back to the caller.
+            final File file = new File(destPath);
+            final long size = file.exists() ? file.length() : 0;
+            dst.setPath(destVolumePath + File.separator + dst.getUuid());
+            dst.setSize(size);
+
+            return new CopyCmdAnswer(dst);
+        } catch (final Exception e) {
+            final String error = "Failed to copy volume to secondary storage: " + e.getMessage();
+            s_logger.debug(error);
+            return new CopyCmdAnswer(error);
+        } finally {
+            // Always detach the StorPool snapshot device and unmount the secondary pool.
+            if (srcPath != null) {
+                StorPoolStorageAdaptor.attachOrDetachVolume("detach", "snapshot", srcPath);
+            }
+
+            if (secondaryPool != null) {
+                try {
+                    SP_LOG("StorpoolCopyVolumeToSecondaryCommandWrapper.execute: secondaryPool=%s " , secondaryPool);
+                    secondaryPool.delete();
+                } catch (final Exception e) {
+                    s_logger.debug("Failed to delete secondary storage", e);
+                }
+            }
+        }
+    }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolDownloadTemplateCommandWrapper.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolDownloadTemplateCommandWrapper.java
new file mode 100644
index 0000000000..07b08a1da2
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolDownloadTemplateCommandWrapper.java
@@ -0,0 +1,134 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.hypervisor.kvm.resource.wrapper;
+
+import static com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor.SP_LOG;
+
+import java.io.File;
+import java.util.List;
+
+import org.apache.cloudstack.storage.command.CopyCmdAnswer;
+import org.apache.cloudstack.utils.qemu.QemuImg;
+import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
+import org.apache.cloudstack.utils.qemu.QemuImgFile;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.api.storage.StorPoolDownloadTemplateCommand;
+import com.cloud.agent.api.to.DataStoreTO;
+import com.cloud.agent.api.to.DataTO;
+import com.cloud.agent.api.to.NfsTO;
+import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
+import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
+import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
+import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
+import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
+import com.cloud.resource.CommandWrapper;
+import com.cloud.resource.ResourceWrapper;
+
+/**
+ * Copies a template from NFS secondary storage onto a StorPool volume.
+ * The template image is located on the NFS share, the StorPool volume is grown to
+ * the template's virtual size and attached locally, then qemu-img converts the
+ * template into the raw StorPool block device.
+ */
+@ResourceWrapper(handles = StorPoolDownloadTemplateCommand.class)
+public final class StorPoolDownloadTemplateCommandWrapper extends CommandWrapper<StorPoolDownloadTemplateCommand, CopyCmdAnswer, LibvirtComputingResource> {
+
+    private static final Logger s_logger = Logger.getLogger(StorPoolDownloadTemplateCommandWrapper.class);
+
+    @Override
+    public CopyCmdAnswer execute(final StorPoolDownloadTemplateCommand cmd, final LibvirtComputingResource libvirtComputingResource) {
+        String dstPath = null;
+        KVMStoragePool secondaryPool = null;
+        DataTO src = cmd.getSourceTO();
+        DataTO dst = cmd.getDestinationTO();
+
+        try {
+            final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
+            SP_LOG("StorpoolDownloadTemplateCommandWrapper.execute: src=" + src.getPath() + " dst=" + dst.getPath());
+
+            final DataStoreTO srcDataStore = src.getDataStore();
+            if (!(srcDataStore instanceof NfsTO)) {
+                return new CopyCmdAnswer("Download template to Storpool: Only NFS secondary supported at present!");
+            }
+
+            final NfsTO nfsImageStore = (NfsTO)srcDataStore;
+            // Split the full template URL at the last '/' into mount point and file name;
+            // a URL ending in '/' yields no file name and triggers the qcow2 scan below.
+            final String templateUrl = nfsImageStore.getUrl() + File.separator + src.getPath();
+            final int lastSlash = templateUrl.lastIndexOf("/");
+            final String mountPoint = templateUrl.substring(0, lastSlash);
+            final String templateFileName = lastSlash < templateUrl.length() - 1 ? templateUrl.substring(lastSlash + 1) : null;
+
+            secondaryPool = storagePoolMgr.getStoragePoolByURI(mountPoint);
+
+            KVMPhysicalDisk templateDisk = null;
+            if (templateFileName == null) {
+                // No explicit file name: refresh the share and pick the first qcow2 image found.
+                secondaryPool.refresh();
+                final List<KVMPhysicalDisk> disks = secondaryPool.listPhysicalDisks();
+                if (CollectionUtils.isEmpty(disks)) {
+                    SP_LOG("Failed to get volumes from pool: " + secondaryPool.getUuid());
+                    return new CopyCmdAnswer("Failed to get volumes from pool: " + secondaryPool.getUuid());
+                }
+                for (final KVMPhysicalDisk candidate : disks) {
+                    if (candidate.getName().endsWith("qcow2")) {
+                        templateDisk = candidate;
+                        break;
+                    }
+                }
+            } else {
+                templateDisk = secondaryPool.getPhysicalDisk(templateFileName);
+            }
+
+            if (templateDisk == null) {
+                SP_LOG("Failed to get template from pool: " + secondaryPool.getUuid());
+                return new CopyCmdAnswer("Failed to get template from pool: " + secondaryPool.getUuid());
+            }
+
+            SP_LOG("got src path: " + templateDisk.getPath() + " srcSize " + templateDisk.getVirtualSize());
+
+            final QemuImgFile srcFile = new QemuImgFile(templateDisk.getPath(), templateDisk.getFormat());
+
+            final QemuImg qemu = new QemuImg(cmd.getWaitInMillSeconds());
+            // Grow the StorPool volume to the template's virtual size before writing into it.
+            StorPoolStorageAdaptor.resize( Long.toString(templateDisk.getVirtualSize()), dst.getPath());
+
+            dstPath = dst.getPath();
+            StorPoolStorageAdaptor.attachOrDetachVolume("attach", cmd.getObjectType(), dstPath);
+
+            final QemuImgFile dstFile = new QemuImgFile(dstPath, PhysicalDiskFormat.RAW);
+
+            qemu.convert(srcFile, dstFile);
+            return new CopyCmdAnswer(dst);
+        } catch (final Exception e) {
+            final String error = "Failed to copy template to primary: " + e.getMessage();
+            s_logger.debug(error);
+            return new CopyCmdAnswer(cmd, e);
+        } finally {
+            // Detach the StorPool device and unmount the secondary storage pool in all cases.
+            if (dstPath != null) {
+                StorPoolStorageAdaptor.attachOrDetachVolume("detach", cmd.getObjectType(), dstPath);
+            }
+
+            if (secondaryPool != null) {
+                try {
+                    secondaryPool.delete();
+                } catch (final Exception e) {
+                    s_logger.debug("Failed to delete secondary storage", e);
+                }
+            }
+        }
+    }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolDownloadVolumeCommandWrapper.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolDownloadVolumeCommandWrapper.java
new file mode 100644
index 0000000000..d1a58a4aeb
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolDownloadVolumeCommandWrapper.java
@@ -0,0 +1,162 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.hypervisor.kvm.resource.wrapper;
+
+import static com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor.SP_LOG;
+
+import java.util.List;
+
+import org.apache.cloudstack.storage.command.CopyCmdAnswer;
+import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
+import org.apache.cloudstack.storage.to.VolumeObjectTO;
+import org.apache.cloudstack.utils.qemu.QemuImg;
+import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
+import org.apache.cloudstack.utils.qemu.QemuImgFile;
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.api.storage.StorPoolDownloadVolumeCommand;
+import com.cloud.agent.api.to.DataStoreTO;
+import com.cloud.agent.api.to.NfsTO;
+import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
+import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
+import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
+import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
+import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
+import com.cloud.resource.CommandWrapper;
+import com.cloud.resource.ResourceWrapper;
+import com.cloud.storage.Storage.ImageFormat;
+import com.cloud.storage.Storage.StoragePoolType;
+
+/**
+ * Copies an existing volume onto a StorPool volume. The source may live on an NFS
+ * secondary storage share or on another primary storage pool (including Ceph/RBD);
+ * the destination StorPool volume is resized to the source's virtual size, attached
+ * locally and filled via qemu-img convert.
+ */
+@ResourceWrapper(handles = StorPoolDownloadVolumeCommand.class)
+public final class StorPoolDownloadVolumeCommandWrapper extends CommandWrapper<StorPoolDownloadVolumeCommand, CopyCmdAnswer, LibvirtComputingResource> {
+
+    private static final Logger s_logger = Logger.getLogger(StorPoolDownloadVolumeCommandWrapper.class);
+
+    /**
+     * Locates the source disk, prepares the StorPool destination, converts the data
+     * and returns the destination TO; on any failure returns an error answer.
+     * The finally block detaches the StorPool device and unmounts any NFS pool.
+     */
+    @Override
+    public CopyCmdAnswer execute(final StorPoolDownloadVolumeCommand cmd, final LibvirtComputingResource libvirtComputingResource) {
+        String dstPath = null;
+        KVMStoragePool secondaryPool = null;
+
+        try {
+            final VolumeObjectTO src = cmd.getSourceTO();
+            final VolumeObjectTO dst = cmd.getDestinationTO();
+            final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
+            SP_LOG("StorpoolDownloadVolumeCommandWrapper.execute: src=" + src.getPath() + " srcName=" + src.getName() + " dst=" + dst.getPath());
+
+            final DataStoreTO srcDataStore = src.getDataStore();
+            KVMPhysicalDisk srcDisk = null;
+
+            if(srcDataStore instanceof NfsTO) {
+                SP_LOG("StorpoolDownloadVolumeCommandWrapper.execute: srcIsNfsTO");
+
+                // Split the full source URL at the last '/' into mount point and file name;
+                // a trailing '/' leaves tmpltname null and triggers the qcow2 scan below.
+                final String tmplturl = srcDataStore.getUrl() + srcDataStore.getPathSeparator() + src.getPath();
+                final int index = tmplturl.lastIndexOf("/");
+                final String mountpoint = tmplturl.substring(0, index);
+                String tmpltname = null;
+                if (index < tmplturl.length() - 1) {
+                    tmpltname = tmplturl.substring(index + 1);
+                }
+
+                secondaryPool = storagePoolMgr.getStoragePoolByURI(mountpoint);
+
+                if (tmpltname == null) {
+                    // No explicit file name: take the first qcow2 image on the share.
+                    secondaryPool.refresh();
+                    final List<KVMPhysicalDisk> disks = secondaryPool.listPhysicalDisks();
+                    if (disks == null || disks.isEmpty()) {
+                        SP_LOG("Failed to get volumes from pool: " + secondaryPool.getUuid());
+                        return new CopyCmdAnswer("Failed to get volumes from pool: " + secondaryPool.getUuid());
+                    }
+                    for (final KVMPhysicalDisk disk : disks) {
+                        if (disk.getName().endsWith("qcow2")) {
+                            srcDisk = disk;
+                            break;
+                        }
+                    }
+                } else {
+                    srcDisk = secondaryPool.getPhysicalDisk(tmpltname);
+                }
+            } else if(srcDataStore instanceof PrimaryDataStoreTO) {
+                SP_LOG("SrcDisk is Primary Storage");
+                PrimaryDataStoreTO primarySrc = (PrimaryDataStoreTO)srcDataStore;
+                SP_LOG("StorpoolDownloadVolumeCommandWrapper.execute primarySrcPoolType=%s, uuid-%s ", primarySrc.getPoolType(), primarySrc.getUuid());
+                final KVMStoragePoolManager poolMgr = libvirtComputingResource.getStoragePoolMgr();
+                srcDisk = poolMgr.getPhysicalDisk(primarySrc.getPoolType(), srcDataStore.getUuid(), src.getPath());
+                SP_LOG("PhysicalDisk: disk=%s", srcDisk );
+            } else {
+                return new CopyCmdAnswer("Don't know how to copy from " + srcDataStore.getClass().getName() + ", " + src.getPath() );
+            }
+
+            if (srcDisk == null) {
+                SP_LOG("Failed to get src volume");
+                return new CopyCmdAnswer("Failed to get src volume");
+            }
+
+            SP_LOG("got src path: " + srcDisk.getPath() + " srcSize " + srcDisk.getVirtualSize());
+
+            String srcPath = null;
+            boolean isRBDPool = srcDisk.getPool().getType() == StoragePoolType.RBD;
+            if (isRBDPool) {
+                // For Ceph sources, build an rbd: source string so qemu-img reads
+                // directly from the cluster instead of a local file path.
+                KVMStoragePool srcPool = srcDisk.getPool();
+                String rbdDestPath = srcPool.getSourceDir() + "/" + srcDisk.getName();
+                srcPath = KVMPhysicalDisk.RBDStringBuilder(srcPool.getSourceHost(),
+                        srcPool.getSourcePort(),
+                        srcPool.getAuthUserName(),
+                        srcPool.getAuthSecret(),
+                        rbdDestPath);
+            } else {
+                srcPath = srcDisk.getPath();
+            }
+            // The source is read as RAW regardless of its on-disk format.
+            // NOTE(review): presumably safe because qemu-img probes RBD content — confirm
+            // this is intended for qcow2 files on NFS as well.
+            final QemuImgFile srcFile = new QemuImgFile(srcPath, PhysicalDiskFormat.RAW);
+
+            final QemuImg qemu = new QemuImg(cmd.getWaitInMillSeconds());
+            // Grow the StorPool volume to the source's virtual size before writing into it.
+            StorPoolStorageAdaptor.resize( Long.toString(srcDisk.getVirtualSize()), dst.getPath());
+
+            dstPath = dst.getPath();
+            StorPoolStorageAdaptor.attachOrDetachVolume("attach", "volume", dstPath);
+
+            final QemuImgFile dstFile = new QemuImgFile(dstPath, srcFile.getFormat());
+            SP_LOG("SRC format=%s, DST format=%s",srcFile.getFormat(), dstFile.getFormat());
+            qemu.convert(srcFile, dstFile);
+            SP_LOG("StorpoolDownloadVolumeCommandWrapper VolumeObjectTO format=%s, hypervisor=%s", dst.getFormat(), dst.getHypervisorType());
+            if (isRBDPool) {
+                // NOTE(review): the destination was just written RAW, yet the TO is
+                // reported as QCOW2 for RBD sources — verify downstream consumers rely on this.
+                dst.setFormat(ImageFormat.QCOW2);
+            }
+            return new CopyCmdAnswer(dst);
+        } catch (final Exception e) {
+            final String error = "Failed to copy volume to primary: " + e.getMessage();
+            SP_LOG(error);
+            s_logger.debug(error);
+            return new CopyCmdAnswer(cmd, e);
+        } finally {
+            if (dstPath != null) {
+                StorPoolStorageAdaptor.attachOrDetachVolume("detach", "volume", dstPath);
+            }
+
+            if (secondaryPool != null) {
+                try {
+                    secondaryPool.delete();
+                } catch (final Exception e) {
+                    s_logger.debug("Failed to delete secondary storage", e);
+                }
+            }
+        }
+    }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolModifyStorageCommandWrapper.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolModifyStorageCommandWrapper.java
new file mode 100644
index 0000000000..b357970833
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolModifyStorageCommandWrapper.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.cloud.hypervisor.kvm.resource.wrapper;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.storage.StorPoolModifyStoragePoolAnswer;
+import com.cloud.agent.api.storage.StorPoolModifyStoragePoolCommand;
+import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
+import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
+import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
+import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
+import com.cloud.resource.CommandWrapper;
+import com.cloud.resource.ResourceWrapper;
+import com.cloud.storage.template.TemplateProp;
+import com.cloud.utils.script.OutputInterpreter;
+import com.cloud.utils.script.Script;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonParser;
+
+/**
+ * Handles ModifyStoragePool for StorPool-backed primary storage: verifies that this
+ * host belongs to a StorPool cluster (via storpool_confget), attaches the pool's
+ * volume through the StorPool CLI and registers the storage pool with the agent.
+ */
+@ResourceWrapper(handles = StorPoolModifyStoragePoolCommand.class)
+public final class StorPoolModifyStorageCommandWrapper extends CommandWrapper<StorPoolModifyStoragePoolCommand, Answer, LibvirtComputingResource> {
+    private static final Logger log = Logger.getLogger(StorPoolModifyStorageCommandWrapper.class);
+
+    @Override
+    public Answer execute(final StorPoolModifyStoragePoolCommand command, final LibvirtComputingResource libvirtComputingResource) {
+        String clusterId = getSpClusterId();
+        if (clusterId == null) {
+            // Fixed format specifier: was "$s", which printed the literal "$s" and dropped the class name.
+            log.debug(String.format("Could not get StorPool cluster id for a command %s", command.getClass()));
+            return new Answer(command, false, "spNotFound");
+        }
+        try {
+            String result = attachOrDetachVolume("attach", "volume", command.getVolumeName());
+            if (result != null) {
+                return new Answer(command, false, result);
+            }
+            final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
+            final KVMStoragePool storagepool =
+                    storagePoolMgr.createStoragePool(command.getPool().getUuid(), command.getPool().getHost(), command.getPool().getPort(), command.getPool().getPath(), command.getPool()
+                            .getUserInfo(), command.getPool().getType());
+            if (storagepool == null) {
+                log.debug(String.format("Did not find a storage pool [%s]", command.getPool().getId()));
+                return new Answer(command, false, String.format("Failed to create storage pool [%s]", command.getPool().getId()));
+            }
+
+            // The template info map is intentionally empty; the answer also carries the cluster id.
+            final Map<String, TemplateProp> tInfo = new HashMap<String, TemplateProp>();
+            return new StorPoolModifyStoragePoolAnswer(command, storagepool.getCapacity(), storagepool.getAvailable(), tInfo, clusterId);
+        } catch (Exception e) {
+            log.debug(String.format("Could not modify storage due to %s", e.getMessage()));
+            return new Answer(command, e);
+        }
+    }
+
+    /**
+     * Reads the local StorPool configuration via {@code storpool_confget} and returns
+     * the value of SP_CLUSTER_ID, or {@code null} when the tool fails or the key is absent.
+     */
+    private String getSpClusterId() {
+        Script sc = new Script("storpool_confget", 0, log);
+        OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
+
+        final String err = sc.execute(parser);
+        if (err != null) {
+            final String errMsg = String.format("Could not execute storpool_confget. Error: %s", err);
+            log.warn(errMsg);
+            StorPoolStorageAdaptor.SP_LOG("Could not execute storpool_confget. Error: %s", err);
+            return null;
+        }
+
+        // storpool_confget prints KEY=VALUE lines; return the SP_CLUSTER_ID value if present.
+        for (String line : parser.getLines().split("\n")) {
+            String[] toks = line.split("=");
+            if (toks.length != 2) {
+                continue;
+            }
+            if (toks[0].equals("SP_CLUSTER_ID")) {
+                return toks[1];
+            }
+        }
+        return null;
+    }
+
+    /**
+     * Attaches or detaches a StorPool volume on this host through the StorPool CLI.
+     *
+     * @param command "attach" or "detach"
+     * @param type the StorPool object kind, e.g. "volume"
+     * @param volumeUuid the CloudStack volume path the StorPool name is derived from
+     * @return {@code null} on success (or when the path does not name a StorPool volume),
+     *         otherwise an error description
+     */
+    public String attachOrDetachVolume(String command, String type, String volumeUuid) {
+        final String name = StorPoolStorageAdaptor.getVolumeNameFromPath(volumeUuid, true);
+        if (name == null) {
+            // NOTE(review): a path that is not a StorPool volume is treated as success — confirm intended.
+            return null;
+        }
+
+        Script sc = new Script("storpool", 300000, log);
+        sc.add("-M");
+        sc.add("-j");
+        sc.add(command);
+        sc.add(type, name);
+        sc.add("here");
+        sc.add("onRemoteAttached");
+        sc.add("export");
+
+        OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
+
+        String res = sc.execute(parser);
+
+        if (res != null) {
+            if (!res.equals(Script.ERR_TIMEOUT)) {
+                // The CLI answers in JSON (-j); extract the error name when present.
+                try {
+                    Set<Entry<String, JsonElement>> obj2 = new JsonParser().parse(res).getAsJsonObject().entrySet();
+                    for (Entry<String, JsonElement> entry : obj2) {
+                        if (entry.getKey().equals("error")) {
+                            res = entry.getValue().getAsJsonObject().get("name").getAsString();
+                        }
+                    }
+                } catch (Exception e) {
+                    // Previously swallowed silently; keep the raw CLI output but record the parse failure.
+                    log.debug("Could not parse StorPool CLI output as JSON: " + res, e);
+                }
+            }
+
+            log.warn(String.format("Unable to %s volume %s. Error: %s", command, name, res));
+        }
+
+        return res;
+    }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolResizeVolumeCommandWrapper.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolResizeVolumeCommandWrapper.java
new file mode 100644
index 0000000000..9f9277768e
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolResizeVolumeCommandWrapper.java
@@ -0,0 +1,98 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.hypervisor.kvm.resource.wrapper;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.api.storage.ResizeVolumeAnswer;
+import com.cloud.agent.api.storage.StorPoolResizeVolumeCommand;
+import com.cloud.agent.api.to.StorageFilerTO;
+import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
+import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
+import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
+import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
+import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
+import com.cloud.resource.CommandWrapper;
+import com.cloud.resource.ResourceWrapper;
+import com.cloud.utils.script.Script;
+
+
+/**
+ * Resizes a StorPool-backed volume through the agent's resizevolume script,
+ * attaching the volume on this host first when it is not attached to a VM.
+ */
+@ResourceWrapper(handles = StorPoolResizeVolumeCommand.class)
+public final class StorPoolResizeVolumeCommandWrapper extends CommandWrapper<StorPoolResizeVolumeCommand, ResizeVolumeAnswer, LibvirtComputingResource> {
+
+    private static final Logger s_logger = Logger.getLogger(StorPoolResizeVolumeCommandWrapper.class);
+
+    @Override
+    public ResizeVolumeAnswer execute(final StorPoolResizeVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) {
+        final String volumeId = command.getPath();
+        final long newSize = command.getNewSize();
+        final long currentSize = command.getCurrentSize();
+        final StorageFilerTO poolTO = command.getPool();
+        String attachedPath = null;
+
+        // Short-circuit when the requested size equals the current one.
+        if (currentSize == newSize) {
+            s_logger.info("No need to resize volume: current size " + currentSize + " is same as new size " + newSize);
+            return new ResizeVolumeAnswer(command, true, "success", currentSize);
+        }
+
+        try {
+            final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
+            KVMStoragePool pool = storagePoolMgr.getStoragePool(poolTO.getType(), poolTO.getUuid());
+
+            final KVMPhysicalDisk disk = pool.getPhysicalDisk(volumeId);
+            attachedPath = disk.getPath();
+            if (!command.isAttached()) {
+                // Volume not attached to a VM: expose it on this host for the duration of the resize.
+                StorPoolStorageAdaptor.attachOrDetachVolume("attach", "volume", attachedPath);
+            }
+
+            final Script resizeScript = new Script(libvirtComputingResource.getResizeVolumePath(), libvirtComputingResource.getCmdsTimeout(), s_logger);
+            resizeScript.add("-s", String.valueOf(newSize));
+            resizeScript.add("-c", String.valueOf(currentSize));
+            resizeScript.add("-p", attachedPath);
+            resizeScript.add("-t", "NOTIFYONLY");
+            resizeScript.add("-r", String.valueOf(command.getShrinkOk()));
+            resizeScript.add("-v", command.getInstanceName());
+
+            if (resizeScript.execute() != null) {
+                // NOTIFYONLY mode: the guest could not be notified; a reboot picks up the new size.
+                return new ResizeVolumeAnswer(command, true, "Resize succeeded, but need reboot to notify guest");
+            }
+
+            /* fetch new size as seen from libvirt, don't want to assume anything */
+            pool = storagePoolMgr.getStoragePool(poolTO.getType(), poolTO.getUuid());
+            pool.refresh();
+
+            final long finalSize = pool.getPhysicalDisk(volumeId).getVirtualSize();
+            s_logger.debug("after resize, size reports as " + finalSize + ", requested " + newSize);
+            return new ResizeVolumeAnswer(command, true, "success", finalSize);
+        } catch (final Exception e) {
+            final String error = "Failed to resize volume: " + e.getMessage();
+            s_logger.debug(error);
+            return new ResizeVolumeAnswer(command, false, error);
+        } finally {
+            // Undo the temporary attach performed above, if any.
+            if (!command.isAttached() && attachedPath != null) {
+                StorPoolStorageAdaptor.attachOrDetachVolume("detach", "volume", attachedPath);
+            }
+        }
+    }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/storage/StorPoolStorageAdaptor.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/storage/StorPoolStorageAdaptor.java
new file mode 100644
index 0000000000..0373e3bdf8
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/storage/StorPoolStorageAdaptor.java
@@ -0,0 +1,388 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.hypervisor.kvm.storage;
+
+
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.PrintWriter;
+import java.text.SimpleDateFormat;
+import java.util.Calendar;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.api.to.DiskTO;
+import com.cloud.storage.Storage;
+import com.cloud.storage.Storage.ImageFormat;
+import com.cloud.storage.Storage.ProvisioningType;
+import com.cloud.storage.Storage.StoragePoolType;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.utils.script.OutputInterpreter;
+import com.cloud.utils.script.Script;
+
+@StorageAdaptorInfo(storagePoolType=StoragePoolType.SharedMountPoint)
+public class StorPoolStorageAdaptor implements StorageAdaptor {
+ public static void SP_LOG(String fmt, Object... args) {
+ try (PrintWriter spLogFile = new PrintWriter(new BufferedWriter(new FileWriter("/var/log/cloudstack/agent/storpool-agent.log", true)))) {
+ final String line = String.format(fmt, args);
+ String timeStamp = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,ms").format(Calendar.getInstance().getTime());
+ spLogFile.println(timeStamp +" "+line);
+ spLogFile.flush();
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ private static final Logger log = Logger.getLogger(StorPoolStorageAdaptor.class);
+
+ private static final Map<String, KVMStoragePool> storageUuidToStoragePool = new HashMap<String, KVMStoragePool>();
+
+ @Override
+ public KVMStoragePool createStoragePool(String uuid, String host, int port, String path, String userInfo, StoragePoolType storagePoolType) {
+ SP_LOG("StorpooolStorageAdaptor.createStoragePool: uuid=%s, host=%s:%d, path=%s, userInfo=%s, type=%s", uuid, host, port, path, userInfo, storagePoolType);
+
+ StorPoolStoragePool storagePool = new StorPoolStoragePool(uuid, host, port, storagePoolType, this);
+ storageUuidToStoragePool.put(uuid, storagePool);
+ return storagePool;
+ }
+
+ @Override
+ public KVMStoragePool getStoragePool(String uuid) {
+ SP_LOG("StorpooolStorageAdaptor.getStoragePool: uuid=%s", uuid);
+ return storageUuidToStoragePool.get(uuid);
+ }
+
+ @Override
+ public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) {
+ SP_LOG("StorpooolStorageAdaptor.getStoragePool: uuid=%s, refresh=%s", uuid, refreshInfo);
+ return storageUuidToStoragePool.get(uuid);
+ }
+
+ @Override
+ public boolean deleteStoragePool(String uuid) {
+ SP_LOG("StorpooolStorageAdaptor.deleteStoragePool: uuid=%s", uuid);
+ return storageUuidToStoragePool.remove(uuid) != null;
+ }
+
+ @Override
+ public boolean deleteStoragePool(KVMStoragePool pool) {
+ SP_LOG("StorpooolStorageAdaptor.deleteStoragePool: uuid=%s", pool.getUuid());
+ return deleteStoragePool(pool.getUuid());
+ }
+
+ private static long getDeviceSize(final String devPath) {
+ SP_LOG("StorpooolStorageAdaptor.getDeviceSize: path=%s", devPath);
+
+ if (getVolumeNameFromPath(devPath, true) == null) {
+ return 0;
+ }
+ File file = new File(devPath);
+ if (!file.exists()) {
+ return 0;
+ }
+ Script sc = new Script("blockdev", 0, log);
+ sc.add("--getsize64", devPath);
+
+ OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();
+
+ String res = sc.execute(parser);
+ if (res != null) {
+ SP_LOG("Unable to retrieve device size for %s. Res: %s", devPath, res);
+
+ log.debug(String.format("Unable to retrieve device size for %s. Res: %s", devPath, res));
+ return 0;
+ }
+
+ return Long.parseLong(parser.getLine());
+ }
+
+ private static boolean waitForDeviceSymlink(String devPath) {
+ final int numTries = 10;
+ final int sleepTime = 100;
+
+ for(int i = 0; i < numTries; i++) {
+ if (getDeviceSize(devPath) != 0) {
+ return true;
+ } else {
+ try {
+ Thread.sleep(sleepTime);
+ } catch (Exception ex) {
+ // don't do anything
+ }
+ }
+ }
+ return false;
+ }
+
+ public static String getVolumeNameFromPath(final String volumeUuid, boolean tildeNeeded) {
+ if (volumeUuid.startsWith("/dev/storpool/")) {
+ return volumeUuid.split("/")[3];
+ } else if (volumeUuid.startsWith("/dev/storpool-byid/")) {
+ return tildeNeeded ? "~" + volumeUuid.split("/")[3] : volumeUuid.split("/")[3];
+ }
+
+ return null;
+ }
+
    /**
     * Attaches or detaches a StorPool volume/snapshot on this host through the
     * "storpool" CLI ("storpool -M {attach|detach} {volume|snapshot} NAME here").
     *
     * Retry semantics (order matters): a "detach" is retried up to 10 times
     * with a 1s pause (the device may still be busy), while a failed "attach"
     * aborts immediately. Any CLI failure that survives the retries is
     * escalated as a CloudRuntimeException.
     *
     * @param command    "attach" or "detach"
     * @param type       "volume" or "snapshot" (CLI object type)
     * @param volumeUuid device path ("/dev/storpool/..." or "/dev/storpool-byid/...")
     * @return false when the path is not a StorPool device; for "attach", the
     *         result of waiting for the device symlink to appear; true otherwise
     */
    public static boolean attachOrDetachVolume(String command, String type, String volumeUuid) {
        final String name = getVolumeNameFromPath(volumeUuid, true);
        if (name == null) {
            // not a StorPool-managed path — nothing to do
            return false;
        }

        SP_LOG("StorpooolStorageAdaptor.attachOrDetachVolume: cmd=%s, type=%s, uuid=%s, name=%s", command, type, volumeUuid, name);

        final int numTries = 10;
        final int sleepTime = 1000;
        String err = null;

        for(int i = 0; i < numTries; i++) {
            Script sc = new Script("storpool", 0, log);
            sc.add("-M");
            sc.add(command);
            sc.add(type, name);
            sc.add("here");
            if (command.equals("attach")) {
                // NOTE(review): extra CLI arguments used only for attach;
                // presumably exports the volume when it is attached on another
                // host — confirm against the StorPool CLI reference.
                sc.add("onRemoteAttached");
                sc.add("export");
            }

            OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();

            String res = sc.execute(parser);
            if (res == null) {
                // CLI succeeded — clear any error recorded by a prior attempt
                err = null;
                break;
            }
            err = String.format("Unable to %s volume %s. Error: %s", command, name, res);

            if (command.equals("detach")) {
                // device may still be held open; wait and retry
                try {
                    Thread.sleep(sleepTime);
                } catch (Exception ex) {
                    // don't do anything
                }
            } else {
                // attach failures are not retried
                break;
            }
        }

        if (err != null) {
            SP_LOG(err);
            log.warn(err);
            throw new CloudRuntimeException(err);
        }

        if (command.equals("attach")) {
            // the /dev symlink appears asynchronously after a successful attach
            return waitForDeviceSymlink(volumeUuid);
        } else {
            return true;
        }
    }
+
+ public static boolean resize(String newSize, String volumeUuid ) {
+ final String name = getVolumeNameFromPath(volumeUuid, true);
+ if (name == null) {
+ return false;
+ }
+
+ SP_LOG("StorpooolStorageAdaptor.resize: size=%s, uuid=%s, name=%s", newSize, volumeUuid, name);
+
+ Script sc = new Script("storpool", 0, log);
+ sc.add("-M");
+ sc.add("volume");
+ sc.add(name);
+ sc.add("update");
+ sc.add("size");
+ sc.add(newSize);
+ sc.add("shrinkOk");
+
+ OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();
+ String res = sc.execute(parser);
+ if (res == null) {
+ return true;
+ }
+
+ String err = String.format("Unable to resize volume %s. Error: %s", name, res);
+ SP_LOG(err);
+ log.warn(err);
+ throw new CloudRuntimeException(err);
+ }
+
+ @Override
+ public KVMPhysicalDisk getPhysicalDisk(String volumeUuid, KVMStoragePool pool) {
+ SP_LOG("StorpooolStorageAdaptor.getPhysicalDisk: uuid=%s, pool=%s", volumeUuid, pool);
+
+ log.debug(String.format("getPhysicalDisk: uuid=%s, pool=%s", volumeUuid, pool));
+
+ final long deviceSize = getDeviceSize(volumeUuid);
+
+ KVMPhysicalDisk physicalDisk = new KVMPhysicalDisk(volumeUuid, volumeUuid, pool);
+ physicalDisk.setFormat(PhysicalDiskFormat.RAW);
+ physicalDisk.setSize(deviceSize);
+ physicalDisk.setVirtualSize(deviceSize);
+ return physicalDisk;
+ }
+
+ @Override
+ public boolean connectPhysicalDisk(String volumeUuid, KVMStoragePool pool, Map<String, String> details) {
+ SP_LOG("StorpooolStorageAdaptor.connectPhysicalDisk: uuid=%s, pool=%s", volumeUuid, pool);
+
+ log.debug(String.format("connectPhysicalDisk: uuid=%s, pool=%s", volumeUuid, pool));
+
+ return attachOrDetachVolume("attach", "volume", volumeUuid);
+ }
+
+ @Override
+ public boolean disconnectPhysicalDisk(String volumeUuid, KVMStoragePool pool) {
+ SP_LOG("StorpooolStorageAdaptor.disconnectPhysicalDisk: uuid=%s, pool=%s", volumeUuid, pool);
+
+ log.debug(String.format("disconnectPhysicalDisk: uuid=%s, pool=%s", volumeUuid, pool));
+ return attachOrDetachVolume("detach", "volume", volumeUuid);
+ }
+
+ public boolean disconnectPhysicalDisk(Map<String, String> volumeToDisconnect) {
+ String volumeUuid = volumeToDisconnect.get(DiskTO.UUID);
+ SP_LOG("StorpooolStorageAdaptor.disconnectPhysicalDisk: map. uuid=%s", volumeUuid);
+ return attachOrDetachVolume("detach", "volume", volumeUuid);
+ }
+
+ @Override
+ public boolean disconnectPhysicalDiskByPath(String localPath) {
+ SP_LOG("StorpooolStorageAdaptor.disconnectPhysicalDiskByPath: localPath=%s", localPath);
+
+ log.debug(String.format("disconnectPhysicalDiskByPath: localPath=%s", localPath));
+ return attachOrDetachVolume("detach", "volume", localPath);
+ }
+
+ // The following do not apply for StorpoolStorageAdaptor?
+ @Override
+ public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, KVMStoragePool pool, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size) {
+ SP_LOG("StorpooolStorageAdaptor.createPhysicalDisk: uuid=%s, pool=%s, format=%s, size=%d", volumeUuid, pool, format, size);
+ throw new UnsupportedOperationException("Creating a physical disk is not supported.");
+ }
+
    /**
     * Deletes the StorPool snapshot backing a CloudStack template.
     *
     * @param volumeUuid device path of the snapshot ("/dev/storpool[-byid]/...")
     * @throws UnsupportedOperationException when the path is not a StorPool
     *         device or the CLI delete fails.
     *         NOTE(review): the failure path throws UnsupportedOperationException
     *         where the rest of this class throws CloudRuntimeException for CLI
     *         failures — confirm callers before unifying.
     */
    @Override
    public boolean deletePhysicalDisk(String volumeUuid, KVMStoragePool pool, Storage.ImageFormat format) {
        // Should only come here when cleaning-up StorPool snapshots associated with CloudStack templates.
        SP_LOG("StorpooolStorageAdaptor.deletePhysicalDisk: uuid=%s, pool=%s, format=%s", volumeUuid, pool, format);
        final String name = getVolumeNameFromPath(volumeUuid, true);
        if (name == null) {
            final String err = String.format("StorpooolStorageAdaptor.deletePhysicalDisk: '%s' is not a StorPool volume?", volumeUuid);
            SP_LOG(err);
            throw new UnsupportedOperationException(err);
        }

        // Builds "storpool -M snapshot NAME delete NAME" — the repeated name
        // appears to be the CLI's confirm-by-repeating-the-name delete form;
        // TODO confirm against the StorPool CLI reference.
        Script sc = new Script("storpool", 0, log);
        sc.add("-M");
        sc.add("snapshot", name);
        sc.add("delete", name);

        OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();

        String res = sc.execute(parser);
        if (res != null) {
            final String err = String.format("Unable to delete StorPool snapshot '%s'. Error: %s", name, res);
            SP_LOG(err);
            log.warn(err);
            throw new UnsupportedOperationException(err);
        }
        return true; // apparently ignored
    }
+
    /** Not supported by the StorPool adaptor. */
    @Override
    public List<KVMPhysicalDisk> listPhysicalDisks(String storagePoolUuid, KVMStoragePool pool) {
        SP_LOG("StorpooolStorageAdaptor.listPhysicalDisks: uuid=%s, pool=%s", storagePoolUuid, pool);
        throw new UnsupportedOperationException("Listing disks is not supported for this configuration.");
    }

    /** Not supported by the StorPool adaptor. */
    @Override
    public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String name, PhysicalDiskFormat format,
            ProvisioningType provisioningType, long size, KVMStoragePool destPool, int timeout) {
        SP_LOG("StorpooolStorageAdaptor.createDiskFromTemplate: template=%s, name=%s, fmt=%s, ptype=%s, size=%d, dst_pool=%s, to=%d",
               template, name, format, provisioningType, size, destPool.getUuid(), timeout);
        throw new UnsupportedOperationException("Creating a disk from a template is not yet supported for this configuration.");
    }

    /** Not supported by the StorPool adaptor. */
    @Override
    public KVMPhysicalDisk createTemplateFromDisk(KVMPhysicalDisk disk, String name, PhysicalDiskFormat format, long size, KVMStoragePool destPool) {
        SP_LOG("StorpooolStorageAdaptor.createTemplateFromDisk: disk=%s, name=%s, fmt=%s, size=%d, dst_pool=%s", disk, name, format, size, destPool.getUuid());
        throw new UnsupportedOperationException("Creating a template from a disk is not yet supported for this configuration.");
    }

    /** Not supported by the StorPool adaptor. */
    @Override
    public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout) {
        SP_LOG("StorpooolStorageAdaptor.copyPhysicalDisk: disk=%s, name=%s, dst_pool=%s, to=%d", disk, name, destPool.getUuid(), timeout);
        throw new UnsupportedOperationException("Copying a disk is not supported in this configuration.");
    }

    // NOTE(review): no @Override here, unlike most siblings — confirm whether
    // StorageAdaptor declares this signature.
    public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool) {
        SP_LOG("StorpooolStorageAdaptor.createDiskFromSnapshot: snap=%s, snap_name=%s, name=%s, dst_pool=%s", snapshot, snapshotName, name, destPool.getUuid());
        throw new UnsupportedOperationException("Creating a disk from a snapshot is not supported in this configuration.");
    }

    /** No host-side state to refresh for StorPool pools; always succeeds. */
    @Override
    public boolean refresh(KVMStoragePool pool) {
        SP_LOG("StorpooolStorageAdaptor.refresh: pool=%s", pool);
        return true;
    }

    /** Not supported by the StorPool adaptor. */
    @Override
    public boolean createFolder(String uuid, String path) {
        SP_LOG("StorpooolStorageAdaptor.createFolder: uuid=%s, path=%s", uuid, path);
        throw new UnsupportedOperationException("A folder cannot be created in this configuration.");
    }

    /** Not supported by the StorPool adaptor. */
    public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name,
            KVMStoragePool destPool, int timeout) {
        SP_LOG("StorpooolStorageAdaptor.createDiskFromSnapshot: snap=%s, snap_name=%s, name=%s, dst_pool=%s", snapshot,
                snapshotName, name, destPool.getUuid());
        throw new UnsupportedOperationException(
                "Creating a disk from a snapshot is not supported in this configuration.");
    }

    /** Not supported by the StorPool adaptor. */
    public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, String name,
            PhysicalDiskFormat format, long size, KVMStoragePool destPool, int timeout) {
        SP_LOG("StorpooolStorageAdaptor.createDiskFromTemplateBacking: template=%s, name=%s, dst_pool=%s", template,
                name, destPool.getUuid());
        throw new UnsupportedOperationException(
                "Creating a disk from a template is not supported in this configuration.");
    }

    /** Not supported by the StorPool adaptor. */
    public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, KVMStoragePool destPool,
            boolean isIso) {
        SP_LOG("StorpooolStorageAdaptor.createTemplateFromDirectDownloadFile: templateFilePath=%s, dst_pool=%s",
                templateFilePath, destPool.getUuid());
        throw new UnsupportedOperationException(
                "Creating a template from direct download is not supported in this configuration.");
    }

    // NOTE(review): silently returns null while every sibling throws
    // UnsupportedOperationException — callers cannot distinguish "unsupported"
    // from a failed operation. Confirm intent.
    public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath,
            KVMStoragePool destPool, ImageFormat format, int timeout) {
        return null;
    }

    // NOTE(review): silently returns false — same inconsistency as above.
    @Override
    public boolean createFolder(String uuid, String path, String localPath) {
        return false;
    }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/storage/StorPoolStoragePool.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/storage/StorPoolStoragePool.java
new file mode 100644
index 0000000000..d031f319d1
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/storage/StorPoolStoragePool.java
@@ -0,0 +1,164 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.hypervisor.kvm.storage;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
+
+import com.cloud.storage.Storage;
+import com.cloud.storage.Storage.StoragePoolType;
+
/**
 * In-memory representation of a StorPool-backed KVM storage pool. Holds the
 * pool's identity and delegates every disk operation to the owning
 * {@link StorageAdaptor}; it keeps no host-side state of its own.
 */
public class StorPoolStoragePool implements KVMStoragePool {
    private String _uuid;               // CloudStack storage-pool UUID
    private String _sourceHost;         // management host as given at creation
    private int _sourcePort;
    private StoragePoolType _storagePoolType;
    private StorageAdaptor _storageAdaptor; // adaptor all operations delegate to
    // The fields below are never assigned in this class; their getters return null.
    private String _authUsername;
    private String _authSecret;
    private String _sourceDir;
    private String _localPath;

    public StorPoolStoragePool(String uuid, String host, int port, StoragePoolType storagePoolType, StorageAdaptor storageAdaptor) {
        _uuid = uuid;
        _sourceHost = host;
        _sourcePort = port;
        _storagePoolType = storagePoolType;
        _storageAdaptor = storageAdaptor;
    }

    @Override
    public String getUuid() {
        return _uuid;
    }

    @Override
    public String getSourceHost() {
        return _sourceHost;
    }

    @Override
    public int getSourcePort() {
        return _sourcePort;
    }

    /**
     * Hard-coded 100 PiB (100 * 1024^5 bytes). Presumably a placeholder large
     * enough that CloudStack's capacity checks never reject an allocation,
     * since StorPool tracks real capacity itself — TODO confirm.
     */
    @Override
    public long getCapacity() {
        return 100L*(1024L*1024L*1024L*1024L*1024L);
    }

    // Reported as 0 — usage accounting is not surfaced through this class.
    @Override
    public long getUsed() {
        return 0;
    }

    // Reported as 0; note this is inconsistent with the 100 PiB capacity above.
    @Override
    public long getAvailable() {
        return 0;
    }

    @Override
    public StoragePoolType getType() {
        return _storagePoolType;
    }

    @Override
    public String getAuthUserName() {
        return _authUsername;
    }

    @Override
    public String getAuthSecret() {
        return _authSecret;
    }

    @Override
    public String getSourceDir() {
        return _sourceDir;
    }

    @Override
    public String getLocalPath() {
        return _localPath;
    }

    /** StorPool volumes are always exposed as raw block devices. */
    @Override
    public PhysicalDiskFormat getDefaultFormat() {
        return PhysicalDiskFormat.RAW;
    }

    @Override
    public KVMPhysicalDisk createPhysicalDisk(String name, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size) {
        return _storageAdaptor.createPhysicalDisk(name, this, format, provisioningType, size);
    }

    @Override
    public KVMPhysicalDisk createPhysicalDisk(String name, Storage.ProvisioningType provisioningType, long size) {
        return _storageAdaptor.createPhysicalDisk(name, this, null, provisioningType, size);
    }

    @Override
    public boolean connectPhysicalDisk(String name, Map<String, String> details) {
        return _storageAdaptor.connectPhysicalDisk(name, this, details);
    }

    @Override
    public KVMPhysicalDisk getPhysicalDisk(String volumeUuid) {
        return _storageAdaptor.getPhysicalDisk(volumeUuid, this);
    }

    @Override
    public boolean disconnectPhysicalDisk(String volumeUuid) {
        return _storageAdaptor.disconnectPhysicalDisk(volumeUuid, this);
    }

    @Override
    public boolean deletePhysicalDisk(String volumeUuid, Storage.ImageFormat format) {
        return _storageAdaptor.deletePhysicalDisk(volumeUuid, this, format);
    }

    @Override
    public List<KVMPhysicalDisk> listPhysicalDisks() {
        return _storageAdaptor.listPhysicalDisks(_uuid, this);
    }

    @Override
    public boolean refresh() {
        return _storageAdaptor.refresh(this);
    }

    @Override
    public boolean delete() {
        return _storageAdaptor.deleteStoragePool(this);
    }

    @Override
    public boolean createFolder(String path) {
        return _storageAdaptor.createFolder(_uuid, path);
    }

    /** StorPool snapshots are managed by the backend, not by libvirt. */
    @Override
    public boolean isExternalSnapshot() {
        return false;
    }

    // NOTE(review): no @Override, unlike every method above — confirm whether
    // KVMStoragePool declares supportsConfigDriveIso().
    public boolean supportsConfigDriveIso() {
        return false;
    }
}
diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/collector/StorPoolAbandonObjectsCollector.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/collector/StorPoolAbandonObjectsCollector.java
new file mode 100644
index 0000000000..499d0630b8
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/collector/StorPoolAbandonObjectsCollector.java
@@ -0,0 +1,323 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.collector;
+
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.framework.config.ConfigKey;
+import org.apache.cloudstack.framework.config.Configurable;
+import org.apache.cloudstack.managed.context.ManagedContextRunnable;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.datastore.util.StorPoolHelper;
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.log4j.Logger;
+
+import com.cloud.utils.component.ManagerBase;
+import com.cloud.utils.concurrency.NamedThreadFactory;
+import com.cloud.utils.db.DB;
+import com.cloud.utils.db.Transaction;
+import com.cloud.utils.db.TransactionCallbackNoReturn;
+import com.cloud.utils.db.TransactionLegacy;
+import com.cloud.utils.db.TransactionStatus;
+import com.google.gson.JsonArray;
+import com.google.gson.JsonObject;
+
/**
 * Periodic background checker that compares the volumes and snapshots present
 * on the StorPool backend against CloudStack's database and logs any StorPool
 * object CloudStack no longer knows about ("abandoned" objects). Purely
 * diagnostic: nothing is deleted; findings go to {@link #ABANDON_LOG}.
 *
 * Implementation approach: each run bulk-inserts the StorPool-side names into
 * temporary tables and LEFT JOINs them against the CloudStack tables, so the
 * comparison happens in SQL rather than row-by-row in Java.
 */
public class StorPoolAbandonObjectsCollector extends ManagerBase implements Configurable {
    private static Logger log = Logger.getLogger(StorPoolAbandonObjectsCollector.class);
    @Inject
    private PrimaryDataStoreDao storagePoolDao;
    @Inject
    private StoragePoolDetailsDao storagePoolDetailsDao;

    // Runs both the volume and the snapshot checkup tasks (pool size 2).
    private ScheduledExecutorService _volumeTagsUpdateExecutor;
    private static final String ABANDON_LOG = "/var/log/cloudstack/management/storpool-abandoned-objects";


    // Interval configs; a value <= 0 disables the corresponding task (see init()).
    static final ConfigKey<Integer> volumeCheckupTagsInterval = new ConfigKey<Integer>("Advanced", Integer.class,
            "storpool.volume.tags.checkup", "86400",
            "Minimal interval (in seconds) to check and report if StorPool volume exists in CloudStack volumes database",
            false);
    static final ConfigKey<Integer> snapshotCheckupTagsInterval = new ConfigKey<Integer>("Advanced", Integer.class,
            "storpool.snapshot.tags.checkup", "86400",
            "Minimal interval (in seconds) to check and report if StorPool snapshot exists in CloudStack snapshots database",
            false);

    @Override
    public String getConfigComponentName() {
        return StorPoolAbandonObjectsCollector.class.getSimpleName();
    }

    @Override
    public ConfigKey<?>[] getConfigKeys() {
        return new ConfigKey<?>[] { volumeCheckupTagsInterval, snapshotCheckupTagsInterval };
    }

    @Override
    public boolean start() {
        init();
        return true;
    }

    /** Schedules the two checkup tasks; an interval <= 0 disables that task. */
    private void init() {
        _volumeTagsUpdateExecutor = Executors.newScheduledThreadPool(2,
                new NamedThreadFactory("StorPoolAbandonObjectsCollector"));
        StorPoolHelper.appendLogger(log, ABANDON_LOG, "abandon");
        if (volumeCheckupTagsInterval.value() > 0) {
            _volumeTagsUpdateExecutor.scheduleAtFixedRate(new StorPoolVolumesTagsUpdate(),
                    volumeCheckupTagsInterval.value(), volumeCheckupTagsInterval.value(), TimeUnit.SECONDS);
        }
        if (snapshotCheckupTagsInterval.value() > 0) {
            _volumeTagsUpdateExecutor.scheduleAtFixedRate(new StorPoolSnapshotsTagsUpdate(),
                    snapshotCheckupTagsInterval.value(), snapshotCheckupTagsInterval.value(), TimeUnit.SECONDS);
        }
    }

    /**
     * Checks StorPool volumes against the `volumes` and `storage_pool_details`
     * tables and logs volumes CloudStack does not know about.
     */
    class StorPoolVolumesTagsUpdate extends ManagedContextRunnable {

        @Override
        @DB
        protected void runInContext() {
            List<StoragePoolVO> spPools = storagePoolDao.findPoolsByProvider(StorPoolUtil.SP_PROVIDER_NAME);
            if (CollectionUtils.isEmpty(spPools)) {
                return;
            }
            // name -> "cs" tag value, collected from every StorPool-backed pool
            Map<String, String> volumes = new HashMap<>();
            for (StoragePoolVO storagePoolVO : spPools) {
                try {
                    JsonArray arr = StorPoolUtil.volumesList(StorPoolUtil.getSpConnection(storagePoolVO.getUuid(), storagePoolVO.getId(), storagePoolDetailsDao, storagePoolDao));
                    volumes.putAll(getStorPoolNamesAndCsTag(arr));
                } catch (Exception e) {
                    log.debug(String.format("Could not collect abandon objects due to %s", e.getMessage()), e);
                }
            }
            // NOTE(review): opening a TransactionLegacy inside Transaction.execute
            // is unusual — confirm the nesting is intended.
            Transaction.execute(new TransactionCallbackNoReturn() {
                @Override
                public void doInTransactionWithoutResult(TransactionStatus status) {
                    TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);

                    try {
                        PreparedStatement pstmt = txn.prepareAutoCloseStatement(
                                "CREATE TEMPORARY TABLE `cloud`.`volumes1`(`id` bigint unsigned NOT NULL auto_increment, `name` varchar(255) NOT NULL,`tag` varchar(255) NOT NULL, PRIMARY KEY (`id`))");
                        pstmt.executeUpdate();

                        pstmt = txn.prepareAutoCloseStatement(
                                "CREATE TEMPORARY TABLE `cloud`.`volumes_on_host1`(`id` bigint unsigned NOT NULL auto_increment, `name` varchar(255) NOT NULL,`tag` varchar(255) NOT NULL, PRIMARY KEY (`id`))");
                        pstmt.executeUpdate();

                    // NOTE(review): the log wording "delete vm work job" looks
                    // copy-pasted from elsewhere; this block creates temp tables.
                    } catch (SQLException e) {
                        log.info(String.format("[ignored] SQL failed to delete vm work job: %s ",
                                e.getLocalizedMessage()));
                    } catch (Throwable e) {
                        log.info(String.format("[ignored] caught an error during delete vm work job: %s",
                                e.getLocalizedMessage()));
                    }

                    try {
                        PreparedStatement pstmt = txn.prepareStatement("INSERT INTO `cloud`.`volumes1` (name, tag) VALUES (?, ?)");
                        PreparedStatement volumesOnHostpstmt = txn.prepareStatement("INSERT INTO `cloud`.`volumes_on_host1` (name, tag) VALUES (?, ?)");
                        for (Map.Entry<String, String> volume : volumes.entrySet()) {
                            if (volume.getValue().equals("volume")) {
                                addRecordToDb(volume.getKey(), pstmt, volume.getValue(), true);
                            } else if (volume.getValue().equals("check-volume-is-on-host")) {
                                addRecordToDb(volume.getKey(), volumesOnHostpstmt, volume.getValue(), true);
                            }
                        }
                        pstmt.executeBatch();
                        volumesOnHostpstmt.executeBatch();
                        // LEFT JOIN: rows with no CloudStack match (or a volume
                        // not in the expected state) are the abandoned ones.
                        String sql = "SELECT f.* FROM `cloud`.`volumes1` f LEFT JOIN `cloud`.`volumes` v ON f.name=v.path where v.path is NULL OR NOT state=?";
                        findMissingRecordsInCS(txn, sql, "volume");

                        String sqlVolumeOnHost = "SELECT f.* FROM `cloud`.`volumes_on_host1` f LEFT JOIN `cloud`.`storage_pool_details` v ON f.name=v.value where v.value is NULL";
                        findMissingRecordsInCS(txn, sqlVolumeOnHost, "volumes_on_host");
                    } catch (SQLException e) {
                        log.info(String.format("[ignored] SQL failed due to: %s ",
                                e.getLocalizedMessage()));
                    } catch (Throwable e) {
                        log.info(String.format("[ignored] caught an error: %s",
                                e.getLocalizedMessage()));
                    } finally {
                        // NOTE(review): txn.close() is called both in this catch
                        // and unconditionally after it (double close); the log
                        // message name "createTemporaryVolumeTable" is also stale.
                        try {
                            PreparedStatement pstmt = txn.prepareStatement("DROP TABLE `cloud`.`volumes1`");
                            pstmt.executeUpdate();
                            pstmt = txn.prepareStatement("DROP TABLE `cloud`.`volumes_on_host1`");
                            pstmt.executeUpdate();
                        } catch (SQLException e) {
                            txn.close();
                            log.info(String.format("createTemporaryVolumeTable %s", e.getMessage()));
                        }
                        txn.close();
                    }
                }
            });
        }
    }

    /**
     * Checks StorPool snapshots against the snapshot/VM-snapshot/template
     * detail tables and logs snapshots CloudStack does not know about.
     */
    class StorPoolSnapshotsTagsUpdate extends ManagedContextRunnable {

        @Override
        @DB
        protected void runInContext() {
            List<StoragePoolVO> spPools = storagePoolDao.findPoolsByProvider(StorPoolUtil.SP_PROVIDER_NAME);
            Map<String, String> snapshots = new HashMap<String, String>();
            if (CollectionUtils.isEmpty(spPools)) {
                return;
            }
            for (StoragePoolVO storagePoolVO : spPools) {
                try {
                    JsonArray arr = StorPoolUtil.snapshotsList(StorPoolUtil.getSpConnection(storagePoolVO.getUuid(), storagePoolVO.getId(), storagePoolDetailsDao, storagePoolDao));
                    snapshots.putAll(getStorPoolNamesAndCsTag(arr));
                } catch (Exception e) {
                    log.debug(String.format("Could not collect abandon objects due to %s", e.getMessage()));
                }
            }
            Transaction.execute(new TransactionCallbackNoReturn() {
                @Override
                public void doInTransactionWithoutResult(TransactionStatus status) {
                    TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);

                    try{
                        PreparedStatement pstmt = txn.prepareAutoCloseStatement(
                                "CREATE TEMPORARY TABLE `cloud`.`snapshots1`(`id` bigint unsigned NOT NULL auto_increment, `name` varchar(255) NOT NULL,`tag` varchar(255) NOT NULL, PRIMARY KEY (`id`))");
                        pstmt.executeUpdate();

                        pstmt = txn.prepareAutoCloseStatement(
                                "CREATE TEMPORARY TABLE `cloud`.`vm_snapshots1`(`id` bigint unsigned NOT NULL auto_increment, `name` varchar(255) NOT NULL,`tag` varchar(255) NOT NULL, PRIMARY KEY (`id`))");
                        pstmt.executeUpdate();

                        pstmt = txn.prepareAutoCloseStatement(
                                "CREATE TEMPORARY TABLE `cloud`.`vm_templates1`(`id` bigint unsigned NOT NULL auto_increment, `name` varchar(255) NOT NULL,`tag` varchar(255) NOT NULL, PRIMARY KEY (`id`))");
                        pstmt.executeUpdate();
                    // NOTE(review): same copy-pasted "delete vm work job" wording
                    // as in StorPoolVolumesTagsUpdate.
                    } catch (SQLException e) {
                        log.info(String.format("[ignored] SQL failed to delete vm work job: %s ",
                                e.getLocalizedMessage()));
                    } catch (Throwable e) {
                        log.info(String.format("[ignored] caught an error during delete vm work job: %s",
                                e.getLocalizedMessage()));
                    }

                    try {
                        PreparedStatement snapshotsPstmt = txn.prepareStatement("INSERT INTO `cloud`.`snapshots1` (name, tag) VALUES (?, ?)");
                        PreparedStatement groupSnapshotsPstmt = txn.prepareStatement("INSERT INTO `cloud`.`vm_snapshots1` (name, tag) VALUES (?, ?)");
                        PreparedStatement templatePstmt = txn.prepareStatement("INSERT INTO `cloud`.`vm_templates1` (name, tag) VALUES (?, ?)");
                        for (Map.Entry<String, String> snapshot : snapshots.entrySet()) {
                            // "group" = VM snapshot, "template" = template snapshot,
                            // everything else is a plain volume snapshot.
                            if (!snapshot.getValue().equals("group") && !snapshot.getValue().equals("template")) {
                                addRecordToDb(snapshot.getKey(), snapshotsPstmt, snapshot.getValue(), true);
                            } else if (snapshot.getValue().equals("group")) {
                                addRecordToDb(snapshot.getKey(), groupSnapshotsPstmt, snapshot.getValue(), true);
                            } else if (snapshot.getValue().equals("template")) {
                                addRecordToDb(snapshot.getKey(), templatePstmt, snapshot.getValue(), true);
                            }
                        }
                        snapshotsPstmt.executeBatch();
                        groupSnapshotsPstmt.executeBatch();
                        templatePstmt.executeBatch();

                        String sqlSnapshots = "SELECT f.* FROM `cloud`.`snapshots1` f LEFT JOIN `cloud`.`snapshot_details` v ON f.name=v.value where v.value is NULL";
                        findMissingRecordsInCS(txn, sqlSnapshots, "snapshot");

                        String sqlVmSnapshots = "SELECT f.* FROM `cloud`.`vm_snapshots1` f LEFT JOIN `cloud`.`vm_snapshot_details` v ON f.name=v.value where v.value is NULL";
                        findMissingRecordsInCS(txn, sqlVmSnapshots, "snapshot");

                        // NOTE(review): the template query is also reported under
                        // the object label "snapshot" — the abandoned-object log
                        // will call templates snapshots.
                        String sqlTemplates = "SELECT temp.*"
                                + " FROM `cloud`.`vm_templates1` temp"
                                + " LEFT JOIN `cloud`.`template_store_ref` store"
                                + " ON temp.name=store.local_path"
                                + " LEFT JOIN `cloud`.`template_spool_ref` spool"
                                + " ON temp.name=spool.local_path"
                                + " where store.local_path is NULL"
                                + " and spool.local_path is NULL";
                        findMissingRecordsInCS(txn, sqlTemplates, "snapshot");
                    } catch (SQLException e) {
                        log.info(String.format("[ignored] SQL failed due to: %s ",
                                e.getLocalizedMessage()));
                    } catch (Throwable e) {
                        log.info(String.format("[ignored] caught an error: %s",
                                e.getLocalizedMessage()));
                    } finally {
                        // Same double txn.close() pattern as the volumes task.
                        try {
                            PreparedStatement pstmt = txn.prepareStatement("DROP TABLE `cloud`.`snapshots1`");
                            pstmt.executeUpdate();
                            pstmt = txn.prepareStatement("DROP TABLE `cloud`.`vm_snapshots1`");
                            pstmt.executeUpdate();
                            pstmt = txn.prepareStatement("DROP TABLE `cloud`.`vm_templates1`");
                            pstmt.executeUpdate();
                        } catch (SQLException e) {
                            txn.close();
                            log.info(String.format("createTemporaryVolumeTable %s", e.getMessage()));
                        }
                        txn.close();
                    }
                }
            });
        }
    }

    /**
     * Adds one StorPool object name to a temp-table insert batch. A leading
     * "~" (by-global-id name) is stripped; when pathNeeded the name is stored
     * as its /dev device path so it joins against CloudStack path columns.
     */
    private void addRecordToDb(String name, PreparedStatement pstmt, String tag, boolean pathNeeded)
            throws SQLException {
        name = name.startsWith("~") ? name.split("~")[1] : name;
        pstmt.setString(1, pathNeeded ? StorPoolUtil.devPath(name) : name);
        pstmt.setString(2, tag);
        pstmt.addBatch();
    }

    /**
     * Runs one of the LEFT JOIN queries above and logs each row, i.e. each
     * StorPool object with no CloudStack counterpart. Only the "volume" query
     * carries a bind parameter (the expected volume state "Ready").
     */
    private void findMissingRecordsInCS(TransactionLegacy txn, String sql, String object) throws SQLException {
        ResultSet rs;
        PreparedStatement pstmt2 = txn.prepareStatement(sql);
        if (object.equals("volume")) {
            pstmt2.setString(1, "Ready");
        }
        rs = pstmt2.executeQuery();
        String name = null;
        while (rs.next()) {
            name = rs.getString(2);
            log.info(String.format(
                    "CloudStack does not know about StorPool %s %s, it had to be a %s", object, name, rs.getString(3)));
        }
    }

    /**
     * Extracts name -> "cs" tag from a StorPool API listing, skipping
     * anonymous/system entries (names starting with "*" or containing "@")
     * and entries marked deleted, and keeping only objects carrying a "cs" tag
     * (i.e. created by CloudStack).
     */
    private Map<String,String> getStorPoolNamesAndCsTag(JsonArray arr) {
        Map<String, String> map = new HashMap<>();
        for (int i = 0; i < arr.size(); i++) {
            String name = arr.get(i).getAsJsonObject().get("name").getAsString();
            String tag = null;
            if (!name.startsWith("*") && !name.contains("@")) {
                JsonObject tags = arr.get(i).getAsJsonObject().get("tags").getAsJsonObject();
                if (tags != null && tags.getAsJsonPrimitive("cs") != null && !(arr.get(i).getAsJsonObject().get("deleted") != null && arr.get(i).getAsJsonObject().get("deleted").getAsBoolean())) {
                    tag = tags.getAsJsonPrimitive("cs").getAsString();
                    map.put(name, tag);
                }
            }
        }
        return map;
    }
}
diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java
new file mode 100644
index 0000000000..97b15d847f
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java
@@ -0,0 +1,976 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.datastore.driver;
+import java.util.Map;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
+import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
+import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
+import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
+import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
+import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.cloudstack.storage.RemoteHostEndPoint;
+import org.apache.cloudstack.storage.command.CommandResult;
+import org.apache.cloudstack.storage.command.CopyCmdAnswer;
+import org.apache.cloudstack.storage.command.CreateObjectAnswer;
+import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
+import org.apache.cloudstack.storage.datastore.util.StorPoolHelper;
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil;
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpApiResponse;
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpConnectionDesc;
+import org.apache.cloudstack.storage.snapshot.StorPoolConfigurationManager;
+import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
+import org.apache.cloudstack.storage.to.SnapshotObjectTO;
+import org.apache.cloudstack.storage.to.TemplateObjectTO;
+import org.apache.cloudstack.storage.to.VolumeObjectTO;
+import org.apache.cloudstack.storage.volume.VolumeObject;
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.storage.ResizeVolumeAnswer;
+import com.cloud.agent.api.storage.StorPoolBackupSnapshotCommand;
+import com.cloud.agent.api.storage.StorPoolBackupTemplateFromSnapshotCommand;
+import com.cloud.agent.api.storage.StorPoolCopyVolumeToSecondaryCommand;
+import com.cloud.agent.api.storage.StorPoolDownloadTemplateCommand;
+import com.cloud.agent.api.storage.StorPoolDownloadVolumeCommand;
+import com.cloud.agent.api.storage.StorPoolResizeVolumeCommand;
+import com.cloud.agent.api.to.DataObjectType;
+import com.cloud.agent.api.to.DataStoreTO;
+import com.cloud.agent.api.to.DataTO;
+import com.cloud.agent.api.to.StorageFilerTO;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.host.Host;
+import com.cloud.host.dao.HostDao;
+import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
+import com.cloud.server.ResourceTag;
+import com.cloud.server.ResourceTag.ResourceObjectType;
+import com.cloud.storage.DataStoreRole;
+import com.cloud.storage.ResizeVolumePayload;
+import com.cloud.storage.Storage.StoragePoolType;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.VMTemplateDetailVO;
+import com.cloud.storage.VMTemplateStoragePoolVO;
+import com.cloud.storage.VolumeDetailVO;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.SnapshotDetailsDao;
+import com.cloud.storage.dao.SnapshotDetailsVO;
+import com.cloud.storage.dao.VMTemplateDetailsDao;
+import com.cloud.storage.dao.VMTemplatePoolDao;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.storage.dao.VolumeDetailsDao;
+import com.cloud.tags.dao.ResourceTagDao;
+import com.cloud.utils.Pair;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.VirtualMachineManager;
+import com.cloud.vm.dao.VMInstanceDao;
+
+public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
+
// Driver-wide logger.
private static final Logger log = Logger.getLogger(StorPoolPrimaryDataStoreDriver.class);

// CloudStack DAOs/managers injected by the framework; used by the
// volume / snapshot / template flows implemented in this driver.
@Inject
private VolumeDao volumeDao;
@Inject
private StorageManager storageMgr;
@Inject
private PrimaryDataStoreDao primaryStoreDao;
@Inject
private EndPointSelector selector;
@Inject
private ConfigurationDao configDao;
@Inject
private TemplateDataStoreDao vmTemplateDataStoreDao;
@Inject
private VMInstanceDao vmInstanceDao;
@Inject
private ClusterDao clusterDao;
@Inject
private HostDao hostDao;
@Inject
private ResourceTagDao _resourceTagDao;
@Inject
private SnapshotDetailsDao _snapshotDetailsDao;
@Inject
private SnapshotDataStoreDao snapshotDataStoreDao;
@Inject
private VolumeDetailsDao volumeDetailsDao;
@Inject
private VMTemplateDetailsDao vmTemplateDetailsDao;
@Inject
private StoragePoolDetailsDao storagePoolDetailsDao;
@Inject
private VMTemplatePoolDao vmTemplatePoolDao;
+
/** No driver-specific capabilities are advertised; always returns null. */
@Override
public Map<String, String> getCapabilities() {
    return null;
}
+
/** Not implemented by this driver; always returns null. */
@Override
public DataTO getTO(DataObject data) {
    return null;
}
+
/** Not implemented by this driver; always returns null. */
@Override
public DataStoreTO getStoreTO(DataStore store) {
    return null;
}
+
/** Always returns 0 — per-pool usage is tracked on the pool row instead. */
@Override
public long getUsedBytes(StoragePool storagePool) {
    return 0;
}
+
/** Always returns 0 — IOPS usage is not computed by this driver. */
@Override
public long getUsedIops(StoragePool storagePool) {
    return 0;
}
+
/** No per-host access grant is performed; always returns false. */
@Override
public boolean grantAccess(DataObject data, Host host, DataStore dataStore) {
    return false;
}
+
/** No-op: this driver performs no per-host access revocation. */
@Override
public void revokeAccess(DataObject data, Host host, DataStore dataStore) {
}
+
+ private void updateStoragePool(final long poolId, final long deltaUsedBytes) {
+ StoragePoolVO storagePool = primaryStoreDao.findById(poolId);
+ final long capacity = storagePool.getCapacityBytes();
+ final long used = storagePool.getUsedBytes() + deltaUsedBytes;
+
+ storagePool.setUsedBytes(used < 0 ? 0 : (used > capacity ? capacity : used));
+ primaryStoreDao.update(poolId, storagePool);
+ }
+
+ private String getVMInstanceUUID(Long id) {
+ return id != null ? vmInstanceDao.findById(id).getUuid() : null;
+ }
+
+ protected void _completeResponse(final CreateObjectAnswer answer, final String err, final AsyncCompletionCallback<CommandResult> callback)
+ {
+ final CreateCmdResult res = new CreateCmdResult(null, answer);
+ res.setResult(err);
+ callback.complete(res);
+ }
+
/** Completes the callback successfully, wrapping {@code result} in a CreateObjectAnswer with no error. */
protected void completeResponse(final DataTO result, final AsyncCompletionCallback<CommandResult> callback)
{
    _completeResponse(new CreateObjectAnswer(result), null, callback);
}
+
/** Completes the callback with a failure: {@code err} is used both as the answer detail and the result string. */
protected void completeResponse(final String err, final AsyncCompletionCallback<CommandResult> callback)
{
    _completeResponse(new CreateObjectAnswer(err), err, callback);
}
+
/** No extra hypervisor snapshot reserve is added; returns the object's own size. */
@Override
public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataObject, StoragePool pool) {
    return dataObject.getSize();
}
+
/** Always returns 0 — no bytes are reserved up-front for templates. */
@Override
public long getBytesRequiredForTemplate(TemplateInfo templateInfo, StoragePool storagePool) {
    return 0;
}
+
/** Always returns null — no CHAP information is provided by this driver. */
@Override
public ChapInfo getChapInfo(DataObject dataObject) {
    return null;
}
+
/**
 * Creates a StorPool volume for a VOLUME data object.
 *
 * On success the CloudStack volume row is updated with the pool id, pool
 * type (SharedMountPoint) and the StorPool device path, and the pool's
 * used-bytes counter is increased by the volume size. Any other object
 * type, a StorPool API error, or an exception results in a failed
 * CreateCmdResult (err set, path null).
 */
@Override
public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
    String path = null;
    String err = null;
    if (data.getType() == DataObjectType.VOLUME) {
        try {
            VolumeInfo vinfo = (VolumeInfo)data;
            // The CloudStack volume UUID is used as the StorPool volume name.
            String name = vinfo.getUuid();
            Long size = vinfo.getSize();
            SpConnectionDesc conn = StorPoolUtil.getSpConnection(dataStore.getUuid(), dataStore.getId(), storagePoolDetailsDao, primaryStoreDao);

            StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriver.createAsync volume: name=%s, uuid=%s, isAttached=%s vm=%s, payload=%s, template: %s", vinfo.getName(), vinfo.getUuid(), vinfo.isAttachedVM(), vinfo.getAttachedVmName(), vinfo.getpayload(), conn.getTemplateName());
            SpApiResponse resp = StorPoolUtil.volumeCreate(name, null, size, getVMInstanceUUID(vinfo.getInstanceId()), null, "volume", vinfo.getMaxIops(), conn);
            if (resp.getError() == null) {
                String volumeName = StorPoolUtil.getNameFromResponse(resp, false);
                path = StorPoolUtil.devPath(volumeName);

                // Persist pool binding and device path on the CloudStack volume row.
                VolumeVO volume = volumeDao.findById(vinfo.getId());
                volume.setPoolId(dataStore.getId());
                volume.setPoolType(StoragePoolType.SharedMountPoint);
                volume.setPath(path);
                volumeDao.update(volume.getId(), volume);

                // Account the new volume against the pool's used bytes.
                updateStoragePool(dataStore.getId(), size);
                StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriver.createAsync volume: name=%s, uuid=%s, isAttached=%s vm=%s, payload=%s, template: %s", volumeName, vinfo.getUuid(), vinfo.isAttachedVM(), vinfo.getAttachedVmName(), vinfo.getpayload(), conn.getTemplateName());
            } else {
                err = String.format("Could not create StorPool volume %s. Error: %s", name, resp.getError());
            }
        } catch (Exception e) {
            err = String.format("Could not create volume due to %s", e.getMessage());
        }
    } else {
        err = String.format("Invalid object type \"%s\" passed to createAsync", data.getType());
    }

    CreateCmdResult res = new CreateCmdResult(path, new Answer(null, err == null, err));
    res.setResult(err);
    if (callback != null) {
        callback.complete(res);
    }
}
+
/**
 * Resizes a StorPool-backed volume.
 *
 * Flow: update the volume through the StorPool API, then send a
 * StorPoolResizeVolumeCommand to the pool/host so the hypervisor picks up
 * the new size; on success persist the new size (and max IOPS, if changed)
 * in the CloudStack DB and adjust the pool's used-bytes delta. If any step
 * after the API resize fails, the StorPool volume is restored to its
 * original size and IOPS (best effort — a failure there is only logged).
 */
@Override
public void resize(DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
    String path = null;
    String err = null;
    ResizeVolumeAnswer answer = null;

    if (data.getType() == DataObjectType.VOLUME) {
        VolumeObject vol = (VolumeObject)data;
        StoragePool pool = (StoragePool)data.getDataStore();
        ResizeVolumePayload payload = (ResizeVolumePayload)vol.getpayload();

        final String name = StorPoolStorageAdaptor.getVolumeNameFromPath(vol.getPath(), true);
        // Remember the original size/IOPS so the volume can be rolled back on failure.
        final long oldSize = vol.getSize();
        Long oldMaxIops = vol.getMaxIops();

        try {
            SpConnectionDesc conn = StorPoolUtil.getSpConnection(data.getDataStore().getUuid(), data.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);

            StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.resize: name=%s, uuid=%s, oldSize=%d, newSize=%s, shrinkOk=%s", name, vol.getUuid(), oldSize, payload.newSize, payload.shrinkOk);

            SpApiResponse resp = StorPoolUtil.volumeUpdate(name, payload.newSize, payload.shrinkOk, payload.newMaxIops, conn);
            if (resp.getError() != null) {
                err = String.format("Could not resize StorPool volume %s. Error: %s", name, resp.getError());
            } else {
                // Propagate the resize to the hypervisor side.
                StorPoolResizeVolumeCommand resizeCmd = new StorPoolResizeVolumeCommand(vol.getPath(), new StorageFilerTO(pool), vol.getSize(), payload.newSize, payload.shrinkOk,
                        payload.instanceName, payload.hosts == null ? false : true);
                answer = (ResizeVolumeAnswer) storageMgr.sendToPool(pool, payload.hosts, resizeCmd);

                if (answer == null || !answer.getResult()) {
                    err = answer != null ? answer.getDetails() : "return a null answer, resize failed for unknown reason";
                } else {
                    path = StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false));

                    // Persist the new size (and IOPS, if requested) in the DB.
                    vol.setSize(payload.newSize);
                    vol.update();
                    if (payload.newMaxIops != null) {
                        VolumeVO volume = volumeDao.findById(vol.getId());
                        volume.setMaxIops(payload.newMaxIops);
                        volumeDao.update(volume.getId(), volume);
                    }

                    updateStoragePool(vol.getPoolId(), payload.newSize - oldSize);
                }
            }
            if (err != null) {
                // try restoring volume to its initial size
                resp = StorPoolUtil.volumeUpdate(name, oldSize, true, oldMaxIops, conn);
                if (resp.getError() != null) {
                    log.debug(String.format("Could not resize StorPool volume %s back to its original size. Error: %s", name, resp.getError()));
                }
            }
        } catch (Exception e) {
            log.debug("sending resize command failed", e);
            err = e.toString();
        }
    } else {
        err = String.format("Invalid object type \"%s\" passed to resize", data.getType());
    }

    CreateCmdResult res = new CreateCmdResult(path, answer);
    res.setResult(err);
    callback.complete(res);
}
+
+ @Override
+ public void deleteAsync(DataStore dataStore, DataObject data, AsyncCompletionCallback<CommandResult> callback) {
+ String err = null;
+ if (data.getType() == DataObjectType.VOLUME) {
+ VolumeInfo vinfo = (VolumeInfo)data;
+ String name = StorPoolStorageAdaptor.getVolumeNameFromPath(vinfo.getPath(), true);
+ StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriver.deleteAsync delete volume: name=%s, uuid=%s, isAttached=%s vm=%s, payload=%s dataStore=%s", name, vinfo.getUuid(), vinfo.isAttachedVM(), vinfo.getAttachedVmName(), vinfo.getpayload(), dataStore.getUuid());
+ if (name == null) {
+ name = vinfo.getUuid();
+ }
+ try {
+ SpConnectionDesc conn = StorPoolUtil.getSpConnection(dataStore.getUuid(), dataStore.getId(), storagePoolDetailsDao, primaryStoreDao);
+
+ SpApiResponse resp = StorPoolUtil.volumeDelete(name, conn);
+ if (resp.getError() == null) {
+ updateStoragePool(dataStore.getId(), - vinfo.getSize());
+ VolumeDetailVO detail = volumeDetailsDao.findDetail(vinfo.getId(), StorPoolUtil.SP_PROVIDER_NAME);
+ if (detail != null) {
+ volumeDetailsDao.remove(detail.getId());
+ }
+ } else {
+ if (!resp.getError().getName().equalsIgnoreCase("objectDoesNotExist")) {
+ err = String.format("Could not delete StorPool volume %s. Error: %s", name, resp.getError());
+ }
+ }
+ } catch (Exception e) {
+ err = String.format("Could not delete volume due to %s", e.getMessage());
+ }
+ } else {
+ err = String.format("Invalid DataObjectType \"%s\" passed to deleteAsync", data.getType());
+ }
+
+ if (err != null) {
+ log.error(err);
+ StorPoolUtil.spLog(err);
+ }
+
+ CommandResult res = new CommandResult();
+ res.setResult(err);
+ callback.complete(res);
+ }
+
+ private void logDataObject(final String pref, DataObject data) {
+ final DataStore dstore = data.getDataStore();
+ String name = null;
+ Long size = null;
+
+ if (data.getType() == DataObjectType.VOLUME) {
+ VolumeInfo vinfo = (VolumeInfo)data;
+ name = vinfo.getName();
+ size = vinfo.getSize();
+ } else if (data.getType() == DataObjectType.SNAPSHOT) {
+ SnapshotInfo sinfo = (SnapshotInfo)data;
+ name = sinfo.getName();
+ size = sinfo.getSize();
+ } else if (data.getType() == DataObjectType.TEMPLATE) {
+ TemplateInfo tinfo = (TemplateInfo)data;
+ name = tinfo.getName();
+ size = tinfo.getSize();
+ }
+
+ StorPoolUtil.spLog("%s: name=%s, size=%s, uuid=%s, type=%s, dstore=%s:%s:%s", pref, name, size, data.getUuid(), data.getType(), dstore.getUuid(), dstore.getName(), dstore.getRole());
+ }
+
/** Always returns true — this driver accepts every copyAsync combination and sorts it out there. */
@Override
public boolean canCopy(DataObject srcData, DataObject dstData) {
    return true;
}
+
+ @Override
+ public void copyAsync(DataObject srcData, DataObject dstData, AsyncCompletionCallback<CopyCommandResult> callback) {
+ StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.copyAsnc:");
+ logDataObject("SRC", srcData);
+ logDataObject("DST", dstData);
+
+ final DataObjectType srcType = srcData.getType();
+ final DataObjectType dstType = dstData.getType();
+ String err = null;
+ Answer answer = null;
+ StorageSubSystemCommand cmd = null;
+
+ try {
+ if (srcType == DataObjectType.SNAPSHOT && dstType == DataObjectType.VOLUME) {
+ SnapshotInfo sinfo = (SnapshotInfo)srcData;
+ final String snapshotName = StorPoolHelper.getSnapshotName(srcData.getId(), srcData.getUuid(), snapshotDataStoreDao, _snapshotDetailsDao);
+
+ VolumeInfo vinfo = (VolumeInfo)dstData;
+ final String volumeName = vinfo.getUuid();
+ final Long size = vinfo.getSize();
+ SpConnectionDesc conn = StorPoolUtil.getSpConnection(vinfo.getDataStore().getUuid(), vinfo.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
+ SpApiResponse resp = StorPoolUtil.volumeCreate(volumeName, snapshotName, size, null, null, "volume", sinfo.getBaseVolume().getMaxIops(), conn);
+ if (resp.getError() == null) {
+ updateStoragePool(dstData.getDataStore().getId(), size);
+
+ VolumeObjectTO to = (VolumeObjectTO)dstData.getTO();
+ to.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false)));
+ to.setSize(size);
+
+ answer = new CopyCmdAnswer(to);
+ StorPoolUtil.spLog("Created volume=%s with uuid=%s from snapshot=%s with uuid=%s", StorPoolUtil.getNameFromResponse(resp, false), to.getUuid(), snapshotName, sinfo.getUuid());
+ } else if (resp.getError().getName().equals("objectDoesNotExist")) {
+ //check if snapshot is on secondary storage
+ StorPoolUtil.spLog("Snapshot %s does not exists on StorPool, will try to create a volume from a snopshot on secondary storage", snapshotName);
+ SnapshotDataStoreVO snap = snapshotDataStoreDao.findBySnapshot(sinfo.getId(), DataStoreRole.Image);
+ if (snap != null && StorPoolStorageAdaptor.getVolumeNameFromPath(snap.getInstallPath(), false) == null) {
+ resp = StorPoolUtil.volumeCreate(srcData.getUuid(), null, size, null, "no", "snapshot", sinfo.getBaseVolume().getMaxIops(), conn);
+ if (resp.getError() == null) {
+ VolumeObjectTO dstTO = (VolumeObjectTO) dstData.getTO();
+ dstTO.setSize(size);
+ dstTO.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false)));
+ cmd = new StorPoolDownloadTemplateCommand(srcData.getTO(), dstTO, StorPoolHelper.getTimeout(StorPoolHelper.PrimaryStorageDownloadWait, configDao), VirtualMachineManager.ExecuteInSequence.value(), "volume");
+
+ EndPoint ep = selector.select(srcData, dstData);
+ if (ep == null) {
+ err = "No remote endpoint to send command, check if host or ssvm is down?";
+ } else {
+ answer = ep.sendMessage(cmd);
+ }
+
+ if (answer != null && answer.getResult()) {
+ SpApiResponse resp2 = StorPoolUtil.volumeFreeze(StorPoolUtil.getNameFromResponse(resp, true), conn);
+ if (resp2.getError() != null) {
+ err = String.format("Could not freeze Storpool volume %s. Error: %s", srcData.getUuid(), resp2.getError());
+ } else {
+ String name = StorPoolUtil.getNameFromResponse(resp, false);
+ SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(sinfo.getId(), sinfo.getUuid());
+ if (snapshotDetails != null) {
+ StorPoolHelper.updateSnapshotDetailsValue(snapshotDetails.getId(), StorPoolUtil.devPath(name), "snapshot");
+ }else {
+ StorPoolHelper.addSnapshotDetails(sinfo.getId(), sinfo.getUuid(), StorPoolUtil.devPath(name), _snapshotDetailsDao);
+ }
+ resp = StorPoolUtil.volumeCreate(volumeName, StorPoolUtil.getNameFromResponse(resp, true), size, null, null, "volume", sinfo.getBaseVolume().getMaxIops(), conn);
+ if (resp.getError() == null) {
+ updateStoragePool(dstData.getDataStore().getId(), size);
+
+ VolumeObjectTO to = (VolumeObjectTO) dstData.getTO();
+ to.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false)));
+ to.setSize(size);
+ // successfully downloaded snapshot to primary storage
+ answer = new CopyCmdAnswer(to);
+ StorPoolUtil.spLog("Created volume=%s with uuid=%s from snapshot=%s with uuid=%s", name, to.getUuid(), snapshotName, sinfo.getUuid());
+
+ } else {
+ err = String.format("Could not create Storpool volume %s from snapshot %s. Error: %s", volumeName, snapshotName, resp.getError());
+ }
+ }
+ } else {
+ err = answer != null ? answer.getDetails() : "Unknown error while downloading template. Null answer returned.";
+ }
+ } else {
+ err = String.format("Could not create Storpool volume %s from snapshot %s. Error: %s", volumeName, snapshotName, resp.getError());
+ }
+ } else {
+ err = String.format("The snapshot %s does not exists neither on primary, neither on secondary storage. Cannot create volume from snapshot", snapshotName);
+ }
+ } else {
+ err = String.format("Could not create Storpool volume %s from snapshot %s. Error: %s", volumeName, snapshotName, resp.getError());
+ }
+ } else if (srcType == DataObjectType.SNAPSHOT && dstType == DataObjectType.SNAPSHOT) {
+ // bypass secondary storage
+ if (StorPoolConfigurationManager.BypassSecondaryStorage.value()) {
+ SnapshotObjectTO snapshot = (SnapshotObjectTO) srcData.getTO();
+ answer = new CopyCmdAnswer(snapshot);
+ } else {
+ // copy snapshot to secondary storage (backup snapshot)
+ cmd = new StorPoolBackupSnapshotCommand(srcData.getTO(), dstData.getTO(), StorPoolHelper.getTimeout(StorPoolHelper.BackupSnapshotWait, configDao), VirtualMachineManager.ExecuteInSequence.value());
+
+ final String snapName = StorPoolStorageAdaptor.getVolumeNameFromPath(((SnapshotInfo) srcData).getPath(), true);
+ SpConnectionDesc conn = StorPoolUtil.getSpConnection(srcData.getDataStore().getUuid(), srcData.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
+ try {
+ Long clusterId = StorPoolHelper.findClusterIdByGlobalId(snapName, clusterDao);
+ EndPoint ep = clusterId != null ? RemoteHostEndPoint.getHypervisorHostEndPoint(StorPoolHelper.findHostByCluster(clusterId, hostDao)) : selector.select(srcData, dstData);
+ if (ep == null) {
+ err = "No remote endpoint to send command, check if host or ssvm is down?";
+ } else {
+ answer = ep.sendMessage(cmd);
+ // if error during snapshot backup, cleanup the StorPool snapshot
+ if (answer != null && !answer.getResult()) {
+ StorPoolUtil.spLog(String.format("Error while backing-up snapshot '%s' - cleaning up StorPool snapshot. Error: %s", snapName, answer.getDetails()));
+ SpApiResponse resp = StorPoolUtil.snapshotDelete(snapName, conn);
+ if (resp.getError() != null) {
+ final String err2 = String.format("Failed to cleanup StorPool snapshot '%s'. Error: %s.", snapName, resp.getError());
+ log.error(err2);
+ StorPoolUtil.spLog(err2);
+ }
+ }
+ }
+ } catch (CloudRuntimeException e) {
+ err = e.getMessage();
+ }
+ }
+ } else if (srcType == DataObjectType.VOLUME && dstType == DataObjectType.TEMPLATE) {
+ // create template from volume
+ VolumeObjectTO volume = (VolumeObjectTO) srcData.getTO();
+ TemplateObjectTO template = (TemplateObjectTO) dstData.getTO();
+ SpConnectionDesc conn = StorPoolUtil.getSpConnection(srcData.getDataStore().getUuid(), srcData.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
+
+ String volumeName = StorPoolStorageAdaptor.getVolumeNameFromPath(volume.getPath(), true);
+
+
+ cmd = new StorPoolBackupTemplateFromSnapshotCommand(volume, template,
+ StorPoolHelper.getTimeout(StorPoolHelper.PrimaryStorageDownloadWait, configDao), VirtualMachineManager.ExecuteInSequence.value());
+
+ try {
+ Long clusterId = StorPoolHelper.findClusterIdByGlobalId(volumeName, clusterDao);
+ EndPoint ep2 = clusterId != null ? RemoteHostEndPoint.getHypervisorHostEndPoint(StorPoolHelper.findHostByCluster(clusterId, hostDao)) : selector.select(srcData, dstData);
+ if (ep2 == null) {
+ err = "No remote endpoint to send command, check if host or ssvm is down?";
+ } else {
+ answer = ep2.sendMessage(cmd);
+ if (answer != null && answer.getResult()) {
+ SpApiResponse resSnapshot = StorPoolUtil.volumeSnapshot(volumeName, template.getUuid(), null, "template", "no", conn);
+ if (resSnapshot.getError() != null) {
+ log.debug(String.format("Could not snapshot volume with ID=%s", volume.getId()));
+ StorPoolUtil.spLog("Volume snapshot failed with error=%s", resSnapshot.getError().getDescr());
+ err = resSnapshot.getError().getDescr();
+ }
+ else {
+ StorPoolHelper.updateVmStoreTemplate(template.getId(), template.getDataStore().getRole(), StorPoolUtil.devPath(StorPoolUtil.getSnapshotNameFromResponse(resSnapshot, false, StorPoolUtil.GLOBAL_ID)), vmTemplateDataStoreDao);
+ vmTemplateDetailsDao.persist(new VMTemplateDetailVO(template.getId(), StorPoolUtil.SP_STORAGE_POOL_ID, String.valueOf(srcData.getDataStore().getId()), false));
+ }
+ }else {
+ err = "Could not copy template to secondary " + answer.getResult();
+ }
+ }
+ }catch (CloudRuntimeException e) {
+ err = e.getMessage();
+ }
+ } else if (srcType == DataObjectType.TEMPLATE && dstType == DataObjectType.TEMPLATE) {
+ // copy template to primary storage
+ TemplateInfo tinfo = (TemplateInfo)dstData;
+ Long size = tinfo.getSize();
+ if(size == null || size == 0)
+ size = 1L*1024*1024*1024;
+ SpConnectionDesc conn = StorPoolUtil.getSpConnection(dstData.getDataStore().getUuid(), dstData.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
+
+ TemplateDataStoreVO templDataStoreVO = vmTemplateDataStoreDao.findByTemplate(tinfo.getId(), DataStoreRole.Image);
+
+ String snapshotName = (templDataStoreVO != null && templDataStoreVO.getLocalDownloadPath() != null)
+ ? StorPoolStorageAdaptor.getVolumeNameFromPath(templDataStoreVO.getLocalDownloadPath(), true)
+ : null;
+ String name = tinfo.getUuid();
+
+ SpApiResponse resp = null;
+ if (snapshotName != null) {
+ //no need to copy volume from secondary, because we have it already on primary. Just need to create a child snapshot from it.
+ //The child snapshot is needed when configuration "storage.cleanup.enabled" is true, not to clean the base snapshot and to lose everything
+ resp = StorPoolUtil.volumeCreate(name, snapshotName, size, null, "no", "template", null, conn);
+ if (resp.getError() != null) {
+ err = String.format("Could not create Storpool volume for CS template %s. Error: %s", name, resp.getError());
+ } else {
+ String volumeNameToSnapshot = StorPoolUtil.getNameFromResponse(resp, true);
+ SpApiResponse resp2 = StorPoolUtil.volumeFreeze(volumeNameToSnapshot, conn);
+ if (resp2.getError() != null) {
+ err = String.format("Could not freeze Storpool volume %s. Error: %s", name, resp2.getError());
+ } else {
+ StorPoolUtil.spLog("Storpool snapshot [%s] for a template exists. Creating template on Storpool with name [%s]", tinfo.getUuid(), name);
+ TemplateObjectTO dstTO = (TemplateObjectTO) dstData.getTO();
+ dstTO.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false)));
+ dstTO.setSize(size);
+ answer = new CopyCmdAnswer(dstTO);
+ }
+ }
+ } else {
+ resp = StorPoolUtil.volumeCreate(name, null, size, null, "no", "template", null, conn);
+ if (resp.getError() != null) {
+ err = String.format("Could not create Storpool volume for CS template %s. Error: %s", name, resp.getError());
+ } else {
+ TemplateObjectTO dstTO = (TemplateObjectTO)dstData.getTO();
+ dstTO.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false)));
+ dstTO.setSize(size);
+
+ cmd = new StorPoolDownloadTemplateCommand(srcData.getTO(), dstTO, StorPoolHelper.getTimeout(StorPoolHelper.PrimaryStorageDownloadWait, configDao), VirtualMachineManager.ExecuteInSequence.value(), "volume");
+
+ EndPoint ep = selector.select(srcData, dstData);
+ if (ep == null) {
+ err = "No remote endpoint to send command, check if host or ssvm is down?";
+ } else {
+ answer = ep.sendMessage(cmd);
+ }
+
+ if (answer != null && answer.getResult()) {
+ // successfully downloaded template to primary storage
+ SpApiResponse resp2 = StorPoolUtil.volumeFreeze(StorPoolUtil.getNameFromResponse(resp, true), conn);
+ if (resp2.getError() != null) {
+ err = String.format("Could not freeze Storpool volume %s. Error: %s", name, resp2.getError());
+ }
+ } else {
+ err = answer != null ? answer.getDetails() : "Unknown error while downloading template. Null answer returned.";
+ }
+ }
+ }
+ if (err != null) {
+ resp = StorPoolUtil.volumeDelete(StorPoolUtil.getNameFromResponse(resp, true), conn);
+ if (resp.getError() != null) {
+ log.warn(String.format("Could not clean-up Storpool volume %s. Error: %s", name, resp.getError()));
+ }
+ }
+ } else if (srcType == DataObjectType.TEMPLATE && dstType == DataObjectType.VOLUME) {
+ // create volume from template on Storpool PRIMARY
+ TemplateInfo tinfo = (TemplateInfo)srcData;
+
+ VolumeInfo vinfo = (VolumeInfo)dstData;
+ VMTemplateStoragePoolVO templStoragePoolVO = StorPoolHelper.findByPoolTemplate(vinfo.getPoolId(), tinfo.getId());
+ final String parentName = templStoragePoolVO.getLocalDownloadPath() !=null ? StorPoolStorageAdaptor.getVolumeNameFromPath(templStoragePoolVO.getLocalDownloadPath(), true) : StorPoolStorageAdaptor.getVolumeNameFromPath(templStoragePoolVO.getInstallPath(), true);
+ final String name = vinfo.getUuid();
+ SpConnectionDesc conn = StorPoolUtil.getSpConnection(vinfo.getDataStore().getUuid(), vinfo.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
+
+ Long snapshotSize = StorPoolUtil.snapshotSize(parentName, conn);
+ if (snapshotSize == null) {
+ err = String.format("Snapshot=%s does not exist on StorPool. Will recreate it first on primary", parentName);
+ vmTemplatePoolDao.remove(templStoragePoolVO.getId());
+ }
+ if (err == null) {
+ long size = vinfo.getSize();
+ if( size < snapshotSize )
+ {
+ StorPoolUtil.spLog(String.format("provided size is too small for snapshot. Provided %d, snapshot %d. Using snapshot size", size, snapshotSize));
+ size = snapshotSize;
+ }
+ StorPoolUtil.spLog(String.format("volume size is: %d", size));
+ Long vmId = vinfo.getInstanceId();
+ SpApiResponse resp = StorPoolUtil.volumeCreate(name, parentName, size, getVMInstanceUUID(vmId),
+ getVcPolicyTag(vmId), "volume", vinfo.getMaxIops(), conn);
+ if (resp.getError() == null) {
+ updateStoragePool(dstData.getDataStore().getId(), vinfo.getSize());
+
+ VolumeObjectTO to = (VolumeObjectTO) vinfo.getTO();
+ to.setSize(vinfo.getSize());
+ to.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false)));
+
+ answer = new CopyCmdAnswer(to);
+ } else {
+ err = String.format("Could not create Storpool volume %s. Error: %s", name, resp.getError());
+ }
+ }
+ } else if (srcType == DataObjectType.VOLUME && dstType == DataObjectType.VOLUME) {
+ StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriver.copyAsync src Data Store=%s", srcData.getDataStore().getDriver());
+ VolumeInfo dstInfo = (VolumeInfo)dstData;
+ VolumeInfo srcInfo = (VolumeInfo) srcData;
+
+ if( !(srcData.getDataStore().getDriver() instanceof StorPoolPrimaryDataStoreDriver ) ) {
+ // copy "VOLUME" to primary storage
+ String name = dstInfo.getUuid();
+ Long size = dstInfo.getSize();
+ if(size == null || size == 0)
+ size = 1L*1024*1024*1024;
+ SpConnectionDesc conn = StorPoolUtil.getSpConnection(dstData.getDataStore().getUuid(), dstData.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
+ Long vmId = srcInfo.getInstanceId();
+
+ SpApiResponse resp = StorPoolUtil.volumeCreate(name, null, size, getVMInstanceUUID(vmId), getVcPolicyTag(vmId), "volume", dstInfo.getMaxIops(), conn);
+ if (resp.getError() != null) {
+ err = String.format("Could not create Storpool volume for CS template %s. Error: %s", name, resp.getError());
+ } else {
+ //updateVolume(dstData.getId());
+ VolumeObjectTO dstTO = (VolumeObjectTO)dstData.getTO();
+ dstTO.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false)));
+ dstTO.setSize(size);
+
+ cmd = new StorPoolDownloadVolumeCommand(srcData.getTO(), dstTO, StorPoolHelper.getTimeout(StorPoolHelper.PrimaryStorageDownloadWait, configDao), VirtualMachineManager.ExecuteInSequence.value());
+
+ EndPoint ep = selector.select(srcData, dstData);
+
+ if( ep == null) {
+ StorPoolUtil.spLog("select(srcData, dstData) returned NULL. trying srcOnly");
+ ep = selector.select(srcData); // Storpool is zone
+ }
+ if (ep == null) {
+ err = "No remote endpoint to send command, check if host or ssvm is down?";
+ } else {
+ StorPoolUtil.spLog("Sending command to %s", ep.getHostAddr());
+ answer = ep.sendMessage(cmd);
+
+ if (answer != null && answer.getResult()) {
+ // successfully downloaded volume to primary storage
+ } else {
+ err = answer != null ? answer.getDetails() : "Unknown error while downloading volume. Null answer returned.";
+ }
+ }
+
+ if (err != null) {
+ SpApiResponse resp3 = StorPoolUtil.volumeDelete(name, conn);
+ if (resp3.getError() != null) {
+ log.warn(String.format("Could not clean-up Storpool volume %s. Error: %s", name, resp3.getError()));
+ }
+ }
+ }
+ } else {
+ // download volume - first copies to secondary
+ VolumeObjectTO srcTO = (VolumeObjectTO)srcData.getTO();
+ StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.copyAsnc SRC path=%s ", srcTO.getPath());
+ StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.copyAsnc DST canonicalName=%s ", dstData.getDataStore().getClass().getCanonicalName());
+ PrimaryDataStoreTO checkStoragePool = dstData.getTO().getDataStore() instanceof PrimaryDataStoreTO ? (PrimaryDataStoreTO)dstData.getTO().getDataStore() : null;
+ final String name = StorPoolStorageAdaptor.getVolumeNameFromPath(srcTO.getPath(), true);
+ StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.copyAsnc DST tmpSnapName=%s ,srcUUID=%s", name, srcTO.getUuid());
+
+ if (checkStoragePool != null && checkStoragePool.getPoolType().equals(StoragePoolType.SharedMountPoint)) {
+ SpConnectionDesc conn = StorPoolUtil.getSpConnection(dstData.getDataStore().getUuid(), dstData.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
+ String baseOn = StorPoolStorageAdaptor.getVolumeNameFromPath(srcTO.getPath(), true);
+ //uuid tag will be the same as srcData.uuid
+ String volumeName = srcData.getUuid();
+ StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.copyAsnc volumeName=%s, baseOn=%s", volumeName, baseOn);
+ final SpApiResponse response = StorPoolUtil.volumeCopy(volumeName, baseOn, "volume", srcInfo.getMaxIops(), conn);
+ srcTO.setSize(srcData.getSize());
+ srcTO.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(response, false)));
+ StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.copyAsnc DST to=%s", srcTO);
+
+ answer = new CopyCmdAnswer(srcTO);
+ } else {
+ SpConnectionDesc conn = StorPoolUtil.getSpConnection(srcData.getDataStore().getUuid(), srcData.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
+ final SpApiResponse resp = StorPoolUtil.volumeSnapshot(name, srcTO.getUuid(), srcInfo.getInstanceId() != null ? getVMInstanceUUID(srcInfo.getInstanceId()) : null, "temporary", null, conn);
+ String snapshotName = StorPoolUtil.getSnapshotNameFromResponse(resp, true, StorPoolUtil.GLOBAL_ID);
+ if (resp.getError() == null) {
+ srcTO.setPath(StorPoolUtil.devPath(
+ StorPoolUtil.getSnapshotNameFromResponse(resp, false, StorPoolUtil.GLOBAL_ID)));
+
+ cmd = new StorPoolCopyVolumeToSecondaryCommand(srcTO, dstData.getTO(), StorPoolHelper.getTimeout(StorPoolHelper.CopyVolumeWait, configDao), VirtualMachineManager.ExecuteInSequence.value());
+
+ StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.copyAsnc command=%s ", cmd);
+
+ try {
+ Long clusterId = StorPoolHelper.findClusterIdByGlobalId(snapshotName, clusterDao);
+ EndPoint ep = clusterId != null ? RemoteHostEndPoint.getHypervisorHostEndPoint(StorPoolHelper.findHostByCluster(clusterId, hostDao)) : selector.select(srcData, dstData);
+ StorPoolUtil.spLog("selector.select(srcData, dstData) ", ep);
+ if (ep == null) {
+ ep = selector.select(dstData);
+ StorPoolUtil.spLog("selector.select(srcData) ", ep);
+ }
+
+ if (ep == null) {
+ err = "No remote endpoint to send command, check if host or ssvm is down?";
+ } else {
+ answer = ep.sendMessage(cmd);
+ StorPoolUtil.spLog("Answer: details=%s, result=%s", answer.getDetails(), answer.getResult());
+ }
+ } catch (CloudRuntimeException e) {
+ err = e.getMessage();
+ }
+ } else {
+ err = String.format("Failed to create temporary StorPool snapshot while trying to download volume %s (uuid %s). Error: %s", srcTO.getName(), srcTO.getUuid(), resp.getError());
+ }
+ final SpApiResponse resp2 = StorPoolUtil.snapshotDelete(snapshotName, conn);
+ if (resp2.getError() != null) {
+ final String err2 = String.format("Failed to delete temporary StorPool snapshot %s. Error: %s", StorPoolUtil.getNameFromResponse(resp, true), resp2.getError());
+ log.error(err2);
+ StorPoolUtil.spLog(err2);
+ }
+ }
+ }
+ } else {
+ err = String.format("Unsupported copy operation from %s (type %s) to %s (type %s)", srcData.getUuid(), srcType, dstData.getUuid(), dstType);
+ }
+ } catch (Exception e) {
+ StorPoolUtil.spLog("Caught exception: %s", e.toString());
+ err = e.toString();
+ }
+
+ if (answer != null && !answer.getResult()) {
+ err = answer.getDetails();
+ }
+
+ if (err != null) {
+ StorPoolUtil.spLog("Failed due to %s", err);
+
+ log.error(err);
+ answer = new Answer(cmd, false, err);
+ }
+
+ CopyCommandResult res = new CopyCommandResult(null, answer);
+ res.setResult(err);
+ callback.complete(res);
+ }
+
+ /**
+ * Creates a StorPool snapshot of the snapshot's base volume and records the
+ * snapshot details (path and owning primary storage id) in the snapshot
+ * details table. Completes the callback with a CreateCmdResult whose result
+ * string is null on success or the error text on failure.
+ *
+ * @param snapshot the CloudStack snapshot to take; its base volume path must be a StorPool device path
+ * @param callback completed exactly once with the outcome
+ * @throws UnsupportedOperationException when the base volume path is not a StorPool device path
+ */
+ @Override
+ public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback<CreateCmdResult> callback) {
+ String snapshotName = snapshot.getUuid();
+ VolumeInfo vinfo = snapshot.getBaseVolume();
+ String volumeName = StorPoolStorageAdaptor.getVolumeNameFromPath(vinfo.getPath(), true);
+ Long vmId = vinfo.getInstanceId();
+ if (volumeName != null) {
+ StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriver.takeSnapshot volumename=%s vmInstance=%s",volumeName, vmId);
+ } else {
+ throw new UnsupportedOperationException("The path should be: " + StorPoolUtil.SP_DEV_PATH);
+ }
+
+ CreateObjectAnswer answer = null;
+ String err = null;
+
+ try {
+ SpConnectionDesc conn = StorPoolUtil.getSpConnection(vinfo.getDataStore().getUuid(), vinfo.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
+
+ SpApiResponse resp = StorPoolUtil.volumeSnapshot(volumeName, snapshotName, vmId != null ? getVMInstanceUUID(vmId) : null, "snapshot", null, conn);
+
+ if (resp.getError() != null) {
+ err = String.format("Could not snapshot StorPool volume %s. Error %s", volumeName, resp.getError());
+ answer = new CreateObjectAnswer(err);
+ } else {
+ String name = StorPoolUtil.getSnapshotNameFromResponse(resp, true, StorPoolUtil.GLOBAL_ID);
+ // assumes the returned snapshot name is of the form "<prefix>~<globalId>" — TODO confirm against StorPoolUtil
+ SnapshotObjectTO snapTo = (SnapshotObjectTO)snapshot.getTO();
+ snapTo.setPath(StorPoolUtil.devPath(name.split("~")[1]));
+ answer = new CreateObjectAnswer(snapTo);
+ // persist the device path so later operations can resolve the snapshot by id
+ StorPoolHelper.addSnapshotDetails(snapshot.getId(), snapshot.getUuid(), snapTo.getPath(), _snapshotDetailsDao);
+ //add primary storage of snapshot
+ StorPoolHelper.addSnapshotDetails(snapshot.getId(), StorPoolUtil.SP_STORAGE_POOL_ID, String.valueOf(snapshot.getDataStore().getId()), _snapshotDetailsDao);
+ StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.takeSnapshot: snapshot: name=%s, uuid=%s, volume: name=%s, uuid=%s", name, snapshot.getUuid(), volumeName, vinfo.getUuid());
+ }
+ } catch (Exception e) {
+ err = String.format("Could not take volume snapshot due to %s", e.getMessage());
+ }
+
+ CreateCmdResult res = new CreateCmdResult(null, answer);
+ res.setResult(err);
+ callback.complete(res);
+ }
+
+ /**
+ * Reverts the snapshot's base volume to the given snapshot on StorPool.
+ * The sequence is order-sensitive: (1) if the volume still carries a legacy
+ * UUID-based name, rename it to its global id; (2) force-detach the volume
+ * everywhere; (3) revert it to the snapshot; (4) re-apply the max-IOPS tag.
+ * Each step short-circuits the callback with an error message on failure;
+ * an IOPS re-tag failure after a successful revert is only logged.
+ *
+ * @param snapshot snapshot to revert to
+ * @param snapshotOnPrimaryStore unused by this driver
+ * @param callback completed exactly once with the outcome
+ */
+ @Override
+ public void revertSnapshot(final SnapshotInfo snapshot, final SnapshotInfo snapshotOnPrimaryStore, final AsyncCompletionCallback<CommandResult> callback) {
+ final VolumeInfo vinfo = snapshot.getBaseVolume();
+ final String snapshotName = StorPoolHelper.getSnapshotName(snapshot.getId(), snapshot.getUuid(), snapshotDataStoreDao, _snapshotDetailsDao);
+ final String volumeName = StorPoolStorageAdaptor.getVolumeNameFromPath(vinfo.getPath(), true);
+ StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.revertSnapshot: snapshot: name=%s, uuid=%s, volume: name=%s, uuid=%s", snapshotName, snapshot.getUuid(), volumeName, vinfo.getUuid());
+ String err = null;
+
+ SpConnectionDesc conn = null;
+ try {
+ conn = StorPoolUtil.getSpConnection(vinfo.getDataStore().getUuid(), vinfo.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
+ } catch (Exception e) {
+ err = String.format("Could not revert volume due to %s", e.getMessage());
+ completeResponse(err, callback);
+ return;
+ }
+
+ VolumeDetailVO detail = volumeDetailsDao.findDetail(vinfo.getId(), StorPoolUtil.SP_PROVIDER_NAME);
+ if (detail != null) {
+ //Rename volume to its global id only if it was migrated from UUID to global id
+ SpApiResponse updateVolumeResponse = StorPoolUtil.volumeUpdateRename(StorPoolStorageAdaptor.getVolumeNameFromPath(vinfo.getPath(), true), "", StorPoolStorageAdaptor.getVolumeNameFromPath(detail.getValue(), false), conn);
+
+ if (updateVolumeResponse.getError() != null) {
+ StorPoolUtil.spLog("Could not update StorPool's volume %s to it's globalId due to %s", StorPoolStorageAdaptor.getVolumeNameFromPath(vinfo.getPath(), true), updateVolumeResponse.getError().getDescr())
;
+ err = String.format("Could not update StorPool's volume %s to it's globalId due to %s", StorPoolStorageAdaptor.getVolumeNameFromPath(vinfo.getPath(), true), updateVolumeResponse.getError().getDescr());
+ completeResponse(err, callback);
+ return;
+ }
+ // the migration marker is no longer needed once the rename succeeded
+ volumeDetailsDao.remove(detail.getId());
+ }
+
+ // detach everywhere before revert; a revert of an attached volume is not attempted
+ SpApiResponse resp = StorPoolUtil.detachAllForced(volumeName, false, conn);
+ if (resp.getError() != null) {
+ err = String.format("Could not detach StorPool volume %s due to %s", volumeName, resp.getError());
+ completeResponse(err, callback);
+ return;
+ }
+ SpApiResponse response = StorPoolUtil.volumeRevert(volumeName, snapshotName, conn);
+ if (response.getError() != null) {
+ err = String.format(
+ "Could not revert StorPool volume %s to the %s snapshot: could not create the new volume: error %s",
+ volumeName, snapshotName, response.getError());
+ completeResponse(err, callback);
+ return;
+ }
+
+ if (vinfo.getMaxIops() != null) {
+ // best-effort: the revert already succeeded, so a tagging failure is only logged
+ response = StorPoolUtil.volumeUpadateTags(volumeName, null, vinfo.getMaxIops(), conn, null);
+ if (response.getError() != null) {
+ StorPoolUtil.spLog("Volume was reverted successfully but max iops could not be set due to %s", response.getError().getDescr());
+ }
+ }
+
+ final VolumeObjectTO to = (VolumeObjectTO)vinfo.getTO();
+ completeResponse(to, callback);
+ }
+
+ /**
+ * Looks up the StorPool VC-policy resource tag of the given user VM.
+ * Returns an empty string when the VM id is null or no such tag exists.
+ */
+ private String getVcPolicyTag(Long vmId) {
+     if (vmId == null) {
+         return "";
+     }
+     ResourceTag vcPolicyTag = _resourceTagDao.findByKey(vmId, ResourceObjectType.UserVm, StorPoolUtil.SP_VC_POLICY);
+     if (vcPolicyTag == null) {
+         return "";
+     }
+     return vcPolicyTag.getValue();
+ }
+
+ /**
+ * No-op QoS hook for volume migration: this driver only logs the event.
+ * NOTE(review): missing @Override annotation — confirm this overrides the
+ * PrimaryDataStoreDriver interface method.
+ */
+ public void handleQualityOfServiceForVolumeMigration(VolumeInfo arg0, QualityOfServiceState arg1) {
+ StorPoolUtil.spLog("handleQualityOfServiceForVolumeMigration with volume name=%s", arg0.getName());
+ }
+
+
+ /**
+ * Host-aware copy entry point; the destination host hint is ignored and the
+ * work is delegated to the two-argument copyAsync overload.
+ * NOTE(review): missing @Override annotation — confirm against the interface.
+ */
+ public void copyAsync(DataObject srcData, DataObject destData, Host destHost,
+ AsyncCompletionCallback<CopyCommandResult> callback) {
+ copyAsync(srcData, destData, callback);
+ }
+
+ /** Always false: this driver does not supply storage pool statistics through this hook. */
+ public boolean canProvideStorageStats() {
+ return false;
+ }
+
+ /** Unused: canProvideStorageStats() returns false, so this always returns null. */
+ public Pair<Long, Long> getStorageStats(StoragePool storagePool) {
+ return null;
+ }
+
+ /** Always false: this driver does not supply per-volume statistics through this hook. */
+ public boolean canProvideVolumeStats() {
+ return false;
+ }
+
+ /** Unused: canProvideVolumeStats() returns false, so this always returns null. */
+ public Pair<Long, Long> getVolumeStats(StoragePool storagePool, String volumeId) {
+ return null;
+ }
+
+ /** Always false: host-access checks are not implemented by this driver. */
+ public boolean canHostAccessStoragePool(Host host, StoragePool pool) {
+ return false;
+ }
+
+ /** Always true: this driver wants provideVmInfo(...) callbacks for its volumes. */
+ @Override
+ public boolean isVmInfoNeeded() {
+ return true;
+ }
+
+ @Override
+ public void provideVmInfo(long vmId, long volumeId) {
+ VolumeVO volume = volumeDao.findById(volumeId);
+ StoragePoolVO poolVO = primaryStoreDao.findById(volume.getPoolId());
+ if (poolVO != null) {
+ try {
+ SpConnectionDesc conn = StorPoolUtil.getSpConnection(poolVO.getUuid(), poolVO.getId(), storagePoolDetailsDao, primaryStoreDao);
+ String volName = StorPoolStorageAdaptor.getVolumeNameFromPath(volume.getPath(), true);
+ VMInstanceVO userVM = vmInstanceDao.findById(vmId);
+ SpApiResponse resp = StorPoolUtil.volumeUpadateTags(volName, volume.getInstanceId() != null ? userVM.getUuid() : "", null, conn, getVcPolicyTag(vmId));
+ if (resp.getError() != null) {
+ log.warn(String.format("Could not update VC policy tags of a volume with id [%s]", volume.getUuid()));
+ }
+ } catch (Exception e) {
+ log.warn(String.format("Could not update Virtual machine tags due to %s", e.getMessage()));
+ }
+ }
+ }
+
+ /**
+ * This driver only reacts to VM tag updates whose key is the StorPool
+ * VC-policy tag key; every other key is ignored.
+ */
+ @Override
+ public boolean isVmTagsNeeded(String tagKey) {
+     if (tagKey == null) {
+         return false;
+     }
+     return tagKey.equals(StorPoolUtil.SP_VC_POLICY);
+ }
+
+ /**
+ * Re-applies the VM's VC-policy tag to the StorPool volume when the tag is
+ * created/changed on the VM. Failures are logged and swallowed so tag
+ * updates never fail the calling operation.
+ *
+ * @param vmId     id of the VM whose tag changed
+ * @param volumeId id of the affected CloudStack volume
+ * @param tagValue unused here; the current value is re-read via getVcPolicyTag(vmId)
+ */
+ @Override
+ public void provideVmTags(long vmId, long volumeId, String tagValue) {
+ VolumeVO volume = volumeDao.findById(volumeId);
+ StoragePoolVO poolVO = primaryStoreDao.findById(volume.getPoolId());
+ if (poolVO != null) {
+ try {
+ SpConnectionDesc conn = StorPoolUtil.getSpConnection(poolVO.getUuid(), poolVO.getId(), storagePoolDetailsDao, primaryStoreDao);
+ String volName = StorPoolStorageAdaptor.getVolumeNameFromPath(volume.getPath(), true);
+ SpApiResponse resp = StorPoolUtil.volumeUpadateVCTags(volName, conn, getVcPolicyTag(vmId));
+ if (resp.getError() != null) {
+ log.warn(String.format("Could not update VC policy tags of a volume with id [%s]", volume.getUuid()));
+ }
+ } catch (Exception e) {
+ log.warn(String.format("Could not update Virtual machine tags due to %s", e.getMessage()));
+ }
+ }
+ }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/StorPoolPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/StorPoolPrimaryDataStoreLifeCycle.java
new file mode 100644
index 0000000000..8ed39327f3
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/StorPoolPrimaryDataStoreLifeCycle.java
@@ -0,0 +1,321 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.datastore.lifecycle;
+
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
+import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil;
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpApiResponse;
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpConnectionDesc;
+import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.host.HostVO;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
+import com.cloud.resource.ResourceManager;
+import com.cloud.storage.ScopeType;
+import com.cloud.storage.SnapshotVO;
+import com.cloud.storage.Storage.StoragePoolType;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolAutomation;
+import com.cloud.storage.VMTemplateDetailVO;
+import com.cloud.storage.VMTemplateStoragePoolVO;
+import com.cloud.storage.dao.SnapshotDao;
+import com.cloud.storage.dao.SnapshotDetailsDao;
+import com.cloud.storage.dao.SnapshotDetailsVO;
+import com.cloud.storage.dao.VMTemplateDetailsDao;
+import com.cloud.storage.dao.VMTemplatePoolDao;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+/**
+ * Life cycle handler for StorPool primary storage pools.
+ *
+ * Pools are registered zone-wide for KVM only and are unmanaged; each pool is
+ * bound to one StorPool template and the connection details (endpoint, auth
+ * token, template) are persisted as storage pool details.
+ */
+public class StorPoolPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle {
+    private static final Logger log = Logger.getLogger(StorPoolPrimaryDataStoreLifeCycle.class);
+
+    @Inject
+    protected PrimaryDataStoreHelper dataStoreHelper;
+    // NOTE(review): field name has a typo ("Autmation"); kept as-is because the
+    // field is protected and renaming could affect subclasses.
+    @Inject
+    protected StoragePoolAutomation storagePoolAutmation;
+    @Inject
+    private PrimaryDataStoreDao _primaryDataStoreDao;
+    @Inject
+    private ResourceManager resourceMgr;
+    @Inject
+    private StorageManager storageMgr;
+    @Inject
+    private SnapshotDao snapshotDao;
+    @Inject
+    private SnapshotDetailsDao snapshotDetailsDao;
+    @Inject
+    private VMTemplatePoolDao vmTemplatePoolDao;
+    @Inject
+    private VMTemplateDetailsDao vmTemplateDetailsDao;
+    @Inject
+    private StoragePoolDetailsDao storagePoolDetailsDao;
+
+    /**
+     * Validates the StorPool connection URL (endpoint, auth token, template),
+     * rejects a second pool on the same StorPool cluster and template, and
+     * registers the pool as a zone-wide, unmanaged, KVM-only SharedMountPoint pool.
+     *
+     * @param dsInfos parameters collected by the add-storage API call
+     * @return the newly persisted primary data store
+     * @throws IllegalArgumentException on missing/invalid connection parameters or a duplicate pool
+     */
+    @Override
+    public DataStore initialize(Map<String, Object> dsInfos) {
+        StorPoolUtil.spLog("initialize:");
+        for (Map.Entry<String, Object> e : dsInfos.entrySet()) {
+            StorPoolUtil.spLog(" %s=%s", e.getKey(), e.getValue());
+        }
+        StorPoolUtil.spLog("");
+
+        log.debug("initialize");
+
+        String name = (String)dsInfos.get("name");
+        String providerName = (String)dsInfos.get("providerName");
+        Long zoneId = (Long)dsInfos.get("zoneId");
+
+        String url = (String)dsInfos.get("url");
+        SpConnectionDesc conn = new SpConnectionDesc(url);
+        if (conn.getHostPort() == null) {
+            throw new IllegalArgumentException("No SP_API_HTTP");
+        }
+        if (conn.getAuthToken() == null) {
+            throw new IllegalArgumentException("No SP_AUTH_TOKEN");
+        }
+        if (conn.getTemplateName() == null) {
+            throw new IllegalArgumentException("No SP_TEMPLATE");
+        }
+        // Also validates the credentials: the template lookup fails on bad auth.
+        if (!StorPoolUtil.templateExists(conn)) {
+            throw new IllegalArgumentException("No such storpool template " + conn.getTemplateName() + " or credentials are invalid");
+        }
+
+        // Reject a second pool that points at the same StorPool cluster and template.
+        for (StoragePoolVO sp : _primaryDataStoreDao.findPoolsByProvider("StorPool")) {
+            SpConnectionDesc old = connectionDescForPool(sp);
+            if (old.getHostPort().equals(conn.getHostPort()) && old.getTemplateName().equals(conn.getTemplateName())) {
+                throw new IllegalArgumentException("StorPool cluster and template already in use by pool " + sp.getName());
+            }
+        }
+
+        Long capacityBytes = (Long)dsInfos.get("capacityBytes");
+        if (capacityBytes == null) {
+            throw new IllegalArgumentException("Capacity bytes is required");
+        }
+
+        String tags = (String)dsInfos.get("tags");
+        if (tags == null || tags.isEmpty()) {
+            tags = name;
+        }
+
+        @SuppressWarnings("unchecked")
+        Map<String, String> details = (Map<String, String>)dsInfos.get("details");
+        // assumes the framework always supplies a mutable "details" map — TODO confirm
+        details.put(StorPoolUtil.SP_AUTH_TOKEN, conn.getAuthToken());
+        details.put(StorPoolUtil.SP_HOST_PORT, conn.getHostPort());
+        details.put(StorPoolUtil.SP_TEMPLATE, conn.getTemplateName());
+
+        PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
+        parameters.setName(name);
+        // The uuid encodes the template name so it can be recovered from the pool record.
+        parameters.setUuid(conn.getTemplateName() + ";" + UUID.randomUUID().toString());
+        parameters.setZoneId(zoneId);
+        parameters.setProviderName(providerName);
+        parameters.setType(StoragePoolType.SharedMountPoint);
+        parameters.setHypervisorType(HypervisorType.KVM);
+        parameters.setManaged(false);
+        parameters.setHost("n/a");
+        parameters.setPort(0);
+        parameters.setPath(StorPoolUtil.SP_DEV_PATH);
+        parameters.setUsedBytes(0);
+        parameters.setCapacityBytes(capacityBytes);
+        parameters.setTags(tags);
+        parameters.setDetails(details);
+
+        return dataStoreHelper.createPrimaryDataStore(parameters);
+    }
+
+    /** Rebuilds the connection description of an existing pool from its stored details. */
+    private SpConnectionDesc connectionDescForPool(StoragePoolVO sp) {
+        String host = null;
+        String template = null;
+        String authToken = null;
+        for (StoragePoolDetailVO storagePoolDetailVO : storagePoolDetailsDao.listDetails(sp.getId())) {
+            switch (storagePoolDetailVO.getName()) {
+            case StorPoolUtil.SP_AUTH_TOKEN:
+                authToken = storagePoolDetailVO.getValue();
+                break;
+            case StorPoolUtil.SP_HOST_PORT:
+                host = storagePoolDetailVO.getValue();
+                break;
+            case StorPoolUtil.SP_TEMPLATE:
+                template = storagePoolDetailVO.getValue();
+                break;
+            default:
+                break;
+            }
+        }
+        if (host != null && template != null && authToken != null) {
+            return new SpConnectionDesc(host, authToken, template);
+        }
+        // Fall back to parsing the legacy uuid-encoded connection string.
+        return new SpConnectionDesc(sp.getUuid());
+    }
+
+    /** Connection details are persisted elsewhere; only logs the requested update. */
+    @Override
+    public void updateStoragePool(StoragePool storagePool, Map<String, String> details) {
+        StorPoolUtil.spLog("updateStoragePool:");
+        for (Map.Entry<String, String> e : details.entrySet()) {
+            StorPoolUtil.spLog(" %s=%s", e.getKey(), e.getValue());
+        }
+        StorPoolUtil.spLog("");
+
+        log.debug("updateStoragePool");
+    }
+
+    @Override
+    public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
+        log.debug("attachHost");
+        return true;
+    }
+
+    /** StorPool pools are zone-wide; cluster scope is rejected. */
+    @Override
+    public boolean attachCluster(DataStore store, ClusterScope scope) {
+        log.debug("attachCluster");
+        if (!scope.getScopeType().equals(ScopeType.ZONE)) {
+            throw new UnsupportedOperationException("Only Zone-Wide scope is supported!");
+        }
+        return true;
+    }
+
+    /**
+     * Connects every up-and-enabled KVM host in the zone to the pool; per-host
+     * failures are logged and skipped so one bad host does not block the attach.
+     */
+    @Override
+    public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
+        log.debug("attachZone");
+
+        if (hypervisorType != HypervisorType.KVM) {
+            throw new UnsupportedOperationException("Only KVM hypervisors supported!");
+        }
+        List<HostVO> kvmHosts = resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId());
+        for (HostVO host : kvmHosts) {
+            try {
+                storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId());
+            } catch (Exception e) {
+                log.warn(String.format("Unable to establish a connection between host %s and pool %s due to %s", host, dataStore, e));
+            }
+        }
+        dataStoreHelper.attachZone(dataStore, hypervisorType);
+        return true;
+    }
+
+    @Override
+    public boolean maintain(DataStore dataStore) {
+        log.debug("maintain");
+
+        storagePoolAutmation.maintain(dataStore);
+        dataStoreHelper.maintain(dataStore);
+        return true;
+    }
+
+    @Override
+    public boolean cancelMaintain(DataStore store) {
+        log.debug("cancelMaintain");
+
+        dataStoreHelper.cancelMaintain(store);
+        storagePoolAutmation.cancelMaintain(store);
+        return true;
+    }
+
+    /**
+     * Deletes the pool after verifying nothing still references it: refuses when
+     * any snapshot or template snapshot lives on the pool, then removes template
+     * copies (StorPool snapshots) and, once the pool record is gone, any leftover
+     * per-cluster helper volumes and the pool's detail rows.
+     *
+     * @throws CloudRuntimeException when the pool is still in use or a StorPool call fails
+     */
+    @Override
+    public boolean deleteDataStore(DataStore store) {
+        log.debug("deleteDataStore");
+        long storagePoolId = store.getId();
+
+        List<SnapshotVO> lstSnapshots = snapshotDao.listAll();
+
+        if (lstSnapshots != null) {
+            for (SnapshotVO snapshot : lstSnapshots) {
+                SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(snapshot.getId(), StorPoolUtil.SP_STORAGE_POOL_ID);
+
+                // if this snapshot belongs to the storagePool that was passed in
+                if (snapshotDetails != null && snapshotDetails.getValue() != null && Long.parseLong(snapshotDetails.getValue()) == storagePoolId) {
+                    throw new CloudRuntimeException("This primary storage cannot be deleted because it currently contains one or more snapshots.");
+                }
+            }
+        }
+
+        List<VMTemplateDetailVO> lstTemplateDetails = vmTemplateDetailsDao.listAll();
+
+        if (lstTemplateDetails != null) {
+            for (VMTemplateDetailVO vmTemplateDetailVO : lstTemplateDetails) {
+                if (vmTemplateDetailVO.getName().equals(StorPoolUtil.SP_STORAGE_POOL_ID) && Long.parseLong(vmTemplateDetailVO.getValue()) == storagePoolId) {
+                    throw new CloudRuntimeException("This primary storage cannot be deleted because it currently contains one or more template snapshots.");
+                }
+            }
+        }
+
+        List<VMTemplateStoragePoolVO> lstTemplatePoolRefs = vmTemplatePoolDao.listByPoolId(storagePoolId);
+
+        // getSpConnection throws CloudRuntimeException itself; no catch-and-rethrow needed.
+        SpConnectionDesc conn = StorPoolUtil.getSpConnection(store.getUuid(), store.getId(), storagePoolDetailsDao, _primaryDataStoreDao);
+
+        if (lstTemplatePoolRefs != null) {
+            for (VMTemplateStoragePoolVO templatePoolRef : lstTemplatePoolRefs) {
+                SpApiResponse resp = StorPoolUtil.snapshotDelete(
+                        StorPoolStorageAdaptor.getVolumeNameFromPath(templatePoolRef.getLocalDownloadPath(), true), conn);
+                if (resp.getError() != null) {
+                    throw new CloudRuntimeException(String.format("Could not delete StorPool's snapshot from template_spool_ref table due to %s", resp.getError()));
+                }
+                vmTemplatePoolDao.remove(templatePoolRef.getId());
+            }
+        }
+        boolean isDeleted = dataStoreHelper.deletePrimaryDataStore(store);
+        if (isDeleted) {
+            List<StoragePoolDetailVO> volumesOnHosts = storagePoolDetailsDao.listDetails(storagePoolId);
+            for (StoragePoolDetailVO storagePoolDetailVO : volumesOnHosts) {
+                if (storagePoolDetailVO.getValue() != null && storagePoolDetailVO.getName().contains(StorPoolUtil.SP_VOLUME_ON_CLUSTER)) {
+                    StorPoolUtil.volumeDelete(StorPoolStorageAdaptor.getVolumeNameFromPath(storagePoolDetailVO.getValue(), true), conn);
+                }
+            }
+            storagePoolDetailsDao.removeDetails(storagePoolId);
+        }
+        return isDeleted;
+    }
+
+    /** Migration of StorPool primary storage to an object store is not supported. */
+    @Override
+    public boolean migrateToObjectStore(DataStore store) {
+        log.debug("migrateToObjectStore");
+        return false;
+    }
+
+    @Override
+    public void enableStoragePool(DataStore dataStore) {
+        log.debug("enableStoragePool");
+        dataStoreHelper.enable(dataStore);
+    }
+
+    @Override
+    public void disableStoragePool(DataStore dataStore) {
+        log.debug("disableStoragePool");
+        dataStoreHelper.disable(dataStore);
+    }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolHostListener.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolHostListener.java
new file mode 100644
index 0000000000..4a5ce4012d
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolHostListener.java
@@ -0,0 +1,234 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.datastore.provider;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.datastore.util.StorPoolFeaturesAndFixes;
+import org.apache.cloudstack.storage.datastore.util.StorPoolHelper;
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil;
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpApiResponse;
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpConnectionDesc;
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.storage.StorPoolModifyStoragePoolAnswer;
+import com.cloud.agent.api.storage.StorPoolModifyStoragePoolCommand;
+import com.cloud.agent.manager.AgentAttache;
+import com.cloud.alert.AlertManager;
+import com.cloud.dc.ClusterDetailsDao;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.exception.StorageConflictException;
+import com.cloud.host.HostVO;
+import com.cloud.host.dao.HostDao;
+import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
+import com.cloud.storage.DataStoreRole;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolHostVO;
+import com.cloud.storage.dao.StoragePoolHostDao;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class StorPoolHostListener implements HypervisorHostListener {
+ private static final Logger log = Logger.getLogger(StorPoolHostListener .class);
+
+ @Inject
+ private AgentManager agentMgr;
+ @Inject
+ private DataStoreManager dataStoreMgr;
+ @Inject
+ private AlertManager alertMgr;
+ @Inject
+ private StoragePoolHostDao storagePoolHostDao;
+ @Inject
+ private PrimaryDataStoreDao primaryStoreDao;
+ @Inject
+ private HostDao hostDao;
+ @Inject
+ private ClusterDao clusterDao;
+ @Inject
+ private ClusterDetailsDao clusterDetailsDao;
+ @Inject
+ private StoragePoolDetailsDao storagePoolDetailsDao;
+
+ /**
+ * Connects a host to a StorPool pool: refreshes the pool's connection
+ * details, verifies the per-cluster probe volume exists, checks that the
+ * current driver still supports every feature recorded before a CloudStack
+ * upgrade, then sends a StorPoolModifyStoragePoolCommand to the host and
+ * records the pool-host mapping and the host's StorPool cluster id.
+ *
+ * @return true when the host was connected; false on recoverable failures
+ * @throws StorageConflictException when the shared pool was already added as local storage
+ * @throws CloudRuntimeException when the agent returns no answer or a hard failure
+ */
+ @Override
+ public boolean hostConnect(long hostId, long poolId) throws StorageConflictException {
+ //Will update storage pool's connection details if they aren't updated in DB, before connecting pool to host
+ StoragePoolVO poolVO = primaryStoreDao.findById(poolId);
+
+ SpConnectionDesc conn = null;
+ try {
+ conn = StorPoolUtil.getSpConnection(poolVO.getUuid(), poolId, storagePoolDetailsDao, primaryStoreDao);
+ } catch (Exception e) {
+ return false;
+ }
+
+ StoragePool pool = (StoragePool)this.dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary);
+
+ HostVO host = hostDao.findById(hostId);
+ StoragePoolDetailVO volumeOnPool = verifyVolumeIsOnCluster(poolId, conn, host.getClusterId());
+ if (volumeOnPool == null) {
+ return false;
+ }
+
+ if (host.isInMaintenanceStates()) {
+ addModifyCommandToCommandsAllowedInMaintenanceMode();
+ }
+
+ // Refuse to connect when the upgraded driver dropped functionality recorded before the upgrade.
+ List<String> driverSupportedFeatures = StorPoolFeaturesAndFixes.getAllClassConstants();
+ List<StoragePoolDetailVO> driverFeaturesBeforeUpgrade = StorPoolHelper.listFeaturesUpdates(storagePoolDetailsDao, poolId);
+ boolean isCurrentVersionSupportsEverythingFromPrevious = StorPoolHelper.isPoolSupportsAllFunctionalityFromPreviousVersion(storagePoolDetailsDao, driverSupportedFeatures, driverFeaturesBeforeUpgrade, poolId);
+ if (!isCurrentVersionSupportsEverythingFromPrevious) {
+ String msg = "The current StorPool driver does not support all functionality from the one before upgrade to CS";
+ StorPoolUtil.spLog("Storage pool [%s] is not connected to host [%s] because the functionality after the upgrade is not full",
+ poolId, hostId);
+ alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, pool.getDataCenterId(), pool.getPodId(), msg, msg);
+ return false;
+ }
+
+ StorPoolModifyStoragePoolCommand cmd = new StorPoolModifyStoragePoolCommand(true, pool, volumeOnPool.getValue());
+ final Answer answer = agentMgr.easySend(hostId, cmd);
+
+ StoragePoolHostVO poolHost = storagePoolHostDao.findByPoolHost(pool.getId(), hostId);
+
+ if (answer == null) {
+ throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command" + pool.getId());
+ }
+
+ if (!answer.getResult()) {
+ // "objectDoesNotExist": the probe volume vanished on StorPool — drop its stale detail row and retry later.
+ if (answer.getDetails() != null) {
+ if (answer.getDetails().equals("objectDoesNotExist")) {
+ StorPoolUtil.volumeDelete(StorPoolStorageAdaptor.getVolumeNameFromPath(volumeOnPool.getValue(), true), conn);
+ storagePoolDetailsDao.remove(volumeOnPool.getId());
+ return false;
+ } else if (answer.getDetails().equals("spNotFound")) {
+ return false;
+ }
+
+ }
+ String msg = "Unable to attach storage pool" + poolId + " to the host" + hostId;
+ alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, pool.getDataCenterId(), pool.getPodId(), msg, msg);
+ // NOTE(review): the message appends pool.getId() twice (after the details) — looks like a leftover; confirm.
+ throw new CloudRuntimeException("Unable establish connection from storage head to storage pool " + pool.getId() + " due to " + answer.getDetails() +
+ pool.getId());
+ }
+
+ StorPoolUtil.spLog("hostConnect: hostId=%d, poolId=%d", hostId, poolId);
+
+ StorPoolModifyStoragePoolAnswer mspAnswer = (StorPoolModifyStoragePoolAnswer)answer;
+ if (mspAnswer.getLocalDatastoreName() != null && pool.isShared()) {
+ String datastoreName = mspAnswer.getLocalDatastoreName();
+ List<StoragePoolVO> localStoragePools = primaryStoreDao.listLocalStoragePoolByPath(pool.getDataCenterId(), datastoreName);
+ for (StoragePoolVO localStoragePool : localStoragePools) {
+ if (datastoreName.equals(localStoragePool.getPath())) {
+ log.warn("Storage pool: " + pool.getId() + " has already been added as local storage: " + localStoragePool.getName());
+ throw new StorageConflictException("Cannot add shared storage pool: " + pool.getId() + " because it has already been added as local storage:"
+ + localStoragePool.getName());
+ }
+ }
+ }
+
+ if (poolHost == null) {
+ poolHost = new StoragePoolHostVO(pool.getId(), hostId, mspAnswer.getPoolInfo().getLocalPath().replaceAll("//", "/"));
+ storagePoolHostDao.persist(poolHost);
+ } else {
+ // NOTE(review): the updated localPath is set on the entity but never saved via storagePoolHostDao — confirm intended.
+ poolHost.setLocalPath(mspAnswer.getPoolInfo().getLocalPath().replaceAll("//", "/"));
+ }
+
+ StorPoolHelper.setSpClusterIdIfNeeded(hostId, mspAnswer.getClusterId(), clusterDao, hostDao, clusterDetailsDao);
+
+ log.info("Connection established between storage pool " + pool + " and host " + hostId);
+ return true;
+ }
+
+ private synchronized StoragePoolDetailVO verifyVolumeIsOnCluster(long poolId, SpConnectionDesc conn, long clusterId) {
+ StoragePoolDetailVO volumeOnPool = storagePoolDetailsDao.findDetail(poolId, StorPoolUtil.SP_VOLUME_ON_CLUSTER + "-" + clusterId);
+ if (volumeOnPool == null) {
+ SpApiResponse resp = StorPoolUtil.volumeCreate(conn);
+ if (resp.getError() != null) {
+ return volumeOnPool;
+ }
+ String volumeName = StorPoolUtil.getNameFromResponse(resp, false);
+ volumeOnPool = new StoragePoolDetailVO(poolId, StorPoolUtil.SP_VOLUME_ON_CLUSTER + "-" + clusterId, StorPoolUtil.devPath(volumeName), false);
+ storagePoolDetailsDao.persist(volumeOnPool);
+ }
+ return volumeOnPool;
+ }
+
+    /**
+     * Host life-cycle callback: a host was added. No StorPool-specific setup is needed
+     * at this stage (the pool is attached in hostConnect), so simply report success.
+     */
+    @Override
+    public boolean hostAdded(long hostId) {
+        return true;
+    }
+
+    /**
+     * Host life-cycle callback: a host disconnected from the given pool. Nothing to tear
+     * down on the StorPool side; the event is only recorded in the plugin log.
+     */
+    @Override
+    public boolean hostDisconnected(long hostId, long poolId) {
+        StorPoolUtil.spLog("hostDisconnected: hostId=%d, poolId=%d", hostId, poolId);
+        return true;
+    }
+
+    /**
+     * Host life-cycle callback: a host is about to be removed. No StorPool-specific
+     * cleanup is required, so always succeed.
+     */
+    @Override
+    public boolean hostAboutToBeRemoved(long hostId) {
+        return true;
+    }
+
+    /**
+     * Host life-cycle callback: a host was removed from a cluster. No StorPool-specific
+     * cleanup is required, so always succeed.
+     */
+    @Override
+    public boolean hostRemoved(long hostId, long clusterId) {
+        return true;
+    }
+
+    // Workaround: AgentAttache.s_commandsAllowedInMaintenanceMode is a static final whitelist
+    // of command class names a host will still accept while in maintenance mode. There is no
+    // public API to extend it, so reflection is used to strip the 'final' modifier and install
+    // a copy of the array that also contains StorPoolModifyStoragePoolCommand.
+    // NOTE(review): clearing Field#modifiers via reflection fails on JDK 12+ with
+    // NoSuchFieldException - confirm the supported JDK range. On any failure the error is only
+    // logged, so the command may still be rejected for hosts in maintenance mode.
+    private void addModifyCommandToCommandsAllowedInMaintenanceMode() {
+
+        Class<AgentAttache> cls = AgentAttache.class;
+        try {
+            Field field = cls.getDeclaredField("s_commandsAllowedInMaintenanceMode");
+            field.setAccessible(true);
+            // Remove 'final' so the static field can be reassigned below.
+            Field modifiersField = Field.class.getDeclaredField("modifiers");
+            modifiersField.setAccessible(true);
+            modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
+            List<String> allowedCmdsInMaintenance = new ArrayList<String>(Arrays.asList(AgentAttache.s_commandsAllowedInMaintenanceMode));
+            // The whitelist stores Class#toString() values, hence toString() rather than getName().
+            allowedCmdsInMaintenance.add(StorPoolModifyStoragePoolCommand.class.toString());
+            String[] allowedCmdsInMaintenanceNew = new String[allowedCmdsInMaintenance.size()];
+            allowedCmdsInMaintenance.toArray(allowedCmdsInMaintenanceNew);
+            // Keep the replacement array sorted - presumably the consumer looks entries up via
+            // binary search; TODO confirm against AgentAttache.
+            Arrays.sort(allowedCmdsInMaintenanceNew);
+            field.set(null, allowedCmdsInMaintenanceNew);
+        } catch (IllegalArgumentException | IllegalAccessException | NoSuchFieldException | SecurityException e) {
+            String err = "Could not add StorPoolModifyStoragePoolCommand to s_commandsAllowedInMaintenanceMode array due to: %s";
+            StorPoolUtil.spLog(err, e.getMessage());
+            log.warn(String.format(err, e.getMessage()));
+        }
+    }
+
+    /**
+     * Host life-cycle callback: a host was (re-)enabled. No StorPool-specific work is
+     * needed, so always succeed.
+     */
+    @Override
+    public boolean hostEnabled(long hostId) {
+        return true;
+    }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolPrimaryDataStoreProvider.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolPrimaryDataStoreProvider.java
new file mode 100644
index 0000000000..892d95b8c9
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolPrimaryDataStoreProvider.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.datastore.provider;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
+import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
+import org.apache.cloudstack.storage.datastore.driver.StorPoolPrimaryDataStoreDriver;
+import org.apache.cloudstack.storage.datastore.lifecycle.StorPoolPrimaryDataStoreLifeCycle;
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil;
+
+import com.cloud.utils.component.ComponentContext;
+
+/**
+ * Registers the StorPool primary-storage provider with CloudStack and wires together its
+ * three collaborators: the data-store life-cycle, the data-store driver and the
+ * hypervisor host listener. All three are created via dependency injection in configure().
+ */
+public class StorPoolPrimaryDataStoreProvider implements PrimaryDataStoreProvider {
+
+    protected DataStoreLifeCycle lifecycle;
+    protected DataStoreDriver driver;
+    protected HypervisorHostListener listener;
+
+    // Package-private: instantiated by the provider registry, not by user code.
+    StorPoolPrimaryDataStoreProvider() {
+    }
+
+    /** The provider name under which this plugin is registered ("StorPool"). */
+    @Override
+    public String getName() {
+        return StorPoolUtil.SP_PROVIDER_NAME;
+    }
+
+    /** Life-cycle component created in configure(); null until then. */
+    @Override
+    public DataStoreLifeCycle getDataStoreLifeCycle() {
+        return lifecycle;
+    }
+
+    /** Driver component created in configure(); null until then. */
+    @Override
+    public DataStoreDriver getDataStoreDriver() {
+        return driver;
+    }
+
+    /** Host-listener component created in configure(); null until then. */
+    @Override
+    public HypervisorHostListener getHostListener() {
+        return listener;
+    }
+
+    /** Instantiates and injects the provider's components; always reports success. */
+    @Override
+    public boolean configure(Map<String, Object> params) {
+        lifecycle = ComponentContext.inject(StorPoolPrimaryDataStoreLifeCycle.class);
+        driver = ComponentContext.inject(StorPoolPrimaryDataStoreDriver.class);
+        listener = ComponentContext.inject(StorPoolHostListener.class);
+        return true;
+    }
+
+    /** StorPool backs primary storage only. */
+    @Override
+    public Set<DataStoreProviderType> getTypes() {
+        Set<DataStoreProviderType> supported = new HashSet<>();
+        supported.add(DataStoreProviderType.PRIMARY);
+        return supported;
+    }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolFeaturesAndFixes.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolFeaturesAndFixes.java
new file mode 100644
index 0000000000..f87f36c6b4
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolFeaturesAndFixes.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.datastore.util;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Catalogue of StorPool driver features/fixes. Each supported feature is declared as a
+ * static final field of this class; getAllClassConstants() reflects over the declared
+ * fields so the driver can compare its feature set with the one recorded in the database
+ * before a CloudStack upgrade. (With no fields declared the list is currently empty.)
+ */
+public class StorPoolFeaturesAndFixes {
+
+    public static List<String> getAllClassConstants() {
+        List<String> featureNames = new ArrayList<>();
+        for (Field declared : StorPoolFeaturesAndFixes.class.getDeclaredFields()) {
+            int mods = declared.getModifiers();
+            // Only static final fields count as feature constants.
+            if (Modifier.isStatic(mods) && Modifier.isFinal(mods)) {
+                featureNames.add(declared.getName());
+            }
+        }
+        return featureNames;
+    }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java
new file mode 100644
index 0000000000..9395f134fe
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java
@@ -0,0 +1,298 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.datastore.util;
+
+import java.io.IOException;
+import java.sql.PreparedStatement;
+import java.sql.Timestamp;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.cloudstack.framework.config.impl.ConfigurationVO;
+import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpApiResponse;
+import org.apache.cloudstack.storage.snapshot.StorPoolConfigurationManager;
+import org.apache.cloudstack.storage.to.VolumeObjectTO;
+import org.apache.commons.collections4.CollectionUtils;
+import org.apache.log4j.Appender;
+import org.apache.log4j.Logger;
+import org.apache.log4j.PatternLayout;
+import org.apache.log4j.RollingFileAppender;
+
+import com.cloud.dc.ClusterDetailsDao;
+import com.cloud.dc.ClusterDetailsVO;
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.host.HostVO;
+import com.cloud.host.dao.HostDao;
+import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
+import com.cloud.server.ResourceTag;
+import com.cloud.server.ResourceTag.ResourceObjectType;
+import com.cloud.storage.DataStoreRole;
+import com.cloud.storage.VMTemplateStoragePoolVO;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.SnapshotDetailsDao;
+import com.cloud.storage.dao.SnapshotDetailsVO;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.tags.dao.ResourceTagDao;
+import com.cloud.utils.NumbersUtil;
+import com.cloud.utils.db.QueryBuilder;
+import com.cloud.utils.db.SearchBuilder;
+import com.cloud.utils.db.SearchCriteria;
+import com.cloud.utils.db.SearchCriteria.Op;
+import com.cloud.utils.db.TransactionLegacy;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.dao.VMInstanceDao;
+
+public class StorPoolHelper {
+
+ private static final String UPDATE_SNAPSHOT_DETAILS_VALUE = "UPDATE `cloud`.`snapshot_details` SET value=? WHERE id=?";
+ private static final String UPDATE_VOLUME_DETAILS_NAME = "UPDATE `cloud`.`volume_details` SET name=? WHERE id=?";
+ public static final String PrimaryStorageDownloadWait = "primary.storage.download.wait";
+ public static final String CopyVolumeWait = "copy.volume.wait";
+ public static final String BackupSnapshotWait = "backup.snapshot.wait";
+
+    /**
+     * Mirrors a StorPool create/resize result into both the transfer object and the
+     * volume DB row: the by-id device path derived from the API response, and the new size.
+     * Does nothing when the volume row no longer exists.
+     */
+    public static void updateVolumeInfo(VolumeObjectTO volumeObjectTO, Long size, SpApiResponse resp,
+            VolumeDao volumeDao) {
+        final String devicePath = StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false));
+        final VolumeVO volumeRow = volumeDao.findById(volumeObjectTO.getId());
+        if (volumeRow == null) {
+            return;
+        }
+        volumeObjectTO.setSize(size);
+        volumeObjectTO.setPath(devicePath);
+        volumeRow.setSize(size);
+        volumeRow.setPath(devicePath);
+        volumeDao.update(volumeObjectTO.getId(), volumeRow);
+    }
+
+    // When a volume is deleted, CloudStack only removes the DB records of its primary-storage
+    // snapshots. We therefore keep our own record in snapshot_details for every snapshot
+    // created on StorPool, so the plugin can keep operating on them afterwards.
+    public static void addSnapshotDetails(final Long id, final String uuid, final String snapshotName,
+            SnapshotDetailsDao snapshotDetailsDao) {
+        SnapshotDetailsVO details = new SnapshotDetailsVO(id, uuid, snapshotName, false);
+        snapshotDetailsDao.persist(details);
+    }
+
+    /**
+     * Resolves the StorPool snapshot name for a CloudStack snapshot. Prefers the cached
+     * snapshot_details entry; otherwise scans the snapshot's data-store references, caches
+     * the first resolvable install path and returns its StorPool name. Returns null when
+     * the snapshot cannot be mapped to a StorPool name.
+     */
+    public static String getSnapshotName(Long snapshotId, String snapshotUuid, SnapshotDataStoreDao snapshotStoreDao,
+            SnapshotDetailsDao snapshotDetailsDao) {
+        SnapshotDetailsVO cached = snapshotDetailsDao.findDetail(snapshotId, snapshotUuid);
+        if (cached != null) {
+            return StorPoolStorageAdaptor.getVolumeNameFromPath(cached.getValue(), true);
+        }
+        List<SnapshotDataStoreVO> storeRefs = snapshotStoreDao.findBySnapshotId(snapshotId);
+        if (CollectionUtils.isEmpty(storeRefs)) {
+            return null;
+        }
+        for (SnapshotDataStoreVO storeRef : storeRefs) {
+            String spName = StorPoolStorageAdaptor.getVolumeNameFromPath(storeRef.getInstallPath(), true);
+            if (spName != null) {
+                // Cache the mapping so subsequent lookups take the fast path above.
+                addSnapshotDetails(snapshotId, snapshotUuid, storeRef.getInstallPath(), snapshotDetailsDao);
+                return spName;
+            }
+        }
+        return null;
+    }
+
+    /**
+     * Directly updates a snapshot_details value or a volume_details name by row id.
+     *
+     * @param snapshotOrVolume selects the target table: "snapshot" or "volume"; any other
+     *                         value is logged and the call is a no-op.
+     */
+    public static void updateSnapshotDetailsValue(Long id, String valueOrName, String snapshotOrVolume) {
+        TransactionLegacy txn = TransactionLegacy.currentTxn();
+        PreparedStatement pstmt = null;
+        try {
+            String sql = null;
+            if (snapshotOrVolume.equals("snapshot")) {
+                sql = UPDATE_SNAPSHOT_DETAILS_VALUE;
+            } else if (snapshotOrVolume.equals("volume")) {
+                sql = UPDATE_VOLUME_DETAILS_NAME;
+            } else {
+                StorPoolUtil.spLog("Could not update snapshot detail with id=%s", id);
+            }
+            if (sql != null) {
+                pstmt = txn.prepareAutoCloseStatement(sql);
+                pstmt.setString(1, valueOrName);
+                pstmt.setLong(2, id);
+                pstmt.executeUpdate();
+                txn.commit();
+            }
+        } catch (Exception e) {
+            txn.rollback();
+            // Include the cause - the original message hid why the update failed, making
+            // DB errors indistinguishable from an unknown snapshotOrVolume argument.
+            StorPoolUtil.spLog("Could not update snapshot detail with id=%s due to: %s", id, e.getMessage());
+        }
+    }
+
+    /** Returns the value of the VM's "vc-policy" resource tag, or null when the VM id is null or the tag is absent. */
+    public static String getVcPolicyTag(Long vmId, ResourceTagDao resourceTagDao) {
+        if (vmId == null) {
+            return null;
+        }
+        ResourceTag vcPolicyTag = resourceTagDao.findByKey(vmId, ResourceObjectType.UserVm, StorPoolUtil.SP_VC_POLICY);
+        return vcPolicyTag == null ? null : vcPolicyTag.getValue();
+    }
+
+    /** Returns the UUID of the VM instance with the given id, or null when the id is null or unknown. */
+    public static String getVMInstanceUUID(Long id, VMInstanceDao vmInstanceDao) {
+        if (id == null) {
+            return null;
+        }
+        VMInstanceVO vm = vmInstanceDao.findById(id);
+        return vm == null ? null : vm.getUuid();
+    }
+
+    /**
+     * Builds the StorPool tag map attached to volumes/snapshots.
+     * NOTE(review): "uuid", "cvm" and the vc-policy tag are always present even when their
+     * values are null, while "cs" is only added when non-null - confirm that null values
+     * are intended to clear the corresponding StorPool tags.
+     */
+    public static Map<String, String> addStorPoolTags(String name, String vmUuid, String csTag, String vcPolicy) {
+        Map<String, String> spTags = new HashMap<>();
+        spTags.put("uuid", name);
+        spTags.put("cvm", vmUuid);
+        spTags.put(StorPoolUtil.SP_VC_POLICY, vcPolicy);
+        if (csTag != null) {
+            spTags.put("cs", csTag);
+        }
+        return spTags;
+    }
+
+    // Attaches a timestamped rolling-file appender to the given logger so that information
+    // about updated (or abandoned) volumes and snapshots is captured in a dedicated log file.
+    public static void appendLogger(Logger log, String filePath, String kindOfLog) {
+        Appender appender = null;
+        PatternLayout patternLayout = new PatternLayout();
+        // Use lower-case "yyyy" (calendar year): the original "YYYY" is SimpleDateFormat's
+        // week-based year, which logs the wrong year for dates around New Year.
+        patternLayout.setConversionPattern("%d{yyyy-MM-dd HH:mm:ss.SSS} %m%n");
+        SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMddHHmmss");
+        Timestamp timestamp = new Timestamp(System.currentTimeMillis());
+        String path = filePath + "-" + sdf.format(timestamp) + ".log";
+        try {
+            appender = new RollingFileAppender(patternLayout, path);
+            log.setAdditivity(false);
+            log.addAppender(appender);
+        } catch (IOException e) {
+            // Report through the logger instead of dumping a stack trace to stderr.
+            log.warn(String.format("Could not create log appender for %s: %s", path, e.getMessage()));
+        }
+        if (kindOfLog.equals("update")) {
+            StorPoolUtil.spLog(
+                    "You can find information about volumes and snapshots, which will be updated in Database with their globalIs in %s log file",
+                    path);
+        } else if (kindOfLog.equals("abandon")) {
+            StorPoolUtil.spLog(
+                    "You can find information about volumes and snapshots, for which CloudStack doesn't have information in %s log file",
+                    path);
+        }
+    }
+
+    /**
+     * Records the StorPool cluster id ("sp.cluster.id") as a detail of the CloudStack
+     * cluster that owns the given host, creating the detail or refreshing a stale value.
+     * Does nothing when the host is unknown or not assigned to a cluster.
+     */
+    public static void setSpClusterIdIfNeeded(long hostId, String clusterId, ClusterDao clusterDao, HostDao hostDao,
+            ClusterDetailsDao clusterDetails) {
+        HostVO host = hostDao.findById(hostId);
+        if (host == null || host.getClusterId() == null) {
+            return;
+        }
+        ClusterVO cluster = clusterDao.findById(host.getClusterId());
+        String detailKey = StorPoolConfigurationManager.StorPoolClusterId.key();
+        ClusterDetailsVO existing = clusterDetails.findDetail(cluster.getId(), detailKey);
+        if (existing == null) {
+            clusterDetails.persist(new ClusterDetailsVO(cluster.getId(), detailKey, clusterId));
+        } else if (existing.getValue() == null || !existing.getValue().equals(clusterId)) {
+            existing.setValue(clusterId);
+            clusterDetails.update(existing.getId(), existing);
+        }
+    }
+
+ public static Long findClusterIdByGlobalId(String globalId, ClusterDao clusterDao) {
+ List<ClusterVO> clusterVo = clusterDao.listAll();
+ if (clusterVo.size() == 1) {
+ StorPoolUtil.spLog("There is only one cluster, sending backup to secondary command");
+ return null;
+ }
+ for (ClusterVO clusterVO2 : clusterVo) {
+ if (globalId != null && StorPoolConfigurationManager.StorPoolClusterId.valueIn(clusterVO2.getId()) != null
+ && globalId.contains(StorPoolConfigurationManager.StorPoolClusterId.valueIn(clusterVO2.getId()).toString())) {
+ StorPoolUtil.spLog("Found cluster with id=%s for object with globalId=%s", clusterVO2.getId(),
+ globalId);
+ return clusterVO2.getId();
+ }
+ }
+ throw new CloudRuntimeException(
+ "Could not find the right clusterId. to send command. To use snapshot backup to secondary for each CloudStack cluster in its settings set the value of StorPool's cluster-id in \"sp.cluster.id\".");
+ }
+
+    /**
+     * Returns an arbitrary (first) host of the given cluster, or null when the cluster has
+     * no hosts. The original indexed get(0) on a possibly empty list, which throws
+     * IndexOutOfBoundsException for a cluster that exists but currently has no hosts.
+     */
+    public static HostVO findHostByCluster(Long clusterId, HostDao hostDao) {
+        List<HostVO> hosts = hostDao.findByClusterId(clusterId);
+        return CollectionUtils.isNotEmpty(hosts) ? hosts.get(0) : null;
+    }
+
+    /**
+     * Reads a wait/timeout setting from the global configuration, falling back to the
+     * setting's declared default when the stored value does not parse as an int.
+     * NOTE(review): assumes the named configuration row always exists - findByName
+     * returning null would NPE here; confirm callers only pass known keys such as the
+     * *Wait constants above.
+     */
+    public static int getTimeout(String cfg, ConfigurationDao configDao) {
+        final ConfigurationVO value = configDao.findByName(cfg);
+        return NumbersUtil.parseInt(value.getValue(), Integer.parseInt(value.getDefaultValue()));
+    }
+
+    /** Looks up the template-on-pool reference for the given primary storage pool and template. */
+    public static VMTemplateStoragePoolVO findByPoolTemplate(long poolId, long templateId) {
+        QueryBuilder<VMTemplateStoragePoolVO> query = QueryBuilder.create(VMTemplateStoragePoolVO.class);
+        query.and(query.entity().getPoolId(), Op.EQ, poolId);
+        query.and(query.entity().getTemplateId(), Op.EQ, templateId);
+        return query.find();
+    }
+
+    /**
+     * Stores the local download path on the template's data-store reference for the given role.
+     * NOTE(review): findByTemplate may return null when no such reference exists, which would
+     * NPE here - confirm callers guarantee the record's existence.
+     */
+    public static void updateVmStoreTemplate(Long id, DataStoreRole role, String path,
+            TemplateDataStoreDao templStoreDao) {
+        TemplateDataStoreVO templ = templStoreDao.findByTemplate(id, role);
+        templ.setLocalDownloadPath(path);
+        templStoreDao.persist(templ);
+    }
+
+    /** Lists the "SP-FEATURE*" detail rows recorded for a pool: the driver feature set persisted before an upgrade. */
+    public static List<StoragePoolDetailVO> listFeaturesUpdates(StoragePoolDetailsDao storagePoolDetails, long poolId) {
+        SearchBuilder<StoragePoolDetailVO> builder = storagePoolDetails.createSearchBuilder();
+        builder.and("pool_id", builder.entity().getResourceId(), SearchCriteria.Op.EQ);
+        builder.and("name", builder.entity().getName(), SearchCriteria.Op.LIKE);
+        SearchCriteria<StoragePoolDetailVO> criteria = builder.create();
+        criteria.setParameters("pool_id", poolId);
+        criteria.setParameters("name", "SP-FEATURE%");
+        return storagePoolDetails.search(criteria, null);
+    }
+
+    /**
+     * Compares the current driver's feature list with the features recorded for the pool
+     * before an upgrade. Returns false when the database lists a feature the current driver
+     * lacks; otherwise persists any features new to this driver version and returns true.
+     */
+    public static boolean isPoolSupportsAllFunctionalityFromPreviousVersion(StoragePoolDetailsDao storagePoolDetails, List<String> currentPluginFeatures, List<StoragePoolDetailVO> poolFeaturesBeforeUpgrade, long poolId) {
+        if (CollectionUtils.isEmpty(currentPluginFeatures) && CollectionUtils.isEmpty(poolFeaturesBeforeUpgrade)) {
+            return true;
+        }
+        List<String> recordedFeatures = poolFeaturesBeforeUpgrade.stream().map(StoragePoolDetailVO::getName).collect(Collectors.toList());
+        List<String> missingInDriver = new ArrayList<>(CollectionUtils.removeAll(recordedFeatures, currentPluginFeatures));
+        if (!CollectionUtils.isEmpty(missingInDriver)) {
+            // The pool relied on a feature this driver no longer provides - refuse.
+            return false;
+        }
+        List<String> missingInDatabase = new ArrayList<>(CollectionUtils.removeAll(currentPluginFeatures, recordedFeatures));
+        for (String feature : missingInDatabase) {
+            storagePoolDetails.persist(new StoragePoolDetailVO(poolId, feature, feature, false));
+        }
+        return true;
+    }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolUtil.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolUtil.java
new file mode 100644
index 0000000000..484a9b9893
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolUtil.java
@@ -0,0 +1,609 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.datastore.util;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.PrintWriter;
+import java.io.UnsupportedEncodingException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.sql.Timestamp;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.snapshot.StorPoolConfigurationManager;
+import org.apache.cloudstack.storage.to.VolumeObjectTO;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.ClientProtocolException;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpRequestBase;
+import org.apache.http.entity.ContentType;
+import org.apache.http.entity.StringEntity;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.log4j.Logger;
+
+import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.utils.script.OutputInterpreter;
+import com.cloud.utils.script.Script;
+import com.google.gson.Gson;
+import com.google.gson.JsonArray;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParser;
+import com.google.gson.JsonPrimitive;
+
+public class StorPoolUtil {
+ private static final Logger log = Logger.getLogger(StorPoolUtil.class);
+
+ private static final File spLogFile = new File("/var/log/cloudstack/management/storpool-plugin.log");
+ private static PrintWriter spLogPrinterWriter = spLogFileInitialize();
+
+    /**
+     * (Re)creates the plugin log file, archiving any existing file under a timestamped
+     * name first. Called once at class load and again whenever spLog() rotates the file.
+     * Throws RuntimeException (failing class initialization) when the file cannot be opened.
+     */
+    private static PrintWriter spLogFileInitialize() {
+        try {
+            log.info("INITIALIZE SP-LOG_FILE");
+            if (spLogFile.exists()) {
+                final SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMddHHmmss");
+                final Timestamp timestamp = new Timestamp(System.currentTimeMillis());
+                final File spLogFileRename = new File(spLogFile + "-" + sdf.format(timestamp));
+                final boolean ret = spLogFile.renameTo(spLogFileRename);
+                if (!ret) {
+                    // Original message was missing the space after "rename".
+                    log.warn("Unable to rename " + spLogFile + " to " + spLogFileRename);
+                } else {
+                    log.debug("Renamed " + spLogFile + " to " + spLogFileRename);
+                }
+            } else {
+                final File parentDir = spLogFile.getParentFile();
+                if (!parentDir.exists() && !parentDir.mkdirs()) {
+                    // Surface the directory-creation failure instead of silently ignoring it;
+                    // the PrintWriter constructor below would otherwise fail opaquely.
+                    log.warn("Unable to create parent directories for " + spLogFile);
+                }
+            }
+            return new PrintWriter(spLogFile);
+        } catch (Exception e) {
+            log.info("INITIALIZE SP-LOG_FILE: " + e.getMessage());
+            throw new RuntimeException(e);
+        }
+    }
+
+    /**
+     * Appends a printf-style message to the StorPool plugin log with a timestamp prefix,
+     * flushing immediately, and rotates the file once it exceeds the size threshold.
+     */
+    public static void spLog(String fmt, Object... args) {
+        // "SSS" = milliseconds. The original pattern ended in ",ms", which SimpleDateFormat
+        // parses as minute-then-second, so the timestamp repeated min/sec instead of millis.
+        String timeStamp = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS").format(Calendar.getInstance().getTime());
+        spLogPrinterWriter.println(String.format(timeStamp + " " + fmt, args));
+        spLogPrinterWriter.flush();
+        if (spLogFile.length() > 107374182400L) { // 100 GiB rotation threshold
+            spLogPrinterWriter.close();
+            spLogPrinterWriter = spLogFileInitialize();
+        }
+    }
+
+ public static final String SP_PROVIDER_NAME = "StorPool";
+ public static final String SP_DEV_PATH = "/dev/storpool-byid/";
+ public static final String SP_OLD_PATH = "/dev/storpool/";
+ public static final String SP_VC_POLICY = "vc-policy";
+ public static final String GLOBAL_ID = "snapshotGlobalId";
+ public static final String UPDATED_DETAIL = "renamed";
+ public static final String SP_STORAGE_POOL_ID = "spStoragePoolId";
+
+ public static final String SP_HOST_PORT = "SP_API_HTTP_HOST";
+
+ public static final String SP_TEMPLATE = "SP_TEMPLATE";
+
+ public static final String SP_AUTH_TOKEN = "SP_AUTH_TOKEN";
+
+ public static final String SP_VOLUME_ON_CLUSTER = "SP_VOLUME_ON_CLUSTER";
+
+    // Access rights with which a StorPool volume can be attached to a client: read-only,
+    // read-write, or detached. toString() yields the literal value expected by the API.
+    // NOTE(review): name kept as "Storpool" (not "StorPool") - renaming would break callers.
+    public static enum StorpoolRights {
+        RO("ro"), RW("rw"), DETACH("detach");
+
+        private final String name;
+
+        private StorpoolRights(String name) {
+            this.name = name;
+        }
+
+        public String toString() {
+            return name;
+        }
+    }
+
+ public static final class SpApiError {
+ private String name;
+ private String descr;
+
+ public SpApiError() {
+ }
+
+ public String getName() {
+ return this.name;
+ }
+
+ public String getDescr() {
+ return this.descr;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public void setDescr(String descr) {
+ this.descr = descr;
+ }
+
+ public String toString() {
+ return String.format("%s: %s", name, descr);
+ }
+ }
+
+    /**
+     * Connection details for a StorPool API endpoint: "host:port", the auth token and the
+     * StorPool template backing the primary storage.
+     */
+    public static class SpConnectionDesc {
+        private String hostPort;
+        private String authToken;
+        private String templateName;
+
+        // Accepts either a bare template name - in which case host, port and token are read
+        // from the local StorPool configuration via storpool_confget - or a legacy URL of the
+        // form "SP_API_HTTP=host:port;SP_AUTH_TOKEN=token;SP_TEMPLATE=name".
+        public SpConnectionDesc(String url) {
+            String[] urlSplit = url.split(";");
+            if (urlSplit.length == 1 && !urlSplit[0].contains("=")) {
+                // Bare template name: everything else comes from the host's StorPool config.
+                this.templateName = url;
+
+                Script sc = new Script("storpool_confget", 0, log);
+                OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
+
+                final String err = sc.execute(parser);
+                if (err != null) {
+                    final String errMsg = String.format("Could not execute storpool_confget. Error: %s", err);
+                    log.warn(errMsg);
+                    throw new CloudRuntimeException(errMsg);
+                }
+
+                String SP_API_HOST = null;
+                String SP_API_PORT = null;
+
+                // storpool_confget prints KEY=VALUE pairs, one per line; ignore anything else.
+                for (String line : parser.getLines().split("\n")) {
+                    String[] toks = line.split("=");
+                    if (toks.length != 2) {
+                        continue;
+                    }
+
+                    switch (toks[0]) {
+                    case "SP_API_HTTP_HOST":
+                        SP_API_HOST = toks[1];
+                        break;
+
+                    case "SP_API_HTTP_PORT":
+                        SP_API_PORT = toks[1];
+                        break;
+
+                    case "SP_AUTH_TOKEN":
+                        this.authToken = toks[1];
+                        break;
+                    }
+                }
+
+                if (SP_API_HOST == null)
+                    throw new CloudRuntimeException("Invalid StorPool config. Missing SP_API_HTTP_HOST");
+                if (SP_API_PORT == null)
+                    throw new CloudRuntimeException("Invalid StorPool config. Missing SP_API_HTTP_PORT");
+                if (this.authToken == null)
+                    throw new CloudRuntimeException("Invalid StorPool config. Missing SP_AUTH_TOKEN");
+
+                this.hostPort = SP_API_HOST + ":" + SP_API_PORT;
+            } else {
+                // Legacy key=value URL form; unknown keys and malformed pairs are skipped,
+                // so missing fields are only detected later by the API-request path.
+                for (String kv : urlSplit) {
+                    String[] toks = kv.split("=");
+                    if (toks.length != 2)
+                        continue;
+                    switch (toks[0]) {
+                    case "SP_API_HTTP":
+                        this.hostPort = toks[1];
+                        break;
+
+                    case "SP_AUTH_TOKEN":
+                        this.authToken = toks[1];
+                        break;
+
+                    case "SP_TEMPLATE":
+                        this.templateName = toks[1];
+                        break;
+                    }
+                }
+            }
+        }
+
+        // Direct constructor used when the details are already known (e.g. read from the DB).
+        public SpConnectionDesc(String host, String authToken2, String templateName2) {
+            this.hostPort = host;
+            this.authToken = authToken2;
+            this.templateName = templateName2;
+        }
+
+        public String getHostPort() {
+            return this.hostPort;
+        }
+
+        public String getAuthToken() {
+            return this.authToken;
+        }
+
+        public String getTemplateName() {
+            return this.templateName;
+        }
+    }
+
+    /**
+     * Builds the connection descriptor for a StorPool primary storage pool. Precedence:
+     * an explicitly enabled alternative endpoint, then the connection details cached in
+     * storage_pool_details, and finally the legacy URL (which also migrates the parsed
+     * details into the database).
+     */
+    public static SpConnectionDesc getSpConnection(String url, long poolId, StoragePoolDetailsDao poolDetails,
+            PrimaryDataStoreDao storagePool) {
+        boolean isAlternateEndpointEnabled = StorPoolConfigurationManager.AlternativeEndPointEnabled.valueIn(poolId);
+        if (isAlternateEndpointEnabled) {
+            String alternateEndpoint = StorPoolConfigurationManager.AlternativeEndpoint.valueIn(poolId);
+            if (StringUtils.isEmpty(alternateEndpoint)) {
+                throw new CloudRuntimeException(String.format("Using an alternative endpoint of StorPool primary storage with id [%s] is enabled but no endpoint URL is provided", poolId));
+            }
+            return new SpConnectionDesc(alternateEndpoint);
+        }
+        String host = null;
+        String authToken = null;
+        String templateName = null;
+        for (StoragePoolDetailVO detail : poolDetails.listDetails(poolId)) {
+            switch (detail.getName()) {
+            case SP_HOST_PORT:
+                host = detail.getValue();
+                break;
+            case SP_AUTH_TOKEN:
+                authToken = detail.getValue();
+                break;
+            case SP_TEMPLATE:
+                templateName = detail.getValue();
+                break;
+            }
+        }
+        if (host == null || authToken == null || templateName == null) {
+            // Legacy pool: parse the URL and persist the details for next time.
+            return updateStorageAndStorageDetails(url, poolId, poolDetails, storagePool);
+        }
+        return new SpConnectionDesc(host, authToken, templateName);
+    }
+
+ /**
+ * Parses the pool URL into a connection descriptor and persists the parsed
+ * host/token/template as pool details so later lookups need not re-parse.
+ * Also rewrites the pool UUID to "template;random-uuid" so the raw connection
+ * details (auth token) no longer show up in the UI.
+ */
+ private static SpConnectionDesc updateStorageAndStorageDetails(String url, long poolId,
+ StoragePoolDetailsDao poolDetails, PrimaryDataStoreDao storagePool) {
+ SpConnectionDesc conn = new SpConnectionDesc(url);
+ // Persist each connection component as a (non-display) pool detail.
+ poolDetails.persist(new StoragePoolDetailVO(poolId, SP_HOST_PORT, conn.getHostPort(), false));
+ poolDetails.persist(new StoragePoolDetailVO(poolId, SP_AUTH_TOKEN, conn.getAuthToken(), false));
+ poolDetails.persist(new StoragePoolDetailVO(poolId, SP_TEMPLATE, conn.getTemplateName(), false));
+ StoragePoolVO pool = storagePool.findById(poolId);
+ pool.setUuid(conn.getTemplateName() + ";" + UUID.randomUUID().toString());
+ storagePool.update(poolId, pool);
+ StorPoolUtil.spLog(
+ "Storage pool with id=%s and template's name=%s was updated and its connection details are hidden from UI.",
+ pool.getId(), conn.getTemplateName());
+ return conn;
+ }
+
+ public static class SpApiResponse {
+ private SpApiError error;
+ public JsonElement fullJson;
+
+ public SpApiResponse() {
+ }
+
+ public SpApiError getError() {
+ return this.error;
+ }
+
+ public void setError(SpApiError error) {
+ this.error = error;
+ }
+ }
+
+    /** Maps a StorPool volume/snapshot name to its block-device path on the host. */
+    public static String devPath(final String name) {
+        return SP_DEV_PATH + name;
+    }
+
+    /**
+     * Sends one request to the StorPool management API and parses the JSON
+     * reply into an {@link SpApiResponse}, keeping the raw JSON as well.
+     *
+     * @param req   HTTP method object to execute (GET/POST)
+     * @param query API path relative to /ctrl/1.0/
+     * @param conn  connection descriptor; a blank one is substituted when null
+     * @throws CloudRuntimeException when the config is incomplete or the call fails
+     */
+    private static SpApiResponse spApiRequest(HttpRequestBase req, String query, SpConnectionDesc conn) {
+
+        if (conn == null)
+            conn = new SpConnectionDesc("");
+
+        if (conn.getHostPort() == null) {
+            throw new CloudRuntimeException("Invalid StorPool config. Missing SP_API_HTTP_HOST");
+        }
+
+        if (conn.getAuthToken() == null) {
+            throw new CloudRuntimeException("Invalid StorPool config. Missing SP_AUTH_TOKEN");
+        }
+
+        try (CloseableHttpClient httpclient = HttpClientBuilder.create().build()) {
+            final String qry = String.format("http://%s/ctrl/1.0/%s", conn.getHostPort(), query);
+            final URI uri = new URI(qry);
+
+            req.setURI(uri);
+            req.addHeader("Authorization", String.format("Storpool v1:%s", conn.getAuthToken()));
+
+            final HttpResponse resp = httpclient.execute(req);
+
+            // FIX: close the reader deterministically so the response stream is
+            // released even if parsing throws (the original leaked it).
+            try (BufferedReader br = new BufferedReader(new InputStreamReader(resp.getEntity().getContent()))) {
+                JsonElement el = new JsonParser().parse(br);
+                SpApiResponse apiResp = new Gson().fromJson(el, SpApiResponse.class);
+                apiResp.fullJson = el;
+                return apiResp;
+            }
+        } catch (IOException | URISyntaxException ex) {
+            // FIX: UnsupportedEncodingException and ClientProtocolException are
+            // IOException subclasses and all four original catch blocks were
+            // identical, so they are collapsed into one multi-catch.
+            throw new CloudRuntimeException(ex.getMessage());
+        }
+    }
+
+ /** Executes an HTTP GET against the StorPool API at the given path. */
+ private static SpApiResponse GET(String query, SpConnectionDesc conn) {
+ return spApiRequest(new HttpGet(), query, conn);
+ }
+
+    /** Executes an HTTP POST with an optional JSON body serialized from {@code json}. */
+    private static SpApiResponse POST(String query, Object json, SpConnectionDesc conn) {
+        HttpPost req = new HttpPost();
+        if (json != null) {
+            String payload = new Gson().toJson(json);
+            log.info("Request:" + payload);
+            req.setEntity(new StringEntity(payload, ContentType.APPLICATION_JSON));
+        }
+        return spApiRequest(req, query, conn);
+    }
+
+    /**
+     * @return true when the template configured in {@code conn} exists on StorPool
+     * @throws CloudRuntimeException for any API error other than "objectDoesNotExist"
+     */
+    public static boolean templateExists(SpConnectionDesc conn) {
+        SpApiResponse resp = GET("VolumeTemplateDescribe/" + conn.getTemplateName(), conn);
+        // FIX idiom: "cond ? true : x" is simply a boolean OR.
+        return resp.getError() == null || objectExists(resp.getError());
+    }
+
+    /**
+     * @return true when the named snapshot exists in the multi-cluster
+     * @throws CloudRuntimeException for any API error other than "objectDoesNotExist"
+     */
+    public static boolean snapshotExists(final String name, SpConnectionDesc conn) {
+        SpApiResponse resp = GET("MultiCluster/Snapshot/" + name, conn);
+        // FIX idiom: "cond ? true : x" is simply a boolean OR.
+        return resp.getError() == null || objectExists(resp.getError());
+    }
+
+    /** @return the raw "data" array of all snapshots visible in the multi-cluster */
+    public static JsonArray snapshotsList(SpConnectionDesc conn) {
+        SpApiResponse resp = GET("MultiCluster/SnapshotsList", conn);
+        return resp.fullJson.getAsJsonObject().getAsJsonArray("data");
+    }
+
+    /** @return the raw "data" array of all volumes visible in the multi-cluster */
+    public static JsonArray volumesList(SpConnectionDesc conn) {
+        SpApiResponse resp = GET("MultiCluster/VolumesList", conn);
+        return resp.fullJson.getAsJsonObject().getAsJsonArray("data");
+    }
+
+    /**
+     * Interprets an API error: "objectDoesNotExist" maps to false; any other
+     * error is rethrown as a CloudRuntimeException.
+     */
+    private static boolean objectExists(SpApiError err) {
+        if (err.getName().equals("objectDoesNotExist")) {
+            return false;
+        }
+        throw new CloudRuntimeException(err.getDescr());
+    }
+
+ public static Long snapshotSize(final String name, SpConnectionDesc conn) {
+ SpApiResponse resp = GET("MultiCluster/Snapshot/" + name, conn);
+ JsonObject obj = resp.fullJson.getAsJsonObject();
+
+ if (resp.getError() != null && !objectExists(resp.getError())) {
+ return null;
+ }
+ JsonObject data = obj.getAsJsonArray("data").get(0).getAsJsonObject();
+ return data.getAsJsonPrimitive("size").getAsLong();
+ }
+
+    /** @return the clusterId reported for the named snapshot, or null when absent */
+    public static String getSnapshotClusterID(String name, SpConnectionDesc conn) {
+        SpApiResponse resp = GET("MultiCluster/Snapshot/" + name, conn);
+        JsonObject data = resp.fullJson.getAsJsonObject().getAsJsonArray("data").get(0).getAsJsonObject();
+        JsonPrimitive clusterId = data.getAsJsonPrimitive("clusterId");
+        return clusterId == null ? null : clusterId.getAsString();
+    }
+
+    /** @return the clusterId reported for the named volume, or null when absent */
+    public static String getVolumeClusterID(String name, SpConnectionDesc conn) {
+        SpApiResponse resp = GET("MultiCluster/Volume/" + name, conn);
+        JsonObject data = resp.fullJson.getAsJsonObject().getAsJsonArray("data").get(0).getAsJsonObject();
+        JsonPrimitive clusterId = data.getAsJsonPrimitive("clusterId");
+        return clusterId == null ? null : clusterId.getAsString();
+    }
+
+    /**
+     * Creates a StorPool volume, optionally as a child of {@code parentName}.
+     * The CloudStack name/vm-uuid/tags travel as StorPool volume tags; the
+     * actual StorPool name is assigned by the API (hence "name" stays empty).
+     */
+    public static SpApiResponse volumeCreate(final String name, final String parentName, final Long size, String vmUuid,
+            String vcPolicy, String csTag, Long iops, SpConnectionDesc conn) {
+        Map<String, Object> request = new LinkedHashMap<>();
+        request.put("name", "");
+        request.put("iops", iops);
+        request.put("parent", parentName);
+        request.put("size", size);
+        request.put("template", conn.getTemplateName());
+        request.put("tags", StorPoolHelper.addStorPoolTags(name, vmUuid, csTag, vcPolicy));
+        return POST("MultiCluster/VolumeCreate", request, conn);
+    }
+
+    /** Creates a tiny throw-away volume used only to verify StorPool works on a host. */
+    public static SpApiResponse volumeCreate(SpConnectionDesc conn) {
+        Map<String, String> tags = new HashMap<>();
+        tags.put("cs", "check-volume-is-on-host");
+        Map<String, Object> request = new LinkedHashMap<>();
+        request.put("name", "");
+        request.put("size", 512);
+        request.put("template", conn.getTemplateName());
+        request.put("tags", tags);
+        return POST("MultiCluster/VolumeCreate", request, conn);
+    }
+
+    /** Creates a new volume based on {@code baseOn} (a copy-on-write clone). */
+    public static SpApiResponse volumeCopy(final String name, final String baseOn, String csTag, Long iops,
+            SpConnectionDesc conn) {
+        Map<String, Object> request = new HashMap<>();
+        request.put("baseOn", baseOn);
+        if (iops != null) {
+            request.put("iops", iops);
+        }
+        request.put("template", conn.getTemplateName());
+        request.put("tags", StorPoolHelper.addStorPoolTags(name, null, csTag, null));
+        return POST("MultiCluster/VolumeCreate", request, conn);
+    }
+
+    /** Renames a volume and records the CloudStack uuid as a tag. */
+    public static SpApiResponse volumeUpdateRename(final String name, String newName, String uuid,
+            SpConnectionDesc conn) {
+        Map<String, String> tags = new HashMap<>();
+        tags.put("uuid", uuid);
+        Map<String, Object> request = new HashMap<>();
+        request.put("rename", newName);
+        request.put("tags", tags);
+        return POST("MultiCluster/VolumeUpdate/" + name, request, conn);
+    }
+
+    /** Resizes a volume and/or updates its iops limit; shrinkOk must allow shrinking. */
+    public static SpApiResponse volumeUpdate(final String name, final Long newSize, final Boolean shrinkOk, Long iops,
+            SpConnectionDesc conn) {
+        Map<String, Object> request = new HashMap<>();
+        request.put("iops", iops);
+        request.put("size", newSize);
+        request.put("shrinkOk", shrinkOk);
+        return POST("MultiCluster/VolumeUpdate/" + name, request, conn);
+    }
+
+    /**
+     * Updates a volume's iops limit plus its uuid/VC-policy tags.
+     * (The misspelled method name is kept - callers depend on it.)
+     */
+    public static SpApiResponse volumeUpadateTags(final String name, final String uuid, Long iops,
+            SpConnectionDesc conn, String vcPolicy) {
+        Map<String, Object> request = new HashMap<>();
+        request.put("iops", iops);
+        request.put("tags", StorPoolHelper.addStorPoolTags(null, uuid, null, vcPolicy));
+        return POST("MultiCluster/VolumeUpdate/" + name, request, conn);
+    }
+
+    /** Tags a volume with the uuid of the CloudStack resource using it. */
+    public static SpApiResponse volumeUpadateCvmTags(final String name, final String uuid, SpConnectionDesc conn) {
+        Map<String, Object> request = new HashMap<>();
+        request.put("tags", StorPoolHelper.addStorPoolTags(null, uuid, null, null));
+        return POST("MultiCluster/VolumeUpdate/" + name, request, conn);
+    }
+
+    /** Updates only the VC-policy tag on a volume. */
+    public static SpApiResponse volumeUpadateVCTags(final String name, SpConnectionDesc conn, String vcPolicy) {
+        Map<String, Object> request = new HashMap<>();
+        request.put("tags", StorPoolHelper.addStorPoolTags(null, null, null, vcPolicy));
+        return POST("MultiCluster/VolumeUpdate/" + name, request, conn);
+    }
+
+    /** Snapshots a volume; the snapshot name travels via tags ("name" stays empty). */
+    public static SpApiResponse volumeSnapshot(final String volumeName, final String snapshotName, String vmUuid,
+            String csTag, String vcPolicy, SpConnectionDesc conn) {
+        Map<String, Object> request = new HashMap<>();
+        request.put("name", "");
+        request.put("tags", StorPoolHelper.addStorPoolTags(snapshotName, vmUuid, csTag, vcPolicy));
+        return POST("MultiCluster/VolumeSnapshot/" + volumeName, request, conn);
+    }
+
+    /** Takes an atomic group snapshot of all the given VM volumes. */
+    public static SpApiResponse volumesGroupSnapshot(final List<VolumeObjectTO> volumeTOs, final String vmUuid,
+            final String snapshotName, String csTag, SpConnectionDesc conn) {
+        List<Map<String, Object>> volumes = new ArrayList<>();
+        for (VolumeObjectTO volumeTO : volumeTOs) {
+            Map<String, Object> vol = new LinkedHashMap<>();
+            vol.put("name", "");
+            vol.put("volume", StorPoolStorageAdaptor.getVolumeNameFromPath(volumeTO.getPath(), true));
+            volumes.add(vol);
+        }
+        Map<String, Object> request = new LinkedHashMap<>();
+        request.put("tags", StorPoolHelper.addStorPoolTags(snapshotName, vmUuid, csTag, null));
+        request.put("volumes", volumes);
+        log.info("json:" + request);
+        return POST("MultiCluster/VolumesGroupSnapshot", request, conn);
+    }
+
+    /** Reverts a volume to the given snapshot. */
+    public static SpApiResponse volumeRevert(final String name, final String snapshotName, SpConnectionDesc conn) {
+        Map<String, Object> request = new HashMap<>();
+        request.put("toSnapshot", snapshotName);
+        return POST("MultiCluster/VolumeRevert/" + name, request, conn);
+    }
+
+ /** Freezes a volume, turning it into an immutable snapshot of the same name. */
+ public static SpApiResponse volumeFreeze(final String volumeName, SpConnectionDesc conn) {
+ return POST("MultiCluster/VolumeFreeze/" + volumeName, null, conn);
+ }
+
+    /** Acquires a volume in this sub-cluster, force-detaching any remote attachments. */
+    public static SpApiResponse volumeAcquire(final String volumeName, SpConnectionDesc conn) {
+        Map<String, Object> request = new HashMap<>();
+        request.put("onRemoteAttached", "detachForce");
+        return POST("MultiCluster/VolumeAcquire/" + volumeName, request, conn);
+    }
+
+    /** Deletes a volume, force-detaching it first if it is attached anywhere. */
+    public static SpApiResponse volumeDelete(final String name, SpConnectionDesc conn) {
+        Map<String, Object> request = new HashMap<>();
+        request.put("onAttached", "detachForce");
+        return POST("MultiCluster/VolumeDelete/" + name, request, conn);
+    }
+
+    /**
+     * Force-detaches a snapshot everywhere and then deletes it; a detach error
+     * is returned as-is without attempting the delete.
+     */
+    public static SpApiResponse snapshotDelete(final String name, SpConnectionDesc conn) {
+        SpApiResponse detachResp = detachAllForced(name, true, conn);
+        if (detachResp.getError() != null) {
+            return detachResp;
+        }
+        return POST("MultiCluster/SnapshotDelete/" + name, null, conn);
+    }
+
+    /** Force-detaches a volume or snapshot from every client it is attached to. */
+    public static SpApiResponse detachAllForced(final String name, final boolean snapshot, SpConnectionDesc conn) {
+        Map<String, Object> reassignDesc = new HashMap<>();
+        reassignDesc.put(snapshot ? "snapshot" : "volume", name);
+        reassignDesc.put("detach", "all");
+        reassignDesc.put("force", true);
+        List<Map<String, Object>> request = new ArrayList<>();
+        request.add(reassignDesc);
+        return POST("MultiCluster/VolumesReassign", request, conn);
+    }
+
+    /**
+     * Extracts a snapshot name (field {@code globalIdOrRemote}) from an API
+     * response, optionally prefixed with '~'. Returns null when the field is absent.
+     */
+    public static String getSnapshotNameFromResponse(SpApiResponse resp, boolean tildeNeeded, String globalIdOrRemote) {
+        JsonObject obj = resp.fullJson.getAsJsonObject();
+        JsonPrimitive data = obj.getAsJsonObject("data").getAsJsonPrimitive(globalIdOrRemote);
+        if (data == null) {
+            return null;
+        }
+        return tildeNeeded ? "~" + data.getAsString() : data.getAsString();
+    }
+
+    /**
+     * Extracts the "name" field from an API response; strips a leading '~'
+     * unless the caller asked for it. Returns null when the field is absent.
+     */
+    public static String getNameFromResponse(SpApiResponse resp, boolean tildeNeeded) {
+        JsonObject obj = resp.fullJson.getAsJsonObject();
+        JsonPrimitive data = obj.getAsJsonObject("data").getAsJsonPrimitive("name");
+        if (data == null) {
+            return null;
+        }
+        String name = data.getAsString();
+        if (name.startsWith("~") && !tildeNeeded) {
+            name = name.split("~")[1];
+        }
+        return name;
+    }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/motion/StorPoolDataMotionStrategy.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/motion/StorPoolDataMotionStrategy.java
new file mode 100644
index 0000000000..1608680e41
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/motion/StorPoolDataMotionStrategy.java
@@ -0,0 +1,575 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.motion;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
+import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
+import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event;
+import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
+import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult;
+import org.apache.cloudstack.framework.async.AsyncCallFuture;
+import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
+import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.cloudstack.storage.RemoteHostEndPoint;
+import org.apache.cloudstack.storage.command.CopyCmdAnswer;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
+import org.apache.cloudstack.storage.datastore.util.StorPoolHelper;
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil;
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpApiResponse;
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpConnectionDesc;
+import org.apache.cloudstack.storage.snapshot.StorPoolConfigurationManager;
+import org.apache.cloudstack.storage.to.SnapshotObjectTO;
+import org.apache.cloudstack.storage.to.TemplateObjectTO;
+import org.apache.commons.collections.MapUtils;
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
+
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.Command;
+import com.cloud.agent.api.MigrateAnswer;
+import com.cloud.agent.api.MigrateCommand;
+import com.cloud.agent.api.MigrateCommand.MigrateDiskInfo;
+import com.cloud.agent.api.ModifyTargetsAnswer;
+import com.cloud.agent.api.ModifyTargetsCommand;
+import com.cloud.agent.api.PrepareForMigrationCommand;
+import com.cloud.agent.api.storage.StorPoolBackupTemplateFromSnapshotCommand;
+import com.cloud.agent.api.to.DataObjectType;
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.exception.AgentUnavailableException;
+import com.cloud.exception.OperationTimedoutException;
+import com.cloud.host.Host;
+import com.cloud.host.dao.HostDao;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.storage.Storage.ImageFormat;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.VMTemplateDetailVO;
+import com.cloud.storage.Volume;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.GuestOSCategoryDao;
+import com.cloud.storage.dao.GuestOSDao;
+import com.cloud.storage.dao.SnapshotDao;
+import com.cloud.storage.dao.SnapshotDetailsDao;
+import com.cloud.storage.dao.SnapshotDetailsVO;
+import com.cloud.storage.dao.VMTemplateDetailsDao;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.VirtualMachineManager;
+import com.cloud.vm.dao.VMInstanceDao;
+
+@Component
+public class StorPoolDataMotionStrategy implements DataMotionStrategy {
+ private static final Logger log = Logger.getLogger(StorPoolDataMotionStrategy.class);
+
+ @Inject
+ private SnapshotDataFactory _snapshotDataFactory;
+ @Inject
+ private DataStoreManager _dataStore;
+ @Inject
+ private ConfigurationDao _configDao;
+ @Inject
+ private EndPointSelector _selector;
+ @Inject
+ private TemplateDataStoreDao _templStoreDao;
+ @Inject
+ private ClusterDao _clusterDao;
+ @Inject
+ private HostDao _hostDao;
+ @Inject
+ private SnapshotDetailsDao _snapshotDetailsDao;
+ @Inject
+ private VMTemplateDetailsDao _vmTemplateDetailsDao;
+ @Inject
+ private SnapshotDataStoreDao _snapshotStoreDao;
+ @Inject
+ private StoragePoolDetailsDao _storagePoolDetails;
+ @Inject
+ private PrimaryDataStoreDao _storagePool;
+ @Inject
+ private VolumeDao _volumeDao;
+ @Inject
+ private VolumeDataFactory _volumeDataFactory;
+ @Inject
+ private VMInstanceDao _vmDao;
+ @Inject
+ private GuestOSDao _guestOsDao;
+ @Inject
+ private VolumeService _volumeService;
+ @Inject
+ private GuestOSCategoryDao _guestOsCategoryDao;
+ @Inject
+ private SnapshotDao _snapshotDao;
+ @Inject
+ private AgentManager _agentManager;
+ @Inject
+ private PrimaryDataStoreDao _storagePoolDao;
+
+    /**
+     * This strategy handles snapshot -> template copies only when the
+     * secondary-storage bypass is enabled, the base volume lives on a StorPool
+     * primary storage, and the snapshot's StorPool name can be resolved.
+     */
+    @Override
+    public StrategyPriority canHandle(DataObject srcData, DataObject destData) {
+        if (srcData.getType() != DataObjectType.SNAPSHOT || destData.getType() != DataObjectType.TEMPLATE
+                || !StorPoolConfigurationManager.BypassSecondaryStorage.value()) {
+            return StrategyPriority.CANT_HANDLE;
+        }
+        SnapshotInfo sinfo = (SnapshotInfo) srcData;
+        VolumeInfo volume = sinfo.getBaseVolume();
+        StoragePoolVO storagePool = _storagePool.findById(volume.getPoolId());
+        if (!storagePool.getStorageProviderName().equals(StorPoolUtil.SP_PROVIDER_NAME)) {
+            return StrategyPriority.CANT_HANDLE;
+        }
+        String snapshotName = StorPoolHelper.getSnapshotName(sinfo.getId(), sinfo.getUuid(), _snapshotStoreDao,
+                _snapshotDetailsDao);
+        StorPoolUtil.spLog("StorPoolDataMotionStrategy.canHandle snapshot name=%s", snapshotName);
+        return snapshotName != null ? StrategyPriority.HIGHEST : StrategyPriority.CANT_HANDLE;
+    }
+
+    /**
+     * Creates a template directly from a StorPool snapshot, bypassing secondary
+     * storage: a transient volume is created from the snapshot, a host backs up
+     * the template from it, and on success the volume is frozen into the final
+     * template snapshot. Always completes {@code callback}, with {@code err}
+     * carrying the failure reason when any step fails.
+     */
+    @Override
+    public void copyAsync(DataObject srcData, DataObject destData, Host destHost,
+            AsyncCompletionCallback<CopyCommandResult> callback) {
+        SnapshotObjectTO snapshot = (SnapshotObjectTO) srcData.getTO();
+        TemplateObjectTO template = (TemplateObjectTO) destData.getTO();
+        DataStore store = _dataStore.getDataStore(snapshot.getVolume().getDataStore().getUuid(),
+                snapshot.getVolume().getDataStore().getRole());
+        SnapshotInfo sInfo = _snapshotDataFactory.getSnapshot(snapshot.getId(), store);
+
+        VolumeInfo vInfo = sInfo.getBaseVolume();
+        SpConnectionDesc conn = StorPoolUtil.getSpConnection(vInfo.getDataStore().getUuid(),
+                vInfo.getDataStore().getId(), _storagePoolDetails, _storagePool);
+        String name = template.getUuid();
+        String volumeName = "";
+
+        String parentName = StorPoolHelper.getSnapshotName(sInfo.getId(), sInfo.getUuid(), _snapshotStoreDao,
+                _snapshotDetailsDao);
+        // TODO volume tags cs - template
+        SpApiResponse res = StorPoolUtil.volumeCreate(name, parentName, sInfo.getSize(), null, "no", "template", null,
+                conn);
+        CopyCmdAnswer answer = null;
+        String err = null;
+        if (res.getError() != null) {
+            log.debug(String.format("Could not create volume from snapshot with ID=%s", snapshot.getId()));
+            StorPoolUtil.spLog("Volume create failed with error=%s", res.getError().getDescr());
+            err = res.getError().getDescr();
+        } else {
+            volumeName = StorPoolUtil.getNameFromResponse(res, true);
+            SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(sInfo.getId(), sInfo.getUuid());
+
+            snapshot.setPath(snapshotDetails.getValue());
+            Command backupSnapshot = new StorPoolBackupTemplateFromSnapshotCommand(snapshot, template,
+                    StorPoolHelper.getTimeout(StorPoolHelper.BackupSnapshotWait, _configDao),
+                    VirtualMachineManager.ExecuteInSequence.value());
+
+            try {
+                // Prefer a host from the cluster that owns the snapshot's global id;
+                // otherwise let the endpoint selector pick one.
+                Long clusterId = StorPoolHelper.findClusterIdByGlobalId(parentName, _clusterDao);
+                EndPoint ep2 = clusterId != null
+                        ? RemoteHostEndPoint
+                                .getHypervisorHostEndPoint(StorPoolHelper.findHostByCluster(clusterId, _hostDao))
+                        : _selector.select(sInfo, destData);
+                if (ep2 == null) {
+                    err = "No remote endpoint to send command, check if host or ssvm is down?";
+                } else {
+                    answer = (CopyCmdAnswer) ep2.sendMessage(backupSnapshot);
+                    if (answer != null && answer.getResult()) {
+                        SpApiResponse resSnapshot = StorPoolUtil.volumeFreeze(volumeName, conn);
+                        if (resSnapshot.getError() != null) {
+                            log.debug(String.format("Could not snapshot volume with ID=%s", snapshot.getId()));
+                            StorPoolUtil.spLog("Volume freeze failed with error=%s", resSnapshot.getError().getDescr());
+                            err = resSnapshot.getError().getDescr();
+                            StorPoolUtil.volumeDelete(volumeName, conn);
+                        } else {
+                            StorPoolHelper.updateVmStoreTemplate(template.getId(), template.getDataStore().getRole(),
+                                    StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(res, false)), _templStoreDao);
+                        }
+                    } else {
+                        // BUGFIX: "answer" may be null in this branch; the original
+                        // dereferenced it unconditionally and could throw an NPE.
+                        err = "Could not copy template to secondary "
+                                + (answer != null ? answer.getResult() : "null answer returned");
+                        StorPoolUtil.volumeDelete(StorPoolUtil.getNameFromResponse(res, true), conn);
+                    }
+                }
+            } catch (CloudRuntimeException e) {
+                err = e.getMessage();
+            }
+        }
+        // Remember which StorPool pool backs this template, even on failure paths.
+        _vmTemplateDetailsDao.persist(new VMTemplateDetailVO(template.getId(), StorPoolUtil.SP_STORAGE_POOL_ID,
+                String.valueOf(vInfo.getDataStore().getId()), false));
+        StorPoolUtil.spLog("StorPoolDataMotionStrategy.copyAsync Creating snapshot=%s for StorPool template=%s",
+                volumeName, conn.getTemplateName());
+        final CopyCommandResult cmd = new CopyCommandResult(null, answer);
+        cmd.setResult(err);
+        callback.complete(cmd);
+    }
+
+ /** Delegates the live-migration capability check to the StorPool-specific test. */
+ @Override
+ public StrategyPriority canHandle(Map<VolumeInfo, DataStore> volumeMap, Host srcHost, Host destHost) {
+ return canHandleLiveMigrationOnStorPool(volumeMap, srcHost, destHost);
+ }
+
+ final StrategyPriority canHandleLiveMigrationOnStorPool(Map<VolumeInfo, DataStore> volumeMap, Host srcHost,
+ Host destHost) {
+ if (srcHost.getId() != destHost.getId() && isDestinationStorPoolPrimaryStorage(volumeMap)) {
+ return StrategyPriority.HIGHEST;
+ }
+ return StrategyPriority.CANT_HANDLE;
+ }
+
+    /** @return true when the map is non-empty and every destination store is StorPool-provided */
+    private boolean isDestinationStorPoolPrimaryStorage(Map<VolumeInfo, DataStore> volumeMap) {
+        if (!MapUtils.isNotEmpty(volumeMap)) {
+            return false;
+        }
+        for (DataStore dataStore : volumeMap.values()) {
+            StoragePoolVO pool = _storagePool.findById(dataStore.getId());
+            if (pool == null || !pool.getStorageProviderName().equals(StorPoolUtil.SP_PROVIDER_NAME)) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    /**
+     * Live-migrates a VM together with its volumes to StorPool-backed storage on
+     * another KVM host: creates destination volumes on StorPool, prepares the
+     * target host, then issues a storage-aware MigrateCommand. The callback is
+     * always completed (from the finally block) and the newly created volume is
+     * cleaned up when the migration fails.
+     */
+    @Override
+    public void copyAsync(Map<VolumeInfo, DataStore> volumeDataStoreMap, VirtualMachineTO vmTO, Host srcHost,
+            Host destHost, AsyncCompletionCallback<CopyCommandResult> callback) {
+        String errMsg = null;
+        String newVolName = null;
+        SpConnectionDesc conn = null;
+
+        try {
+            if (srcHost.getHypervisorType() != HypervisorType.KVM) {
+                throw new CloudRuntimeException(String.format("Invalid hypervisor type [%s]. Only KVM supported", srcHost.getHypervisorType()));
+            }
+
+            VMInstanceVO vmInstance = _vmDao.findById(vmTO.getId());
+            vmTO.setState(vmInstance.getState());
+            List<MigrateDiskInfo> migrateDiskInfoList = new ArrayList<MigrateDiskInfo>();
+
+            Map<String, MigrateCommand.MigrateDiskInfo> migrateStorage = new HashMap<>();
+            Map<VolumeInfo, VolumeInfo> srcVolumeInfoToDestVolumeInfo = new HashMap<>();
+
+            for (Map.Entry<VolumeInfo, DataStore> entry : volumeDataStoreMap.entrySet()) {
+                VolumeInfo srcVolumeInfo = entry.getKey();
+                DataStore destDataStore = entry.getValue();
+
+                VolumeVO srcVolume = _volumeDao.findById(srcVolumeInfo.getId());
+                StoragePoolVO destStoragePool = _storagePool.findById(destDataStore.getId());
+
+                VolumeVO destVolume = duplicateVolumeOnAnotherStorage(srcVolume, destStoragePool);
+
+                VolumeInfo destVolumeInfo = _volumeDataFactory.getVolume(destVolume.getId(), destDataStore);
+
+                destVolumeInfo.processEvent(Event.MigrationCopyRequested);
+                destVolumeInfo.processEvent(Event.MigrationCopySucceeded);
+                destVolumeInfo.processEvent(Event.MigrationRequested);
+
+                conn = StorPoolUtil.getSpConnection(destDataStore.getUuid(), destDataStore.getId(), _storagePoolDetails,
+                        _storagePool);
+                SpApiResponse resp = StorPoolUtil.volumeCreate(srcVolume.getUuid(), null, srcVolume.getSize(),
+                        vmTO.getUuid(), null, "volume", srcVolume.getMaxIops(), conn);
+
+                // BUGFIX: the original ignored a failed VolumeCreate and then
+                // crashed with an obscure NPE while reading the response body;
+                // fail fast so the catch/finally cleanup path runs instead.
+                if (resp.getError() != null) {
+                    throw new CloudRuntimeException(String.format(
+                            "Could not create StorPool volume for CloudStack volume [%s] due to [%s]",
+                            srcVolume.getUuid(), resp.getError().getDescr()));
+                }
+                newVolName = StorPoolUtil.getNameFromResponse(resp, true);
+
+                String volumeName = StorPoolUtil.getNameFromResponse(resp, false);
+                destVolume.setPath(StorPoolUtil.devPath(volumeName));
+                _volumeDao.update(destVolume.getId(), destVolume);
+                destVolume = _volumeDao.findById(destVolume.getId());
+
+                destVolumeInfo = _volumeDataFactory.getVolume(destVolume.getId(), destDataStore);
+
+                String destPath = generateDestPath(destHost, destStoragePool, destVolumeInfo);
+
+                MigrateCommand.MigrateDiskInfo migrateDiskInfo = configureMigrateDiskInfo(srcVolumeInfo, destPath);
+                migrateDiskInfoList.add(migrateDiskInfo);
+
+                migrateStorage.put(srcVolumeInfo.getPath(), migrateDiskInfo);
+
+                srcVolumeInfoToDestVolumeInfo.put(srcVolumeInfo, destVolumeInfo);
+            }
+
+            PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(vmTO);
+
+            try {
+                Answer pfma = _agentManager.send(destHost.getId(), pfmc);
+
+                if (pfma == null || !pfma.getResult()) {
+                    String details = pfma != null ? pfma.getDetails() : "null answer returned";
+                    errMsg = String.format("Unable to prepare for migration due to the following: %s", details);
+
+                    throw new AgentUnavailableException(errMsg, destHost.getId());
+                }
+            } catch (final OperationTimedoutException e) {
+                errMsg = String.format("Operation timed out due to %s", e.getMessage());
+                throw new AgentUnavailableException(errMsg, destHost.getId());
+            }
+
+            VMInstanceVO vm = _vmDao.findById(vmTO.getId());
+            boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId())
+                    .getName().equalsIgnoreCase("Windows");
+
+            MigrateCommand migrateCommand = new MigrateCommand(vmTO.getName(),
+                    destHost.getPrivateIpAddress(), isWindows, vmTO, true);
+            migrateCommand.setWait(StorageManager.KvmStorageOnlineMigrationWait.value());
+            migrateCommand.setMigrateStorage(migrateStorage);
+            migrateCommand.setMigrateStorageManaged(true);
+            migrateCommand.setMigrateDiskInfoList(migrateDiskInfoList);
+
+            boolean kvmAutoConvergence = StorageManager.KvmAutoConvergence.value();
+
+            migrateCommand.setAutoConvergence(kvmAutoConvergence);
+
+            MigrateAnswer migrateAnswer = (MigrateAnswer) _agentManager.send(srcHost.getId(), migrateCommand);
+
+            boolean success = migrateAnswer != null && migrateAnswer.getResult();
+
+            // Commit/rollback the per-volume state machines before failing loudly.
+            handlePostMigration(success, srcVolumeInfoToDestVolumeInfo, vmTO, destHost);
+
+            if (migrateAnswer == null) {
+                throw new CloudRuntimeException("Unable to get an answer to the migrate command");
+            }
+
+            if (!migrateAnswer.getResult()) {
+                errMsg = migrateAnswer.getDetails();
+
+                throw new CloudRuntimeException(errMsg);
+            }
+        } catch (AgentUnavailableException | OperationTimedoutException | CloudRuntimeException ex) {
+
+            errMsg = String.format(
+                    "Copy volume(s) of VM [%s] to storage(s) [%s] and VM to host [%s] failed in StorPoolDataMotionStrategy.copyAsync. Error message: [%s].",
+                    vmTO.getId(), srcHost.getId(), destHost.getId(), ex.getMessage());
+            log.error(errMsg, ex);
+
+            throw new CloudRuntimeException(errMsg);
+        } finally {
+            if (errMsg != null) {
+                deleteVolumeOnFail(newVolName, conn);
+            }
+            CopyCmdAnswer copyCmdAnswer = new CopyCmdAnswer(errMsg);
+
+            CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer);
+
+            result.setResult(errMsg);
+
+            callback.complete(result);
+        }
+    }
+
+    /** Best-effort removal of the destination volume created during a failed migration. */
+    private void deleteVolumeOnFail(String newVolName, SpConnectionDesc conn) {
+        if (newVolName == null || conn == null) {
+            return;
+        }
+        StorPoolUtil.volumeDelete(newVolName, conn);
+    }
+
+    /**
+     * Persists a copy of the volume record targeted at another storage pool.
+     * Instance/chain/path/folder are cleared so the new record starts detached;
+     * the previous pool is remembered in lastPoolId.
+     */
+    private VolumeVO duplicateVolumeOnAnotherStorage(Volume volume, StoragePoolVO storagePoolVO) {
+        VolumeVO copy = new VolumeVO(volume);
+        copy.setInstanceId(null);
+        copy.setChainInfo(null);
+        copy.setPath(null);
+        copy.setFolder(null);
+        copy.setPodId(storagePoolVO.getPodId());
+        copy.setPoolId(storagePoolVO.getId());
+        copy.setLastPoolId(volume.getPoolId());
+        return _volumeDao.persist(copy);
+    }
+
+    /**
+     * Finalizes a live storage migration for every (source, destination) volume
+     * pair. On success the source volumes are destroyed and their snapshots are
+     * re-pointed at the destination volumes; on failure the destination volumes
+     * are disconnected, access is revoked and they are expunged. All cleanup is
+     * best-effort: individual failures are logged at debug and do not abort the
+     * remaining pairs.
+     */
+    private void handlePostMigration(boolean success, Map<VolumeInfo, VolumeInfo> srcVolumeInfoToDestVolumeInfo,
+            VirtualMachineTO vmTO, Host destHost) {
+        if (!success) {
+            try {
+                // Ask the destination host to undo its migration preparation.
+                PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(vmTO);
+
+                pfmc.setRollback(true);
+
+                Answer pfma = _agentManager.send(destHost.getId(), pfmc);
+
+                if (pfma == null || !pfma.getResult()) {
+                    String details = pfma != null ? pfma.getDetails() : "null answer returned";
+                    String msg = "Unable to rollback prepare for migration due to the following: " + details;
+
+                    throw new AgentUnavailableException(msg, destHost.getId());
+                }
+            } catch (Exception e) {
+                log.debug("Failed to disconnect one or more (original) dest volumes", e);
+            }
+        }
+
+        for (Map.Entry<VolumeInfo, VolumeInfo> entry : srcVolumeInfoToDestVolumeInfo.entrySet()) {
+            VolumeInfo srcVolumeInfo = entry.getKey();
+            VolumeInfo destVolumeInfo = entry.getValue();
+
+            if (success) {
+                srcVolumeInfo.processEvent(Event.OperationSuccessed);
+                destVolumeInfo.processEvent(Event.OperationSuccessed);
+
+                // The destination volume takes over the source volume's UUID so
+                // callers keep resolving to the same logical volume.
+                _volumeDao.updateUuid(srcVolumeInfo.getId(), destVolumeInfo.getId());
+
+                VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId());
+
+                volumeVO.setFormat(ImageFormat.QCOW2);
+
+                _volumeDao.update(volumeVO.getId(), volumeVO);
+
+                try {
+                    // Destroy, then re-read to pick up the new state before expunging.
+                    _volumeService.destroyVolume(srcVolumeInfo.getId());
+
+                    srcVolumeInfo = _volumeDataFactory.getVolume(srcVolumeInfo.getId());
+
+                    AsyncCallFuture<VolumeApiResult> destroyFuture = _volumeService.expungeVolumeAsync(srcVolumeInfo);
+
+                    if (destroyFuture.get().isFailed()) {
+                        log.debug("Failed to clean up source volume on storage");
+                    }
+                } catch (Exception e) {
+                    log.debug("Failed to clean up source volume on storage", e);
+                }
+
+                // Update the volume ID for snapshots on secondary storage
+                if (!_snapshotDao.listByVolumeId(srcVolumeInfo.getId()).isEmpty()) {
+                    _snapshotDao.updateVolumeIds(srcVolumeInfo.getId(), destVolumeInfo.getId());
+                    _snapshotStoreDao.updateVolumeIds(srcVolumeInfo.getId(), destVolumeInfo.getId());
+                }
+            } else {
+                // Failure path: tear the destination volume down in the reverse
+                // order of setup — disconnect, revoke access, then expunge.
+                try {
+                    disconnectHostFromVolume(destHost, destVolumeInfo.getPoolId(), destVolumeInfo.getPath());
+                } catch (Exception e) {
+                    log.debug("Failed to disconnect (new) dest volume", e);
+                }
+
+                try {
+                    _volumeService.revokeAccess(destVolumeInfo, destHost, destVolumeInfo.getDataStore());
+                } catch (Exception e) {
+                    log.debug("Failed to revoke access from dest volume", e);
+                }
+
+                destVolumeInfo.processEvent(Event.OperationFailed);
+                srcVolumeInfo.processEvent(Event.OperationFailed);
+
+                try {
+                    _volumeService.destroyVolume(destVolumeInfo.getId());
+
+                    destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId());
+
+                    AsyncCallFuture<VolumeApiResult> destroyFuture = _volumeService.expungeVolumeAsync(destVolumeInfo);
+
+                    if (destroyFuture.get().isFailed()) {
+                        log.debug("Failed to clean up dest volume on storage");
+                    }
+                } catch (Exception e) {
+                    log.debug("Failed to clean up dest volume on storage", e);
+                }
+            }
+        }
+    }
+
+ private String generateDestPath(Host destHost, StoragePoolVO destStoragePool, VolumeInfo destVolumeInfo) {
+ return connectHostToVolume(destHost, destVolumeInfo.getPoolId(), destVolumeInfo.getPath());
+ }
+
+    /**
+     * Adds the target on the host (and cluster peers) and returns the first
+     * connected path reported back by the agent.
+     */
+    private String connectHostToVolume(Host host, long storagePoolId, String iqn) {
+        ModifyTargetsCommand addTargetCmd = getModifyTargetsCommand(storagePoolId, iqn, true);
+        List<String> connectedPaths = sendModifyTargetsCommand(addTargetCmd, host.getId());
+        return connectedPaths.get(0);
+    }
+
+    /**
+     * Removes the target from the host (and cluster peers), disconnecting the
+     * volume. The returned paths are not needed and are discarded.
+     */
+    private void disconnectHostFromVolume(Host host, long storagePoolId, String iqn) {
+        ModifyTargetsCommand removeTargetCmd = getModifyTargetsCommand(storagePoolId, iqn, false);
+        sendModifyTargetsCommand(removeTargetCmd, host.getId());
+    }
+
+    /**
+     * Builds a ModifyTargetsCommand describing a single target (identified by
+     * its IQN and the pool's connection details) to be added or removed on
+     * every host in the cluster.
+     *
+     * @param add true to add the target, false to remove it
+     */
+    private ModifyTargetsCommand getModifyTargetsCommand(long storagePoolId, String iqn, boolean add) {
+        StoragePoolVO pool = _storagePoolDao.findById(storagePoolId);
+
+        Map<String, String> target = new HashMap<>();
+        target.put(ModifyTargetsCommand.IQN, iqn);
+        target.put(ModifyTargetsCommand.STORAGE_TYPE, pool.getPoolType().name());
+        target.put(ModifyTargetsCommand.STORAGE_UUID, pool.getUuid());
+        target.put(ModifyTargetsCommand.STORAGE_HOST, pool.getHostAddress());
+        target.put(ModifyTargetsCommand.STORAGE_PORT, String.valueOf(pool.getPort()));
+
+        List<Map<String, String>> targets = new ArrayList<>();
+        targets.add(target);
+
+        ModifyTargetsCommand command = new ModifyTargetsCommand();
+        command.setTargets(targets);
+        command.setApplyToAllHostsInCluster(true);
+        command.setAdd(add);
+        command.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC);
+
+        return command;
+    }
+
+    /**
+     * Sends the modify-targets command to the given host and returns the
+     * connected paths from the answer.
+     *
+     * @throws CloudRuntimeException when no answer is received (easySend
+     *         returned null) or the host reports failure
+     */
+    private List<String> sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) {
+        ModifyTargetsAnswer answer = (ModifyTargetsAnswer) _agentManager.easySend(hostId, cmd);
+
+        if (answer == null) {
+            throw new CloudRuntimeException("Unable to get an answer to the modify targets command");
+        }
+        if (!answer.getResult()) {
+            throw new CloudRuntimeException("Unable to modify targets on the following host: " + hostId);
+        }
+
+        return answer.getConnectedPaths();
+    }
+
+ protected MigrateCommand.MigrateDiskInfo configureMigrateDiskInfo(VolumeInfo srcVolumeInfo, String destPath) {
+ return new MigrateCommand.MigrateDiskInfo(srcVolumeInfo.getPath(),
+ MigrateCommand.MigrateDiskInfo.DiskType.BLOCK, MigrateCommand.MigrateDiskInfo.DriverType.RAW,
+ MigrateCommand.MigrateDiskInfo.Source.DEV, destPath);
+ }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolConfigurationManager.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolConfigurationManager.java
new file mode 100644
index 0000000000..782d813381
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolConfigurationManager.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.snapshot;
+
+import org.apache.cloudstack.framework.config.ConfigKey;
+import org.apache.cloudstack.framework.config.Configurable;
+
+/**
+ * Global/cluster/pool scoped configuration keys for the StorPool storage
+ * plugin, exposed through CloudStack's {@link Configurable} framework.
+ */
+public class StorPoolConfigurationManager implements Configurable {
+
+    // When true, snapshot backups to secondary storage are bypassed for StorPool volumes.
+    public static final ConfigKey<Boolean> BypassSecondaryStorage = new ConfigKey<Boolean>(Boolean.class, "sp.bypass.secondary.storage", "Advanced", "false",
+            "For StorPool Managed storage backup to secondary", true, ConfigKey.Scope.Global, null);
+    // Per-cluster StorPool cluster id used for multi-cluster authorization.
+    public static final ConfigKey<String> StorPoolClusterId = new ConfigKey<String>(String.class, "sp.cluster.id", "Advanced", "n/a",
+            "For StorPool multi cluster authorization", true, ConfigKey.Scope.Cluster, null);
+    // Fixed garbled description text ("definse if there is a need to be used alternative endpoint").
+    public static final ConfigKey<Boolean> AlternativeEndPointEnabled = new ConfigKey<Boolean>(Boolean.class, "sp.enable.alternative.endpoint", "Advanced", "false",
+            "Used for StorPool primary storage, defines if an alternative endpoint should be used", true, ConfigKey.Scope.StoragePool, null);
+
+    public static final ConfigKey<String> AlternativeEndpoint = new ConfigKey<String>(String.class, "sp.alternative.endpoint", "Advanced", "",
+            "Used for StorPool primary storage for an alternative endpoint. Structure of the endpoint is - SP_API_HTTP=address:port;SP_AUTH_TOKEN=token;SP_TEMPLATE=template_name", true, ConfigKey.Scope.StoragePool, null);
+
+    @Override
+    public String getConfigComponentName() {
+        return StorPoolConfigurationManager.class.getSimpleName();
+    }
+
+    @Override
+    public ConfigKey<?>[] getConfigKeys() {
+        return new ConfigKey<?>[] { BypassSecondaryStorage, StorPoolClusterId, AlternativeEndPointEnabled, AlternativeEndpoint };
+    }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolSnapshotStrategy.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolSnapshotStrategy.java
new file mode 100644
index 0000000000..f66617d8f3
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolSnapshotStrategy.java
@@ -0,0 +1,289 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.snapshot;
+
+import java.util.List;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event;
+import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State;
+import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
+import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService;
+import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy;
+import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.datastore.util.StorPoolHelper;
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil;
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpApiResponse;
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpConnectionDesc;
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
+
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
+import com.cloud.storage.DataStoreRole;
+import com.cloud.storage.Snapshot;
+import com.cloud.storage.SnapshotVO;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.SnapshotDao;
+import com.cloud.storage.dao.SnapshotDetailsDao;
+import com.cloud.storage.dao.SnapshotDetailsVO;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.utils.fsm.NoTransitionException;
+
+
+@Component
+/**
+ * Snapshot strategy for volumes backed by StorPool primary storage. It claims
+ * only DELETE operations for snapshots that have a StorPool name recorded, and
+ * performs deletion on the StorPool cluster before cleaning up the database.
+ */
+public class StorPoolSnapshotStrategy implements SnapshotStrategy {
+    private static final Logger log = Logger.getLogger(StorPoolSnapshotStrategy.class);
+
+    @Inject
+    private SnapshotDao _snapshotDao;
+    @Inject
+    private PrimaryDataStoreDao _primaryDataStoreDao;
+    @Inject
+    private VolumeDao _volumeDao;
+    @Inject
+    private SnapshotDataStoreDao _snapshotStoreDao;
+    @Inject
+    private SnapshotDetailsDao _snapshotDetailsDao;
+    @Inject
+    private SnapshotService snapshotSvr;
+    @Inject
+    private SnapshotDataFactory snapshotDataFactory;
+    @Inject
+    private StoragePoolDetailsDao storagePoolDetailsDao;
+
+    /**
+     * Drives the snapshot state machine through BackupToSecondary and
+     * OperationSucceeded without copying data; no actual backup happens here.
+     * On a failed transition it attempts to mark the snapshot as failed.
+     */
+    @Override
+    public SnapshotInfo backupSnapshot(SnapshotInfo snapshotInfo) {
+        SnapshotObject snapshotObj = (SnapshotObject) snapshotInfo;
+        try {
+            snapshotObj.processEvent(Snapshot.Event.BackupToSecondary);
+            snapshotObj.processEvent(Snapshot.Event.OperationSucceeded);
+        } catch (NoTransitionException ex) {
+            StorPoolUtil.spLog("Failed to change state: " + ex.toString());
+            try {
+                snapshotObj.processEvent(Snapshot.Event.OperationFailed);
+            } catch (NoTransitionException ex2) {
+                StorPoolUtil.spLog("Failed to change state: " + ex2.toString());
+            }
+        }
+        return snapshotInfo;
+    }
+
+    /**
+     * Deletes the snapshot from the StorPool cluster and, on success, removes
+     * its detail rows and updates the database records.
+     *
+     * @return true when the StorPool snapshot was removed and the DB cleaned up
+     * @throws CloudRuntimeException when the StorPool connection or deletion fails
+     */
+    @Override
+    public boolean deleteSnapshot(Long snapshotId) {
+
+        final SnapshotVO snapshotVO = _snapshotDao.findById(snapshotId);
+        VolumeVO volume = _volumeDao.findByIdIncludingRemoved(snapshotVO.getVolumeId());
+        String name = StorPoolHelper.getSnapshotName(snapshotId, snapshotVO.getUuid(), _snapshotStoreDao, _snapshotDetailsDao);
+        boolean res = false;
+        // clean-up snapshot from StorPool storage pools
+        StoragePoolVO storage = _primaryDataStoreDao.findById(volume.getPoolId());
+        if (storage.getStorageProviderName().equals(StorPoolUtil.SP_PROVIDER_NAME)) {
+            try {
+                SpConnectionDesc conn = StorPoolUtil.getSpConnection(storage.getUuid(), storage.getId(), storagePoolDetailsDao, _primaryDataStoreDao);
+                SpApiResponse resp = StorPoolUtil.snapshotDelete(name, conn);
+                if (resp.getError() != null) {
+                    final String err = String.format("Failed to clean-up Storpool snapshot %s. Error: %s", name, resp.getError());
+                    StorPoolUtil.spLog(err);
+                } else {
+                    SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(snapshotId, snapshotVO.getUuid());
+                    if (snapshotDetails != null) {
+                        _snapshotDetailsDao.removeDetails(snapshotId);
+                    }
+                    res = deleteSnapshotFromDb(snapshotId);
+                    // fixed log typo: "successfuly" -> "successfully"
+                    StorPoolUtil.spLog("StorpoolSnapshotStrategy.deleteSnapshot: executed successfully=%s, snapshot uuid=%s, name=%s", res, snapshotVO.getUuid(), name);
+                }
+            } catch (Exception e) {
+                String errMsg = String.format("Cannot delete snapshot due to %s", e.getMessage());
+                throw new CloudRuntimeException(errMsg);
+            }
+        }
+
+        return res;
+    }
+
+    /**
+     * Claims only DELETE operations for snapshots whose StorPool name can be
+     * resolved; otherwise removes any stale detail row and declines.
+     */
+    @Override
+    public StrategyPriority canHandle(Snapshot snapshot, SnapshotOperation op) {
+        StorPoolUtil.spLog("StorpoolSnapshotStrategy.canHandle: snapshot=%s, uuid=%s, op=%s", snapshot.getName(), snapshot.getUuid(), op);
+
+        if (op != SnapshotOperation.DELETE) {
+            return StrategyPriority.CANT_HANDLE;
+        }
+
+        String name = StorPoolHelper.getSnapshotName(snapshot.getId(), snapshot.getUuid(), _snapshotStoreDao, _snapshotDetailsDao);
+        if (name != null) {
+            StorPoolUtil.spLog("StorpoolSnapshotStrategy.canHandle: globalId=%s", name);
+
+            return StrategyPriority.HIGHEST;
+        }
+        // Not a StorPool snapshot: drop the stale detail row, if any, before declining.
+        SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(snapshot.getId(), snapshot.getUuid());
+        if (snapshotDetails != null) {
+            _snapshotDetailsDao.remove(snapshotDetails.getId());
+        }
+        return StrategyPriority.CANT_HANDLE;
+    }
+
+    /**
+     * Walks up the snapshot chain, deleting snapshots on the image store while
+     * they are in a Destroying/Destroyed/Error state and have no children.
+     * Returns the result of the first deletion attempt (best-effort cleanup).
+     */
+    private boolean deleteSnapshotChain(SnapshotInfo snapshot) {
+        log.debug("delete snapshot chain for snapshot: " + snapshot.getId());
+        boolean result = false;
+        boolean resultIsSet = false;
+        try {
+            while (snapshot != null &&
+                    (snapshot.getState() == Snapshot.State.Destroying || snapshot.getState() == Snapshot.State.Destroyed || snapshot.getState() == Snapshot.State.Error)) {
+                SnapshotInfo child = snapshot.getChild();
+
+                if (child != null) {
+                    log.debug("the snapshot has child, can't delete it on the storage");
+                    break;
+                }
+                log.debug("Snapshot: " + snapshot.getId() + " doesn't have children, so it's ok to delete it and its parents");
+                SnapshotInfo parent = snapshot.getParent();
+                boolean deleted = false;
+                if (parent != null) {
+                    if (parent.getPath() != null && parent.getPath().equalsIgnoreCase(snapshot.getPath())) {
+                        // An empty delta shares its parent's path - nothing to remove on storage.
+                        log.debug("for empty delta snapshot, only mark it as destroyed in db");
+                        snapshot.processEvent(Event.DestroyRequested);
+                        snapshot.processEvent(Event.OperationSuccessed);
+                        deleted = true;
+                        if (!resultIsSet) {
+                            result = true;
+                            resultIsSet = true;
+                        }
+                    }
+                }
+                if (!deleted) {
+                    SnapshotInfo snap = snapshotDataFactory.getSnapshot(snapshot.getId(), DataStoreRole.Image);
+                    // Only delete from the image store when the path is not a StorPool volume name.
+                    if (StorPoolStorageAdaptor.getVolumeNameFromPath(snap.getPath(), true) == null) {
+                        try {
+                            boolean r = snapshotSvr.deleteSnapshot(snapshot);
+                            if (r) {
+                                List<SnapshotInfo> cacheSnaps = snapshotDataFactory.listSnapshotOnCache(snapshot.getId());
+                                for (SnapshotInfo cacheSnap : cacheSnaps) {
+                                    log.debug("Delete snapshot " + snapshot.getId() + " from image cache store: " + cacheSnap.getDataStore().getName());
+                                    cacheSnap.delete();
+                                }
+                            }
+                            if (!resultIsSet) {
+                                result = r;
+                                resultIsSet = true;
+                            }
+                        } catch (Exception e) {
+                            log.debug("Failed to delete snapshot on storage. ", e);
+                        }
+                    }
+                } else {
+                    result = true;
+                }
+                snapshot = parent;
+            }
+        } catch (Exception e) {
+            log.debug("delete snapshot failed: ", e);
+        }
+        return result;
+    }
+
+    /**
+     * Transitions the snapshot's DB records to Destroyed, handling each
+     * possible current state. Returns false when the destroy transition or
+     * chain deletion fails.
+     *
+     * @throws InvalidParameterValueException when the snapshot is in a state
+     *         that does not allow deletion
+     */
+    private boolean deleteSnapshotFromDb(Long snapshotId) {
+        SnapshotVO snapshotVO = _snapshotDao.findById(snapshotId);
+
+        if (snapshotVO.getState() == Snapshot.State.Allocated) {
+            _snapshotDao.remove(snapshotId);
+            return true;
+        }
+
+        if (snapshotVO.getState() == Snapshot.State.Destroyed) {
+            return true;
+        }
+
+        if (Snapshot.State.Error.equals(snapshotVO.getState())) {
+            List<SnapshotDataStoreVO> storeRefs = _snapshotStoreDao.findBySnapshotId(snapshotId);
+            for (SnapshotDataStoreVO ref : storeRefs) {
+                _snapshotStoreDao.expunge(ref.getId());
+            }
+            _snapshotDao.remove(snapshotId);
+            return true;
+        }
+
+        if (snapshotVO.getState() == Snapshot.State.CreatedOnPrimary) {
+            snapshotVO.setState(Snapshot.State.Destroyed);
+            _snapshotDao.update(snapshotId, snapshotVO);
+            return true;
+        }
+
+        // NOTE: the Error clause below is unreachable - Error already returned above.
+        if (!Snapshot.State.BackedUp.equals(snapshotVO.getState()) && !Snapshot.State.Error.equals(snapshotVO.getState()) &&
+                !Snapshot.State.Destroying.equals(snapshotVO.getState())) {
+            // fixed message typo: "snapshotshot" -> "snapshot"
+            throw new InvalidParameterValueException("Can't delete snapshot " + snapshotId + " due to it is in " + snapshotVO.getState() + " Status");
+        }
+
+        SnapshotInfo snapshotOnImage = snapshotDataFactory.getSnapshot(snapshotId, DataStoreRole.Image);
+        if (snapshotOnImage == null) {
+            log.debug("Can't find snapshot on backup storage, delete it in db");
+            _snapshotDao.remove(snapshotId);
+            return true;
+        }
+
+        SnapshotObject obj = (SnapshotObject)snapshotOnImage;
+        try {
+            obj.processEvent(Snapshot.Event.DestroyRequested);
+        } catch (NoTransitionException e) {
+            log.debug("Failed to set the state to destroying: ", e);
+            return false;
+        }
+
+        try {
+            boolean result = deleteSnapshotChain(snapshotOnImage);
+            obj.processEvent(Snapshot.Event.OperationSucceeded);
+            if (result) {
+                SnapshotDataStoreVO snapshotOnPrimary = _snapshotStoreDao.findBySnapshot(snapshotId, DataStoreRole.Primary);
+                if (snapshotOnPrimary != null) {
+                    snapshotOnPrimary.setState(State.Destroyed);
+                    _snapshotStoreDao.update(snapshotOnPrimary.getId(), snapshotOnPrimary);
+                }
+            }
+        } catch (Exception e) {
+            log.debug("Failed to delete snapshot: ", e);
+            try {
+                obj.processEvent(Snapshot.Event.OperationFailed);
+            } catch (NoTransitionException e1) {
+                // fixed: previously logged the outer exception "e" instead of "e1"
+                log.debug("Failed to change snapshot state: " + e1.toString());
+            }
+            return false;
+        }
+        return true;
+    }
+
+    // Snapshot creation is handled elsewhere; this strategy only deletes.
+    @Override
+    public SnapshotInfo takeSnapshot(SnapshotInfo snapshot) {
+        return null;
+    }
+
+    // Revert is not supported by this strategy.
+    @Override
+    public boolean revertSnapshot(SnapshotInfo snapshot) {
+        return false;
+    }
+
+    @Override
+    public void postSnapshotCreation(SnapshotInfo snapshot) {
+    }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolVMSnapshotStrategy.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolVMSnapshotStrategy.java
new file mode 100644
index 0000000000..ec7e89a239
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolVMSnapshotStrategy.java
@@ -0,0 +1,387 @@
+//
+//Licensed to the Apache Software Foundation (ASF) under one
+//or more contributor license agreements. See the NOTICE file
+//distributed with this work for additional information
+//regarding copyright ownership. The ASF licenses this file
+//to you under the Apache License, Version 2.0 (the
+//"License"); you may not use this file except in compliance
+//with the License. You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing,
+//software distributed under the License is distributed on an
+//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+//KIND, either express or implied. See the License for the
+//specific language governing permissions and limitations
+//under the License.
+//
+package org.apache.cloudstack.storage.snapshot;
+
+import java.util.List;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil;
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpApiResponse;
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpConnectionDesc;
+import org.apache.cloudstack.storage.to.VolumeObjectTO;
+import org.apache.cloudstack.storage.vmsnapshot.DefaultVMSnapshotStrategy;
+import org.apache.cloudstack.storage.vmsnapshot.VMSnapshotHelper;
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
+
+import com.cloud.agent.api.VMSnapshotTO;
+import com.cloud.event.EventTypes;
+import com.cloud.event.UsageEventUtils;
+import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
+import com.cloud.storage.DiskOfferingVO;
+import com.cloud.storage.VolumeDetailVO;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.DiskOfferingDao;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.storage.dao.VolumeDetailsDao;
+import com.cloud.uservm.UserVm;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.utils.fsm.NoTransitionException;
+import com.cloud.vm.UserVmVO;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.dao.UserVmDao;
+import com.cloud.vm.snapshot.VMSnapshot;
+import com.cloud.vm.snapshot.VMSnapshotDetailsVO;
+import com.cloud.vm.snapshot.VMSnapshotVO;
+import com.cloud.vm.snapshot.dao.VMSnapshotDao;
+import com.cloud.vm.snapshot.dao.VMSnapshotDetailsDao;
+import com.google.gson.JsonArray;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+
+@Component
+public class StorPoolVMSnapshotStrategy extends DefaultVMSnapshotStrategy {
+    private static final Logger log = Logger.getLogger(StorPoolVMSnapshotStrategy.class);
+
+    // Drives the VM-snapshot state machine and resolves the VM's volume list.
+    @Inject
+    private VMSnapshotHelper vmSnapshotHelper;
+    @Inject
+    private UserVmDao userVmDao;
+    @Inject
+    private VMSnapshotDao vmSnapshotDao;
+    @Inject
+    private VolumeDao volumeDao;
+    @Inject
+    private DiskOfferingDao diskOfferingDao;
+    // Primary storage pool DAO (named "storagePool" despite being a DAO).
+    @Inject
+    private PrimaryDataStoreDao storagePool;
+    // Per-VM-snapshot detail rows mapping volumes to StorPool snapshot paths.
+    @Inject
+    private VMSnapshotDetailsDao vmSnapshotDetailsDao;
+    @Inject
+    private VolumeDataFactory volFactory;
+    @Inject
+    private VolumeDetailsDao volumeDetailsDao;
+    // Used to build the StorPool API connection descriptor for a pool.
+    @Inject
+    private StoragePoolDetailsDao storagePoolDetailsDao;
+    @Inject
+    private DataStoreManager dataStoreManager;
+    // NOTE(review): never assigned in the visible code - confirm where _wait is initialized.
+    int _wait;
+
+ @Override
+ public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) {
+ log.info("KVMVMSnapshotStrategy take snapshot");
+ UserVm userVm = userVmDao.findById(vmSnapshot.getVmId());
+ VMSnapshotVO vmSnapshotVO = (VMSnapshotVO) vmSnapshot;
+
+ try {
+ vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshotVO, VMSnapshot.Event.CreateRequested);
+ } catch (NoTransitionException e) {
+ throw new CloudRuntimeException("No transiontion " + e.getMessage());
+ }
+
+ boolean result = false;
+ try {
+
+ List<VolumeObjectTO> volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId());
+ DataStore dataStore = dataStoreManager.getPrimaryDataStore(volumeTOs.get(0).getDataStore().getUuid());
+ SpConnectionDesc conn = StorPoolUtil.getSpConnection(dataStore.getUuid(), dataStore.getId(), storagePoolDetailsDao, storagePool);
+
+ long prev_chain_size = 0;
+ long virtual_size = 0;
+ for (VolumeObjectTO volume : volumeTOs) {
+ virtual_size += volume.getSize();
+ VolumeVO volumeVO = volumeDao.findById(volume.getId());
+ prev_chain_size += volumeVO.getVmSnapshotChainSize() == null ? 0 : volumeVO.getVmSnapshotChainSize();
+ }
+
+ VMSnapshotTO current = null;
+ VMSnapshotVO currentSnapshot = vmSnapshotDao.findCurrentSnapshotByVmId(userVm.getId());
+ if (currentSnapshot != null) {
+ current = vmSnapshotHelper.getSnapshotWithParents(currentSnapshot);
+ }
+
+ if (current == null) {
+ vmSnapshotVO.setParent(null);
+ } else {
+ vmSnapshotVO.setParent(current.getId());
+ }
+
+ SpApiResponse resp = StorPoolUtil.volumesGroupSnapshot(volumeTOs, userVm.getUuid(), vmSnapshotVO.getUuid(), "group", conn);
+ JsonObject obj = resp.fullJson.getAsJsonObject();
+ JsonArray snapshots = obj.getAsJsonObject("data").getAsJsonArray("snapshots");
+ StorPoolUtil.spLog("Volumes=%s attached to virtual machine", volumeTOs.toString());
+ for (VolumeObjectTO vol : volumeTOs) {
+ for (JsonElement jsonElement : snapshots) {
+ JsonObject snapshotObject = jsonElement.getAsJsonObject();
+ String snapshot = StorPoolUtil
+ .devPath(snapshotObject.getAsJsonPrimitive(StorPoolUtil.GLOBAL_ID).getAsString());
+ if (snapshotObject.getAsJsonPrimitive("volume").getAsString().equals(StorPoolStorageAdaptor.getVolumeNameFromPath(vol.getPath(), true))
+ || snapshotObject.getAsJsonPrimitive("volumeGlobalId").getAsString().equals(StorPoolStorageAdaptor.getVolumeNameFromPath(vol.getPath(), false))) {
+ VMSnapshotDetailsVO vmSnapshotDetailsVO = new VMSnapshotDetailsVO(vmSnapshot.getId(), vol.getUuid(), snapshot, false);
+ vmSnapshotDetailsDao.persist(vmSnapshotDetailsVO);
+ Long poolId = volumeDao.findById(vol.getId()).getPoolId();
+ if (poolId != null) {
+ VMSnapshotDetailsVO vmSnapshotDetailStoragePoolId = new VMSnapshotDetailsVO(
+ vmSnapshot.getId(), StorPoolUtil.SP_STORAGE_POOL_ID, String.valueOf(poolId), false);
+ vmSnapshotDetailsDao.persist(vmSnapshotDetailStoragePoolId);
+ }
+ StorPoolUtil.spLog("Snapshot=%s of volume=%s for a group snapshot=%s.", snapshot, vol.getUuid(), vmSnapshot.getUuid());
+ }
+ }
+ }
+
+ if (resp.getError() == null) {
+ StorPoolUtil.spLog("StorpoolVMSnapshotStrategy.takeSnapshot answer=%s", resp.getError());
+ finalizeCreate(vmSnapshotVO, volumeTOs);
+ result = vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationSucceeded);
+ long new_chain_size = 0;
+ for (VolumeObjectTO volumeObjectTO : volumeTOs) {
+ publishUsageEvents(EventTypes.EVENT_VM_SNAPSHOT_CREATE, vmSnapshot, userVm, volumeObjectTO);
+ new_chain_size += volumeObjectTO.getSize();
+ log.info("EventTypes.EVENT_VM_SNAPSHOT_CREATE publishUsageEvent" + volumeObjectTO);
+ }
+ publishUsageEvents(EventTypes.EVENT_VM_SNAPSHOT_ON_PRIMARY, vmSnapshot, userVm, new_chain_size - prev_chain_size, virtual_size);
+ } else {
+ throw new CloudRuntimeException("Could not create vm snapshot");
+ }
+ return vmSnapshot;
+ } catch (Exception e) {
+ log.debug("Could not create VM snapshot:" + e.getMessage());
+ throw new CloudRuntimeException("Could not create VM snapshot:" + e.getMessage());
+ } finally {
+ if (!result) {
+ try {
+ vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed);
+ log.info(String.format("VMSnapshot.Event.OperationFailed vmSnapshot=%s", vmSnapshot));
+ } catch (NoTransitionException nte) {
+ log.error("Cannot set vm state:" + nte.getMessage());
+ }
+ }
+ }
+ }
+
+    /**
+     * This strategy applies only when every volume of the VM resides on a
+     * StorPool-backed primary storage pool.
+     */
+    @Override
+    public StrategyPriority canHandle(VMSnapshot vmSnapshot) {
+        final Long vmId = vmSnapshot.getVmId();
+        return areAllVolumesOnStorPool(vmId);
+    }
+
+    /**
+     * Memory snapshots are not supported by the StorPool strategy; for
+     * disk-only snapshots the decision depends on all volumes being on StorPool.
+     */
+    public StrategyPriority canHandle(Long vmId, Long rootPoolId, boolean snapshotMemory) {
+        return snapshotMemory ? StrategyPriority.CANT_HANDLE : areAllVolumesOnStorPool(vmId);
+    }
+
+    /**
+     * Returns HIGHEST when the VM has at least one volume and every volume's
+     * pool uses the StorPool storage provider; CANT_HANDLE otherwise.
+     */
+    private StrategyPriority areAllVolumesOnStorPool(Long vmId) {
+        List<VolumeObjectTO> volumes = vmSnapshotHelper.getVolumeTOList(vmId);
+        if (volumes == null || volumes.isEmpty()) {
+            return StrategyPriority.CANT_HANDLE;
+        }
+        boolean allOnStorPool = true;
+        for (VolumeObjectTO vol : volumes) {
+            StoragePoolVO pool = storagePool.findById(vol.getPoolId());
+            if (!pool.getStorageProviderName().equals(StorPoolUtil.SP_PROVIDER_NAME)) {
+                allOnStorPool = false;
+                break;
+            }
+        }
+        return allOnStorPool ? StrategyPriority.HIGHEST : StrategyPriority.CANT_HANDLE;
+    }
+
+    /**
+     * Deletes the per-volume StorPool snapshots that make up a VM snapshot,
+     * removes the detail rows and DB records, and publishes usage events.
+     *
+     * @return true when every snapshot in the group was deleted
+     * @throws CloudRuntimeException when the state transition, the StorPool
+     *         connection, or any individual snapshot deletion fails
+     */
+    @Override
+    public boolean deleteVMSnapshot(VMSnapshot vmSnapshot) {
+        UserVmVO userVm = userVmDao.findById(vmSnapshot.getVmId());
+        VMSnapshotVO vmSnapshotVO = (VMSnapshotVO) vmSnapshot;
+        try {
+            vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.ExpungeRequested);
+        } catch (NoTransitionException e) {
+            log.debug("Failed to change vm snapshot state with event ExpungeRequested");
+            throw new CloudRuntimeException(
+                    "Failed to change vm snapshot state with event ExpungeRequested: " + e.getMessage());
+        }
+
+        List<VolumeObjectTO> volumeTOs = vmSnapshotHelper.getVolumeTOList(vmSnapshot.getVmId());
+        DataStore dataStore = dataStoreManager.getPrimaryDataStore(volumeTOs.get(0).getDataStore().getUuid());
+        // removed a no-op try/catch that only rethrew the CloudRuntimeException unchanged
+        SpConnectionDesc conn = StorPoolUtil.getSpConnection(dataStore.getUuid(), dataStore.getId(), storagePoolDetailsDao, storagePool);
+
+        SpApiResponse resp = null;
+        for (VolumeObjectTO volumeObjectTO : volumeTOs) {
+            String err = null;
+            // NOTE(review): findDetail may return null, which would NPE on getValue() below,
+            // and a null snapshotName is still passed to snapshotDelete — confirm intended.
+            VMSnapshotDetailsVO snapshotDetailsVO = vmSnapshotDetailsDao.findDetail(vmSnapshot.getId(), volumeObjectTO.getUuid());
+            String snapshotName = StorPoolStorageAdaptor.getVolumeNameFromPath(snapshotDetailsVO.getValue(), true);
+            if (snapshotName == null) {
+                // fixed message typo: "uui" -> "uuid"
+                err = String.format("Could not find StorPool's snapshot vm snapshot uuid=%s and volume uuid=%s",
+                        vmSnapshot.getUuid(), volumeObjectTO.getUuid());
+                log.error("Could not delete snapshot for vm:" + err);
+            }
+            StorPoolUtil.spLog("StorpoolVMSnapshotStrategy.deleteVMSnapshot snapshotName=%s", snapshotName);
+            resp = StorPoolUtil.snapshotDelete(snapshotName, conn);
+            if (resp.getError() != null) {
+                err = String.format("Could not delete storpool vm error=%s", resp.getError());
+                log.error("Could not delete snapshot for vm:" + err);
+            } else {
+                // do we need to clean database?
+                if (snapshotDetailsVO != null) {
+                    vmSnapshotDetailsDao.remove(snapshotDetailsVO.getId());
+                }
+            }
+            if (err != null) {
+                // fixed message typo: "gropusnapshot" -> "group snapshot"
+                StorPoolUtil.spLog(
+                        "StorpoolVMSnapshotStrategy.deleteVMSnapshot delete snapshot=%s of group snapshot=%s failed due to %s",
+                        snapshotName, userVm.getInstanceName(), err);
+                throw new CloudRuntimeException("Delete vm snapshot " + vmSnapshot.getName() + " of vm "
+                        + userVm.getInstanceName() + " failed due to " + err);
+            }
+        }
+        vmSnapshotDetailsDao.removeDetails(vmSnapshot.getId());
+
+        finalizeDelete(vmSnapshotVO, volumeTOs);
+        vmSnapshotDao.remove(vmSnapshot.getId());
+
+        // Report the entire chain as released from primary storage.
+        long full_chain_size = 0;
+        for (VolumeObjectTO volumeTo : volumeTOs) {
+            publishUsageEvents(EventTypes.EVENT_VM_SNAPSHOT_DELETE, vmSnapshot, userVm, volumeTo);
+            full_chain_size += volumeTo.getSize();
+        }
+        publishUsageEvents(EventTypes.EVENT_VM_SNAPSHOT_OFF_PRIMARY, vmSnapshot, userVm, full_chain_size, 0L);
+        return true;
+    }
+
+ @Override
+ public boolean revertVMSnapshot(VMSnapshot vmSnapshot) {
+ log.debug("Revert vm snapshot");
+ VMSnapshotVO vmSnapshotVO = (VMSnapshotVO) vmSnapshot;
+ UserVmVO userVm = userVmDao.findById(vmSnapshot.getVmId());
+
+ if (userVm.getState() == VirtualMachine.State.Running && vmSnapshotVO.getType() == VMSnapshot.Type.Disk) {
+ throw new CloudRuntimeException("Virtual machine should be in stopped state for revert operation");
+ }
+
+ try {
+ vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshotVO, VMSnapshot.Event.RevertRequested);
+ } catch (NoTransitionException e) {
+ throw new CloudRuntimeException(e.getMessage());
+ }
+
+ boolean result = false;
+ try {
+ List<VolumeObjectTO> volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId());
+
+ DataStore dataStore = dataStoreManager.getPrimaryDataStore(volumeTOs.get(0).getDataStore().getUuid());
+ SpConnectionDesc conn = StorPoolUtil.getSpConnection(dataStore.getUuid(), dataStore.getId(), storagePoolDetailsDao, storagePool);
+ for (VolumeObjectTO volumeObjectTO : volumeTOs) {
+ String err = null;
+ VMSnapshotDetailsVO snapshotDetailsVO = vmSnapshotDetailsDao.findDetail(vmSnapshot.getId(),
+ volumeObjectTO.getUuid());
+ String snapshotName = StorPoolStorageAdaptor.getVolumeNameFromPath(snapshotDetailsVO.getValue(), true);
+ if (snapshotName == null) {
+ err = String.format("Could not find StorPool's snapshot vm snapshot uuid=%s and volume uuid=%s",
+ vmSnapshot.getUuid(), volumeObjectTO.getUuid());
+ log.error("Could not revert snapshot for vm: " + err);
+ }
+ String volumeName = StorPoolStorageAdaptor.getVolumeNameFromPath(volumeObjectTO.getPath(), true);
+ VolumeDetailVO detail = volumeDetailsDao.findDetail(volumeObjectTO.getId(), StorPoolUtil.SP_PROVIDER_NAME);
+ if (detail != null) {
+ SpApiResponse updateVolumeResponse = StorPoolUtil.volumeUpdateRename(volumeName, "", StorPoolStorageAdaptor.getVolumeNameFromPath(detail.getValue(), false), conn);
+
+ if (updateVolumeResponse.getError() != null) {
+ StorPoolUtil.spLog("StorpoolVMSnapshotStrategy.canHandle - Could not update StorPool's volume %s to its globalId due to %s", volumeName, updateVolumeResponse.getError().getDescr());
+ err = String.format("StorpoolVMSnapshotStrategy.canHandle - Could not update StorPool's volume %s to its globalId due to %s", volumeName, updateVolumeResponse.getError().getDescr());
+ } else {
+ volumeDetailsDao.remove(detail.getId());
+ }
+ }
+
+ SpApiResponse resp = StorPoolUtil.detachAllForced(volumeName, false, conn);
+ if (resp.getError() != null) {
+ err = String.format("Could not detach StorPool volume %s from a group snapshot, due to %s",
+ volumeName, resp.getError());
+ throw new CloudRuntimeException(err);
+ }
+ resp = StorPoolUtil.volumeRevert(volumeName, snapshotName, conn);
+ if (resp.getError() != null) {
+ err = String.format("Could not complete revert task for volumeName=%s and snapshotName=%s",
+ volumeName, snapshotName);
+ throw new CloudRuntimeException(err);
+ }
+ VolumeInfo vinfo = volFactory.getVolume(volumeObjectTO.getId());
+ if (vinfo.getMaxIops() != null) {
+ resp = StorPoolUtil.volumeUpadateTags(volumeName, null, vinfo.getMaxIops(), conn, null);
+
+ if (resp.getError() != null) {
+ StorPoolUtil.spLog("Volume was reverted successfully but max iops could not be set due to %s",
+ resp.getError().getDescr());
+ }
+ }
+ }
+ finalizeRevert(vmSnapshotVO, volumeTOs);
+ result = vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationSucceeded);
+ } catch (CloudRuntimeException | NoTransitionException e) {
+ String errMsg = String.format("Error while finalizing revert of vm snapshot [%s] due to %s", vmSnapshot.getName(), e.getMessage());
+ log.error(errMsg, e);
+ throw new CloudRuntimeException(errMsg);
+ } finally {
+ if (!result) {
+ try {
+ vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed);
+ } catch (NoTransitionException e1) {
+ log.error("Cannot set vm snapshot state due to: " + e1.getMessage());
+ }
+ }
+ }
+ return result;
+ }
+
+ private void publishUsageEvents(String type, VMSnapshot vmSnapshot, UserVm userVm, VolumeObjectTO volumeTo) {
+ VolumeVO volume = volumeDao.findById(volumeTo.getId());
+ Long diskOfferingId = volume.getDiskOfferingId();
+ Long offeringId = null;
+ if (diskOfferingId != null) {
+ DiskOfferingVO offering = diskOfferingDao.findById(diskOfferingId);
+ if (offering != null && offering.isComputeOnly()) {
+ offeringId = offering.getId();
+ }
+ }
+ UsageEventUtils.publishUsageEvent(type, vmSnapshot.getAccountId(), userVm.getDataCenterId(), userVm.getId(),
+ vmSnapshot.getName(), offeringId, volume.getId(), volumeTo.getSize(), VMSnapshot.class.getName(), vmSnapshot.getUuid());
+ }
+
+ private void publishUsageEvents(String type, VMSnapshot vmSnapshot, UserVm userVm, Long vmSnapSize, Long virtualSize) {
+ try {
+ UsageEventUtils.publishUsageEvent(type, vmSnapshot.getAccountId(), userVm.getDataCenterId(), userVm.getId(),
+ vmSnapshot.getName(), 0L, 0L, vmSnapSize, virtualSize, VMSnapshot.class.getName(),
+ vmSnapshot.getUuid());
+ } catch (Exception e) {
+ log.error("Failed to publish usage event " + type, e);
+ }
+ }
+}
diff --git a/plugins/storage/volume/storpool/src/main/resources/META-INF/cloudstack/storage-volume-storpool/module.properties b/plugins/storage/volume/storpool/src/main/resources/META-INF/cloudstack/storage-volume-storpool/module.properties
new file mode 100644
index 0000000000..af4456ea37
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/resources/META-INF/cloudstack/storage-volume-storpool/module.properties
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+name=storage-volume-storpool
+parent=storage
diff --git a/plugins/storage/volume/storpool/src/main/resources/META-INF/cloudstack/storage-volume-storpool/spring-storage-volume-storpool-context.xml b/plugins/storage/volume/storpool/src/main/resources/META-INF/cloudstack/storage-volume-storpool/spring-storage-volume-storpool-context.xml
new file mode 100644
index 0000000000..cf1db3a8bf
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/resources/META-INF/cloudstack/storage-volume-storpool/spring-storage-volume-storpool-context.xml
@@ -0,0 +1,38 @@
+<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor
+ license agreements. See the NOTICE file distributed with this work for additional
+ information regarding copyright ownership. The ASF licenses this file to
+ you under the Apache License, Version 2.0 (the "License"); you may not use
+ this file except in compliance with the License. You may obtain a copy of
+ the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
+ by applicable law or agreed to in writing, software distributed under the
+ License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+ OF ANY KIND, either express or implied. See the License for the specific
+ language governing permissions and limitations under the License. -->
+<beans xmlns="http://www.springframework.org/schema/beans"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xmlns:context="http://www.springframework.org/schema/context"
+ xmlns:aop="http://www.springframework.org/schema/aop"
+ xsi:schemaLocation="http://www.springframework.org/schema/beans
+ http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
+ http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop-3.0.xsd
+ http://www.springframework.org/schema/context
+ http://www.springframework.org/schema/context/spring-context-3.0.xsd">
+
+ <bean id="storpoolPrimaryDataStoreProvider"
+ class="org.apache.cloudstack.storage.datastore.provider.StorPoolPrimaryDataStoreProvider" />
+
+ <bean id="storpoolSnapshotStrategy"
+ class="org.apache.cloudstack.storage.snapshot.StorPoolSnapshotStrategy" />
+
+ <bean id="storpoolVMSnapshotStrategy"
+ class="org.apache.cloudstack.storage.snapshot.StorPoolVMSnapshotStrategy" />
+
+ <bean id="storpoolConfigManager"
+ class="org.apache.cloudstack.storage.snapshot.StorPoolConfigurationManager" />
+
+ <bean id="storpoolDataMotionStrategy"
+ class="org.apache.cloudstack.storage.motion.StorPoolDataMotionStrategy" />
+
+ <bean id="cleanupTags"
+ class="org.apache.cloudstack.storage.collector.StorPoolAbandonObjectsCollector" />
+</beans>
diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
index 1028898347..2c741a205c 100644
--- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
+++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
@@ -57,10 +57,12 @@ import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationSer
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
@@ -2639,6 +2641,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
if (volume.getPoolId() != null) {
DataStore dataStore = dataStoreMgr.getDataStore(volume.getPoolId(), DataStoreRole.Primary);
volService.revokeAccess(volFactory.getVolume(volume.getId()), host, dataStore);
+ provideVMInfo(dataStore, vmId, volumeId);
}
if (volumePool != null && hostId != null) {
handleTargetsForVMware(hostId, volumePool.getHostAddress(), volumePool.getPort(), volume.get_iScsiName());
@@ -3884,6 +3887,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
if (attached) {
ev = Volume.Event.OperationSucceeded;
s_logger.debug("Volume: " + volInfo.getName() + " successfully attached to VM: " + volInfo.getAttachedVmName());
+ provideVMInfo(dataStore, vm.getId(), volInfo.getId());
} else {
s_logger.debug("Volume: " + volInfo.getName() + " failed to attach to VM: " + volInfo.getAttachedVmName());
}
@@ -3892,6 +3896,17 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
return _volsDao.findById(volumeToAttach.getId());
}
+ private void provideVMInfo(DataStore dataStore, long vmId, Long volumeId) {
+ DataStoreDriver dataStoreDriver = dataStore != null ? dataStore.getDriver() : null;
+
+ if (dataStoreDriver instanceof PrimaryDataStoreDriver) {
+ PrimaryDataStoreDriver storageDriver = (PrimaryDataStoreDriver)dataStoreDriver;
+ if (storageDriver.isVmInfoNeeded()) {
+ storageDriver.provideVmInfo(vmId, volumeId);
+ }
+ }
+ }
+
private int getMaxDataVolumesSupported(UserVmVO vm) {
Long hostId = vm.getHostId();
if (hostId == null) {
diff --git a/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java b/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java
index db6cac0e39..23e757dfe2 100644
--- a/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java
+++ b/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java
@@ -25,8 +25,10 @@ import javax.inject.Inject;
import javax.naming.ConfigurationException;
import javax.persistence.EntityExistsException;
-import com.cloud.server.ResourceManagerUtil;
import org.apache.cloudstack.context.CallContext;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.commons.collections.MapUtils;
import org.apache.log4j.Logger;
@@ -40,11 +42,14 @@ import com.cloud.network.vpc.NetworkACLItemVO;
import com.cloud.network.vpc.NetworkACLVO;
import com.cloud.network.vpc.VpcVO;
import com.cloud.projects.ProjectVO;
+import com.cloud.server.ResourceManagerUtil;
import com.cloud.server.ResourceTag;
import com.cloud.server.ResourceTag.ResourceObjectType;
import com.cloud.server.TaggedResourceService;
+import com.cloud.storage.DataStoreRole;
import com.cloud.storage.SnapshotPolicyVO;
import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.VolumeDao;
import com.cloud.tags.dao.ResourceTagDao;
import com.cloud.user.Account;
import com.cloud.user.AccountManager;
@@ -78,6 +83,10 @@ public class TaggedResourceManagerImpl extends ManagerBase implements TaggedReso
AccountDao _accountDao;
@Inject
ResourceManagerUtil resourceManagerUtil;
+ @Inject
+ VolumeDao volumeDao;
+ @Inject
+ DataStoreManager dataStoreMgr;
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
@@ -196,6 +205,9 @@ public class TaggedResourceManagerImpl extends ManagerBase implements TaggedReso
throw new CloudRuntimeException(String.format("tag %s already on %s with id %s", resourceTag.getKey(), resourceType.toString(), resourceId),e);
}
resourceTags.add(resourceTag);
+ if (ResourceObjectType.UserVm.equals(resourceType)) {
+ informStoragePoolForVmTags(id, key, value);
+ }
}
}
}
@@ -275,6 +287,9 @@ public class TaggedResourceManagerImpl extends ManagerBase implements TaggedReso
_resourceTagDao.remove(tagToRemove.getId());
s_logger.debug("Removed the tag '" + tagToRemove + "' for resources (" +
String.join(", ", resourceIds) + ")");
+ if (ResourceObjectType.UserVm.equals(resourceType)) {
+ informStoragePoolForVmTags(tagToRemove.getResourceId(), tagToRemove.getKey(), tagToRemove.getValue());
+ }
}
}
});
@@ -292,4 +307,19 @@ public class TaggedResourceManagerImpl extends ManagerBase implements TaggedReso
List<? extends ResourceTag> listResourceTags = listByResourceTypeAndId(type, resourceId);
return listResourceTags == null ? null : listResourceTags.stream().collect(Collectors.toMap(ResourceTag::getKey, ResourceTag::getValue));
}
+
+
+ private void informStoragePoolForVmTags(long vmId, String key, String value) {
+ List<VolumeVO> volumeVos = volumeDao.findByInstance(vmId);
+ for (VolumeVO volume : volumeVos) {
+ DataStore dataStore = dataStoreMgr.getDataStore(volume.getPoolId(), DataStoreRole.Primary);
+ if (dataStore == null || !(dataStore.getDriver() instanceof PrimaryDataStoreDriver)) {
+ continue;
+ }
+ PrimaryDataStoreDriver dataStoreDriver = (PrimaryDataStoreDriver) dataStore.getDriver();
+ if (dataStoreDriver.isVmTagsNeeded(key)) {
+ dataStoreDriver.provideVmTags(vmId, volume.getId(), value);
+ }
+ }
+ }
}
diff --git a/test/integration/plugins/storpool/MigrateVolumeToStorPool.py b/test/integration/plugins/storpool/MigrateVolumeToStorPool.py
new file mode 100644
index 0000000000..e6b9fbbbe2
--- /dev/null
+++ b/test/integration/plugins/storpool/MigrateVolumeToStorPool.py
@@ -0,0 +1,439 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Import Local Modules
+import pprint
+import random
+import subprocess
+import time
+import uuid
+
+from marvin.cloudstackAPI import (listOsTypes,
+ listTemplates,
+ listHosts,
+ createTemplate,
+ createVolume,
+ resizeVolume,
+ startVirtualMachine,
+ migrateVirtualMachine,
+ migrateVolume
+ )
+from marvin.cloudstackTestCase import cloudstackTestCase
+from marvin.codes import FAILED, KVM, PASS, XEN_SERVER, RUNNING
+from marvin.configGenerator import configuration, cluster
+from marvin.lib.base import (Account,
+ Configurations,
+ ServiceOffering,
+ Snapshot,
+ StoragePool,
+ Template,
+ Tag,
+ VirtualMachine,
+ VmSnapshot,
+ Volume,
+ SecurityGroup,
+ )
+from marvin.lib.common import (get_zone,
+ get_domain,
+ get_template,
+ list_disk_offering,
+ list_snapshots,
+ list_storage_pools,
+ list_volumes,
+ list_virtual_machines,
+ list_configurations,
+ list_service_offering,
+ list_clusters,
+ list_zones)
+from marvin.lib.utils import random_gen, cleanup_resources, validateList, is_snapshot_on_nfs, isAlmostEqual
+from nose.plugins.attrib import attr
+
+from storpool import spapi
+from sp_util import (TestData, StorPoolHelper)
+
+
+class TestMigrateVolumeToAnotherPool(cloudstackTestCase):
+ @classmethod
+ def setUpClass(cls):
+ super(TestMigrateVolumeToAnotherPool, cls).setUpClass()
+ try:
+ cls.setUpCloudStack()
+ except Exception:
+ cls.cleanUpCloudStack()
+ raise
+
+ @classmethod
+ def setUpCloudStack(cls):
+ cls.spapi = spapi.Api(host="10.2.23.248", port="81", auth="6549874687", multiCluster=True)
+ testClient = super(TestMigrateVolumeToAnotherPool, cls).getClsTestClient()
+ cls.apiclient = testClient.getApiClient()
+
+ cls._cleanup = []
+
+ cls.unsupportedHypervisor = False
+ cls.hypervisor = testClient.getHypervisorInfo()
+ if cls.hypervisor.lower() in ("hyperv", "lxc"):
+ cls.unsupportedHypervisor = True
+ return
+
+ cls.services = testClient.getParsedTestDataConfig()
+ # Get Zone, Domain and templates
+ cls.domain = get_domain(cls.apiclient)
+ cls.zone = None
+ zones = list_zones(cls.apiclient)
+
+ for z in zones:
+ if z.name == cls.getClsConfig().mgtSvr[0].zone:
+ cls.zone = z
+
+ assert cls.zone is not None
+
+ td = TestData()
+ cls.testdata = td.testdata
+ cls.helper = StorPoolHelper()
+ storpool_primary_storage = cls.testdata[TestData.primaryStorage]
+ cls.template_name = storpool_primary_storage.get("name")
+ storpool_service_offerings = cls.testdata[TestData.serviceOffering]
+
+ nfs_service_offerings = cls.testdata[TestData.serviceOfferingsPrimary]
+ ceph_service_offerings = cls.testdata[TestData.serviceOfferingsCeph]
+
+
+ storage_pool = list_storage_pools(
+ cls.apiclient,
+ name=cls.template_name
+ )
+
+ nfs_storage_pool = list_storage_pools(
+ cls.apiclient,
+ name='nfs'
+ )
+
+ ceph_primary_storage = cls.testdata[TestData.primaryStorage4]
+
+ cls.ceph_storage_pool = list_storage_pools(
+ cls.apiclient,
+ name=ceph_primary_storage.get("name")
+ )[0]
+
+ service_offerings = list_service_offering(
+ cls.apiclient,
+ name=cls.template_name
+ )
+ nfs_service_offering = list_service_offering(
+ cls.apiclient,
+ name='nfs'
+ )
+
+ ceph_service_offering = list_service_offering(
+ cls.apiclient,
+ name=ceph_primary_storage.get("name")
+ )
+
+ disk_offerings = list_disk_offering(
+ cls.apiclient,
+ name="ssd"
+ )
+
+ cls.disk_offerings = disk_offerings[0]
+ if storage_pool is None:
+ storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage)
+ else:
+ storage_pool = storage_pool[0]
+ cls.storage_pool = storage_pool
+ if service_offerings is None:
+ service_offerings = ServiceOffering.create(cls.apiclient, storpool_service_offerings)
+ else:
+ service_offerings = service_offerings[0]
+ if nfs_service_offering is None:
+ nfs_service_offering = ServiceOffering.create(cls.apiclient, nfs_service_offerings)
+ else:
+ nfs_service_offering = nfs_service_offering[0]
+
+ if ceph_service_offering is None:
+ ceph_service_offering = ServiceOffering.create(cls.apiclient, ceph_service_offerings)
+ else:
+ ceph_service_offering = ceph_service_offering[0]
+ #The version of CentOS has to be supported
+ template = get_template(
+ cls.apiclient,
+ cls.zone.id,
+ account = "system"
+ )
+
+ cls.nfs_storage_pool = nfs_storage_pool[0]
+ if cls.nfs_storage_pool.state == "Maintenance":
+ cls.nfs_storage_pool = StoragePool.cancelMaintenance(cls.apiclient, cls.nfs_storage_pool.id)
+
+ if cls.ceph_storage_pool.state == "Maintenance":
+ cls.ceph_storage_pool = StoragePool.cancelMaintenance(cls.apiclient, cls.ceph_storage_pool.id)
+
+ cls.account = cls.helper.create_account(
+ cls.apiclient,
+ cls.services["account"],
+ accounttype = 1,
+ domainid=cls.domain.id,
+ roleid = 1
+ )
+ cls._cleanup.append(cls.account)
+
+ securitygroup = SecurityGroup.list(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid)[0]
+ cls.helper.set_securityGroups(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid, id = securitygroup.id)
+
+ cls.vm = VirtualMachine.create(cls.apiclient,
+ {"name":"StorPool-%s" % uuid.uuid4() },
+ zoneid=cls.zone.id,
+ templateid=template.id,
+ accountid=cls.account.name,
+ domainid=cls.account.domainid,
+ serviceofferingid=nfs_service_offering.id,
+ hypervisor=cls.hypervisor,
+ rootdisksize=10
+ )
+ cls.vm2 = VirtualMachine.create(cls.apiclient,
+ {"name":"StorPool-%s" % uuid.uuid4() },
+ zoneid=cls.zone.id,
+ templateid=template.id,
+ accountid=cls.account.name,
+ domainid=cls.account.domainid,
+ serviceofferingid=nfs_service_offering.id,
+ hypervisor= cls.hypervisor,
+ rootdisksize=10
+ )
+ cls.vm3 = VirtualMachine.create(cls.apiclient,
+ {"name":"StorPool-%s" % uuid.uuid4() },
+ zoneid=cls.zone.id,
+ templateid=template.id,
+ accountid=cls.account.name,
+ domainid=cls.account.domainid,
+ serviceofferingid=nfs_service_offering.id,
+ hypervisor= cls.hypervisor,
+ rootdisksize=10
+ )
+ cls.vm4 = VirtualMachine.create(cls.apiclient,
+ {"name":"StorPool-%s" % uuid.uuid4() },
+ zoneid=cls.zone.id,
+ templateid=template.id,
+ accountid=cls.account.name,
+ domainid=cls.account.domainid,
+ serviceofferingid=ceph_service_offering.id,
+ hypervisor= cls.hypervisor,
+ rootdisksize=10
+ )
+ cls.vm5 = VirtualMachine.create(cls.apiclient,
+ {"name":"StorPool-%s" % uuid.uuid4() },
+ zoneid=cls.zone.id,
+ templateid=template.id,
+ accountid=cls.account.name,
+ domainid=cls.account.domainid,
+ serviceofferingid=ceph_service_offering.id,
+ hypervisor= cls.hypervisor,
+ rootdisksize=10
+ )
+ cls.storage_pool = StoragePool.update(cls.apiclient,
+ id=cls.storage_pool.id,
+ tags = ["ssd, nfs"])
+
+
+ if template == FAILED:
+ assert False, "get_template() failed to return template\
+ with description %s" % cls.services["ostype"]
+
+ cls.services["domainid"] = cls.domain.id
+ cls.services["small"]["zoneid"] = cls.zone.id
+ cls.services["templates"]["ostypeid"] = template.ostypeid
+ cls.services["zoneid"] = cls.zone.id
+
+
+ cls.service_offering = service_offerings
+ cls.nfs_service_offering = nfs_service_offering
+
+ cls.template = template
+ cls.random_data_0 = random_gen(size=100)
+ cls.test_dir = "/tmp"
+ cls.random_data = "random.data"
+ return
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.cleanUpCloudStack()
+
+ @classmethod
+ def cleanUpCloudStack(cls):
+ try:
+ if cls.nfs_storage_pool.state != "Maintenance":
+ cls.nfs_storage_pool = StoragePool.enableMaintenance(cls.apiclient, cls.nfs_storage_pool.id)
+
+ if cls.ceph_storage_pool.state != "Maintenance":
+ cls.ceph_storage_pool = StoragePool.enableMaintenance(cls.apiclient, cls.ceph_storage_pool.id)
+
+ cls.storage_pool = StoragePool.update(cls.apiclient,
+ id=cls.storage_pool.id,
+ tags = ["ssd"])
+ # Cleanup resources used
+ cleanup_resources(cls.apiclient, cls._cleanup)
+ except Exception as e:
+ raise Exception("Warning: Exception during cleanup : %s" % e)
+ return
+
+ def setUp(self):
+ self.apiclient = self.testClient.getApiClient()
+ self.dbclient = self.testClient.getDbConnection()
+
+ if self.unsupportedHypervisor:
+ self.skipTest("Skipping test because unsupported hypervisor\
+ %s" % self.hypervisor)
+ return
+
+ def tearDown(self):
+ return
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_1_migrate_vm_from_nfs_to_storpool(self):
+ ''' Test migrate virtual machine from NFS primary storage to StorPool'''
+
+ self.vm.stop(self.apiclient, forced=True)
+ cmd = migrateVirtualMachine.migrateVirtualMachineCmd()
+ cmd.virtualmachineid = self.vm.id
+ cmd.storageid = self.storage_pool.id
+ migrated_vm = self.apiclient.migrateVirtualMachine(cmd)
+ volumes = list_volumes(
+ self.apiclient,
+ virtualmachineid = migrated_vm.id,
+ listall=True
+ )
+ for v in volumes:
+ name = v.path.split("/")[3]
+ try:
+ sp_volume = self.spapi.volumeList(volumeName="~" + name)
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+ self.assertEqual(v.storageid, self.storage_pool.id, "Did not migrate virtual machine from NFS to StorPool")
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_2_migrate_volume_from_nfs_to_storpool(self):
+ ''' Test migrate volume from NFS primary storage to StorPool'''
+
+ self.vm2.stop(self.apiclient, forced=True)
+ volumes = list_volumes(
+ self.apiclient,
+ virtualmachineid = self.vm2.id,
+ listall=True
+ )
+ for v in volumes:
+ cmd = migrateVolume.migrateVolumeCmd()
+ cmd.storageid = self.storage_pool.id
+ cmd.volumeid = v.id
+ volume = self.apiclient.migrateVolume(cmd)
+ self.assertEqual(volume.storageid, self.storage_pool.id, "Did not migrate volume from NFS to StorPool")
+
+ volumes = list_volumes(
+ self.apiclient,
+ virtualmachineid = self.vm2.id,
+ listall=True
+ )
+ for v in volumes:
+ name = v.path.split("/")[3]
+ try:
+ sp_volume = self.spapi.volumeList(volumeName="~" + name)
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_3_migrate_volume_from_nfs_to_storpool(self):
+ '''Test write on disk before migrating volume from NFS primary storage
+ Check that data is on disk after migration'''
+
+ try:
+ # Login to VM and write data to file system
+ ssh_client = self.vm3.get_ssh_client(reconnect = True)
+
+ cmds = [
+ "echo %s > %s/%s" %
+ (self.random_data_0, self.test_dir, self.random_data),
+ "sync",
+ "sleep 1",
+ "sync",
+ "sleep 1",
+ "cat %s/%s" %
+ (self.test_dir, self.random_data)
+ ]
+
+ for c in cmds:
+ self.debug(c)
+ result = ssh_client.execute(c)
+ self.debug(result)
+
+
+ except Exception:
+ self.fail("SSH failed for Virtual machine: %s" %
+ self.vm3.ipaddress)
+ self.assertEqual(
+ self.random_data_0,
+ result[0],
+ "Check that the random data has been written into the temp file!"
+ )
+
+ self.vm3.stop(self.apiclient, forced=True)
+ volumes = list_volumes(
+ self.apiclient,
+ virtualmachineid = self.vm3.id,
+ listall=True
+ )
+ time.sleep(30)
+ for v in volumes:
+ cmd = migrateVolume.migrateVolumeCmd()
+ cmd.storageid = self.storage_pool.id
+ cmd.volumeid = v.id
+ volume = self.apiclient.migrateVolume(cmd)
+ self.assertEqual(volume.storageid, self.storage_pool.id, "Did not migrate volume from NFS to StorPool")
+
+ volumes = list_volumes(
+ self.apiclient,
+ virtualmachineid = self.vm3.id,
+ listall=True
+ )
+ for v in volumes:
+ name = v.path.split("/")[3]
+ try:
+ sp_volume = self.spapi.volumeList(volumeName="~" + name)
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+ self.vm3.start(self.apiclient)
+ try:
+ ssh_client = self.vm3.get_ssh_client(reconnect=True)
+
+ cmds = [
+ "cat %s/%s" % (self.test_dir, self.random_data)
+ ]
+
+ for c in cmds:
+ self.debug(c)
+ result = ssh_client.execute(c)
+ self.debug(result)
+
+ except Exception:
+ self.fail("SSH failed for Virtual machine: %s" %
+ self.vm3.ipaddress)
+
+ self.assertEqual(
+ self.random_data_0,
+ result[0],
+ "Check that the random data matches the content of the random file!"
+ )
diff --git a/test/integration/plugins/storpool/TestStorPoolVolumes.py b/test/integration/plugins/storpool/TestStorPoolVolumes.py
new file mode 100644
index 0000000000..4560038d26
--- /dev/null
+++ b/test/integration/plugins/storpool/TestStorPoolVolumes.py
@@ -0,0 +1,2153 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Import Local Modules
+from marvin.codes import FAILED, KVM, PASS, XEN_SERVER, RUNNING
+from nose.plugins.attrib import attr
+from marvin.cloudstackTestCase import cloudstackTestCase
+from marvin.lib.utils import random_gen, cleanup_resources, validateList, is_snapshot_on_nfs, isAlmostEqual
+from marvin.lib.base import (Account,
+ Cluster,
+ Configurations,
+ ServiceOffering,
+ Snapshot,
+ StoragePool,
+ Template,
+ VirtualMachine,
+ VmSnapshot,
+ Volume,
+ SecurityGroup,
+ Role,
+ )
+from marvin.lib.common import (get_zone,
+ get_domain,
+ get_template,
+ list_disk_offering,
+ list_hosts,
+ list_snapshots,
+ list_storage_pools,
+ list_volumes,
+ list_virtual_machines,
+ list_configurations,
+ list_service_offering,
+ list_clusters,
+ list_zones)
+from marvin.cloudstackAPI import (listOsTypes,
+ listTemplates,
+ listHosts,
+ createTemplate,
+ createVolume,
+ getVolumeSnapshotDetails,
+ resizeVolume,
+ listZones)
+import time
+import pprint
+import random
+import subprocess
+from storpool import spapi
+from storpool import sptypes
+from marvin.configGenerator import configuration
+import uuid
+from sp_util import (TestData, StorPoolHelper)
+
+class TestStoragePool(cloudstackTestCase):
+
    @classmethod
    def setUpClass(cls):
        # Delegate fixture creation to setUpCloudStack(); if it fails
        # part-way, destroy whatever was already created before re-raising,
        # so a broken run does not leak volumes/VMs into the next one.
        super(TestStoragePool, cls).setUpClass()
        try:
            cls.setUpCloudStack()
        except Exception:
            cls.cleanUpCloudStack()
            raise
+
    @classmethod
    def setUpCloudStack(cls):
        """Build every class-level fixture the StorPool volume tests share.

        Creates (or reuses, when already present) two StorPool-backed
        primary storage pools ("ssd" and "ssd2") with matching service
        offerings, an admin test account, three data volumes and three
        virtual machines.  Resources that must be destroyed afterwards are
        appended to cls._cleanup.  On hypervisors the plugin does not
        support, the method returns early and setUp() skips every test.
        """
        testClient = super(TestStoragePool, cls).getClsTestClient()

        cls._cleanup = []

        cls.apiclient = testClient.getApiClient()
        cls.helper = StorPoolHelper()

        # StorPool is KVM-only; skip outright on hypervisors marvin cannot
        # even deploy this suite to.
        cls.unsupportedHypervisor = False
        cls.hypervisor = testClient.getHypervisorInfo()
        if cls.hypervisor.lower() in ("hyperv", "lxc"):
            cls.unsupportedHypervisor = True
            return

        cls.services = testClient.getParsedTestDataConfig()

        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = None
        zones = list_zones(cls.apiclient)

        # Pick the zone named in the marvin configuration file.
        for z in zones:
            if z.name == cls.getClsConfig().mgtSvr[0].zone:
                cls.zone = z

        assert cls.zone is not None

        # --- First primary storage pool, backed by StorPool template "ssd".
        # NOTE(review): the endpoint/token in the URL are environment-specific
        # test-lab values — confirm they match the target deployment.
        cls.sp_template_1 = "ssd"
        storpool_primary_storage = {
            "name" : cls.sp_template_1,
            "zoneid": cls.zone.id,
            "url": "SP_API_HTTP=10.2.23.248:81;SP_AUTH_TOKEN=6549874687;SP_TEMPLATE=%s" % cls.sp_template_1,
            "scope": "zone",
            "capacitybytes": 564325555333,
            "capacityiops": 155466,
            "hypervisor": "kvm",
            "provider": "StorPool",
            "tags": cls.sp_template_1
            }

        cls.storpool_primary_storage = storpool_primary_storage
        # Talk to the StorPool API directly, using the credentials embedded
        # in the primary-storage URL.
        host, port, auth = cls.getCfgFromUrl(url = storpool_primary_storage["url"])
        cls.spapi = spapi.Api(host=host, port=port, auth=auth, multiCluster=True)

        storage_pool = list_storage_pools(
            cls.apiclient,
            name=storpool_primary_storage["name"]
            )

        if storage_pool is None:
            # Create the StorPool volume template first, then register it
            # as a CloudStack primary storage pool.
            newTemplate = sptypes.VolumeTemplateCreateDesc(name = storpool_primary_storage["name"],placeAll = "virtual", placeTail = "virtual", placeHead = "virtual", replication=1)
            template_on_local = cls.spapi.volumeTemplateCreate(newTemplate)

            storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage)
        else:
            storage_pool = storage_pool[0]
        cls.primary_storage = storage_pool

        # Compute offering whose storage tag routes disks to the "ssd" pool.
        storpool_service_offerings_ssd = {
            "name": cls.sp_template_1,
            "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
            "cpunumber": 1,
            "cpuspeed": 500,
            "memory": 512,
            "storagetype": "shared",
            "customizediops": False,
            "hypervisorsnapshotreserve": 200,
            "tags": cls.sp_template_1
            }

        service_offerings_ssd = list_service_offering(
            cls.apiclient,
            name=storpool_service_offerings_ssd["name"]
            )

        if service_offerings_ssd is None:
            service_offerings_ssd = ServiceOffering.create(cls.apiclient, storpool_service_offerings_ssd)
        else:
            service_offerings_ssd = service_offerings_ssd[0]

        cls.service_offering = service_offerings_ssd
        cls.debug(pprint.pformat(cls.service_offering))

        # --- Second pool/offering pair ("ssd2"), used as a migration target.
        cls.sp_template_2 = "ssd2"

        storpool_primary_storage2 = {
            "name" : cls.sp_template_2,
            "zoneid": cls.zone.id,
            "url": "SP_API_HTTP=10.2.23.248:81;SP_AUTH_TOKEN=6549874687;SP_TEMPLATE=%s" % cls.sp_template_2,
            "scope": "zone",
            "capacitybytes": 564325555333,
            "capacityiops": 1554,
            "hypervisor": "kvm",
            "provider": "StorPool",
            "tags": cls.sp_template_2
            }

        cls.storpool_primary_storage2 = storpool_primary_storage2
        storage_pool = list_storage_pools(
            cls.apiclient,
            name=storpool_primary_storage2["name"]
            )

        if storage_pool is None:
            newTemplate = sptypes.VolumeTemplateCreateDesc(name = storpool_primary_storage2["name"],placeAll = "virtual", placeTail = "virtual", placeHead = "virtual", replication=1)

            template_on_local = cls.spapi.volumeTemplateCreate(newTemplate)

            storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage2)

        else:
            storage_pool = storage_pool[0]
        cls.primary_storage2 = storage_pool

        storpool_service_offerings_ssd2 = {
            "name": cls.sp_template_2,
            "displaytext": "SP_CO_2",
            "cpunumber": 1,
            "cpuspeed": 500,
            "memory": 512,
            "storagetype": "shared",
            "customizediops": False,
            "tags": cls.sp_template_2
            }

        service_offerings_ssd2 = list_service_offering(
            cls.apiclient,
            name=storpool_service_offerings_ssd2["name"]
            )

        if service_offerings_ssd2 is None:
            service_offerings_ssd2 = ServiceOffering.create(cls.apiclient, storpool_service_offerings_ssd2)
        else:
            service_offerings_ssd2 = service_offerings_ssd2[0]

        cls.service_offering2 = service_offerings_ssd2

        # Built-in disk offerings of increasing size (names from the stock
        # CloudStack seed data).
        disk_offerings = list_disk_offering(
            cls.apiclient,
            name="Small"
            )

        disk_offering_20 = list_disk_offering(
            cls.apiclient,
            name="Medium"
            )

        disk_offering_100 = list_disk_offering(
            cls.apiclient,
            name="Large"
            )

        cls.disk_offerings = disk_offerings[0]
        cls.disk_offering_20 = disk_offering_20[0]
        cls.disk_offering_100 = disk_offering_100[0]

        #The version of CentOS has to be supported
        template = get_template(
            cls.apiclient,
            cls.zone.id,
            account = "system"
            )

        if template == FAILED:
            assert False, "get_template() failed to return template\
                    with description %s" % cls.services["ostype"]

        cls.services["domainid"] = cls.domain.id
        cls.services["small"]["zoneid"] = cls.zone.id
        cls.services["templates"]["ostypeid"] = template.ostypeid
        cls.services["zoneid"] = cls.zone.id
        cls.services["diskofferingid"] = cls.disk_offerings.id

        role = Role.list(cls.apiclient, name='Admin')

        # Create VMs, VMs etc
        cls.account = Account.create(
            cls.apiclient,
            cls.services["account"],
            domainid=cls.domain.id,
            roleid = role[0].id
            )

        # Open the account's default security group so the tests can SSH in.
        securitygroup = SecurityGroup.list(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid)[0]
        cls.helper.set_securityGroups(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid, id = securitygroup.id)
        cls._cleanup.append(cls.account)

        cls.volume_1 = Volume.create(
            cls.apiclient,
            {"diskname":"StorPoolDisk-1" },
            zoneid=cls.zone.id,
            diskofferingid=disk_offerings[0].id,
            account=cls.account.name,
            domainid=cls.account.domainid,
            )

        cls.volume_2 = Volume.create(
            cls.apiclient,
            {"diskname":"StorPoolDisk-2" },
            zoneid=cls.zone.id,
            diskofferingid=disk_offerings[0].id,
            account=cls.account.name,
            domainid=cls.account.domainid,
            )

        cls.volume = Volume.create(
            cls.apiclient,
            {"diskname":"StorPoolDisk-3" },
            zoneid=cls.zone.id,
            diskofferingid=disk_offerings[0].id,
            account=cls.account.name,
            domainid=cls.account.domainid,
            )

        cls.virtual_machine = VirtualMachine.create(
            cls.apiclient,
            {"name":"StorPool-%s" % uuid.uuid4() },
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            zoneid=cls.zone.id,
            templateid=template.id,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10
            )

        cls.virtual_machine2 = VirtualMachine.create(
            cls.apiclient,
            {"name":"StorPool-%s" % uuid.uuid4() },
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            zoneid=cls.zone.id,
            templateid=template.id,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10
            )

        cls.vm_migrate = VirtualMachine.create(
            cls.apiclient,
            {"name":"StorPool-%s" % uuid.uuid4() },
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            zoneid=cls.zone.id,
            templateid=template.id,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10
            )

        cls.template = template
        cls.hostid = cls.virtual_machine.hostid
        # Random payload written/read inside guests to verify data survives
        # snapshot, revert and migration operations.
        cls.random_data_0 = random_gen(size=100)
        cls.test_dir = "/tmp"
        cls.random_data = "random.data"
        return
+
    @classmethod
    def tearDownClass(cls):
        # Share one cleanup path with setUpClass' failure handling.
        cls.cleanUpCloudStack()
+
+ @classmethod
+ def cleanUpCloudStack(cls):
+ try:
+ # Cleanup resources used
+ cleanup_resources(cls.apiclient, cls._cleanup)
+
+ except Exception as e:
+ raise Exception("Warning: Exception during cleanup : %s" % e)
+ return
+
+ def setUp(self):
+ self.apiclient = self.testClient.getApiClient()
+ self.dbclient = self.testClient.getDbConnection()
+
+ if self.unsupportedHypervisor:
+ self.skipTest("Skipping test because unsupported hypervisor\
+ %s" % self.hypervisor)
+ return
+
+ def tearDown(self):
+ return
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_01_snapshot_to_template(self):
+ ''' Create template from snapshot without bypass secondary storage
+ '''
+ volume = Volume.list(
+ self.apiclient,
+ virtualmachineid = self.virtual_machine.id,
+ type = "ROOT",
+ listall = True,
+ )
+
+ backup_config = Configurations.update(self.apiclient,
+ name = "sp.bypass.secondary.storage",
+ value = "false")
+ snapshot = Snapshot.create(
+ self.apiclient,
+ volume_id = volume[0].id,
+ account=self.account.name,
+ domainid=self.account.domainid,
+ )
+ self.assertIsNotNone(snapshot, "Could not create snapshot")
+ self.assertIsInstance(snapshot, Snapshot, "Snapshot is not an instance of Snapshot")
+
+ template = self.create_template_from_snapshot(
+ self.apiclient,
+ self.services,
+ snapshotid = snapshot.id
+ )
+ virtual_machine = VirtualMachine.create(self.apiclient,
+ {"name":"StorPool-%s" % uuid.uuid4() },
+ accountid=self.account.name,
+ domainid=self.account.domainid,
+ zoneid=self.zone.id,
+ templateid=template.id,
+ serviceofferingid=self.service_offering.id,
+ hypervisor=self.hypervisor,
+ rootdisksize=10
+ )
+ ssh_client = virtual_machine.get_ssh_client()
+
+ self.assertIsNotNone(template, "Template is None")
+ self.assertIsInstance(template, Template, "Template is instance of template")
+ self._cleanup.append(template)
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_02_snapshot_to_template_bypass_secondary(self):
+ ''' Test Create Template from snapshot bypassing secondary storage
+ '''
+ ##cls.virtual_machine
+ volume = list_volumes(
+ self.apiclient,
+ virtualmachineid = self.virtual_machine.id,
+ type = "ROOT",
+ listall = True,
+ )
+ try:
+ name = volume[0].path.split("/")[3]
+ sp_volume = self.spapi.volumeList(volumeName = "~" + name)
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+
+ backup_config = Configurations.update(self.apiclient,
+ name = "sp.bypass.secondary.storage",
+ value = "true")
+
+ snapshot = Snapshot.create(
+ self.apiclient,
+ volume_id = volume[0].id,
+ account=self.account.name,
+ domainid=self.account.domainid,
+ )
+ try:
+ cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd()
+ cmd.snapshotid = snapshot.id
+ snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd)
+ flag = False
+ for s in snapshot_details:
+ if s["snapshotDetailsName"] == snapshot.id:
+ name = s["snapshotDetailsValue"].split("/")[3]
+ sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name)
+ flag = True
+ if flag == False:
+ raise Exception("Could not find snasphot in snapshot_details")
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+ self.assertIsNotNone(snapshot, "Could not create snapshot")
+ self.assertIsInstance(snapshot, Snapshot, "Snapshot is not an instance of Snapshot")
+
+ template = self.create_template_from_snapshot(
+ self.apiclient,
+ self.services,
+ snapshotid = snapshot.id
+ )
+
+ flag = False
+ sp_snapshots = self.spapi.snapshotsList()
+ for snap in sp_snapshots:
+ tags = snap.tags
+ for t in tags:
+ if tags[t] == template.id:
+ flag = True
+ break
+ else:
+ continue
+ break
+
+ if flag is False:
+ raise Exception("Template does not exists in Storpool")
+ virtual_machine = VirtualMachine.create(self.apiclient,
+ {"name":"StorPool-%s" % uuid.uuid4() },
+ accountid=self.account.name,
+ domainid=self.account.domainid,
+ zoneid=self.zone.id,
+ templateid=template.id,
+ serviceofferingid=self.service_offering.id,
+ hypervisor=self.hypervisor,
+ rootdisksize=10
+ )
+ ssh_client = virtual_machine.get_ssh_client()
+ self.assertIsNotNone(template, "Template is None")
+ self.assertIsInstance(template, Template, "Template is instance of template")
+ self._cleanup.append(template)
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_03_snapshot_volume_with_secondary(self):
+ '''
+ Test Create snapshot and backup to secondary
+ '''
+ backup_config = Configurations.update(self.apiclient,
+ name = "sp.bypass.secondary.storage",
+ value = "false")
+ volume = list_volumes(
+ self.apiclient,
+ virtualmachineid = self.virtual_machine.id,
+ type = "ROOT",
+ listall = True,
+ )
+ snapshot = Snapshot.create(
+ self.apiclient,
+ volume_id = volume[0].id,
+ account=self.account.name,
+ domainid=self.account.domainid,
+ )
+ try:
+ cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd()
+ cmd.snapshotid = snapshot.id
+ snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd)
+ flag = False
+ for s in snapshot_details:
+ if s["snapshotDetailsName"] == snapshot.id:
+ name = s["snapshotDetailsValue"].split("/")[3]
+ sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name)
+ flag = True
+ if flag == False:
+ raise Exception("Could not find snapshot in snapshot_details")
+ except spapi.ApiError as err:
+ raise Exception(err)
+ self.assertIsNotNone(snapshot, "Could not create snapshot")
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_04_snapshot_volume_bypass_secondary(self):
+ '''
+ Test snapshot bypassing secondary
+ '''
+ backup_config = Configurations.update(self.apiclient,
+ name = "sp.bypass.secondary.storage",
+ value = "true")
+ volume = list_volumes(
+ self.apiclient,
+ virtualmachineid = self.virtual_machine.id,
+ type = "ROOT",
+ listall = True,
+ )
+ snapshot = Snapshot.create(
+ self.apiclient,
+ volume_id = volume[0].id,
+ account=self.account.name,
+ domainid=self.account.domainid,
+ )
+ try:
+ cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd()
+ cmd.snapshotid = snapshot.id
+ snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd)
+ flag = False
+ for s in snapshot_details:
+ if s["snapshotDetailsName"] == snapshot.id:
+ name = s["snapshotDetailsValue"].split("/")[3]
+ sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name)
+ flag = True
+ if flag == False:
+ raise Exception("Could not find snapshot in snapshot details")
+ except spapi.ApiError as err:
+ raise Exception(err)
+ self.assertIsNotNone(snapshot, "Could not create snapshot")
+
    @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
    def test_05_delete_template_bypassed_secondary(self):
        ''' Test delete template from snapshot bypassed secondary storage
        '''
        volume = list_volumes(
            self.apiclient,
            virtualmachineid = self.virtual_machine.id,
            type = "ROOT",
            listall = True,
            )
        try:
            # The StorPool volume name is the 4th component of the CloudStack
            # volume path; "~<name>" addresses it by global id.
            name = volume[0].path.split("/")[3]
            sp_volume = self.spapi.volumeList(volumeName = "~" + name)
        except spapi.ApiError as err:
            raise Exception(err)

        # Keep the snapshot on primary storage only (no secondary copy).
        backup_config = Configurations.update(self.apiclient,
            name = "sp.bypass.secondary.storage",
            value = "true")

        snapshot = Snapshot.create(
            self.apiclient,
            volume_id = volume[0].id,
            account=self.account.name,
            domainid=self.account.domainid,
            )

        try:
            cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd()
            cmd.snapshotid = snapshot.id
            snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd)
            flag = False
            for s in snapshot_details:
                if s["snapshotDetailsName"] == snapshot.id:
                    name = s["snapshotDetailsValue"].split("/")[3]
                    sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name)
                    flag = True
            if flag == False:
                raise Exception("Could not find snapshot in snapshot details")
        except spapi.ApiError as err:
            raise Exception(err)

        self.assertIsNotNone(snapshot, "Could not create snapshot")
        self.assertIsInstance(snapshot, Snapshot, "Snapshot is not an instance of Snapshot")

        template = self.create_template_from_snapshot(
            self.apiclient,
            self.services,
            snapshotid = snapshot.id
            )

        # Locate the StorPool snapshot backing the new template: it carries
        # a tag whose value is the template's CloudStack id.
        flag = False
        storpoolGlId = None
        sp_snapshots = self.spapi.snapshotsList()
        for snap in sp_snapshots:
            tags = snap.tags
            for t in tags:
                if tags[t] == template.id:
                    storpoolGlId = "~" + snap.globalId
                    flag = True
                    break
            else:
                continue
            break

        if flag is False:
            raise Exception("Template does not exists in Storpool")

        self.assertIsNotNone(template, "Template is None")
        self.assertIsInstance(template, Template, "Template is instance of template")
        temp = Template.delete(template, self.apiclient, self.zone.id)
        self.assertIsNone(temp, "Template was not deleted")

        # Deleting the template should remove its backing StorPool snapshot;
        # an ApiError from snapshotList is therefore the expected outcome.
        try:
            sp_snapshot = self.spapi.snapshotList(snapshotName = storpoolGlId)
            if sp_snapshot is not None:
                self.debug("Snapshot exists on StorPool name " + storpoolGlId)
        except spapi.ApiError as err:
            self.debug("Do nothing the template has to be deleted")
+
    @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
    def test_06_template_from_snapshot(self):
        ''' Test create template bypassing secondary from snapshot which is backed up on secondary storage
        '''
        volume = list_volumes(
            self.apiclient,
            virtualmachineid = self.virtual_machine.id,
            type = "ROOT",
            listall = True,
            )
        try:
            # Verify the ROOT volume exists on StorPool before snapshotting.
            name = volume[0].path.split("/")[3]
            sp_volume = self.spapi.volumeList(volumeName = "~" + name)
        except spapi.ApiError as err:
            raise Exception(err)

        # First take the snapshot with the regular secondary-storage backup.
        backup_config = Configurations.update(self.apiclient,
            name = "sp.bypass.secondary.storage",
            value = "false")

        snapshot = Snapshot.create(
            self.apiclient,
            volume_id = volume[0].id,
            account=self.account.name,
            domainid=self.account.domainid,
            )

        try:
            cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd()
            cmd.snapshotid = snapshot.id
            snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd)
            flag = False
            for s in snapshot_details:
                if s["snapshotDetailsName"] == snapshot.id:
                    name = s["snapshotDetailsValue"].split("/")[3]
                    sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name)
                    flag = True
            if flag == False:
                raise Exception("Could not find snapshot in snapsho details")
        except spapi.ApiError as err:
            raise Exception(err)

        self.assertIsNotNone(snapshot, "Could not create snapshot")
        self.assertIsInstance(snapshot, Snapshot, "Snapshot is not an instance of Snapshot")

        # Then create the template with bypass enabled, from the snapshot
        # that is already backed up on secondary storage.
        backup_config = Configurations.update(self.apiclient,
            name = "sp.bypass.secondary.storage",
            value = "true")

        template = self.create_template_from_snapshot(
            self.apiclient,
            self.services,
            snapshotid = snapshot.id
            )

        # Find the StorPool snapshot tagged with the template's id.
        flag = False
        globalId = None
        sp_snapshots = self.spapi.snapshotsList()
        for snap in sp_snapshots:
            tags = snap.tags
            for t in tags:
                if tags[t] == template.id:
                    flag = True
                    globalId = snap.globalId
                    break
            else:
                continue
            break

        if flag is False:
            raise Exception("Template does not exists in Storpool")


        self.assertIsNotNone(template, "Template is None")
        self.assertIsInstance(template, Template, "Template is instance of template")
        temp = Template.delete(template, self.apiclient, self.zone.id)
        self.assertIsNone(temp, "Template was not deleted")

        # After deleting the template its backing StorPool snapshot must be
        # gone too: either the direct lookup by global id fails, or a fresh
        # tag scan must no longer match the template id.
        if globalId is not None:
            try:
                sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + globalId)
                if sp_snapshot is not None:
                    self.debug("Snapshot exists on Storpool name " + globalId)
            except spapi.ApiError as err:
                self.debug("Do nothing the template has to be deleted")
        else:
            flag = False
            sp_snapshots = self.spapi.snapshotsList()
            for snap in sp_snapshots:
                tags = snap.tags
                for t in tags:
                    if tags[t] == template.id:
                        flag = True
                        break
                else:
                    continue
                break

            if flag is True:
                raise Exception("Template should not exists in Storpool")
+
    @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
    def test_07_delete_snapshot_of_deleted_volume(self):
        ''' Delete snapshot and template if volume is already deleted, not bypassing secondary
        '''

        backup_config = Configurations.update(self.apiclient,
            name = "sp.bypass.secondary.storage",
            value = "false")

        volume = Volume.create(
            self.apiclient,
            {"diskname":"StorPoolDisk-Delete" },
            zoneid = self.zone.id,
            diskofferingid = self.disk_offerings.id,
            account=self.account.name,
            domainid=self.account.domainid,
            )
        # Keep a handle for the later delete; `volume` is rebound to the
        # list_volumes() result below.
        delete = volume
        self.virtual_machine2.stop(self.apiclient, forced=True)
        # Attach/detach so the volume is actually materialized on StorPool.
        self.virtual_machine2.attach_volume(
            self.apiclient,
            volume
            )
        self.virtual_machine2.detach_volume(
            self.apiclient,
            volume
            )

        volume = list_volumes(self.apiclient, id = volume.id)

        name = volume[0].path.split("/")[3]
        try:
            spvolume = self.spapi.volumeList(volumeName="~" + name)
        except spapi.ApiError as err:
            raise Exception(err)

        snapshot = Snapshot.create(
            self.apiclient,
            volume_id = volume[0].id,
            account=self.account.name,
            domainid=self.account.domainid,
            )

        try:
            cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd()
            cmd.snapshotid = snapshot.id
            snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd)
            flag = False
            for s in snapshot_details:
                if s["snapshotDetailsName"] == snapshot.id:
                    name = s["snapshotDetailsValue"].split("/")[3]
                    try:
                        sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name)
                        flag = True
                    except spapi.ApiError as err:
                        raise Exception(err)
            if flag == False:
                raise Exception("Could not finad snapshot in snapshot details")
        except Exception as err:
            raise Exception(err)

        # Templates created from both the snapshot and the live volume must
        # remain usable after their source volume is deleted below.
        template = self.create_template_from_snapshot(self.apiclient, self.services, snapshotid = snapshot.id)

        template_from_volume = self.create_template_from_snapshot(self.apiclient, self.services, volumeid = volume[0].id)

        Volume.delete(delete, self.apiclient, )
        Snapshot.delete(snapshot, self.apiclient)

        flag = False

        # Reaching the outer except branch here is the expected ("details
        # gone, snapshot deleted") outcome.
        # NOTE(review): the "Snapshot was not deleted" raise below is caught
        # by this same enclosing except and only logged, so the test cannot
        # actually fail on a lingering snapshot — confirm this is intended.
        try:
            cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd()
            cmd.snapshotid = snapshot.id
            snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd)
            if snapshot_details is not None:
                try:
                    for s in snapshot_details:
                        if s["snapshotDetailsName"] == snapshot.id:
                            name = s["snapshotDetailsValue"].split("/")[3]
                            sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name)
                            flag = True
                except spapi.ApiError as err:
                    flag = False

                if flag is True:
                    raise Exception("Snapshot was not deleted")
        except Exception as err:
            self.debug('Snapshot was deleted %s' % err)

        Template.delete(template, self.apiclient, zoneid = self.zone.id)
        Template.delete(template_from_volume, self.apiclient, zoneid = self.zone.id)
+
    @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
    def test_08_delete_snapshot_of_deleted_volume(self):
        ''' Delete snapshot and template if volume is already deleted, bypassing secondary
        '''

        backup_config = Configurations.update(self.apiclient,
            name = "sp.bypass.secondary.storage",
            value = "true")

        volume = Volume.create(
            self.apiclient,
            {"diskname":"StorPoolDisk-Delete" },
            zoneid = self.zone.id,
            diskofferingid = self.disk_offerings.id,
            account=self.account.name,
            domainid=self.account.domainid,
            )
        # Keep a handle for the later delete; `volume` is rebound to the
        # list_volumes() result below.
        delete = volume
        # Attach/detach so the volume is actually materialized on StorPool.
        self.virtual_machine2.attach_volume(
            self.apiclient,
            volume
            )
        self.virtual_machine2.detach_volume(
            self.apiclient,
            volume
            )

        volume = list_volumes(self.apiclient, id = volume.id)

        name = volume[0].path.split("/")[3]
        try:
            spvolume = self.spapi.volumeList(volumeName="~" + name)
        except spapi.ApiError as err:
            raise Exception(err)

        snapshot = Snapshot.create(
            self.apiclient,
            volume_id = volume[0].id,
            account=self.account.name,
            domainid=self.account.domainid,
            )

        try:
            cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd()
            cmd.snapshotid = snapshot.id
            snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd)
            if snapshot_details is not None:
                flag = False
                for s in snapshot_details:
                    if s["snapshotDetailsName"] == snapshot.id:
                        name = s["snapshotDetailsValue"].split("/")[3]
                        try:
                            sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name)
                            flag = True
                        except spapi.ApiError as err:
                            raise Exception(err)
                if flag == False:
                    raise Exception("Could not find snapshot in snapshot details")
        except Exception as err:
            raise Exception(err)

        template = self.create_template_from_snapshot(self.apiclient, self.services, snapshotid = snapshot.id)

        Volume.delete(delete, self.apiclient, )
        Snapshot.delete(snapshot, self.apiclient)

        flag = False
        # Reaching the outer except branch here is the expected ("details
        # gone, snapshot deleted") outcome.
        # NOTE(review): the "Snapshot was not deleted" raise below is caught
        # by this same enclosing except and only logged, so the test cannot
        # actually fail on a lingering snapshot — confirm this is intended.
        try:
            cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd()
            cmd.snapshotid = snapshot.id
            snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd)
            if snapshot_details is not None:
                try:
                    for s in snapshot_details:
                        if s["snapshotDetailsName"] == snapshot.id:
                            name = s["snapshotDetailsValue"].split("/")[3]
                            sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name)
                            flag = True
                except spapi.ApiError as err:
                    flag = False

                if flag is True:
                    raise Exception("Snapshot was not deleted")
        except Exception as err:
            self.debug('Snapshot was deleted %s' % err)


        Template.delete(template, self.apiclient, zoneid = self.zone.id)
+
    @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
    def test_09_vm_from_bypassed_template(self):
        '''Create virtual machine with sp.bypass.secondary.storage=false
        from template created on StorPool and Secondary Storage'''

        volume = list_volumes(
            self.apiclient,
            virtualmachineid = self.virtual_machine.id,
            type = "ROOT",
            listall = True,
            )

        # Verify the ROOT volume exists on StorPool before snapshotting.
        name = volume[0].path.split("/")[3]
        try:
            spvolume = self.spapi.volumeList(volumeName="~" + name)
        except spapi.ApiError as err:
            raise Exception(err)

        # Create the template with bypass enabled (primary storage only).
        backup_config = Configurations.update(self.apiclient,
            name = "sp.bypass.secondary.storage",
            value = "true")

        snapshot = Snapshot.create(
            self.apiclient,
            volume_id = volume[0].id,
            account=self.account.name,
            domainid=self.account.domainid,
            )

        try:
            cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd()
            cmd.snapshotid = snapshot.id
            snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd)
            flag = False
            for s in snapshot_details:
                if s["snapshotDetailsName"] == snapshot.id:
                    name = s["snapshotDetailsValue"].split("/")[3]
                    try:
                        sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name)
                        flag = True
                    except spapi.ApiError as err:
                        raise Exception(err)
            if flag == False:
                raise Exception("Could not find snapshot in snapshot details")
        except Exception as err:
            raise Exception(err)

        self.assertIsNotNone(snapshot, "Could not create snapshot")
        self.assertIsInstance(snapshot, Snapshot, "Snapshot is not an instance of Snapshot")

        template = self.create_template_from_snapshot(
            self.apiclient,
            self.services,
            snapshotid = snapshot.id
            )
        self._cleanup.append(template)

        # The template must be backed by a StorPool snapshot tagged with
        # the template's CloudStack id.
        flag = False
        sp_snapshots = self.spapi.snapshotsList()
        for snap in sp_snapshots:
            tags = snap.tags
            for t in tags:
                if tags[t] == template.id:
                    flag = True
                    break
            else:
                continue
            break

        if flag is False:
            raise Exception("Template does not exists in Storpool")


        self.assertIsNotNone(template, "Template is None")
        self.assertIsInstance(template, Template, "Template is instance of template")

        # Now deploy a VM from that template with bypass *disabled*, which
        # forces CloudStack to use the secondary-storage copy as well.
        backup_config = Configurations.update(self.apiclient,
            name = "sp.bypass.secondary.storage",
            value = "false")

        vm = VirtualMachine.create(
            self.apiclient,
            {"name":"StorPool-%s" % uuid.uuid4() },
            accountid=self.account.name,
            domainid=self.account.domainid,
            zoneid=self.zone.id,
            templateid = template.id,
            serviceofferingid=self.service_offering.id,
            hypervisor=self.hypervisor,
            rootdisksize=10,
            )

        # A successful SSH login proves the VM booted from the template.
        ssh_client = vm.get_ssh_client(reconnect=True)
+
+
    @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
    def test_10_create_vm_snapshots(self):
        """Test to create VM snapshots
        """
        volume_attached = self.virtual_machine.attach_volume(
            self.apiclient,
            self.volume
            )

        # Confirm the attached volume is the expected one and exists on
        # StorPool (volume name is the 4th path component).
        vol = list_volumes(self.apiclient, virtualmachineid=self.virtual_machine.id, id=volume_attached.id)
        name = vol[0].path.split("/")[3]
        sp_volume = self.spapi.volumeList(volumeName = "~" + name)
        self.assertEqual(volume_attached.id, self.volume.id, "Is not the same volume ")
        try:
            # Login to VM and write data to file system
            ssh_client = self.virtual_machine.get_ssh_client()

            # The sync/sleep pairs flush the guest page cache so the written
            # data is on disk before the snapshot is taken.
            cmds = [
                "echo %s > %s/%s" %
                (self.random_data_0, self.test_dir, self.random_data),
                "sync",
                "sleep 1",
                "sync",
                "sleep 1",
                "cat %s/%s" %
                (self.test_dir, self.random_data)
                ]

            for c in cmds:
                self.debug(c)
                result = ssh_client.execute(c)
                self.debug(result)


        except Exception:
            self.fail("SSH failed for Virtual machine: %s" %
                      self.virtual_machine.ipaddress)
        # `result` holds the output of the final "cat" command.
        self.assertEqual(
            self.random_data_0,
            result[0],
            "Check the random data has be write into temp file!"
        )

        time.sleep(30)
        # Disk-only VM snapshot (no memory), reverted in test_11.
        MemorySnapshot = False
        vm_snapshot = VmSnapshot.create(
            self.apiclient,
            self.virtual_machine.id,
            MemorySnapshot,
            "TestSnapshot",
            "Display Text"
            )
        self.assertEqual(
            vm_snapshot.state,
            "Ready",
            "Check the snapshot of vm is ready!"
        )

        return
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_11_revert_vm_snapshots(self):
+ """Test to revert VM snapshots
+ """
+
+ try:
+ ssh_client = self.virtual_machine.get_ssh_client()
+
+ cmds = [
+ "rm -rf %s/%s" % (self.test_dir, self.random_data),
+ "ls %s/%s" % (self.test_dir, self.random_data)
+ ]
+
+ for c in cmds:
+ self.debug(c)
+ result = ssh_client.execute(c)
+ self.debug(result)
+
+ except Exception:
+ self.fail("SSH failed for Virtual machine: %s" %
+ self.virtual_machine.ipaddress)
+
+ if str(result[0]).index("No such file or directory") == -1:
+ self.fail("Check the random data has be delete from temp file!")
+
+ time.sleep(30)
+
+ list_snapshot_response = VmSnapshot.list(
+ self.apiclient,
+ virtualmachineid=self.virtual_machine.id,
+ listall=True)
+
+ self.assertEqual(
+ isinstance(list_snapshot_response, list),
+ True,
+ "Check list response returns a valid list"
+ )
+ self.assertNotEqual(
+ list_snapshot_response,
+ None,
+ "Check if snapshot exists in ListSnapshot"
+ )
+
+ self.assertEqual(
+ list_snapshot_response[0].state,
+ "Ready",
+ "Check the snapshot of vm is ready!"
+ )
+
+ self.virtual_machine.stop(self.apiclient, forced=True)
+
+ VmSnapshot.revertToSnapshot(
+ self.apiclient,
+ list_snapshot_response[0].id
+ )
+
+ self.virtual_machine.start(self.apiclient)
+
+ try:
+ ssh_client = self.virtual_machine.get_ssh_client(reconnect=True)
+
+ cmds = [
+ "cat %s/%s" % (self.test_dir, self.random_data)
+ ]
+
+ for c in cmds:
+ self.debug(c)
+ result = ssh_client.execute(c)
+ self.debug(result)
+
+ except Exception:
+ self.fail("SSH failed for Virtual machine: %s" %
+ self.virtual_machine.ipaddress)
+
+ self.assertEqual(
+ self.random_data_0,
+ result[0],
+ "Check the random data is equal with the ramdom file!"
+ )
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_12_delete_vm_snapshots(self):
+ """Test to delete vm snapshots
+ """
+
+ list_snapshot_response = VmSnapshot.list(
+ self.apiclient,
+ virtualmachineid=self.virtual_machine.id,
+ listall=True)
+
+ self.assertEqual(
+ isinstance(list_snapshot_response, list),
+ True,
+ "Check list response returns a valid list"
+ )
+ self.assertNotEqual(
+ list_snapshot_response,
+ None,
+ "Check if snapshot exists in ListSnapshot"
+ )
+ VmSnapshot.deleteVMSnapshot(
+ self.apiclient,
+ list_snapshot_response[0].id)
+
+ time.sleep(30)
+
+ list_snapshot_response = VmSnapshot.list(
+ self.apiclient,
+ #vmid=self.virtual_machine.id,
+ virtualmachineid=self.virtual_machine.id,
+ listall=False)
+ self.debug('list_snapshot_response -------------------- %s' % list_snapshot_response)
+
+ self.assertIsNone(list_snapshot_response, "snapshot is already deleted")
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_13_detach_volume(self):
+ '''Attach volume on VM on 2nd zone'''
+ self.virtual_machine.stop(self.apiclient)
+ self.virtual_machine.detach_volume(
+ self.apiclient,
+ self.volume
+ )
+ vol = list_volumes(self.apiclient, id=self.volume.id)
+ name = vol[0].path.split("/")[3]
+ spvolume = self.spapi.volumeList(volumeName = "~" + name)
+ self.assertEqual(vol[0].id, self.volume.id, "Is not the same volume ")
+ tags = spvolume[0].tags
+ for t in tags:
+ self.assertFalse(t.lower() == 'cvm'.lower(), "cvm tag still set on detached volume")
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_14_attach_detach_volume_to_running_vm(self):
+ ''' Test Attach Volume To Running Virtual Machine
+ '''
+ time.sleep(60)
+ self.assertEqual(VirtualMachine.RUNNING, self.virtual_machine.state, "Running")
+ volume = self.virtual_machine.attach_volume(
+ self.apiclient,
+ self.volume_1
+ )
+ print(volume)
+ self.assertIsNotNone(volume, "Volume is not None")
+
+ list_vm_volumes = Volume.list(
+ self.apiclient,
+ virtualmachineid = self.virtual_machine.id,
+ id= volume.id
+ )
+ print(list_vm_volumes)
+ self.assertEqual(volume.id, list_vm_volumes[0].id, "Is true")
+
+ name = list_vm_volumes[0].path.split("/")[3]
+ try:
+ spvolume = self.spapi.volumeList(volumeName="~" + name)
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+ volume = self.virtual_machine.detach_volume(
+ self.apiclient,
+ self.volume_1
+ )
+ list_vm_volumes = Volume.list(
+ self.apiclient,
+ virtualmachineid = self.virtual_machine.id,
+ id = volume.id
+ )
+
+ print(list_vm_volumes)
+ self.assertIsNone(list_vm_volumes, "Is None")
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_15_resize_root_volume_on_working_vm(self):
+ ''' Test Resize Root volume on Running Virtual Machine
+ '''
+ self.assertEqual(VirtualMachine.RUNNING, self.virtual_machine2.state, "Running")
+ volume = list_volumes(
+ self.apiclient,
+ virtualmachineid = self.virtual_machine2.id,
+ type = "ROOT",
+ listall = True,
+ )
+ volume = volume[0]
+
+ name = volume.path.split("/")[3]
+ try:
+ spvolume = self.spapi.volumeList(volumeName="~" + name)
+ if spvolume[0].size != volume.size:
+ raise Exception("Storpool volume size is not the same as CloudStack db size")
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+ self.assertEqual(volume.type, 'ROOT', "Volume is not of ROOT type")
+ shrinkOk = False
+ if volume.size > int((self.disk_offering_20.disksize) * (1024**3)):
+ shrinkOk= True
+
+ cmd = resizeVolume.resizeVolumeCmd()
+ cmd.id = volume.id
+ cmd.size = 20
+ cmd.shrinkok = shrinkOk
+
+ self.apiclient.resizeVolume(cmd)
+
+ new_size = Volume.list(
+ self.apiclient,
+ id=volume.id
+ )
+
+ self.assertTrue(
+ (new_size[0].size == int((self.disk_offering_20.disksize) * (1024**3))),
+ "New size is not int((self.disk_offering_20) * (1024**3)"
+ )
+ volume = new_size[0]
+
+ name = volume.path.split("/")[3]
+ try:
+ spvolume = self.spapi.volumeList(volumeName="~" + name)
+ if spvolume[0].size != volume.size:
+ raise Exception("Storpool volume size is not the same as CloudStack db size")
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+ shrinkOk = False
+ if volume.size > int((self.disk_offering_100.disksize) * (1024**3)):
+ shrinkOk= True
+
+ cmd = resizeVolume.resizeVolumeCmd()
+ cmd.id = volume.id
+ cmd.size = 100
+ cmd.shrinkok = shrinkOk
+
+ self.apiclient.resizeVolume(cmd)
+ new_size = Volume.list(
+ self.apiclient,
+ id=volume.id
+ )
+
+ volume = new_size[0]
+
+ name = volume.path.split("/")[3]
+ try:
+ spvolume = self.spapi.volumeList(volumeName="~" + name)
+ if spvolume[0].size != volume.size:
+ raise Exception("Storpool volume size is not the same as CloudStack db size")
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+ self.assertTrue(
+ (new_size[0].size == int((self.disk_offering_100.disksize) * (1024**3))),
+ "New size is not int((self.disk_offering_20) * (1024**3)"
+ )
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_16_resize_attached_volume_on_working_vm(self):
+ ''' Test Resize Volume Attached To Running Virtual Machine
+ '''
+ self.assertEqual(VirtualMachine.RUNNING, self.virtual_machine.state, "Running")
+ volume = self.virtual_machine.attach_volume(
+ self.apiclient,
+ self.volume_1
+ )
+
+ listvol = Volume.list(
+ self.apiclient,
+ id=volume.id
+ )
+ name = listvol[0].path.split("/")[3]
+ try:
+ spvolume = self.spapi.volumeList(volumeName="~" + name)
+ if spvolume[0].size != listvol[0].size:
+ raise Exception("Storpool volume size is not the same as CloudStack db size")
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+ shrinkOk = False
+ if volume.size > int((self.disk_offering_20.disksize) * (1024**3)):
+ shrinkOk= True
+
+ cmd = resizeVolume.resizeVolumeCmd()
+ cmd.id = volume.id
+ cmd.diskofferingid = self.disk_offering_20.id
+ cmd.shrinkok = shrinkOk
+
+ self.apiclient.resizeVolume(cmd)
+
+ new_size = Volume.list(
+ self.apiclient,
+ id=volume.id
+ )
+
+ self.assertTrue(
+ (new_size[0].size == int((self.disk_offering_20.disksize) * (1024**3))),
+ "New size is not int((self.disk_offering_20) * (1024**3)"
+ )
+ volume = new_size[0]
+
+ name = volume.path.split("/")[3]
+ try:
+ spvolume = self.spapi.volumeList(volumeName="~" + name)
+ if spvolume[0].size != volume.size:
+ raise Exception("Storpool volume size is not the same as CloudStack db size")
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+ shrinkOk = False
+ if volume.size > int((self.disk_offering_100.disksize) * (1024**3)):
+ shrinkOk= True
+
+ cmd = resizeVolume.resizeVolumeCmd()
+ cmd.id = volume.id
+ cmd.diskofferingid = self.disk_offering_100.id
+ cmd.shrinkok = shrinkOk
+
+ self.apiclient.resizeVolume(cmd)
+ new_size = Volume.list(
+ self.apiclient,
+ id=volume.id
+ )
+
+ self.assertTrue(
+ (new_size[0].size == int((self.disk_offering_100.disksize) * (1024**3))),
+ "New size is not int((self.disk_offering_20) * (1024**3)"
+ )
+
+ # return to small disk
+ volume = new_size[0]
+
+ name = volume.path.split("/")[3]
+ try:
+ spvolume = self.spapi.volumeList(volumeName="~" + name)
+ if spvolume[0].size != volume.size:
+ raise Exception("Storpool volume size is not the same as CloudStack db size")
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+ shrinkOk = False
+ if volume.size > int((self.disk_offerings.disksize)* (1024**3)):
+ shrinkOk= True
+
+ cmd.diskofferingid = self.disk_offerings.id
+ cmd.shrinkok = shrinkOk
+
+ self.apiclient.resizeVolume(cmd)
+ new_size = Volume.list(
+ self.apiclient,
+ id=volume.id
+ )
+
+ volume = new_size[0]
+
+ name = volume.path.split("/")[3]
+ try:
+ spvolume = self.spapi.volumeList(volumeName="~" + name)
+ if spvolume[0].size != volume.size:
+ raise Exception("Storpool volume size is not the same as CloudStack db size")
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+ self.assertTrue(
+ (new_size[0].size == int((self.disk_offerings.disksize)*(1024**3))),
+ "Could not return to Small disk"
+ )
+
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_17_attach_detach_volume_to_stopped_vm(self):
+ ''' Test Attach Volume To Stopped Virtual Machine
+ '''
+ virtual_machine = self.virtual_machine.stop(
+ self.apiclient,
+ forced=True
+ )
+
+ time.sleep(60)
+ volume_2 = self.virtual_machine.attach_volume(
+ self.apiclient,
+ self.volume_2
+ )
+ list_vm_volumes = Volume.list(
+ self.apiclient,
+ virtualmachineid = self.virtual_machine.id,
+ id= volume_2.id
+ )
+
+ name = list_vm_volumes[0].path.split("/")[3]
+ try:
+ spvolume = self.spapi.volumeList(volumeName="~" + name)
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+ print(list_vm_volumes)
+ self.assertEqual(volume_2.id,list_vm_volumes[0].id, "Is true")
+
+ time.sleep(90)
+ volume_2 = self.virtual_machine.detach_volume(
+ self.apiclient,
+ self.volume_2
+ )
+ list_vm_volumes = Volume.list(
+ self.apiclient,
+ virtualmachineid = self.virtual_machine.id,
+ id = volume_2.id
+ )
+ print(list_vm_volumes)
+ self.assertIsNone(list_vm_volumes, "Is None")
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_18_resize_attached_volume(self):
+ ''' Test Resize Volume Attached To Virtual Machine
+ '''
+
+ shrinkOk = False
+ if self.volume_1.size > int((self.disk_offering_20.disksize) * (1024**3)):
+ shrinkOk= True
+
+ cmd = resizeVolume.resizeVolumeCmd()
+ cmd.id = self.volume_1.id
+ cmd.diskofferingid = self.disk_offering_20.id
+ cmd.shrinkok = shrinkOk
+
+ self.apiclient.resizeVolume(cmd)
+
+ new_size = Volume.list(
+ self.apiclient,
+ id=self.volume_1.id
+ )
+
+ self.assertTrue(
+ (new_size[0].size == int((self.disk_offering_20.disksize) * (1024**3))),
+ "New size is not int((self.disk_offering_20) * (1024**3)"
+ )
+ self.volume_1 = new_size[0]
+
+ name = self.volume_1.path.split("/")[3]
+ try:
+ spvolume = self.spapi.volumeList(volumeName="~" + name)
+ if spvolume[0].size != self.volume_1.size:
+ raise Exception("Storpool volume size is not the same as CloudStack db size")
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+ shrinkOk = False
+ if self.volume_1.size > int((self.disk_offering_100.disksize) * (1024**3)):
+ shrinkOk= True
+
+ cmd = resizeVolume.resizeVolumeCmd()
+ cmd.id = self.volume_1.id
+ cmd.diskofferingid = self.disk_offering_100.id
+ cmd.shrinkok = shrinkOk
+
+ self.apiclient.resizeVolume(cmd)
+ new_size = Volume.list(
+ self.apiclient,
+ id=self.volume_1.id
+ )
+
+ self.assertTrue(
+ (new_size[0].size == int((self.disk_offering_100.disksize) * (1024**3))),
+ "New size is not int((self.disk_offering_20) * (1024**3)"
+ )
+
+ # return to small disk
+ self.volume_1 = new_size[0]
+
+ name = self.volume_1.path.split("/")[3]
+ try:
+ spvolume = self.spapi.volumeList(volumeName="~" + name)
+ if spvolume[0].size != self.volume_1.size:
+ raise Exception("Storpool volume size is not the same as CloudStack db size")
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+ shrinkOk = False
+ if self.volume_1.size > int((self.disk_offerings.disksize)* (1024**3)):
+ shrinkOk= True
+
+ cmd.diskofferingid = self.disk_offerings.id
+ cmd.shrinkok = shrinkOk
+
+ self.apiclient.resizeVolume(cmd)
+ new_size = Volume.list(
+ self.apiclient,
+ id=self.volume_1.id
+ )
+
+ name = new_size[0].path.split("/")[3]
+ try:
+ spvolume = self.spapi.volumeList(volumeName="~" + name)
+ if spvolume[0].size != new_size[0].size:
+ raise Exception("Storpool volume size is not the same as CloudStack db size")
+ except spapi.ApiError as err:
+ raise Exception(err)
+ self.assertTrue(
+ (new_size[0].size == int((self.disk_offerings.disksize)*(1024**3))),
+ "Could not return to Small disk"
+ )
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_19_resize_detached_volume(self):
+ ''' Test Resize Volume Detached To Virtual Machine
+ '''
+ list_vm_volumes = Volume.list(
+ self.apiclient,
+ virtualmachineid = self.virtual_machine.id,
+ id= self.volume_2.id
+ )
+
+ #check that the volume is not attached to VM
+ self.assertIsNone(list_vm_volumes, "List volumes is not None")
+
+ shrinkOk = False
+ if self.volume_2.size > int((self.disk_offering_20.disksize) * (1024**3)):
+ shrinkOk= True
+
+ cmd = resizeVolume.resizeVolumeCmd()
+ cmd.id = self.volume_2.id
+ cmd.diskofferingid = self.disk_offering_20.id
+ cmd.shrinkok = shrinkOk
+
+ self.apiclient.resizeVolume(cmd)
+
+ new_size = Volume.list(
+ self.apiclient,
+ id=self.volume_2.id
+ )
+
+ self.assertTrue(
+ (new_size[0].size == int((self.disk_offering_20.disksize) * (1024**3))),
+ "New size is not int((self.disk_offering_20) * (1024**3)"
+ )
+ self.volume_2 = new_size[0]
+
+ name = self.volume_2.path.split("/")[3]
+ try:
+ spvolume = self.spapi.volumeList(volumeName="~" + name)
+ if spvolume[0].size != self.volume_2.size:
+ raise Exception("Storpool volume size is not the same as CloudStack db size")
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+ shrinkOk = False
+ if self.volume_2.size > int((self.disk_offering_100.disksize) * (1024**3)):
+ shrinkOk= True
+
+ cmd = resizeVolume.resizeVolumeCmd()
+ cmd.id = self.volume_2.id
+ cmd.diskofferingid = self.disk_offering_100.id
+ cmd.shrinkok = shrinkOk
+
+ self.apiclient.resizeVolume(cmd)
+ new_size = Volume.list(
+ self.apiclient,
+ id=self.volume_2.id
+ )
+
+ self.assertTrue(
+ (new_size[0].size == int((self.disk_offering_100.disksize) * (1024**3))),
+ "New size is not int((self.disk_offering_20) * (1024**3)"
+ )
+
+ # return to small disk
+ self.volume_2 = new_size[0]
+
+ name = self.volume_2.path.split("/")[3]
+ try:
+ spvolume = self.spapi.volumeList(volumeName="~" + name)
+ if spvolume[0].size != self.volume_2.size:
+ raise Exception("Storpool volume size is not the same as CloudStack db size")
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+ shrinkOk = False
+ if self.volume_2.size > int((self.disk_offerings.disksize)* (1024**3)):
+ shrinkOk= True
+
+ cmd.diskofferingid = self.disk_offerings.id
+ cmd.shrinkok = shrinkOk
+
+ self.apiclient.resizeVolume(cmd)
+ new_size = Volume.list(
+ self.apiclient,
+ id=self.volume_2.id
+ )
+
+ name = new_size[0].path.split("/")[3]
+ try:
+ spvolume = self.spapi.volumeList(volumeName="~" + name)
+ if spvolume[0].size != new_size[0].size:
+ raise Exception("Storpool volume size is not the same as CloudStack db size")
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+ self.assertTrue(
+ (new_size[0].size == int((self.disk_offerings.disksize)*(1024**3))),
+ "Could not return to Small disk"
+ )
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_20_snapshot_to_volume(self):
+ ''' Create volume from snapshot
+ '''
+ snapshot = Snapshot.create(
+ self.apiclient,
+ volume_id = self.volume_2.id,
+ account=self.account.name,
+ domainid=self.account.domainid,
+ )
+
+ try:
+ cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd()
+ cmd.snapshotid = snapshot.id
+ snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd)
+ flag = False
+ for s in snapshot_details:
+ if s["snapshotDetailsName"] == snapshot.id:
+ name = s["snapshotDetailsValue"].split("/")[3]
+ sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name)
+ flag = True
+ if flag == False:
+ raise Exception("Could not find snapshot in snapshot details")
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+ self.assertIsNotNone(snapshot, "Could not create snapshot")
+ self.assertIsInstance(snapshot, Snapshot, "Snapshot is not an instance of Snapshot")
+
+ volume = self.create_volume(
+ self.apiclient,
+ zoneid = self.zone.id,
+ snapshotid = snapshot.id,
+ account=self.account.name,
+ domainid=self.account.domainid
+ )
+
+ listvol = Volume.list(
+ self.apiclient,
+ id=volume.id
+ )
+ name = listvol[0].path.split("/")[3]
+ try:
+ spvolume = self.spapi.volumeList(volumeName="~" + name)
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+ self.assertIsNotNone(volume, "Could not create volume from snapshot")
+ self.assertIsInstance(volume, Volume, "Volume is not instance of Volume")
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_21_snapshot_detached_volume(self):
+ ''' Test Snapshot Detached Volume
+ '''
+ self.virtual_machine.stop(
+ self.apiclient,
+ forced = True
+ )
+ self.volume = self.virtual_machine.attach_volume(
+ self.apiclient,
+ self.volume
+ )
+ self.assertIsNotNone(self.volume, "Attach: Is none")
+ self.volume = self.virtual_machine.detach_volume(
+ self.apiclient,
+ self.volume
+ )
+
+ self.assertIsNotNone(self.volume, "Detach: Is none")
+
+ snapshot = Snapshot.create(
+ self.apiclient,
+ self.volume.id,
+ account=self.account.name,
+ domainid=self.account.domainid,
+ )
+
+ try:
+ cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd()
+ cmd.snapshotid = snapshot.id
+ snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd)
+ flag = False
+ for s in snapshot_details:
+ if s["snapshotDetailsName"] == snapshot.id:
+ name = s["snapshotDetailsValue"].split("/")[3]
+ sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name)
+ flag = True
+ if flag == False:
+ raise Exception("Could not find snapshot in snapshot details")
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+ self.assertIsNotNone(snapshot, "Snapshot is None")
+
+ self.assertIsInstance(snapshot, Snapshot, "Snapshot is not Instance of Snappshot")
+
+ snapshot = Snapshot.delete(
+ snapshot,
+ self.apiclient
+ )
+
+ self.assertIsNone(snapshot, "Snapshot was not deleted")
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_22_snapshot_root_disk(self):
+ ''' Test ROOT Disk Snapshot
+ '''
+ vm = VirtualMachine.create(self.apiclient,
+ {"name":"StorPool-%s" % uuid.uuid4() },
+ accountid=self.account.name,
+ domainid=self.account.domainid,
+ zoneid = self.zone.id,
+ templateid = self.template.id,
+ serviceofferingid = self.service_offering.id,
+ hypervisor = self.hypervisor,
+ rootdisksize = 10
+ )
+ list_volumes_of_vm = list_volumes(
+ self.apiclient,
+ virtualmachineid = vm.id,
+ listall = True,
+ )
+ self.assertIs(len(list_volumes_of_vm), 1, "VM has more disk than 1")
+
+ snapshot = Snapshot.create(
+ self.apiclient,
+ list_volumes_of_vm[0].id,
+ account=self.account.name,
+ domainid=self.account.domainid,
+ )
+
+ try:
+ cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd()
+ cmd.snapshotid = snapshot.id
+ snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd)
+ flag = False
+ for s in snapshot_details:
+ if s["snapshotDetailsName"] == snapshot.id:
+ name = s["snapshotDetailsValue"].split("/")[3]
+ sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name)
+ flag = True
+ if flag == False:
+ raise Exception("Could not find snapshot in snapshot details")
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+ self.assertIsNotNone(snapshot, "Snapshot is None")
+
+ self.assertEqual(list_volumes_of_vm[0].id, snapshot.volumeid, "Snapshot is not for the same volume")
+
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_23_volume_to_template(self):
+ ''' Create Template From ROOT Volume
+ '''
+ volume = Volume.list(
+ self.apiclient,
+ virtualmachineid = self.virtual_machine.id,
+ type = "ROOT",
+ listall = True,
+ )
+
+ self.virtual_machine.stop(self.apiclient)
+
+ template = self.create_template_from_snapshot(
+ self.apiclient,
+ self.services,
+ volumeid = volume[0].id
+ )
+
+ virtual_machine = VirtualMachine.create(self.apiclient,
+ {"name":"StorPool-%s" % uuid.uuid4() },
+ accountid=self.account.name,
+ domainid=self.account.domainid,
+ zoneid=self.zone.id,
+ templateid=template.id,
+ serviceofferingid=self.service_offering.id,
+ hypervisor=self.hypervisor,
+ rootdisksize=10
+ )
+ ssh_client = virtual_machine.get_ssh_client()
+ self.assertIsNotNone(template, "Template is None")
+ self.assertIsInstance(template, Template, "Template is instance of template")
+ self._cleanup.append(template)
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_24_migrate_vm_to_another_storage(self):
+ ''' Migrate VM to another Primary Storage
+ '''
+ list_volumes_of_vm = list_volumes(
+ self.apiclient,
+ virtualmachineid = self.vm_migrate.id,
+ listall = True,
+ )
+
+ self.assertTrue(len(list_volumes_of_vm) == 1, "There are more volumes attached to VM")
+
+ if list_volumes_of_vm[0].storageid is self.primary_storage.id:
+ cmd = migrateVirtualMachine.migrateVirtualMachineCmd()
+ cmd.virtualmachineid = self.vm_migrate.id
+ if hostid:
+ cmd.hostid = hostid
+ vm = apiclient.migrateVirtualMachine(cmd)
+ volume = list_volumes(
+ self.apiclient,
+ virtualmachineid = vm.id
+ )[0]
+ self.assertNotEqual(volume.storageid, self.primary_storage.id, "Could not migrate VM")
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_25_migrate_volume_to_another_storage(self):
+ ''' Migrate Volume To Another Primary Storage
+ '''
+ self.assertFalse(hasattr(self.volume, 'virtualmachineid') , "Volume is not detached")
+
+ self.assertFalse(hasattr(self.volume, 'storageid') , "Volume is not detached")
+ volume = Volume.migrate(
+ self.apiclient,
+ volumeid = self.volume.id,
+ storageid = self.primary_storage2.id
+ )
+
+ self.assertIsNotNone(volume, "Volume is None")
+
+ self.assertEqual(volume.storageid, self.primary_storage2.id, "Storage is the same")
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_26_create_vm_on_another_storpool_storage(self):
+ """ Create Virtual Machine on another StorPool primary StoragePool"""
+ virtual_machine = VirtualMachine.create(self.apiclient,
+ {"name":"StorPool-%s" % uuid.uuid4() },
+ accountid=self.account.name,
+ domainid=self.account.domainid,
+ zoneid=self.zone.id,
+ templateid=self.template.id,
+ serviceofferingid=self.service_offering2.id,
+ hypervisor=self.hypervisor,
+ rootdisksize=10
+ )
+ self.assertIsNotNone(virtual_machine, "Could not create virtual machine on another Storpool primary storage")
+
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_27_snapshot_to_volume_of_root_disk(self):
+ ''' Create volume from snapshot
+ '''
+ virtual_machine = VirtualMachine.create(self.apiclient,
+ {"name":"StorPool-%s" % uuid.uuid4() },
+ accountid=self.account.name,
+ domainid=self.account.domainid,
+ zoneid=self.zone.id,
+ templateid=self.template.id,
+ serviceofferingid=self.service_offering.id,
+ hypervisor=self.hypervisor,
+ rootdisksize=10
+ )
+ volume1 = list_volumes(
+ self.apiclient,
+ virtualmachineid = self.virtual_machine.id,
+ type = "ROOT",
+ listall = True,
+ )
+ snapshot = Snapshot.create(
+ self.apiclient,
+ volume_id = volume1[0].id,
+ account=self.account.name,
+ domainid=self.account.domainid,
+ )
+
+ self.assertIsNotNone(snapshot, "Could not create snapshot")
+ self.assertIsInstance(snapshot, Snapshot, "Snapshot is not an instance of Snapshot")
+
+ volume = self.create_volume(
+ self.apiclient,
+ zoneid = self.zone.id,
+ snapshotid = snapshot.id,
+ account=self.account.name,
+ domainid=self.account.domainid
+ )
+
+ self.assertIsNotNone(volume, "Could not create volume from snapshot")
+ self.assertIsInstance(volume, Volume, "Volume is not instance of Volume")
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_28_download_volume(self):
+ vol = self.volume.extract(
+ self.apiclient,
+ volume_id = self.volume.id,
+ zoneid = self.zone.id,
+ mode = "HTTP_DOWNLOAD"
+ )
+ self.assertIsNotNone(vol, "Volume is None")
+ self.assertIsNotNone(vol.url, "No URL provided")
+ Volume.delete(vol, self.apiclient)
+
+ @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
+ def test_29_create_vm_from_template_not_on_storpool(self):
+ ''' Create virtual machine from template which for some reason is deleted from StorPool, but exists in template_spoool_ref DB tables '''
+
+ volume = Volume.list(
+ self.apiclient,
+ virtualmachineid = self.virtual_machine.id,
+ type = "ROOT",
+ listall = True,
+ )
+
+ self.virtual_machine.stop(self.apiclient)
+
+ template = self.create_template_from_snapshot(
+ self.apiclient,
+ self.services,
+ volumeid = volume[0].id
+ )
+
+ virtual_machine = VirtualMachine.create(self.apiclient,
+ {"name":"StorPool-%s" % uuid.uuid4() },
+ accountid=self.account.name,
+ domainid=self.account.domainid,
+ zoneid=self.zone.id,
+ templateid=template.id,
+ serviceofferingid=self.service_offering.id,
+ hypervisor=self.hypervisor,
+ rootdisksize=10
+ )
+ ssh_client = virtual_machine.get_ssh_client(reconnect= True)
+ name = 'ssd-' + template.id
+ flag = False
+ storpoolGlId = None
+
+ sp_snapshots = self.spapi.snapshotsList()
+ for snap in sp_snapshots:
+ tags = snap.tags
+ for t in tags:
+ if tags[t] == template.id:
+ storpoolGlId = snap.globalId
+ flag = True
+ break
+ else:
+ continue
+ break
+
+ if flag is False:
+ try:
+ sp_snapshot = self.spapi.snapshotList(snapshotName = name)
+ except spapi.ApiError as err:
+ raise Exception(err)
+
+
+ self.spapi.snapshotDelete(snapshotName ="~" + storpoolGlId)
+
+ virtual_machine2 = VirtualMachine.create(self.apiclient,
+ {"name":"StorPool-%s" % uuid.uuid4() },
+ accountid=self.account.name,
+ domainid=self.account.domainid,
+ zoneid=self.zone.id,
+ templateid=template.id,
+ serviceofferingid=self.service_offering.id,
+ hypervisor=self.hypervisor,
+ rootdisksize=10
+ )
+
+ ssh_client = virtual_machine2.get_ssh_client(reconnect= True)
+ self.assertIsNotNone(template, "Template is None")
+ self.assertIsInstance(template, Template, "Template is instance of template")
+ self._cleanup.append(template)
+
+ @classmethod
+ def create_volume(self, apiclient, zoneid=None, snapshotid=None, account=None, domainid=None):
+ """Create Volume"""
+ cmd = createVolume.createVolumeCmd()
+ cmd.name = "Test"
+
+ if zoneid:
+ cmd.zoneid = zoneid
+
+ if snapshotid:
+ cmd.snapshotid = snapshotid
+
+ if account:
+ cmd.account=account
+
+ if domainid:
+ cmd.domainid=domainid
+ return Volume(apiclient.createVolume(cmd).__dict__)
+
+ @classmethod
+ def get_local_cluster(cls):
+ storpool_clusterid = subprocess.check_output(['storpool_confshow', 'CLUSTER_ID'])
+ clusterid = storpool_clusterid.split("=")
+ cls.debug(storpool_clusterid)
+ clusters = list_clusters(cls.apiclient)
+ for c in clusters:
+ configuration = list_configurations(
+ cls.apiclient,
+ clusterid = c.id
+ )
+ for conf in configuration:
+ if conf.name == 'sp.cluster.id' and (conf.value in clusterid[1]):
+ return c
+
+ @classmethod
+ def get_remote_cluster(cls):
+ storpool_clusterid = subprocess.check_output(['storpool_confshow', 'CLUSTER_ID'])
+ clusterid = storpool_clusterid.split("=")
+ cls.debug(storpool_clusterid)
+ clusters = list_clusters(cls.apiclient)
+ for c in clusters:
+ configuration = list_configurations(
+ cls.apiclient,
+ clusterid = c.id
+ )
+ for conf in configuration:
+ if conf.name == 'sp.cluster.id' and (conf.value not in clusterid[1]):
+ return c
+
+ @classmethod
+ def list_hosts_by_cluster_id(cls, clusterid):
+ """List all Hosts matching criteria"""
+ cmd = listHosts.listHostsCmd()
+ cmd.clusterid = clusterid
+ return(cls.apiclient.listHosts(cmd))
+
+
+ def start(cls, vmid, hostid):
+ """Start the instance"""
+ cmd = startVirtualMachine.startVirtualMachineCmd()
+ cmd.id = vmid
+ cmd.hostid = hostid
+ return (cls.apiclient.startVirtualMachine(cmd))
+
+
+ @classmethod
+ def create_template_from_snapshot(self, apiclient, services, snapshotid=None, volumeid=None):
+ """Create template from Volume"""
+ # Create template from Virtual machine and Volume ID
+ cmd = createTemplate.createTemplateCmd()
+ cmd.displaytext = "StorPool_Template"
+ cmd.name = "-".join(["StorPool-", random_gen()])
+ if "ostypeid" in services:
+ cmd.ostypeid = services["ostypeid"]
+ elif "ostype" in services:
+ # Find OSTypeId from Os type
+ sub_cmd = listOsTypes.listOsTypesCmd()
+ sub_cmd.description = services["ostype"]
+ ostypes = apiclient.listOsTypes(sub_cmd)
+
+ if not isinstance(ostypes, list):
+ raise Exception(
+ "Unable to find Ostype id with desc: %s" %
+ services["ostype"])
+ cmd.ostypeid = ostypes[0].id
+ else:
+ raise Exception(
+ "Unable to find Ostype is required for creating template")
+
+ cmd.isfeatured = True
+ cmd.ispublic = True
+ cmd.isextractable = False
... 1729 lines suppressed ...