You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@cloudstack.apache.org by al...@apache.org on 2012/06/29 02:45:07 UTC

[24/50] [abbrv] git commit: add clouddev

add clouddev


Project: http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/commit/5f6387e1
Tree: http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/tree/5f6387e1
Diff: http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/diff/5f6387e1

Branch: refs/heads/vpc
Commit: 5f6387e113871554c2947b5b0bc2fa8678c7b453
Parents: 3938c24
Author: Edison Su <su...@gmail.com>
Authored: Wed Jun 27 23:28:34 2012 -0700
Committer: Edison Su <su...@gmail.com>
Committed: Wed Jun 27 23:28:34 2012 -0700

----------------------------------------------------------------------
 build.xml                                          |    1 +
 build/build-clouddev.xml                           |   93 +
 .../xen/discoverer/XcpServerDiscoverer.java        |   81 +-
 .../xen/resource/CitrixResourceBase.java           |   44 +-
 .../hypervisor/xen/resource/XcpOssResource.java    |  127 ++
 scripts/vm/hypervisor/xenserver/xcposs/NFSSR.py    |  258 +++
 .../xcposs/copy_vhd_from_secondarystorage.sh       |  184 ++
 .../xcposs/copy_vhd_to_secondarystorage.sh         |  126 ++
 .../xcposs/create_privatetemplate_from_snapshot.sh |  134 ++
 scripts/vm/hypervisor/xenserver/xcposs/patch       |   49 +
 scripts/vm/hypervisor/xenserver/xcposs/vmops       | 1519 +++++++++++++++
 .../vm/hypervisor/xenserver/xcposs/vmopsSnapshot   |  552 ++++++
 .../vm/hypervisor/xenserver/xcposs/vmopspremium    |  129 ++
 .../consoleproxy/ConsoleProxyManagerImpl.java      |    4 +-
 .../router/VirtualNetworkApplianceManagerImpl.java |    4 +-
 .../secondary/SecondaryStorageManagerImpl.java     |    4 +-
 server/src/com/cloud/test/DatabaseConfig.java      |    2 +
 setup/db/clouddev.sql                              |   33 +
 setup/db/deploy-db-clouddev.sh                     |   23 +
 19 files changed, 3322 insertions(+), 45 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/5f6387e1/build.xml
----------------------------------------------------------------------
diff --git a/build.xml b/build.xml
index 4004117..9e217ff 100755
--- a/build.xml
+++ b/build.xml
@@ -27,6 +27,7 @@
     <import file="${base.dir}/build/build-marvin.xml" optional="true"/>
     <import file="${base.dir}/build/package.xml" optional="true"/>
     <import file="${base.dir}/build/developer.xml" optional="true"/>
+    <import file="${base.dir}/build/build-clouddev.xml" optional="true"/>
     <import file="${base.dir}/build/build-usage.xml" optional="false"/>
     <import file="${base.dir}/build/build-aws-api.xml" optional="false"/>
 </project>

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/5f6387e1/build/build-clouddev.xml
----------------------------------------------------------------------
diff --git a/build/build-clouddev.xml b/build/build-clouddev.xml
new file mode 100644
index 0000000..d557ef6
--- /dev/null
+++ b/build/build-clouddev.xml
@@ -0,0 +1,93 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+
+<project name="CloudDev Targets" basedir=".">
+
+
+  <condition property="port" value="${rport}" else="2222">
+      <isset property="rport"/>
+  </condition>
+
+  <condition property="host" value="${rhost}" else="localhost">
+      <isset property="rhost"/>
+  </condition>
+
+  <target name="deploydbIfSet" if="deploydb.is.set">
+    <echo message="ant deploydb"/>
+    <sshexec host="${host}" port="${port}" username="root" password="password" command="echo $CATALINA_HOME; export CATALINA_HOME=/opt/apache-tomcat-6.0.32;killall java;cd /opt/incubator-cloudstack;ant deploycddb"/>
+  </target>
+
+  <target name="rdeploydb">
+    <echo message="ant rdeploydb"/>
+    <sshexec host="${host}" port="${port}" username="root" password="password" command="echo $CATALINA_HOME; export CATALINA_HOME=/opt/apache-tomcat-6.0.32;killall java;cd /opt/incubator-cloudstack;ant deploycddb"/>
+  </target>
+
+  <target name="deploycddb" description="deploy specific db configuration for clouddev" depends="deploydb">
+    <exec dir="${db.scripts.dir}" executable="bash">
+      <arg value="deploy-db-clouddev.sh" />
+    </exec>
+  </target>
+
+  <target name="rdebug-suspend" >
+    <echo message="ant debug-suspend"/>
+    <sshexec host="${host}" port="${port}" username="root" password="password" command="killall java;sleep 1;echo $CATALINA_HOME; export CATALINA_HOME=/opt/apache-tomcat-6.0.32;cd /opt/incubator-cloudstack;ant deploy-server;ant debug-suspend"/>
+  </target>
+  
+  <target name="rdebug">
+    <echo message="ant debug"/>
+    <sshexec host="${host}" port="${port}" username="root" password="password" command="killall java;sleep 1;echo $CATALINA_HOME; export CATALINA_HOME=/opt/apache-tomcat-6.0.32;cd /opt/incubator-cloudstack;ant deploy-server; ant debug"/>
+  </target>
+
+
+  <target name="rdeploy" description="deploy to remote">
+    <condition property="zip.uptodate">
+      <available file="${deploy.work.dir}/client.zip" type="file"/>
+    </condition>
+    
+
+
+    <echo message="copying build folder to remote"/>
+    <scp trust="yes" port="${port}" todir="root:password@${host}:/opt/incubator-cloudstack/build">   
+      <fileset dir="build">
+      </fileset>
+    </scp>
+
+    <echo message="copying deps folder to remote"/>
+    <scp trust="yes" port="${port}" todir="root:password@${host}:/opt/incubator-cloudstack/deps">   
+      <fileset dir="deps">
+      </fileset>
+    </scp>
+
+    <echo message="copying target folder to remote"/>
+    <scp trust="yes" port="${port}" todir="root:password@${host}:/opt/incubator-cloudstack/target">   
+      <fileset dir="target">
+      </fileset>
+    </scp>
+
+    <echo message="copying dist folder to remote"/>
+    <scp trust="yes" port="${port}" todir="root:password@${host}:/opt/incubator-cloudstack/dist">   
+      <fileset dir="dist">
+      </fileset>
+    </scp>
+   
+    <sshexec host="${host}" port="${port}" username="root" password="password" command="echo $CATALINA_HOME; export CATALINA_HOME=/opt/apache-tomcat-6.0.32;cd /opt/incubator-cloudstack;ant deploy-server"/>
+
+  </target>
+  
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/5f6387e1/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java
index 8a6c605..4fd202b 100755
--- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java
+++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java
@@ -59,6 +59,7 @@ import com.cloud.host.Status;
 import com.cloud.hypervisor.Hypervisor;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.hypervisor.xen.resource.CitrixResourceBase;
+import com.cloud.hypervisor.xen.resource.XcpOssResource;
 import com.cloud.hypervisor.xen.resource.XcpServerResource;
 import com.cloud.hypervisor.xen.resource.XenServer56FP1Resource;
 import com.cloud.hypervisor.xen.resource.XenServer56Resource;
@@ -253,8 +254,15 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L
                 String hostAddr = record.address;
                 
                 String prodVersion = record.softwareVersion.get("product_version");
+                if (prodVersion == null) {
+                	prodVersion = record.softwareVersion.get("platform_version");
+                }
                 String xenVersion = record.softwareVersion.get("xen");
                 String hostOS = record.softwareVersion.get("product_brand");
+                if (hostOS == null) {
+                	hostOS = record.softwareVersion.get("platform_name");
+                }
+                
                 String hostOSVer = prodVersion;
                 String hostKernelVer = record.softwareVersion.get("linux");
 
@@ -400,34 +408,49 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L
     }
     
     protected CitrixResourceBase createServerResource(long dcId, Long podId, Host.Record record) {
-        String prodBrand = record.softwareVersion.get("product_brand").trim();
-        String prodVersion = record.softwareVersion.get("product_version").trim();
-        
-        if(prodBrand.equals("XCP") && (prodVersion.equals("1.0.0") ||  prodVersion.equals("1.1.0") || prodVersion.equals("5.6.100") || prodVersion.startsWith("1.4") )) 
-        	return new XcpServerResource();
+    	String prodBrand = record.softwareVersion.get("product_brand");
+    	if (prodBrand == null) {
+    		prodBrand = record.softwareVersion.get("platform_name").trim();
+    	} else {
+    		prodBrand = prodBrand.trim();
+    	}
+    	String prodVersion = record.softwareVersion.get("product_version");
+    	if (prodVersion == null) {
+    		prodVersion = record.softwareVersion.get("platform_version").trim();
+    	} else {
+    		prodVersion = prodVersion.trim();
+    	}
 
-        if(prodBrand.equals("XenServer") && prodVersion.equals("5.6.0")) 
-        	return new XenServer56Resource();
-        
-        if (prodBrand.equals("XenServer") && prodVersion.equals("6.0.0"))
-            return new XenServer600Resource();
-        
-        if (prodBrand.equals("XenServer") && prodVersion.equals("6.0.2"))
-            return new XenServer602Resource();
+    	if(prodBrand.equals("XCP") && (prodVersion.equals("1.0.0") ||  prodVersion.equals("1.1.0") || prodVersion.equals("5.6.100") || prodVersion.startsWith("1.4") )) 
+    		return new XcpServerResource();
+
+    	if(prodBrand.equals("XenServer") && prodVersion.equals("5.6.0")) 
+    		return new XenServer56Resource();
+
+    	if (prodBrand.equals("XenServer") && prodVersion.equals("6.0.0"))
+    		return new XenServer600Resource();
+
+    	if (prodBrand.equals("XenServer") && prodVersion.equals("6.0.2"))
+    		return new XenServer602Resource();
+
+    	if(prodBrand.equals("XenServer") && prodVersion.equals("5.6.100"))  {
+    		String prodVersionTextShort = record.softwareVersion.get("product_version_text_short").trim();
+    		if("5.6 SP2".equals(prodVersionTextShort)) {
+    			return new XenServer56SP2Resource();
+    		} else if("5.6 FP1".equals(prodVersionTextShort)) {
+    			return new XenServer56FP1Resource();
+    		}
+    	}
+    	
+    	if (prodBrand.equals("XCP_Kronos")) {
+    		return new XcpOssResource();
+    	}
+    	
+    	String msg = "Only support XCP 1.0.0, 1.1.0, 1.5 beta; XenServer 5.6,  XenServer 5.6 FP1, XenServer 5.6 SP2, Xenserver 6.0, 6.0.2 but this one is " + prodBrand + " " + prodVersion;
+    			_alertMgr.sendAlert(AlertManager.ALERT_TYPE_HOST, dcId, podId, msg, msg);
+    	s_logger.debug(msg);
+    	throw new RuntimeException(msg);
 
-        if(prodBrand.equals("XenServer") && prodVersion.equals("5.6.100"))  {
-            String prodVersionTextShort = record.softwareVersion.get("product_version_text_short").trim();
-            if("5.6 SP2".equals(prodVersionTextShort)) {
-                return new XenServer56SP2Resource();
-            } else if("5.6 FP1".equals(prodVersionTextShort)) {
-                return new XenServer56FP1Resource();
-            }
-        }
-        
-        String msg = "Only support XCP 1.0.0, 1.1.0, 1.5 beta; XenServer 5.6,  XenServer 5.6 FP1, XenServer 5.6 SP2, Xenserver 6.0, 6.0.2 but this one is " + prodBrand + " " + prodVersion;
-        _alertMgr.sendAlert(AlertManager.ALERT_TYPE_HOST, dcId, podId, msg, msg);
-        s_logger.debug(msg);
-        throw new RuntimeException(msg);
     }
     
     protected void serverConfig() {      
@@ -457,8 +480,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L
         Boolean.parseBoolean(value);
 
         value = _params.get("xen.check.hvm");
-        _checkHvm = value == null ? true : Boolean.parseBoolean(value);
-        
+        _checkHvm = false;
         _connPool = XenServerConnectionPool.getInstance();
         
         _agentMgr.registerForHostEvents(this, true, false, true);
@@ -567,7 +589,10 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L
             } else if("5.6 FP1".equals(prodVersionTextShort)) {
                 resource = XenServer56FP1Resource.class.getName();
             }
+        } else if (prodBrand.equals("XCP_Kronos")) {
+        	resource = XcpOssResource.class.getName();
         }
+        
         if( resource == null ){
             String msg = "Only support XCP 1.0.0, 1.1.0, 1.5 beta; XenServer 5.6, 5.6 FP1, 5.6 SP2 and Xenserver 6.0 , 6.0.2 but this one is " + prodBrand + " " + prodVersion;
             s_logger.debug(msg);

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/5f6387e1/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
index 3917242..496aeb3 100644
--- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
+++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
@@ -286,7 +286,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
     protected List<VIF> _tmpDom0Vif = new ArrayList<VIF>();
 
     public enum SRType {
-        NFS, LVM, ISCSI, ISO, LVMOISCSI, LVMOHBA, EXT;
+        NFS, LVM, ISCSI, ISO, LVMOISCSI, LVMOHBA, EXT, FILE;
 
         String _str;
 
@@ -1066,7 +1066,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
     }
 
     protected VBD createPatchVbd(Connection conn, String vmName, VM vm) throws XmlRpcException, XenAPIException {
-
+    	
         if(  _host.systemvmisouuid == null ) {
             Set<SR> srs = SR.getByNameLabel(conn, "XenServer Tools");
             if( srs.size() != 1 ) {
@@ -1100,8 +1100,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         cdromVBDR.type = Types.VbdType.CD;
         VBD cdromVBD = VBD.create(conn, cdromVBDR);
         cdromVBD.insert(conn, VDI.getByUuid(conn, _host.systemvmisouuid));
-
-        return cdromVBD;
+	
+    	return cdromVBD;
     }
 
     protected void destroyPatchVbd(Connection conn, String vmName) throws XmlRpcException, XenAPIException {
@@ -3870,7 +3870,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             Map<SR, SR.Record> map = SR.getAllRecords(conn);
             for (Map.Entry<SR, SR.Record> entry : map.entrySet()) {
                 SR.Record srRec = entry.getValue();
-                if (SRType.EXT.equals(srRec.type)) {
+                if (SRType.FILE.equals(srRec.type) || SRType.EXT.equals(srRec.type)) {
                     Set<PBD> pbds = srRec.PBDs;
                     if (pbds == null) {
                         continue;
@@ -3902,6 +3902,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         SR lvmsr = getLocalLVMSR(conn);
         if (lvmsr != null) {
             try {
+            	_host.localSRuuid = lvmsr.getUuid(conn);
+            	
                 String lvmuuid = lvmsr.getUuid(conn);
                 long cap = lvmsr.getPhysicalSize(conn);
                 if (cap > 0) {
@@ -3932,6 +3934,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         if (extsr != null) {
             try {
                 String extuuid = extsr.getUuid(conn);
+                _host.localSRuuid = extuuid;
                 long cap = extsr.getPhysicalSize(conn);
                 if (cap > 0) {
                     long avail = cap - extsr.getPhysicalUtilisation(conn);
@@ -3956,6 +3959,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                 s_logger.warn(msg);
             }
         }
+        
         return null;
     }
 
@@ -4033,7 +4037,13 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                 break;
             }
             Host.Record hr = myself.getRecord(conn);
-            _host.product_version = hr.softwareVersion.get("product_version").trim();
+            
+            _host.product_version = hr.softwareVersion.get("product_version");
+            if (_host.product_version == null) {
+            	_host.product_version = hr.softwareVersion.get("platform_version");
+            } else {
+            	_host.product_version = _host.product_version.trim();
+            }
 
             XsLocalNetwork privateNic = getManagementNetwork(conn);
             _privateNetworkName = privateNic.getNetworkRecord(conn).nameLabel;
@@ -4493,8 +4503,10 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             } finally {
                 sshConnection.close();
             }
+            
             hr.tags.add("vmops-version-" + version);
             host.setTags(conn, hr.tags);
+            
             return true;
         } catch (XenAPIException e) {
             String msg = "Xen setup failed due to " + e.toString();
@@ -5106,13 +5118,19 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             if (details == null) {
                 details = new HashMap<String, String>();
             }
-            details.put("product_brand", hr.softwareVersion.get("product_brand"));
-            details.put("product_version", hr.softwareVersion.get("product_version"));
+
+            String productBrand = hr.softwareVersion.get("product_brand");
+            if (productBrand == null) {
+            	productBrand = hr.softwareVersion.get("platform_name");
+            }
+            details.put("product_brand", productBrand);
+            details.put("product_version", _host.product_version);
+
             if( hr.softwareVersion.get("product_version_text_short") != null ) {
                 details.put("product_version_text_short", hr.softwareVersion.get("product_version_text_short"));
                 cmd.setHypervisorVersion(hr.softwareVersion.get("product_version_text_short"));                
             }else{
-                cmd.setHypervisorVersion(hr.softwareVersion.get("product_version"));
+            	cmd.setHypervisorVersion(_host.product_version);
             }
             if (_privateNetworkName != null) {
                 details.put("private.network.device", _privateNetworkName);
@@ -5165,9 +5183,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                 cmd.setPrivateMacAddress(pifr.MAC);
                 cmd.setPrivateNetmask(pifr.netmask);
             } else {
-                String msg = "Private network " + _privateNetworkName + " doesn't have IP address, please check the host network configuration";
-                s_logger.error(msg);
-                throw new CloudRuntimeException(msg);
+            	 cmd.setPrivateIpAddress(_host.ip);
+                 cmd.setPrivateMacAddress(pifr.MAC);
+                 cmd.setPrivateNetmask("255.255.255.0");
             }
 
             pif = PIF.getByUuid(conn, _host.storagePif1);
@@ -5330,7 +5348,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                 vdir.virtualSize = dskch.getSize();
                 vdi = VDI.create(conn, vdir);
             }
-
             VDI.Record vdir;
             vdir = vdi.getRecord(conn);
             s_logger.debug("Succesfully created VDI for " + cmd + ".  Uuid = " + vdir.uuid);
@@ -6764,6 +6781,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         public int speed;
         public int cpus;
         public String product_version;
+        public String localSRuuid;
 
         @Override
         public String toString() {

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/5f6387e1/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpOssResource.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpOssResource.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpOssResource.java
new file mode 100644
index 0000000..ef44f5e
--- /dev/null
+++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpOssResource.java
@@ -0,0 +1,127 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.hypervisor.xen.resource;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import javax.ejb.Local;
+
+import org.apache.log4j.Logger;
+import org.apache.xmlrpc.XmlRpcException;
+
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.Command;
+import com.cloud.agent.api.NetworkUsageAnswer;
+import com.cloud.agent.api.NetworkUsageCommand;
+import com.cloud.agent.api.StartupRoutingCommand;
+import com.cloud.resource.ServerResource;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.utils.script.Script;
+import com.xensource.xenapi.Connection;
+import com.xensource.xenapi.Types;
+import com.xensource.xenapi.VBD;
+import com.xensource.xenapi.VDI;
+import com.xensource.xenapi.VM;
+import com.xensource.xenapi.Types.XenAPIException;
+
+@Local(value=ServerResource.class)
+public class XcpOssResource extends CitrixResourceBase {
+	 private final static Logger s_logger = Logger.getLogger(XcpServerResource.class);
+    @Override
+    protected List<File> getPatchFiles() {
+        List<File> files = new ArrayList<File>();
+        String patch = "scripts/vm/hypervisor/xenserver/xcposs/patch";
+        String patchfilePath = Script.findScript("", patch);
+        if (patchfilePath == null) {
+            throw new CloudRuntimeException("Unable to find patch file " + patch);
+        }
+        File file = new File(patchfilePath);
+        files.add(file);
+        return files;
+    }
+    
+    @Override
+	protected void fillHostInfo(Connection conn, StartupRoutingCommand cmd) {
+    	super.fillHostInfo(conn, cmd);
+    	cmd.setCaps(cmd.getCapabilities() + " , hvm");
+    }
+    
+    @Override
+    protected String getGuestOsType(String stdType, boolean bootFromCD) {
+        return CitrixHelper.getXcpGuestOsType(stdType);
+    }
+    
+    protected VBD createPatchVbd(Connection conn, String vmName, VM vm) throws XmlRpcException, XenAPIException {
+    	if (_host.localSRuuid != null) {
+    		//create an iso vdi on it
+    		String result = callHostPlugin(conn, "vmops", "createISOVHD", "uuid", _host.localSRuuid);
+    		if (result == null || result.equalsIgnoreCase("Failed")) {
+    			 throw new CloudRuntimeException("can not create systemvm vdi");
+    		}
+    		
+    		Set<VDI> vdis = VDI.getByNameLabel(conn, "systemvm-vdi");
+    		if (vdis.size() != 1) {
+    			throw new CloudRuntimeException("can not find systemvmiso");
+    		}
+    		VDI systemvmVDI = vdis.iterator().next();
+    		
+    		VBD.Record cdromVBDR = new VBD.Record();
+            cdromVBDR.VM = vm;
+            cdromVBDR.empty = false;
+            cdromVBDR.bootable = false;
+            cdromVBDR.userdevice = "3";
+            cdromVBDR.mode = Types.VbdMode.RO;
+            cdromVBDR.type = Types.VbdType.DISK;
+            cdromVBDR.VDI = systemvmVDI;
+            VBD cdromVBD = VBD.create(conn, cdromVBDR);
+            return cdromVBD;
+    	} else {
+    		 throw new CloudRuntimeException("can not find local sr");
+    	}
+    }
+    
+
+    protected NetworkUsageAnswer execute(NetworkUsageCommand cmd) {
+        try {
+            Connection conn = getConnection();
+            if(cmd.getOption()!=null && cmd.getOption().equals("create") ){
+                String result = networkUsage(conn, cmd.getPrivateIP(), "create", null);
+                NetworkUsageAnswer answer = new NetworkUsageAnswer(cmd, result, 0L, 0L);
+                return answer;
+            }
+            long[] stats = getNetworkStats(conn, cmd.getPrivateIP());
+            NetworkUsageAnswer answer = new NetworkUsageAnswer(cmd, "", stats[0], stats[1]);
+            return answer;
+        } catch (Exception ex) {
+            s_logger.warn("Failed to get network usage stats due to ", ex);
+            return new NetworkUsageAnswer(cmd, ex); 
+        }
+    }
+    
+    @Override
+    public Answer executeRequest(Command cmd) {
+        if (cmd instanceof NetworkUsageCommand) {
+            return execute((NetworkUsageCommand) cmd);
+        } else {
+            return super.executeRequest(cmd);
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/5f6387e1/scripts/vm/hypervisor/xenserver/xcposs/NFSSR.py
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/xcposs/NFSSR.py b/scripts/vm/hypervisor/xenserver/xcposs/NFSSR.py
new file mode 100644
index 0000000..f18a12e
--- /dev/null
+++ b/scripts/vm/hypervisor/xenserver/xcposs/NFSSR.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+# Copyright 2012 Citrix Systems, Inc. Licensed under the
+# Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License.  Citrix Systems, Inc.
+# reserves all rights not expressly granted by the License.
+# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# 
+# Automatically generated by addcopyright.py at 04/03/2012
+# FileSR: local-file storage repository
+
+import SR, VDI, SRCommand, FileSR, util
+import errno
+import os, re, sys, stat
+import time
+import xml.dom.minidom
+import xs_errors
+import nfs
+import vhdutil
+from lock import Lock
+import cleanup
+
+CAPABILITIES = ["SR_PROBE","SR_UPDATE", "SR_CACHING", \
+                "VDI_CREATE","VDI_DELETE","VDI_ATTACH","VDI_DETACH", \
+                "VDI_UPDATE", "VDI_CLONE","VDI_SNAPSHOT","VDI_RESIZE", \
+                "VDI_RESIZE_ONLINE", "VDI_RESET_ON_BOOT", "ATOMIC_PAUSE"]
+
+CONFIGURATION = [ [ 'server', 'hostname or IP address of NFS server (required)' ], \
+                  [ 'serverpath', 'path on remote server (required)' ] ]
+
+                  
+DRIVER_INFO = {
+    'name': 'NFS VHD',
+    'description': 'SR plugin which stores disks as VHD files on a remote NFS filesystem',
+    'vendor': 'Citrix Systems Inc',
+    'copyright': '(C) 2008 Citrix Systems Inc',
+    'driver_version': '1.0',
+    'required_api_version': '1.0',
+    'capabilities': CAPABILITIES,
+    'configuration': CONFIGURATION
+    }
+
+
+# The mountpoint for the directory when performing an sr_probe.  All probes
+PROBE_MOUNTPOINT = "probe"
+NFSPORT = 2049
+DEFAULT_TRANSPORT = "tcp"
+
+
+class NFSSR(FileSR.FileSR):
+    """NFS file-based storage repository"""
+    def handles(type):
+        return type == 'nfs'
+    handles = staticmethod(handles)
+
+
+    def load(self, sr_uuid):
+        self.ops_exclusive = FileSR.OPS_EXCLUSIVE
+        self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
+        self.sr_vditype = SR.DEFAULT_TAP
+        if not self.dconf.has_key('server'):
+            raise xs_errors.XenError('ConfigServerMissing')
+        self.remoteserver = self.dconf['server']
+        self.path = os.path.join(SR.MOUNT_BASE, sr_uuid)
+
+        # Test for the optional 'nfsoptions' dconf attribute
+        self.transport = DEFAULT_TRANSPORT
+        if self.dconf.has_key('useUDP') and self.dconf['useUDP'] == 'true':
+            self.transport = "udp"
+
+
+    def validate_remotepath(self, scan):
+        if not self.dconf.has_key('serverpath'):
+            if scan:
+                try:
+                    self.scan_exports(self.dconf['server'])
+                except:
+                    pass
+            raise xs_errors.XenError('ConfigServerPathMissing')
+        if not self._isvalidpathstring(self.dconf['serverpath']):
+            raise xs_errors.XenError('ConfigServerPathBad', \
+                  opterr='serverpath is %s' % self.dconf['serverpath'])
+
+    def check_server(self):
+        try:
+            nfs.check_server_tcp(self.remoteserver)
+        except nfs.NfsException, exc:
+            raise xs_errors.XenError('NFSVersion',
+                                     opterr=exc.errstr)
+
+
+    def mount(self, mountpoint, remotepath):
+        try:
+            nfs.soft_mount(mountpoint, self.remoteserver, remotepath, self.transport)
+        except nfs.NfsException, exc:
+            raise xs_errors.XenError('NFSMount', opterr=exc.errstr)
+
+
+    def attach(self, sr_uuid):
+        self.validate_remotepath(False)
+        #self.remotepath = os.path.join(self.dconf['serverpath'], sr_uuid)
+        self.remotepath = self.dconf['serverpath']
+        util._testHost(self.dconf['server'], NFSPORT, 'NFSTarget')
+        self.mount_remotepath(sr_uuid)
+
+
+    def mount_remotepath(self, sr_uuid):
+        if not self._checkmount():
+            self.check_server()
+            self.mount(self.path, self.remotepath)
+
+        return super(NFSSR, self).attach(sr_uuid)
+
+
+    def probe(self):
+        # Verify NFS target and port
+        util._testHost(self.dconf['server'], NFSPORT, 'NFSTarget')
+        
+        self.validate_remotepath(True)
+        self.check_server()
+
+        temppath = os.path.join(SR.MOUNT_BASE, PROBE_MOUNTPOINT)
+
+        self.mount(temppath, self.dconf['serverpath'])
+        try:
+            return nfs.scan_srlist(temppath)
+        finally:
+            try:
+                nfs.unmount(temppath, True)
+            except:
+                pass
+
+
+    def detach(self, sr_uuid):
+        """Detach the SR: Unmounts and removes the mountpoint"""
+        if not self._checkmount():
+            return
+        # Stop any in-flight garbage collection / coalesce before unmounting
+        util.SMlog("Aborting GC/coalesce")
+        cleanup.abort(self.uuid)
+
+        # Change directory to avoid unmount conflicts
+        os.chdir(SR.MOUNT_BASE)
+
+        try:
+            nfs.unmount(self.path, True)
+        except nfs.NfsException, exc:
+            raise xs_errors.XenError('NFSUnMount', opterr=exc.errstr)
+
+        return super(NFSSR, self).detach(sr_uuid)
+        
+
+    def create(self, sr_uuid, size):
+        """Create the SR: verify the target, mount the base export, then
+        detach.  Creation of a per-SR-uuid subdirectory is intentionally
+        disabled (see the commented-out block below); the export itself is
+        used as the SR directory.
+        """
+        util._testHost(self.dconf['server'], NFSPORT, 'NFSTarget')
+        self.validate_remotepath(True)
+        if self._checkmount():
+            raise xs_errors.XenError('NFSAttached')
+
+        # Set the target path temporarily to the base dir
+        # so that we can create the target SR directory
+        self.remotepath = self.dconf['serverpath']
+        try:
+            self.mount_remotepath(sr_uuid)
+        except Exception, exn:
+            # Roll back the local mountpoint directory on failure
+            try:
+                os.rmdir(self.path)
+            except:
+                pass
+            raise exn
+
+        #newpath = os.path.join(self.path, sr_uuid)
+        #if util.ioretry(lambda: util.pathexists(newpath)):
+        #    if len(util.ioretry(lambda: util.listdir(newpath))) != 0:
+        #        self.detach(sr_uuid)
+        #        raise xs_errors.XenError('SRExists')
+        #else:
+        #    try:
+        #        util.ioretry(lambda: util.makedirs(newpath))
+        #    except util.CommandException, inst:
+        #        if inst.code != errno.EEXIST:
+        #            self.detach(sr_uuid)
+        #            raise xs_errors.XenError('NFSCreate', 
+        #                opterr='remote directory creation error is %d' 
+        #                % inst.code)
+        self.detach(sr_uuid)
+
+    def delete(self, sr_uuid):
+        """Delete the SR: remove non-VDI contents, then remove the per-uuid
+        directory on the export (if present).  A missing directory (ENOENT)
+        is tolerated; any other command failure raises NFSDelete.
+        """
+        # try to remove/delete non VDI contents first
+        super(NFSSR, self).delete(sr_uuid)
+        try:
+            if self._checkmount():
+                self.detach(sr_uuid)
+
+            # Set the target path temporarily to the base dir
+            # so that we can remove the target SR directory
+            self.remotepath = self.dconf['serverpath']
+            self.mount_remotepath(sr_uuid)
+            newpath = os.path.join(self.path, sr_uuid)
+
+            if util.ioretry(lambda: util.pathexists(newpath)):
+                util.ioretry(lambda: os.rmdir(newpath))
+            self.detach(sr_uuid)
+        except util.CommandException, inst:
+            # Ensure we detach even on failure before deciding to re-raise
+            self.detach(sr_uuid)
+            if inst.code != errno.ENOENT:
+                raise xs_errors.XenError('NFSDelete')
+
+    def vdi(self, uuid, loadLocked = False):
+        if not loadLocked:
+            return NFSFileVDI(self, uuid)
+        return NFSFileVDI(self, uuid)
+    
+    def _checkmount(self):
+        # True only if the SR path exists AND is an active mountpoint.
+        return util.ioretry(lambda: util.pathexists(self.path)) \
+               and util.ioretry(lambda: util.ismount(self.path))
+
+    def scan_exports(self, target):
+        """Scan the NFS exports advertised by target and pretty-print the
+        resulting XML document to stderr (diagnostic aid for probe errors)."""
+        util.SMlog("scanning2 (target=%s)" % target)
+        dom = nfs.scan_exports(target)
+        print >>sys.stderr,dom.toprettyxml()
+
+class NFSFileVDI(FileSR.FileVDI):
+    """File-backed VDI living on an NFS SR."""
+
+    def attach(self, sr_uuid, vdi_uuid):
+        """Tag the VDI's xenstore data with storage-type=nfs, then attach.
+
+        Any failure while rewriting xenstore data is logged and ignored
+        (best effort) -- attach proceeds regardless.
+        """
+        try:
+            vdi_ref = self.sr.srcmd.params['vdi_ref']
+            self.session.xenapi.VDI.remove_from_xenstore_data(vdi_ref, \
+                    "vdi-type")
+            self.session.xenapi.VDI.remove_from_xenstore_data(vdi_ref, \
+                    "storage-type")
+            self.session.xenapi.VDI.add_to_xenstore_data(vdi_ref, \
+                    "storage-type", "nfs")
+        except:
+            util.logException("NFSSR:attach")
+            pass  # NOTE(review): redundant after logException; kept as-is
+        return super(NFSFileVDI, self).attach(sr_uuid, vdi_uuid)
+
+    def get_mtime(self, path):
+        """Return the modification time of path (with I/O retry on stat)."""
+        st = util.ioretry_stat(lambda: os.stat(path))
+        return st[stat.ST_MTIME]
+
+    def clone(self, sr_uuid, vdi_uuid):
+        """Clone the VDI, then force the SR directory's mtime forward if the
+        clone did not change it, so directory-watchers observe the update."""
+        timestamp_before = int(self.get_mtime(self.sr.path))
+        ret = super(NFSFileVDI, self).clone(sr_uuid, vdi_uuid)
+        timestamp_after = int(self.get_mtime(self.sr.path))
+        if timestamp_after == timestamp_before:
+            util.SMlog("SR dir timestamp didn't change, updating")
+            timestamp_after += 1
+            os.utime(self.sr.path, (timestamp_after, timestamp_after))
+        return ret
+
+
+if __name__ == '__main__':
+    # Executed directly by the SM framework as a driver script
+    SRCommand.run(NFSSR, DRIVER_INFO)
+else:
+    # Imported: register the driver with the SR factory
+    SR.registerSR(NFSSR)

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/5f6387e1/scripts/vm/hypervisor/xenserver/xcposs/copy_vhd_from_secondarystorage.sh
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/xcposs/copy_vhd_from_secondarystorage.sh b/scripts/vm/hypervisor/xenserver/xcposs/copy_vhd_from_secondarystorage.sh
new file mode 100644
index 0000000..074b842
--- /dev/null
+++ b/scripts/vm/hypervisor/xenserver/xcposs/copy_vhd_from_secondarystorage.sh
@@ -0,0 +1,184 @@
+#!/bin/bash
+# Copyright 2012 Citrix Systems, Inc. Licensed under the
+# Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License.  Citrix Systems, Inc.
+# reserves all rights not expressly granted by the License.
+# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# 
+# Automatically generated by addcopyright.py at 04/03/2012
+
+#set -x
+ 
+# Print command-line usage.
+usage() {
+  printf "Usage: %s [vhd file in secondary storage] [uuid of the source sr] [name label]  \n" $(basename $0) 
+}
+
+# Unmount (force + lazy) and remove the temporary secondary-storage
+# mountpoint, if one was created.
+cleanup()
+{
+  if [ ! -z $localmp ]; then 
+    umount -fl $localmp
+    if [ $? -eq 0 ];  then
+      rmdir $localmp
+    fi
+  fi
+}
+
+# --- argument parsing: $1 = vhd path on secondary storage,
+# --- $2 = destination SR uuid, $3 = name-label for the new VDI.
+# Errors are reported as "code#message" on stdout with exit 0 (the
+# management server parses the code before the '#').
+if [ -z $1 ]; then
+  usage
+  echo "2#no mountpoint"
+  exit 0
+else
+  mountpoint=${1%/*}
+  vhdfilename=${1##*/}
+fi
+
+if [ -z $2 ]; then
+  usage
+  echo "3#no uuid of the source sr"
+  exit 0
+else
+  sruuid=$2
+fi
+
+type=$(xe sr-param-get uuid=$sruuid param-name=type)
+if [ $? -ne 0 ]; then
+  echo "4#sr $sruuid doesn't exist"
+  exit 0
+fi
+
+if [ -z $3 ]; then
+  usage
+  # NOTE(review): error code "3#" is also used above for a missing SR
+  # uuid -- confirm the management server does not need these distinct.
+  echo "3#no namelabel"
+  exit 0
+else
+  namelabel=$3
+fi
+
+# Mount the secondary storage read-only at a unique temporary point.
+localmp=/var/run/cloud_mount/$(uuidgen -r)
+
+mkdir -p $localmp
+if [ $? -ne 0 ]; then
+  echo "5#can't make dir $localmp"
+  exit 0
+fi
+
+mount -o tcp,soft,ro,timeo=133,retrans=1 $mountpoint $localmp
+if [ $? -ne 0 ]; then
+  echo "6#can't mount $mountpoint to $localmp"
+  exit 0
+fi
+
+# If $1 had no .vhd suffix it names a directory: pick the vhd inside it.
+vhdfile=$localmp/$vhdfilename
+if [ ${vhdfile%.vhd} == ${vhdfile} ] ; then
+  vhdfile=$(ls $vhdfile/*.vhd)
+  if [ $? -ne 0 ]; then
+    echo "7#There is no vhd file under $mountpoint"
+    cleanup
+    exit 0
+  fi
+fi
+
+
+
+VHDUTIL="/usr/bin/vhd-util"
+
+copyvhd()
+{
+  local desvhd=$1
+  local srcvhd=$2
+  local vsize=$3
+  local type=$4
+  local parent=`$VHDUTIL query -p -n $srcvhd`
+  if [ $? -ne 0 ]; then
+    echo "30#failed to query $srcvhd"
+    cleanup
+    exit 0
+  fi
+  if [ "${parent##*vhd has}" = " no parent" ]; then
+    dd if=$srcvhd of=$desvhd bs=2M     
+    if [ $? -ne 0 ]; then
+      echo "31#failed to dd $srcvhd to $desvhd"
+      cleanup
+     exit 0
+    fi
+    if [ $type != "nfs" -a $type != "ext" -a $type != "file" ]; then
+      dd if=$srcvhd of=$desvhd bs=512 seek=$(($(($vsize/512))-1)) count=1
+      $VHDUTIL modify -s $vsize -n $desvhd
+      if [ $? -ne 0 ]; then
+        echo "32#failed to set new vhd physical size for vdi vdi $uuid"
+        cleanup
+        exit 0
+      fi
+    fi
+  else
+    copyvhd $desvhd $parent $vsize $type
+    $VHDUTIL coalesce -p $desvhd -n $srcvhd
+    if [ $? -ne 0 ]; then
+      echo "32#failed to coalesce  $desvhd to $srcvhd"
+      cleanup
+     exit 0
+    fi
+  fi
+}
+
+# Create the destination VDI sized to the source's virtual size (MiB),
+# then copy the chain into its backing store, per SR type.
+size=$($VHDUTIL query -v -n $vhdfile)
+uuid=$(xe vdi-create sr-uuid=$sruuid virtual-size=${size}MiB type=user name-label=$namelabel)
+if [ $? -ne 0 ]; then
+  echo "9#can not create vdi in sr $sruuid"
+  cleanup
+  exit 0
+fi
+
+
+if [ $type == "nfs" -o $type == "ext" ]; then
+  # NOTE(review): this path uses /run/sr-mount while the reverse-copy
+  # script uses /var/run/sr-mount -- confirm both resolve on XCP hosts.
+  desvhd=/run/sr-mount/$sruuid/$uuid.vhd
+  copyvhd $desvhd $vhdfile 0 $type
+
+elif [ $type == "lvmoiscsi" -o $type == "lvm" -o $type == "lvmohba" ]; then
+  # LVM-backed SR: activate the LV and restore its physical size.
+  lvsize=$(xe vdi-param-get uuid=$uuid param-name=physical-utilisation)
+  if [ $? -ne 0 ]; then
+    echo "12#failed to get physical size of vdi $uuid"
+    cleanup
+    exit 0
+  fi
+  desvhd=/dev/VG_XenStorage-$sruuid/VHD-$uuid
+  lvchange -ay $desvhd
+  if [ $? -ne 0 ]; then
+    echo "10#lvm can not make VDI $uuid  visible"
+    cleanup
+    exit 0
+  fi
+  copyvhd $desvhd $vhdfile $lvsize $type
+elif [ $type == "file" ]; then
+  # file SR: resolve the backing directory from the SR's PBD config.
+  pbd=`xe sr-param-list uuid=$sruuid |grep PBDs | awk '{print $3}'`
+  path=`xe pbd-param-list uuid=$pbd |grep device-config |awk '{print $4}'`
+  desvhd=$path/$uuid.vhd
+  copyvhd $desvhd $vhdfile 0 $type
+
+else 
+  echo "15#doesn't support sr type $type"
+  cleanup
+  exit 0
+fi
+
+# Mark the new VHD visible and rescan so XAPI picks it up.
+$VHDUTIL set -n $desvhd -f "hidden" -v "0" > /dev/null
+if [ $? -ne 0 ]; then
+  echo "21#failed to set hidden to 0  $desvhd"
+  cleanup
+  exit 0
+fi
+xe sr-scan uuid=$sruuid
+if [ $? -ne 0 ]; then
+  echo "14#failed to scan sr $sruuid"
+  cleanup
+  exit 0
+fi
+
+# Success: return the new VDI uuid.
+echo "0#$uuid"
+cleanup
+exit 0

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/5f6387e1/scripts/vm/hypervisor/xenserver/xcposs/copy_vhd_to_secondarystorage.sh
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/xcposs/copy_vhd_to_secondarystorage.sh b/scripts/vm/hypervisor/xenserver/xcposs/copy_vhd_to_secondarystorage.sh
new file mode 100644
index 0000000..e972db6
--- /dev/null
+++ b/scripts/vm/hypervisor/xenserver/xcposs/copy_vhd_to_secondarystorage.sh
@@ -0,0 +1,126 @@
+#!/bin/bash
+# Copyright 2012 Citrix Systems, Inc. Licensed under the
+# Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License.  Citrix Systems, Inc.
+# reserves all rights not expressly granted by the License.
+# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# 
+# Automatically generated by addcopyright.py at 04/03/2012
+
+#set -x
+ 
+# Print command-line usage.
+usage() {
+  printf "Usage: %s [mountpoint in secondary storage] [uuid of the source vdi] [uuid of the source sr]\n" $(basename $0) 
+}
+
+# Unmount and remove the temporary secondary-storage mountpoint,
+# if one was created.
+cleanup()
+{
+  if [ ! -z $localmp ]; then 
+    umount $localmp
+    if [ $? -eq 0 ];  then
+      rmdir $localmp
+    fi
+  fi
+}
+
+# --- argument parsing: $1 = secondary-storage mountpoint,
+# --- $2 = source VDI uuid, $3 = source SR uuid.
+if [ -z $1 ]; then
+  usage
+  echo "1#no mountpoint"
+  exit 0
+else
+  mountpoint=$1
+fi
+
+if [ -z $2 ]; then
+  usage
+  # fixed copy/paste error: $2 is the source VDI uuid, not the SR uuid
+  echo "2#no uuid of the source vdi"
+  exit 0
+else
+  vdiuuid=$2
+fi
+
+
+if [ -z $3 ]; then
+  usage
+  echo "3#no uuid of the source sr"
+  exit 0
+else
+  sruuid=$3
+fi
+
+type=$(xe sr-param-get uuid=$sruuid param-name=type)
+if [ $? -ne 0 ]; then
+  echo "4#sr $sruuid doesn't exist"
+  exit 0
+fi
+
+# Mount secondary storage read-write at a unique temporary point.
+localmp=/var/run/cloud_mount/$(uuidgen -r)
+
+mkdir -p $localmp
+if [ $? -ne 0 ]; then
+  echo "5#can't make dir $localmp"
+  exit 0
+fi
+
+mount -o tcp,soft,timeo=133,retrans=1 $mountpoint $localmp
+if [ $? -ne 0 ]; then
+  echo "6#can't mount $mountpoint to $localmp"
+  exit 0
+fi
+
+vhdfile=$localmp/${vdiuuid}.vhd
+
+if [ $type == "nfs" -o $type == "ext" ]; then
+  # File-backed SR: plain copy of the backing .vhd file.
+  dd if=/var/run/sr-mount/$sruuid/${vdiuuid}.vhd of=$vhdfile bs=2M
+  if [ $? -ne 0 ]; then
+    rm -f $vhdfile
+    echo "8#failed to copy /var/run/sr-mount/$sruuid/${vdiuuid}.vhd to secondarystorage"
+    cleanup
+    exit 0
+  fi
+elif [ $type == "lvmoiscsi" -o $type == "lvm" -o $type == "lvmohba" ]; then
+  # LVM-backed SR: activate the LV, copy only the used extent, then fix
+  # the VHD footer to match the copied size.
+  lvchange -ay /dev/VG_XenStorage-$sruuid/VHD-$vdiuuid
+  if [ $? -ne 0 ]; then
+    echo "9#lvm can not make VDI $vdiuuid  visible"
+    cleanup
+    exit 0
+  fi
+  size=$(vhd-util query -s -n /dev/VG_XenStorage-$sruuid/VHD-$vdiuuid)
+  if [ $? -ne 0 ]; then
+    echo "10#can not get physical size of /dev/VG_XenStorage-$sruuid/VHD-$vdiuuid"
+    cleanup
+    exit 0
+  fi
+#in 2M unit
+  size=$((size>>21))
+  size=$((size+1))
+  dd if=/dev/VG_XenStorage-$sruuid/VHD-$vdiuuid of=$vhdfile bs=2M count=$size
+  if [ $? -ne 0 ]; then
+    rm -f $vhdfile
+    echo "8#failed to copy /dev/VG_XenStorage-$sruuid/VHD-$vdiuuid to secondarystorage"
+    cleanup
+    exit 0
+  fi
+#in byte unit
+  size=$((size<<21))
+  vhd-util modify -s $size -n $vhdfile
+  if [ $? -ne 0 ]; then
+    rm -f $vhdfile
+    echo "11#failed to change $vhdfile physical size"
+    cleanup
+    exit 0
+  fi
+else 
+  echo "15#doesn't support sr type $type"
+  cleanup
+  exit 0
+fi
+
+# Success: return the copied VDI uuid.
+echo "0#$vdiuuid"
+cleanup
+exit 0

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/5f6387e1/scripts/vm/hypervisor/xenserver/xcposs/create_privatetemplate_from_snapshot.sh
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/xcposs/create_privatetemplate_from_snapshot.sh b/scripts/vm/hypervisor/xenserver/xcposs/create_privatetemplate_from_snapshot.sh
new file mode 100644
index 0000000..c9c9b90
--- /dev/null
+++ b/scripts/vm/hypervisor/xenserver/xcposs/create_privatetemplate_from_snapshot.sh
@@ -0,0 +1,134 @@
+#!/bin/bash
+# Copyright 2012 Citrix Systems, Inc. Licensed under the
+# Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License.  Citrix Systems, Inc.
+# reserves all rights not expressly granted by the License.
+# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# 
+# Automatically generated by addcopyright.py at 04/03/2012
+
+#set -x
+ 
+# Print command-line usage.
+usage() {
+  printf "Usage: %s [vhd file in secondary storage] [template directory in secondary storage] [template local dir] \n" $(basename $0) 
+}
+# Shared NFS mount options for both mounts below.
+options='tcp,soft,timeo=133,retrans=1'
+# Unmount and remove whichever temporary mountpoints were created.
+cleanup()
+{
+  if [ ! -z $snapshotdir ]; then 
+    umount $snapshotdir
+    if [ $? -eq 0 ];  then
+      rmdir $snapshotdir
+    fi
+  fi
+  if [ ! -z $templatedir ]; then 
+    umount $templatedir
+    if [ $? -eq 0 ];  then
+      rmdir $templatedir
+    fi
+  fi
+}
+
+# --- argument parsing: $1 = snapshot vhd path, $2 = template dir URL,
+# --- $3 = local template dir name.  Errors use the "code#message" protocol.
+if [ -z $1 ]; then
+  usage
+  echo "2#no vhd file path"
+  exit 0
+else
+  snapshoturl=${1%/*}
+  vhdfilename=${1##*/}
+fi
+
+if [ -z $2 ]; then
+  usage
+  echo "3#no template path"
+  exit 0
+else
+  templateurl=$2
+fi
+
+if [ -z $3 ]; then
+  usage
+  echo "3#no template local dir"
+  exit 0
+else
+  tmpltLocalDir=$3
+fi
+
+
+# NOTE(review): this script mounts under /run/cloud_mount while the
+# copy_vhd scripts use /var/run/cloud_mount -- confirm both resolve
+# identically on the target XCP host.
+snapshotdir=/run/cloud_mount/$(uuidgen -r)
+mkdir -p $snapshotdir
+if [ $? -ne 0 ]; then
+  echo "4#can't make dir $snapshotdir"
+  exit 0
+fi
+
+mount -o $options $snapshoturl $snapshotdir
+if [ $? -ne 0 ]; then
+  rmdir $snapshotdir
+  echo "5#can not mount $snapshoturl to $snapshotdir"
+  exit 0
+fi
+
+templatedir=/run/cloud_mount/$tmpltLocalDir
+mkdir -p $templatedir
+if [ $? -ne 0 ]; then
+  # report the path BEFORE clearing the variable; the original cleared
+  # it first, so the message always printed an empty path
+  echo "6#can't make dir $templatedir"
+  templatedir=""
+  cleanup
+  exit 0
+fi
+
+mount -o $options $templateurl $templatedir
+if [ $? -ne 0 ]; then
+  rmdir $templatedir
+  # same fix: print the path before clearing it for cleanup
+  echo "7#can not mount $templateurl to $templatedir"
+  templatedir=""
+  cleanup
+  exit 0
+fi
+
+VHDUTIL="vhd-util"
+
+# Recursively copy a VHD chain: dd the topmost parent raw, then coalesce
+# each child's blocks into the destination while unwinding the recursion.
+#   $1 destination vhd   $2 source vhd
+copyvhd()
+{
+  local desvhd=$1
+  local srcvhd=$2
+  local parent=
+  parent=`$VHDUTIL query -p -n $srcvhd`
+  if [ $? -ne 0 ]; then
+    echo "30#failed to query $srcvhd"
+    cleanup
+    exit 0
+  fi
+  if [[ "${parent}"  =~ " no parent" ]]; then
+    # Base image: raw copy
+    dd if=$srcvhd of=$desvhd bs=2M     
+    if [ $? -ne 0 ]; then
+      echo "31#failed to dd $srcvhd to $desvhd"
+      cleanup
+      exit 0
+    fi
+  else
+    # Copy the parent chain first, then merge this child on top of it.
+    copyvhd $desvhd $parent
+    $VHDUTIL coalesce -p $desvhd -n $srcvhd
+    if [ $? -ne 0 ]; then
+      echo "32#failed to coalesce  $desvhd to $srcvhd"
+      cleanup
+      exit 0
+    fi
+  fi
+}
+
+# Copy the snapshot chain into a freshly named template vhd, then report
+# "0#uuid#physicalSize#virtualSize" for the management server to parse.
+templateuuid=$(uuidgen -r)
+desvhd=$templatedir/$templateuuid.vhd
+srcvhd=$snapshotdir/$vhdfilename
+copyvhd $desvhd $srcvhd
+virtualSize=`$VHDUTIL query -v -n $desvhd`
+physicalSize=`ls -l $desvhd | awk '{print $5}'`
+cleanup
+echo "0#$templateuuid#$physicalSize#$virtualSize"
+exit 0

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/5f6387e1/scripts/vm/hypervisor/xenserver/xcposs/patch
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/xcposs/patch b/scripts/vm/hypervisor/xenserver/xcposs/patch
new file mode 100644
index 0000000..8ddccc7
--- /dev/null
+++ b/scripts/vm/hypervisor/xenserver/xcposs/patch
@@ -0,0 +1,49 @@
+# This file specifies the files that need
+# to be transferred over to the XenServer.
+# The format of this file is as follows:
+# [Name of file]=[source path],[file permission],[destination path]
+# [destination path] is required.
+# If [file permission] is missing, 755 is assumed.
+# If [source path] is missing, it looks in the same
+# directory as the patch file.
+# If [source path] starts with '/', then it is absolute path.
+# If [source path] starts with '~', then it is path relative to management server home directory.
+# If [source path] does not start with '/' or '~', then it is relative path to the location of the patch file. 
+# NOTE(review): NFSSR.py below has no comma-separated fields; per the
+# rules above the single value is the destination path and permission
+# defaults to 755 -- confirm the installer parses it that way.
+NFSSR.py=/usr/lib/xcp/sm
+vmops=.,0755,/usr/lib/xcp/plugins
+ovsgre=..,0755,/usr/lib/xcp/plugins
+ovstunnel=..,0755,/usr/lib/xcp/plugins
+vmopsSnapshot=.,0755,/usr/lib/xcp/plugins
+hostvmstats.py=..,0755,/usr/lib/xcp/sm
+systemvm.iso=../../../../../vms,0644,/usr/share/xcp/packages/iso/
+id_rsa.cloud=../../../systemvm,0600,/root/.ssh
+network_info.sh=..,0755,/usr/lib/xcp/bin
+setupxenserver.sh=..,0755,/usr/lib/xcp/bin
+make_migratable.sh=..,0755,/usr/lib/xcp/bin
+setup_iscsi.sh=..,0755,/usr/lib/xcp/bin
+pingtest.sh=../../..,0755,/usr/lib/xcp/bin
+dhcp_entry.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin
+ipassoc.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin
+vm_data.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin
+save_password_to_domr.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin
+networkUsage.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin
+call_firewall.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin
+call_loadbalancer.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin
+l2tp_vpn.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin
+cloud-setup-bonding.sh=..,0755,/usr/lib/xcp/bin
+copy_vhd_to_secondarystorage.sh=.,0755,/usr/lib/xcp/bin
+copy_vhd_from_secondarystorage.sh=.,0755,/usr/lib/xcp/bin
+setup_heartbeat_sr.sh=..,0755,/usr/lib/xcp/bin
+setup_heartbeat_file.sh=..,0755,/usr/lib/xcp/bin
+check_heartbeat.sh=..,0755,/usr/lib/xcp/bin
+xenheartbeat.sh=..,0755,/usr/lib/xcp/bin
+launch_hb.sh=..,0755,/usr/lib/xcp/bin
+vhd-util=..,0755,/usr/lib/xcp/bin
+vmopspremium=.,0755,/usr/lib/xcp/plugins
+create_privatetemplate_from_snapshot.sh=.,0755,/usr/lib/xcp/bin
+upgrade_snapshot.sh=..,0755,/usr/lib/xcp/bin
+cloud-clean-vlan.sh=..,0755,/usr/lib/xcp/bin
+cloud-prepare-upgrade.sh=..,0755,/usr/lib/xcp/bin
+getRouterStatus.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin
+bumpUpPriority.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin
+getDomRVersion.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin