Posted to commits@cloudstack.apache.org by ah...@apache.org on 2012/06/20 03:48:00 UTC

[6/8] Introduced plugins directory. Moved ovm into plugins. Introduced build.xml for ovm.

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmVmModule.py
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmVmModule.py b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmVmModule.py
new file mode 100755
index 0000000..12c3206
--- /dev/null
+++ b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmVmModule.py
@@ -0,0 +1,538 @@
+# Copyright 2012 Citrix Systems, Inc. Licensed under the
+# Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License.  Citrix Systems, Inc.
+# reserves all rights not expressly granted by the License.
+# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# 
+# Automatically generated by addcopyright.py at 04/03/2012
+'''
+Created on May 17, 2011
+
+@author: frank
+'''
+from OvmCommonModule import *
+from OvmDiskModule import *
+from OvmVifModule import *
+from OvmHostModule import OvmHost
+from string import Template
+from OVSXXenVMConfig import *
+from OVSSiteVM import start_vm, stop_vm, reset_vm
+from OVSSiteCluster import *
+from OvmStoragePoolModule import OvmStoragePool
+from OVSXXenStore import xen_get_vm_path, xen_get_vnc_port
+from OVSDB import db_get_vm
+from OVSXMonitor import xen_get_vm_perf_metrics, xen_get_xm_info
+from OVSXXenVM import xen_migrate_vm
+from OVSSiteRMVM import unregister_vm, register_vm, set_vm_status
+from OVSSiteVMInstall import install_vm_hvm
+from OVSSiteRMServer import get_master_ip
+from OVSXXenVMInstall import xen_change_vm_cdrom
+from OVSXAPIUtil import XenAPIObject, session_login, session_logout
+
+
+logger = OvmLogger("OvmVm")
+
+class OvmVmDecoder(json.JSONDecoder):
+    def decode(self, jStr):
+        deDict = asciiLoads(jStr)
+        vm = OvmVm()
+        setAttrFromDict(vm, 'cpuNum', deDict, int)
+        setAttrFromDict(vm, 'memory', deDict, long)
+        setattr(vm, 'rootDisk', toOvmDisk(deDict['rootDisk']))
+        setattr(vm, 'vifs', toOvmVifList(deDict['vifs']))
+        setattr(vm, 'disks', toOvmDiskList(deDict['disks']))
+        setAttrFromDict(vm, 'name', deDict)
+        setAttrFromDict(vm, 'uuid', deDict)
+        setAttrFromDict(vm, 'bootDev', deDict)
+        setAttrFromDict(vm, 'type', deDict)
+        return vm
+
+class OvmVmEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if not isinstance(obj, OvmVm): raise Exception("%s is not an instance of OvmVm"%type(obj))
+        dct = {}
+        safeDictSet(obj, dct, 'cpuNum')
+        safeDictSet(obj, dct, 'memory')
+        safeDictSet(obj, dct, 'powerState')
+        safeDictSet(obj, dct, 'name')
+        safeDictSet(obj, dct, 'type')
+        vifs = fromOvmVifList(obj.vifs)
+        dct['vifs'] = vifs
+        rootDisk = fromOvmDisk(obj.rootDisk)
+        dct['rootDisk'] = rootDisk
+        disks = fromOvmDiskList(obj.disks)
+        dct['disks'] = disks
+        return dct
+        
+def toOvmVm(jStr):
+    return json.loads(jStr, cls=OvmVmDecoder)
+
+def fromOvmVm(vm):
+    return normalizeToGson(json.dumps(vm, cls=OvmVmEncoder))
+
+class OvmVm(OvmObject):
+    cpuNum = 0
+    memory = 0
+    rootDisk = None
+    vifs = []
+    disks = []
+    powerState = ''
+    name = ''
+    bootDev = ''
+    type = ''
+        
+    def _getVifs(self, vmName):
+        vmPath = OvmHost()._vmNameToPath(vmName)
+        domId = OvmHost()._getDomainIdByName(vmName)
+        vifs = successToMap(xen_get_vifs(vmPath))
+        lst = []
+        for k in vifs:
+            v = vifs[k]
+            vifName = 'vif' + domId + '.' + k[len('vif'):]
+            vif = OvmVif()
+            (mac, bridge, type) = v.split(',')
+            safeSetAttr(vif, 'name', vifName)
+            safeSetAttr(vif, 'mac', mac)
+            safeSetAttr(vif, 'bridge', bridge)
+            safeSetAttr(vif, 'type', type)
+            lst.append(vif)
+            
+        return lst
+    
+    def _getVifsFromConfig(self, vmPath):
+        vifs = successToMap(xen_get_vifs(vmPath))
+        lst = []
+        for k in vifs:
+            v = vifs[k]
+            vif = OvmVif()
+            (mac, bridge, type) = v.split(',')
+            safeSetAttr(vif, 'name', k)
+            safeSetAttr(vif, 'mac', mac)
+            safeSetAttr(vif, 'bridge', bridge)
+            safeSetAttr(vif, 'type', type)
+            lst.append(vif)
+        return lst
+    
+    def _getIsoMountPath(self, vmPath):
+        vmName = basename(vmPath)
+        # vmPath is <primary storage>/running_pool/<vmName>; walk up two levels.
+        # (str.rstrip strips a character set, not a suffix, so it cannot be used here.)
+        priStoragePath = dirname(dirname(vmPath))
+        return join(priStoragePath, 'iso_pool', vmName)
+    
+    def _getVmTypeFromConfigFile(self, vmPath):
+        vmType = successToMap(xen_get_vm_type(vmPath))['type']
+        return vmType.replace('hvm', 'HVM').replace('para', 'PV')
+    
+    def _tapAOwnerFile(self, vmPath):
+        # Create a file named by the 'host_ip_address' convention in vmPath.
+        # Because 'xm list' doesn't return VMs that have been stopped, we scan
+        # primary storage for stopped VMs; this file tells us which host a VM
+        # belongs to. The file is used in OvmHost.getAllVms().
+        self._cleanUpOwnerFile(vmPath)
+        ownerFileName = makeOwnerFileName()
+        fd = open(join(vmPath, ownerFileName), 'w')
+        fd.write(ownerFileName)
+        fd.close()
+    
+    def _cleanUpOwnerFile(self, vmPath):
+        for f in os.listdir(vmPath):
+            fp = join(vmPath, f)
+            if isfile(fp) and f.startswith(OWNER_FILE_PREFIX):
+                os.remove(fp)
+    
+    @staticmethod
+    def create(jsonString):    
+        def dumpCfg(vmName, cfgPath):
+            cfgFd = open(cfgPath, 'r')
+            cfg = cfgFd.readlines()
+            cfgFd.close()
+            logger.info(OvmVm.create, "Start %s with configure:\n\n%s\n"%(vmName, "".join(cfg)))
+        
+        def setVifsType(vifs, type):
+            for vif in vifs:
+                vif.type = type
+                
+        def hddBoot(vm, vmPath):
+            vmType = vm.type
+            if vmType == "FROMCONFIGFILE":
+                vmType = OvmVm()._getVmTypeFromConfigFile(vmPath)
+                
+            cfgDict = {}
+            if vmType == "HVM":
+                cfgDict['builder'] = "'hvm'"
+                cfgDict['acpi'] = "1"
+                cfgDict['apic'] = "1"
+                cfgDict['device_model'] = "'/usr/lib/xen/bin/qemu-dm'"
+                cfgDict['kernel'] = "'/usr/lib/xen/boot/hvmloader'"
+                vifType = 'ioemu'
+            else:
+                cfgDict['bootloader'] = "'/usr/bin/pygrub'"
+                vifType = 'netfront'
+            
+            cfgDict['name'] = "'%s'"%vm.name
+            cfgDict['disk'] = "[]"
+            cfgDict['vcpus'] = "''"
+            cfgDict['memory'] = "''"
+            cfgDict['on_crash'] = "'destroy'"
+            cfgDict['on_reboot'] = "'restart'"
+            cfgDict['vif'] = "[]"
+            
+            items = []
+            for k in cfgDict.keys():
+                item = " = ".join([k, cfgDict[k]])
+                items.append(item)
+            vmSpec = "\n".join(items)
+                
+            vmCfg = open(join(vmPath, 'vm.cfg'), 'w')
+            vmCfg.write(vmSpec)
+            vmCfg.close()
+            
+            setVifsType(vm.vifs, vifType)
+            raiseExceptionIfFail(xen_set_vcpus(vmPath, vm.cpuNum))
+            raiseExceptionIfFail(xen_set_memory(vmPath, BytesToM(vm.memory)))
+            raiseExceptionIfFail(xen_add_disk(vmPath, vm.rootDisk.path, mode=vm.rootDisk.type))
+            vifs = [OvmVif.toXenString(v) for v in vm.vifs]
+            for vif in vifs:
+                raiseExceptionIfFail(xen_set_vifs(vmPath, vif))
+                
+            for disk in vm.disks:
+                raiseExceptionIfFail(xen_add_disk(vmPath, disk.path, mode=disk.type))
+
+            raiseExceptionIfFail(xen_set_vm_vnc_password(vmPath, ""))
+            cfgFile = join(vmPath, 'vm.cfg')
+            # only HVM supports attaching cdrom
+            if vmType == 'HVM':
+                # Add an empty "hdc:cdrom" entry in config. Fisrt we set boot order to 'd' that is cdrom boot,
+                # then 'hdc:cdrom' entry will be in disk list. Second, change boot order to 'c' which
+                # is harddisk boot. VM can not start with an empty 'hdc:cdrom' when boot order is 'd'.
+                # it's tricky !
+                raiseExceptionIfFail(xen_config_boot_sequence(vmPath, 'd'))
+                raiseExceptionIfFail(xen_config_boot_sequence(vmPath, 'c'))
+                
+            raiseExceptionIfFail(xen_correct_cfg(cfgFile, vmPath))
+            xen_correct_qos_cfg(cfgFile)
+            dumpCfg(vm.name, cfgFile)
+            server = successToMap(get_master_ip())['ip']
+            raiseExceptionIfFail(start_vm(vmPath, server))
+            rs = SUCC()
+            return rs
+        
+        def cdBoot(vm, vmPath):
+            isoMountPath = None
+            try:
+                cdrom = None
+                for disk in vm.disks:
+                    if disk.isIso == True:
+                        cdrom = disk
+                        break
+                if not cdrom: raise Exception("Cannot find ISO in disks")
+                
+                isoOnSecStorage = dirname(cdrom.path)
+                isoName = basename(cdrom.path)
+                isoMountPath = OvmVm()._getIsoMountPath(vmPath)
+                OvmStoragePool()._mount(isoOnSecStorage, isoMountPath)
+                isoPath = join(isoMountPath, isoName)
+                if not exists(isoPath):
+                    raise Exception("Cannot found iso %s at %s which mounts to %s"%(isoName, isoOnSecStorage, isoMountPath))
+                
+                stdout = run_cmd(args=['file', isoPath])
+                if not stdout.strip().endswith("(bootable)"): raise Exception("ISO %s is not bootable"%cdrom.path)
+                
+                #now alter cdrom to correct path
+                cdrom.path = isoPath
+                if len(vm.vifs) != 0:
+                    vif = vm.vifs[0]
+                    #ISO boot must be HVM
+                    vifCfg = ','.join([vif.mac, vif.bridge, 'ioemu'])
+                else:
+                    vifCfg = ''
+                
+                rootDiskSize = os.path.getsize(vm.rootDisk.path)
+                rootDiskCfg = ':'.join([join(vmPath, basename(vm.rootDisk.path)), str(BytesToG(rootDiskSize)), 'True'])
+                disks = [rootDiskCfg]
+                for d in vm.disks:
+                    if d.isIso: continue
+                    size = os.path.getsize(d.path)
+                    cfg = ':'.join([d.path, str(BytesToG(size)), 'True'])
+                    disks.append(cfg)
+                disksCfg = ','.join(disks)
+                server = successToMap(get_master_ip())['ip']
+                   
+                raiseExceptionIfFail(install_vm_hvm(vmPath, BytesToM(vm.memory), vm.cpuNum, vifCfg, disksCfg, cdrom.path, vncpassword='', dedicated_server=server))
+                rs = SUCC()
+                return rs
+            except Exception, e:
+                if isoMountPath and OvmStoragePool()._isMounted(isoMountPath):
+                    doCmd(['umount', '-f', isoMountPath])
+                errmsg = fmt_err_msg(e)
+                raise Exception(errmsg)
+        
+        try:
+            vm = toOvmVm(jsonString)
+            logger.debug(OvmVm.create, "creating vm, spec:%s"%jsonString)
+            rootDiskPath = vm.rootDisk.path
+            if not exists(rootDiskPath): raise Exception("Cannot find root disk %s"%rootDiskPath)
+    
+            rootDiskDir = dirname(rootDiskPath)
+            vmPath = join(dirname(rootDiskDir), vm.name)
+            if not exists(vmPath):
+                doCmd(['ln', '-s', rootDiskDir, vmPath])
+            vmNameFile = open(join(rootDiskDir, 'vmName'), 'w')
+            vmNameFile.write(vm.name)
+            vmNameFile.close()
+            
+            OvmVm()._tapAOwnerFile(rootDiskDir)
+            # set the VM to DOWN before starting, OVS agent will check this status
+            set_vm_status(vmPath, 'DOWN')
+            if vm.bootDev == "HDD":
+                return hddBoot(vm, vmPath)
+            elif vm.bootDev == "CD":
+                return cdBoot(vm, vmPath)
+            else:
+                raise Exception("Unkown bootdev %s for %s"%(vm.bootDev, vm.name))
+
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmVm.create, errmsg)
+            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.create), errmsg)
+    
+    @staticmethod
+    def stop(vmName):
+        try:
+            try:
+                OvmHost()._getDomainIdByName(vmName)
+            except NoVmFoundException, e:
+                logger.info(OvmVm.stop, "vm %s is already stopped"%vmName)
+                return SUCC()
+                
+            logger.info(OvmVm.stop, "Stop vm %s"%vmName)
+            try:
+                vmPath = OvmHost()._vmNameToPath(vmName)
+            except Exception, e:
+                errmsg = fmt_err_msg(e)
+                logger.info(OvmVm.stop, "Cannot find link for vm %s on primary storage, treating it as stopped\n %s"%(vmName, errmsg))
+                return SUCC()
+            # set the VM to RUNNING before stopping, OVS agent will check this status
+            set_vm_status(vmPath, 'RUNNING')
+            raiseExceptionIfFail(stop_vm(vmPath))
+            return SUCC()
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmVm.stop, errmsg)
+            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.stop), errmsg)
+    
+    @staticmethod
+    def reboot(vmName):
+        try:
+            #===================================================================
+            # Xend has a reboot bug: if a VM is rebooted too quickly, xend returns
+            # success but actually refuses the reboot (seen in the logs), so we
+            # stop then start instead of:
+            # vmPath = successToMap(xen_get_vm_path(vmName))['path']
+            # raiseExceptionIfFail(reset_vm(vmPath))
+            #===================================================================
+            vmPath = OvmHost()._vmNameToPath(vmName)
+            OvmVm.stop(vmName)
+            raiseExceptionIfFail(start_vm(vmPath))
+            vncPort= successToMap(xen_get_vnc_port(vmName))['vnc_port']
+            logger.info(OvmVm.stop, "reboot vm %s, new vncPort is %s"%(vmName, vncPort))
+            return toGson({"vncPort":str(vncPort)})
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmVm.reboot, errmsg)
+            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.reboot), errmsg)
+    
+    @staticmethod
+    def getDetails(vmName):          
+        try:
+            vm = OvmVm()
+            
+            try:
+                OvmHost()._getDomainIdByName(vmName)
+                vmPath = OvmHost()._vmNameToPath(vmName)
+                vifsFromConfig = False
+            except NoVmFoundException, e:
+                vmPath = OvmHost()._getVmPathFromPrimaryStorage(vmName)
+                vifsFromConfig = True
+                
+            
+            if not isdir(vmPath):
+                # This handles the case where a VM start did not complete during
+                # primaryStorageDownload or createVolume (e.g. the mgmt server stopped). The mgmt
+                # server keeps the VM state as Starting and then sends a stop command, which deletes
+                # the bridges the VM attaches to by retrieving bridge info via OvmVm.getDetails().
+                # In this case the VM doesn't exist, so return a fake object here.
+                fakeDisk = OvmDisk()
+                vm.rootDisk = fakeDisk
+            else:
+                if vifsFromConfig:
+                    vm.vifs.extend(vm._getVifsFromConfig(vmPath))
+                else:
+                    vm.vifs.extend(vm._getVifs(vmName))
+                    
+                safeSetAttr(vm, 'name', vmName)
+                disks = successToMap(xen_get_vdisks(vmPath))['vdisks'].split(',')
+                rootDisk = None
+                #BUG: there is no way to get the type of a disk, assume all are "w"
+                for d in disks:
+                    if vmName in d:
+                        rootDisk = OvmDisk()
+                        safeSetAttr(rootDisk, 'path', d)
+                        safeSetAttr(rootDisk, 'type', "w")
+                        continue
+                    disk = OvmDisk()
+                    safeSetAttr(disk, 'path', d)
+                    safeSetAttr(disk, 'type', "w")
+                    vm.disks.append(disk)
+                if not rootDisk: raise Exception("Cannot find root disk for vm %s"%vmName)
+                safeSetAttr(vm, 'rootDisk', rootDisk)
+                vcpus = int(successToMap(xen_get_vcpus(vmPath))['vcpus'])
+                safeSetAttr(vm, 'cpuNum', vcpus)
+                memory = MtoBytes(int(successToMap(xen_get_memory(vmPath))['memory']))
+                safeSetAttr(vm, 'memory', memory)
+                vmStatus = db_get_vm(vmPath)
+                safeSetAttr(vm, 'powerState',  vmStatus['status'])
+                vmType = successToMap(xen_get_vm_type(vmPath))['type'].replace('hvm', 'HVM').replace('para', 'PV')
+                safeSetAttr(vm, 'type', vmType)
+                
+            rs = fromOvmVm(vm)
+            logger.info(OvmVm.getDetails, rs)
+            return rs
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmVm.getDetails, errmsg)
+            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.getDetails), errmsg)
+    
+    @staticmethod
+    def getVmStats(vmName):
+        def getVcpuNumAndUtils():
+            try:
+                session = session_login()
+                refs = session.xenapi.VM.get_by_name_label(vmName)
+                if len(refs) == 0:
+                    raise Exception("No ref for %s found in xenapi VM objects"%vmName)
+                vm = XenAPIObject('VM', session, refs[0])
+                VM_metrics = XenAPIObject("VM_metrics", session, vm.get_metrics())
+                items = VM_metrics.get_VCPUs_utilisation().items()
+                nvCpus = len(items)
+                if nvCpus == 0:
+                    raise Exception("vm %s has 0 vcpus !!!"%vmName)
+                
+                xmInfo = successToMap(xen_get_xm_info())
+                nCpus = int(xmInfo['nr_cpus'])
+                totalUtils = 0.0
+                # CPU utilization of VM = (sum of each vcpu's utilization) / number of physical cpus
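+                # e.g. two vcpus at 0.50 and 0.30 on a host with 4 physical
+                # cpus gives ((0.50 + 0.30) / 4) * 100 = 20% utilization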
+                for num, util in items:
+                    totalUtils += float(util)
+                avgUtils = float(totalUtils/nCpus) * 100
+                return (nvCpus, avgUtils)
+            finally:
+                session_logout()
+                
+                
+        try:
+            try:
+                OvmHost()._getDomainIdByName(vmName)
+                vmPath = OvmHost()._vmNameToPath(vmName)
+                (nvcpus, avgUtils) = getVcpuNumAndUtils()
+                rxBytes = 0
+                txBytes = 0
+                vifs = OvmVm()._getVifs(vmName)
+                for vif in vifs:
+                    rxp = join('/sys/class/net', vif.name, 'statistics/rx_bytes')
+                    txp = join("/sys/class/net/", vif.name, "statistics/tx_bytes")
+                    if not exists(rxp): raise Exception('cannot find %s'%rxp)
+                    if not exists(txp): raise Exception('cannot find %s'%txp)
+                    rxBytes += long(doCmd(['cat', rxp])) / 1000
+                    txBytes += long(doCmd(['cat', txp])) / 1000
+            except NoVmFoundException, e:
+                vmPath = OvmHost()._getVmPathFromPrimaryStorage(vmName)
+                nvcpus = int(successToMap(xen_get_vcpus(vmPath))['vcpus'])
+                avgUtils = 0
+                rxBytes = 0
+                txBytes = 0
+            
+            rs = toGson({"cpuNum":nvcpus, "cpuUtil":avgUtils, "rxBytes":rxBytes, "txBytes":txBytes})
+            logger.debug(OvmVm.getVmStats, rs)
+            return rs           
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmVm.getVmStats, errmsg)
+            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.getVmStats), errmsg)
+    
+    @staticmethod
+    def migrate(vmName, targetHost):
+        try:
+            vmPath = OvmHost()._vmNameToPath(vmName)
+            raiseExceptionIfFail(xen_migrate_vm(vmPath, targetHost))
+            unregister_vm(vmPath)
+            OvmVm()._cleanUpOwnerFile(vmPath)
+            return SUCC()
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmVm.migrate, errmsg)
+            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.migrate), errmsg)
+    
+    @staticmethod
+    def register(vmName):
+        try:
+            vmPath = OvmHost()._vmNameToPath(vmName)
+            raiseExceptionIfFail(register_vm(vmPath))
+            OvmVm()._tapAOwnerFile(vmPath)
+            vncPort= successToMap(xen_get_vnc_port(vmName))['vnc_port']
+            rs = toGson({"vncPort":str(vncPort)})
+            logger.debug(OvmVm.register, rs)
+            return rs
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmVm.register, errmsg)
+            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.register), errmsg)
+    
+    @staticmethod
+    def getVncPort(vmName):
+        try:
+            vncPort= successToMap(xen_get_vnc_port(vmName))['vnc_port']
+            rs = toGson({"vncPort":vncPort})
+            logger.debug(OvmVm.getVncPort, rs)
+            return rs
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmVm.getVncPort, errmsg)
+            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.getVncPort), errmsg)
+        
+    @staticmethod
+    def detachOrAttachIso(vmName, iso, isAttach):
+        try:
+            if vmName in OvmHost.getAllVms():
+                scope = 'both'
+                vmPath = OvmHost()._vmNameToPath(vmName)
+            else:
+                scope = 'cfg'
+                vmPath = OvmHost()._getVmPathFromPrimaryStorage(vmName)
+            
+            vmType = OvmVm()._getVmTypeFromConfigFile(vmPath)
+            if vmType != 'HVM':
+                raise Exception("Only HVM supports attaching/detaching ISO")
+            
+            if not isAttach:
+                iso = ''
+            else:
+                isoName = basename(iso)
+                isoMountPoint = OvmVm()._getIsoMountPath(vmPath)
+                isoOnSecStorage = dirname(iso)
+                OvmStoragePool()._mount(isoOnSecStorage, isoMountPoint)
+                iso = join(isoMountPoint, isoName)
+                       
+            exceptionIfNoSuccess(xen_change_vm_cdrom(vmPath, iso, scope))
+            return SUCC()
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmVm.detachOrAttachIso, errmsg)
+            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.detachOrAttachIso), errmsg)
+        
+if __name__ == "__main__":
+    import sys
+    print OvmVm.getDetails(sys.argv[1])
+    #print OvmVm.getVmStats(sys.argv[1])
\ No newline at end of file
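
For reference, OvmVmDecoder above expects a flat JSON spec carrying cpuNum,
memory, name, uuid, bootDev and type, plus nested rootDisk, disks and vifs
objects. A minimal sketch of driving the module directly on an OVM host
follows; the disk and vif field names (path, type, mac, bridge) are inferred
from their usage above, and all concrete values are hypothetical:

    # Hypothetical smoke test for OvmVm.create, run on an OVM host with the
    # ovs-agent modules on PYTHONPATH; the real spec is produced by the
    # management server.
    import json
    from OvmVmModule import OvmVm

    spec = {
        "cpuNum": 1,
        "memory": 536870912,   # bytes; the module converts with BytesToM()
        "name": "i-2-3-VM",
        "uuid": "hypothetical-uuid",
        "bootDev": "HDD",      # or "CD" to boot from an ISO
        "type": "PV",          # "HVM", "PV" or "FROMCONFIGFILE"
        "rootDisk": {"path": "/OVS/running_pool/i-2-3-VM/root.raw", "type": "w"},
        "disks": [],
        "vifs": [{"mac": "02:00:00:aa:bb:cc", "bridge": "xenbr0", "type": "netfront"}],
    }
    print OvmVm.create(json.dumps(spec))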

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmVolumeModule.py
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmVolumeModule.py b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmVolumeModule.py
new file mode 100755
index 0000000..8daa846
--- /dev/null
+++ b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmVolumeModule.py
@@ -0,0 +1,156 @@
+# Copyright 2012 Citrix Systems, Inc. Licensed under the
+# Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License.  Citrix Systems, Inc.
+# reserves all rights not expressly granted by the License.
+# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# 
+# Automatically generated by addcopyright.py at 04/03/2012
+'''
+Created on June 2, 2011
+
+@author: frank
+'''
+from OvmCommonModule import *
+from OvmStoragePoolModule import OvmStoragePool
+from OVSXUtility import xen_create_disk
+from OvmHostModule import OvmHost
+import os
+
+logger = OvmLogger("OvmVolume")
+
+class OvmVolumeDecoder(json.JSONDecoder):
+    def decode(self, jStr):
+        deDict = asciiLoads(jStr)
+        vol = OvmVolume()
+        setAttrFromDict(vol, 'uuid', deDict)
+        setAttrFromDict(vol, 'size', deDict, long)
+        setAttrFromDict(vol, 'poolUuid', deDict)
+        return vol
+    
+class OvmVolumeEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if not isinstance(obj, OvmVolume): raise Exception("%s is not an instance of OvmVolume"%type(obj))
+        dct = {}
+        safeDictSet(obj, dct, 'name')
+        safeDictSet(obj, dct, 'uuid')
+        safeDictSet(obj, dct, 'poolUuid')
+        safeDictSet(obj, dct, 'path')
+        safeDictSet(obj, dct, 'size')
+        return dct
+    
+def toOvmVolume(jStr):
+    return json.loads(jStr, cls=OvmVolumeDecoder)
+
+def fromOvmVolume(vol):
+    return normalizeToGson(json.dumps(vol, cls=OvmVolumeEncoder))
+
+class OvmVolume(OvmObject):
+    name = ''
+    uuid = ''
+    poolUuid = ''
+    path = ''
+    size = 0
+    
+    @staticmethod
+    def createDataDisk(poolUuid, size, isRoot):
+        try:
+            vol = OvmVolume()
+            vol.size = long(size)
+            vol.poolUuid = poolUuid
+            pool = OvmStoragePool()
+            sr = pool._getSrByNameLable(vol.poolUuid)
+            if isRoot:
+                path = join(sr.mountpoint, 'running_pool', get_uuid())
+            else:
+                path = join(sr.mountpoint, 'shareDisk')
+            if not exists(path): os.makedirs(path)
+            freeSpace = pool._getSpaceinfoOfDir(path)
+            if freeSpace < vol.size:
+                raise Exception("%s has not enough space (available:%s, required:%s"%(path, freeSpace, vol.size))
+            
+            vol.uuid = get_uuid()
+            vol.name = vol.uuid + '.raw'
+            filePath = join(path, vol.name)
+            exceptionIfNoSuccess(xen_create_disk(filePath, BytesToM(vol.size)), "Create datadisk %s failed"%filePath)
+            vol.path = filePath
+            rs = fromOvmVolume(vol)
+            logger.debug(OvmVolume.createDataDisk, rs)
+            return rs
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmVolume.createDataDisk, errmsg)
+            raise XmlRpcFault(toErrCode(OvmVolume, OvmVolume.createDataDisk), errmsg)
+        
+    @staticmethod
+    def createFromTemplate(poolUuid, templateUrl):
+        try:
+            if not exists(templateUrl):
+                raise Exception("Cannot find template:%s"%templateUrl)
+            sr = OvmStoragePool()._getSrByNameLable(poolUuid)
+            volDirUuid = get_uuid()
+            volUuid = get_uuid()
+            priStorageMountPoint = sr.mountpoint
+            volDir = join(priStorageMountPoint, 'running_pool', volDirUuid)
+            if exists(volDir):
+                raise Exception("Volume dir %s alreay existed, can not override"%volDir)
+            os.makedirs(volDir)
+            OvmStoragePool()._checkDirSizeForImage(volDir, templateUrl)
+            volName = volUuid + '.raw'
+            tgt = join(volDir, volName)
+            cpVolCmd = ['cp', templateUrl, tgt]
+            doCmd(cpVolCmd)
+            volSize = os.path.getsize(tgt)
+            vol = OvmVolume()
+            vol.name = volName
+            vol.path = tgt
+            vol.size = volSize
+            vol.uuid = volUuid
+            vol.poolUuid = poolUuid
+            rs = fromOvmVolume(vol)
+            logger.debug(OvmVolume.createFromTemplate, rs)
+            return rs
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmVolume.createFromTemplate, errmsg)
+            raise XmlRpcFault(toErrCode(OvmVolume, OvmVolume.createFromTemplate), errmsg)
+    
+    @staticmethod
+    def destroy(poolUuid, path):
+        try:
+            OvmStoragePool()._getSrByNameLable(poolUuid)
+            if not exists(path): raise Exception("Cannot find %s"%path)
+            dir = dirname(path)
+            if exists(join(dir, 'vm.cfg')):
+                # delete root disk
+                vmNamePath = join(dir, 'vmName')
+                if exists(vmNamePath):
+                    vmNameFd = open(vmNamePath, 'r')
+                    vmName = vmNameFd.readline()
+                    vmName = vmName.rstrip('\n')
+                    link = join(dirname(dir), vmName)
+                    doCmd(['rm', '-rf', link])
+                    vmNameFd.close()
+                else:
+                    logger.warning(OvmVolume.destroy, "Cannot find vmName file in %s"%dir)
+                doCmd(['rm','-rf', dir])
+            else:
+                doCmd(['rm', path])
+            return SUCC()
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmVolume.destroy, errmsg)
+            raise XmlRpcFault(toErrCode(OvmVolume, OvmVolume.destroy), errmsg)
+
+
+if __name__ == "__main__":
+    import sys
+    # Hypothetical smoke test; the original called OvmVm's detachOrAttachIso,
+    # which OvmVolume does not define.
+    print OvmVolume.createFromTemplate(sys.argv[1], sys.argv[2])
\ No newline at end of file
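
The volume calls above hand results back as Gson-style JSON strings built by
fromOvmVolume(); a minimal sketch of the matching decode path, assuming the
module is importable on the host and with hypothetical values:

    # Hypothetical round trip through the OvmVolume JSON helpers shown above.
    from OvmVolumeModule import toOvmVolume, fromOvmVolume

    vol = toOvmVolume('{"uuid":"vol-uuid", "size":1073741824, "poolUuid":"pool-1"}')
    print vol.uuid, vol.size, vol.poolUuid   # fields set by OvmVolumeDecoder
    print fromOvmVolume(vol)                 # re-encoded for the management server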

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/configureOvm.sh
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/configureOvm.sh b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/configureOvm.sh
new file mode 100755
index 0000000..1fffd5a
--- /dev/null
+++ b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/configureOvm.sh
@@ -0,0 +1,127 @@
+#!/bin/sh
+# Copyright 2012 Citrix Systems, Inc. Licensed under the
+# Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License.  Citrix Systems, Inc.
+# reserves all rights not expressly granted by the License.
+# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# 
+# Automatically generated by addcopyright.py at 04/03/2012
+
+errExit() {
+    echo "$@"
+    exit 1
+}
+
+stopHeartbeat() {
+    pidFile="/var/run/ovs-agent/heartbeat.pid"
+    if [ -f $pidFile ]; then
+        pid=`cat $pidFile`
+        ps -p $pid &>/dev/null
+        if [ $? -eq 0 ]; then
+            kill $pid &>/dev/null
+        fi
+    fi
+}
+
+openPortOnIptables() {
+    port="$1"
+    protocol="$2"
+    chkconfig --list iptables | grep "on" >/dev/null
+    if [ $? -eq 0 ]; then
+        iptables-save | grep "A INPUT -p $protocol -m $protocol --dport $port -j ACCEPT" >/dev/null
+        if [ $? -ne 0 ]; then
+            iptables -I INPUT 1 -p $protocol --dport $port -j ACCEPT
+            if [ $? -ne 0 ]; then
+                errExit "iptables -I INPUT 1 -p $protocol --dport $port -j ACCEPT failed"
+            fi
+            echo "iptables: opened $protocol port $port"
+        fi
+    fi
+}
+
+applyPatch() {
+    patchFile="$1"
+    level="$2"
+
+    [ ! -f $patchFile ] && errExit "Cannot find $patchFile"
+
+    pushd /opt/ovs-agent-latest &>/dev/null
+    test=`patch -p$level --dry-run -N < $patchFile`
+    if [ $? -ne 0 ]; then
+        echo "$test" | grep "Reversed (or previously applied) patch detected" &>/dev/null
+        if [ $? -eq 0 ]; then
+            # The file has already been patched
+            popd &>/dev/null
+            return
+        else
+            popd &>/dev/null
+            errExit "Cannot apply $patchFile because: $test"
+        fi
+    fi
+    patch -p$level < $patchFile
+    [ $? -ne 0 ] && errExit "Applying $patchFile failed"
+    popd &>/dev/null
+}
+
+postSetup() {
+    openPortOnIptables 7777 tcp # for OCFS2, maybe tcp only
+    openPortOnIptables 7777 udp
+    openPortOnIptables 3260 tcp # for ISCSI, maybe tcp only
+    openPortOnIptables 3260 udp
+    applyPatch "/opt/ovs-agent-latest/OvmPatch.patch" 2
+    applyPatch "/opt/ovs-agent-latest/OvmDontTouchOCFS2ClusterWhenAgentStart.patch" 1
+    applyPatch "/opt/ovs-agent-latest/Fixget_storage_reposExceptionDueToWrongReturnValueCheck.patch" 1
+
+    stopHeartbeat
+
+    /etc/init.d/ovs-agent restart --disable-nowayout
+    [ $? -ne 0 ] && errExit "Restart ovs agent failed"
+    exit 0
+}
+
+preSetup() {
+    agentConfig="/etc/ovs-agent/agent.ini"
+    agentInitScript="/etc/init.d/ovs-agent"
+
+    [ ! -f $agentConfig ] && errExit "Cannot find $agentConfig"
+    [ ! -f $agentInitScript ] && errExit "Cannot find $agentInitScript"
+
+    version=`grep "version="  $agentInitScript | cut -d "=" -f 2`
+    [ x"$version" != x"2.3" ] && errExit "The OVS agent version is $version, we only support 2.3 now"
+
+    # disable SSL
+    sed -i 's/ssl=enable/ssl=disable/g' $agentConfig
+    [ $? -ne 0 ] && errExit "configure ovs agent to non ssl failed"
+
+    if [ ! -L /opt/ovs-agent-latest ]; then
+        if eval $agentInitScript status | grep 'down' >/dev/null; then
+            $agentInitScript start
+            [ $? -ne 0 ] && errExit "Start ovs agent failed"
+        fi
+        [ ! -L /opt/ovs-agent-latest ] && errExit "No link at /opt/ovs-agent-latest"
+    fi
+    exit 0
+}
+
+[ $# -ne 1 ] && errExit "Usage: configureOvm.sh command"
+
+case "$1" in
+    preSetup)
+        preSetup
+        ;;
+    postSetup)
+        postSetup
+        ;;
+    *)
+        errExit "Valid commands: preSetup postSetup"
+esac
+
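
configureOvm.sh takes a single command argument, preSetup or postSetup. A
hedged sketch of driving it remotely the way the management server runs its
other SSH checks (paramiko stands in for the Java-side SSHCmdHelper here, and
the remote script path is an assumption):

    # Hypothetical remote invocation of configureOvm.sh; the management server
    # itself uses trilead-ssh2 via SSHCmdHelper, as in OvmDiscoverer.java below.
    import paramiko

    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect("ovm-host.example.com", username="root", password="password")
    for cmd in ("sh /opt/configureOvm.sh preSetup",
                "sh /opt/configureOvm.sh postSetup"):
        stdin, stdout, stderr = client.exec_command(cmd)
        if stdout.channel.recv_exit_status() != 0:
            raise Exception("%s failed: %s" % (cmd, stderr.read()))
    client.close()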

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmDiscoverer.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmDiscoverer.java b/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmDiscoverer.java
new file mode 100755
index 0000000..3523562
--- /dev/null
+++ b/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmDiscoverer.java
@@ -0,0 +1,287 @@
+// Copyright 2012 Citrix Systems, Inc. Licensed under the
+// Apache License, Version 2.0 (the "License"); you may not use this
+// file except in compliance with the License.  Citrix Systems, Inc.
+// reserves all rights not expressly granted by the License.
+// You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// 
+// Automatically generated by addcopyright.py at 04/03/2012
+package com.cloud.ovm.hypervisor;
+
+import java.net.InetAddress;
+import java.net.URI;
+import java.net.UnknownHostException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+
+import javax.ejb.Local;
+import javax.naming.ConfigurationException;
+
+import org.apache.log4j.Logger;
+import org.apache.xmlrpc.XmlRpcException;
+
+import com.cloud.configuration.Config;
+import com.cloud.agent.api.StartupCommand;
+import com.cloud.agent.api.StartupRoutingCommand;
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.exception.DiscoveryException;
+import com.cloud.host.HostInfo;
+import com.cloud.host.HostVO;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.ovm.object.Connection;
+import com.cloud.ovm.object.OvmHost;
+import com.cloud.resource.Discoverer;
+import com.cloud.resource.DiscovererBase;
+import com.cloud.resource.ResourceManager;
+import com.cloud.resource.ResourceStateAdapter;
+import com.cloud.resource.ServerResource;
+import com.cloud.resource.UnableDeleteHostException;
+import com.cloud.utils.component.Inject;
+import com.cloud.utils.db.SearchCriteria;
+import com.cloud.utils.db.SearchCriteria2;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.utils.ssh.SSHCmdHelper;
+
+@Local(value = Discoverer.class)
+public class OvmDiscoverer extends DiscovererBase implements Discoverer,
+		ResourceStateAdapter {
+	private static final Logger s_logger = Logger
+			.getLogger(OvmDiscoverer.class);
+	protected String _publicNetworkDevice;
+	protected String _privateNetworkDevice;
+	protected String _guestNetworkDevice;
+
+	@Inject
+	ClusterDao _clusterDao;
+	@Inject
+	ResourceManager _resourceMgr;
+
+	@Override
+	public boolean configure(String name, Map<String, Object> params)
+			throws ConfigurationException {
+		super.configure(name, params);
+		_publicNetworkDevice = _params.get(Config.OvmPublicNetwork.key());
+		_privateNetworkDevice = _params.get(Config.OvmPrivateNetwork.key());
+		_guestNetworkDevice = _params.get(Config.OvmGuestNetwork.key());
+		_resourceMgr.registerResourceStateAdapter(this.getClass()
+				.getSimpleName(), this);
+		return true;
+	}
+
+	protected OvmDiscoverer() {
+	}
+
+	@Override
+	public boolean stop() {
+		_resourceMgr.unregisterResourceStateAdapter(this.getClass()
+				.getSimpleName());
+		return super.stop();
+	}
+
+	private boolean checkIfExisted(String guid) {
+		SearchCriteria2<HostVO, HostVO> sc = SearchCriteria2.create(HostVO.class);
+		sc.addAnd(sc.getEntity().getGuid(), SearchCriteria.Op.EQ, guid);
+		sc.addAnd(sc.getEntity().getHypervisorType(), SearchCriteria.Op.EQ,
+				HypervisorType.Ovm);
+		List<HostVO> hosts = sc.list();
+		return !hosts.isEmpty();
+	}
+
+	@Override
+	public Map<? extends ServerResource, Map<String, String>> find(long dcId,
+			Long podId, Long clusterId, URI url, String username,
+			String password, List<String> hostTags) throws DiscoveryException {
+		Connection conn = null;
+
+		if (!url.getScheme().equals("http")) {
+			String msg = "urlString is not http so we're not taking care of the discovery for this: "
+					+ url;
+			s_logger.debug(msg);
+			return null;
+		}
+		if (clusterId == null) {
+			String msg = "must specify cluster Id when add host";
+			s_logger.debug(msg);
+			throw new CloudRuntimeException(msg);
+		}
+
+		if (podId == null) {
+			String msg = "must specify pod Id when add host";
+			s_logger.debug(msg);
+			throw new CloudRuntimeException(msg);
+		}
+
+		ClusterVO cluster = _clusterDao.findById(clusterId);
+		if (cluster == null
+				|| (cluster.getHypervisorType() != HypervisorType.Ovm)) {
+			if (s_logger.isInfoEnabled())
+				s_logger.info("invalid cluster id or cluster is not for Ovm hypervisors");
+			return null;
+		}
+
+		String agentUsername = _params.get("agentusername");
+		if (agentUsername == null) {
+			throw new CloudRuntimeException("Agent user name must be specified");
+		}
+
+		String agentPassword = _params.get("agentpassword");
+		if (agentPassword == null) {
+			throw new CloudRuntimeException("Agent password must be specified");
+		}
+
+		try {
+			String hostname = url.getHost();
+			InetAddress ia = InetAddress.getByName(hostname);
+			String hostIp = ia.getHostAddress();
+			String guid = UUID.nameUUIDFromBytes(hostIp.getBytes()).toString();
+
+			if (checkIfExisted(guid)) {
+				throw new CloudRuntimeException("The host " + hostIp
+						+ " has been added before");
+			}
+
+			s_logger.debug("Ovm discover is going to disover host having guid "
+					+ guid);
+
+			ClusterVO clu = _clusterDao.findById(clusterId);
+			if (clu.getGuid() == null) {
+				clu.setGuid(UUID.randomUUID().toString());
+				_clusterDao.update(clusterId, clu);
+			}
+
+			com.trilead.ssh2.Connection sshConnection = SSHCmdHelper
+					.acquireAuthorizedConnection(hostIp, username, password);
+			if (sshConnection == null) {
+				throw new DiscoveryException(
+						String.format(
+								"Cannot connect to ovm host (IP=%1$s, username=%2$s), discover failed",
+								hostIp, username));
+			}
+
+			if (!SSHCmdHelper.sshExecuteCmd(sshConnection,
+					"[ -f '/etc/ovs-agent/agent.ini' ]")) {
+				throw new DiscoveryException(
+						"Cannot find /etc/ovs-agent/agent.ini on " + hostIp);
+			}
+
+			Map<String, String> details = new HashMap<String, String>();
+			OvmResourceBase ovmResource = new OvmResourceBase();
+			details.put("ip", hostIp);
+			details.put("username", username);
+			details.put("password", password);
+			details.put("zone", Long.toString(dcId));
+			details.put("guid", guid);
+			details.put("pod", Long.toString(podId));
+			details.put("cluster", Long.toString(clusterId));
+			details.put("agentusername", agentUsername);
+			details.put("agentpassword", agentPassword);
+			if (_publicNetworkDevice != null) {
+				details.put("public.network.device", _publicNetworkDevice);
+			}
+			if (_privateNetworkDevice != null) {
+				details.put("private.network.device", _privateNetworkDevice);
+			}
+			if (_guestNetworkDevice != null) {
+				details.put("guest.network.device", _guestNetworkDevice);
+			}
+
+			Map<String, Object> params = new HashMap<String, Object>();
+			params.putAll(details);
+			ovmResource.configure("Ovm Server", params);
+			ovmResource.start();
+
+			conn = new Connection(hostIp, "oracle", agentPassword);
+			/* After resource start, we are able to execute our agent api */
+			OvmHost.Details d = OvmHost.getDetails(conn);
+			details.put("agentVersion", d.agentVersion);
+			details.put(HostInfo.HOST_OS_KERNEL_VERSION, d.dom0KernelVersion);
+			details.put(HostInfo.HYPERVISOR_VERSION, d.hypervisorVersion);
+
+			Map<OvmResourceBase, Map<String, String>> resources = new HashMap<OvmResourceBase, Map<String, String>>();
+			resources.put(ovmResource, details);
+			return resources;
+		} catch (XmlRpcException e) {
+			s_logger.debug("XmlRpc exception, Unable to discover OVM: " + url,
+					e);
+			return null;
+		} catch (UnknownHostException e) {
+			s_logger.debug(
+					"Host name resolve failed exception, Unable to discover OVM: "
+							+ url, e);
+			return null;
+		} catch (ConfigurationException e) {
+			s_logger.debug(
+					"Configure resource failed, Unable to discover OVM: " + url,
+					e);
+			return null;
+		} catch (Exception e) {
+			s_logger.debug("Unable to discover OVM: " + url, e);
+			return null;
+		}
+	}
+
+	@Override
+	public void postDiscovery(List<HostVO> hosts, long msId)
+			throws DiscoveryException {
+		// TODO Auto-generated method stub
+
+	}
+
+	@Override
+	public boolean matchHypervisor(String hypervisor) {
+		return HypervisorType.Ovm.toString().equalsIgnoreCase(hypervisor);
+	}
+
+	@Override
+	public HypervisorType getHypervisorType() {
+		return HypervisorType.Ovm;
+	}
+
+	@Override
+	public HostVO createHostVOForConnectedAgent(HostVO host,
+			StartupCommand[] cmd) {
+		// TODO Auto-generated method stub
+		return null;
+	}
+
+	@Override
+	public HostVO createHostVOForDirectConnectAgent(HostVO host,
+			StartupCommand[] startup, ServerResource resource,
+			Map<String, String> details, List<String> hostTags) {
+		StartupCommand firstCmd = startup[0];
+		if (!(firstCmd instanceof StartupRoutingCommand)) {
+			return null;
+		}
+
+		StartupRoutingCommand ssCmd = ((StartupRoutingCommand) firstCmd);
+		if (ssCmd.getHypervisorType() != HypervisorType.Ovm) {
+			return null;
+		}
+
+		return _resourceMgr.fillRoutingHostVO(host, ssCmd, HypervisorType.Ovm,
+				details, hostTags);
+	}
+
+	@Override
+	public DeleteHostAnswer deleteHost(HostVO host, boolean isForced,
+			boolean isForceDeleteStorage) throws UnableDeleteHostException {
+		if (host.getType() != com.cloud.host.Host.Type.Routing
+				|| host.getHypervisorType() != HypervisorType.Ovm) {
+			return null;
+		}
+
+		_resourceMgr.deleteRoutingHost(host, isForced, isForceDeleteStorage);
+		return new DeleteHostAnswer(true);
+	}
+
+}
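
The host GUID computed in find() above comes from Java's
UUID.nameUUIDFromBytes() over the resolved IP address, i.e. a version-3 (MD5)
UUID with no namespace prefix. A small Python sketch that reproduces the same
value, handy when cross-checking host rows created by this discoverer:

    # Reproduces Java's UUID.nameUUIDFromBytes(hostIp.getBytes()):
    # MD5 the raw bytes, then force version 3 and the IETF variant bits.
    import hashlib
    import uuid

    def host_guid(host_ip):
        digest = bytearray(hashlib.md5(host_ip).digest())
        digest[6] = (digest[6] & 0x0f) | 0x30   # version 3
        digest[8] = (digest[8] & 0x3f) | 0x80   # IETF variant
        return str(uuid.UUID(bytes=str(digest)))

    print host_guid("192.168.1.10")   # hypothetical host IP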

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmFencer.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmFencer.java b/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmFencer.java
new file mode 100755
index 0000000..fe33677
--- /dev/null
+++ b/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmFencer.java
@@ -0,0 +1,122 @@
+// Copyright 2012 Citrix Systems, Inc. Licensed under the
+// Apache License, Version 2.0 (the "License"); you may not use this
+// file except in compliance with the License.  Citrix Systems, Inc.
+// reserves all rights not expressly granted by the License.
+// You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// 
+// Automatically generated by addcopyright.py at 04/03/2012
+package com.cloud.ovm.hypervisor;
+
+import java.util.List;
+import java.util.Map;
+
+import javax.ejb.Local;
+import javax.naming.ConfigurationException;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.FenceAnswer;
+import com.cloud.agent.api.FenceCommand;
+import com.cloud.exception.AgentUnavailableException;
+import com.cloud.exception.OperationTimedoutException;
+import com.cloud.ha.FenceBuilder;
+import com.cloud.host.HostVO;
+import com.cloud.host.Status;
+import com.cloud.host.dao.HostDao;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.utils.component.Inject;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.resource.ResourceManager;
+
+@Local(value=FenceBuilder.class)
+public class OvmFencer implements FenceBuilder {
+	private static final Logger s_logger = Logger.getLogger(OvmFencer.class);
+	String _name;
+	@Inject AgentManager _agentMgr;
+    @Inject ResourceManager _resourceMgr;
+	
+	@Override
+	public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
+		_name = name;
+		return true;
+	}
+
+	@Override
+	public String getName() {
+		// TODO Auto-generated method stub
+		return _name;
+	}
+
+	@Override
+	public boolean start() {
+		// TODO Auto-generated method stub
+		return true;
+	}
+
+	@Override
+	public boolean stop() {
+		// TODO Auto-generated method stub
+		return true;
+	}
+	
+	public OvmFencer() {
+		super();
+	}
+
+	@Override
+	public Boolean fenceOff(VMInstanceVO vm, HostVO host) {
+		if (host.getHypervisorType() != HypervisorType.Ovm) {
+			s_logger.debug("Don't know how to fence non Ovm hosts " + host.getHypervisorType());
+			return null;
+		}
+		
+		List<HostVO> hosts = _resourceMgr.listAllHostsInCluster(host.getClusterId());
+		FenceCommand fence = new FenceCommand(vm, host);
+		
+		for (HostVO h : hosts) {
+			if (h.getHypervisorType() != HypervisorType.Ovm) {
+				continue;
+			}
+			
+			if( h.getStatus() != Status.Up ) {
+				continue;
+			}
+			
+			if( h.getId() == host.getId() ) {
+				continue;
+			}
+			
+			FenceAnswer answer;
+			try {
+				answer = (FenceAnswer)_agentMgr.send(h.getId(), fence);
+			} catch (AgentUnavailableException e) {
+				if (s_logger.isDebugEnabled()) {
+					s_logger.debug("Moving on to the next host because " + h.toString() + " is unavailable");
+				}
+				continue;
+			} catch (OperationTimedoutException e) {
+				if (s_logger.isDebugEnabled()) {
+					s_logger.debug("Moving on to the next host because " + h.toString() + " is unavailable");
+				}
+				continue;
+			}
+			
+			if (answer != null && answer.getResult()) {
+				return true;
+			}
+		}
+		
+		if (s_logger.isDebugEnabled()) {
+			s_logger.debug("Unable to fence off " + vm.toString() + " on " + host.toString());
+		}
+		
+		return false;
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmGuru.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmGuru.java b/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmGuru.java
new file mode 100755
index 0000000..b24be71
--- /dev/null
+++ b/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmGuru.java
@@ -0,0 +1,57 @@
+// Copyright 2012 Citrix Systems, Inc. Licensed under the
+// Apache License, Version 2.0 (the "License"); you may not use this
+// file except in compliance with the License.  Citrix Systems, Inc.
+// reserves all rights not expressly granted by the License.
+// You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// 
+// Automatically generated by addcopyright.py at 04/03/2012
+package com.cloud.ovm.hypervisor;
+
+import javax.ejb.Local;
+
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.hypervisor.HypervisorGuru;
+import com.cloud.hypervisor.HypervisorGuruBase;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.storage.GuestOSVO;
+import com.cloud.storage.dao.GuestOSDao;
+import com.cloud.utils.component.Inject;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.VirtualMachineProfile;
+
+@Local(value=HypervisorGuru.class)
+public class OvmGuru extends HypervisorGuruBase implements HypervisorGuru {
+	@Inject GuestOSDao _guestOsDao;
+	protected OvmGuru() {
+		super();
+	}
+	
+	@Override
+	public HypervisorType getHypervisorType() {
+		return HypervisorType.Ovm;
+	}
+
+	@Override
+	public <T extends VirtualMachine> VirtualMachineTO implement(
+			VirtualMachineProfile<T> vm) {
+		VirtualMachineTO to = toVirtualMachineTO(vm);
+		to.setBootloader(vm.getBootLoaderType());
+
+		// Determine the VM's OS description
+		GuestOSVO guestOS = _guestOsDao.findById(vm.getVirtualMachine().getGuestOSId());
+		to.setOs(guestOS.getDisplayName());
+
+		return to;
+	}
+
+    @Override
+    public boolean trackVmHostChange() {
+        return true;
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmHelper.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmHelper.java b/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmHelper.java
new file mode 100755
index 0000000..6330058
--- /dev/null
+++ b/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmHelper.java
@@ -0,0 +1,61 @@
+// Copyright 2012 Citrix Systems, Inc. Licensed under the
+// Apache License, Version 2.0 (the "License"); you may not use this
+// file except in compliance with the License.  Citrix Systems, Inc.
+// reserves all rights not expressly granted by the License.
+// You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// 
+// Automatically generated by addcopyright.py at 04/03/2012
+package com.cloud.ovm.hypervisor;
+
+import java.util.HashMap;
+
+public class OvmHelper {
+	 private static final HashMap<String, String> _ovmMap = new HashMap<String, String>();
+	 
+	 public static final String ORACLE_LINUX = "Oracle Linux";
+	 public static final String ORACLE_SOLARIS = "Oracle Solaris";
+	 public static final String WINDOWS = "Windows";
+	 
+	 static {
+	    _ovmMap.put("Oracle Enterprise Linux 6.0 (32-bit)", ORACLE_LINUX);
+	    _ovmMap.put("Oracle Enterprise Linux 6.0 (64-bit)", ORACLE_LINUX);
+		_ovmMap.put("Oracle Enterprise Linux 5.0 (32-bit)", ORACLE_LINUX);
+		_ovmMap.put("Oracle Enterprise Linux 5.0 (64-bit)", ORACLE_LINUX);
+		_ovmMap.put("Oracle Enterprise Linux 5.1 (32-bit)", ORACLE_LINUX);
+		_ovmMap.put("Oracle Enterprise Linux 5.1 (64-bit)", ORACLE_LINUX);
+		_ovmMap.put("Oracle Enterprise Linux 5.2 (32-bit)", ORACLE_LINUX);
+		_ovmMap.put("Oracle Enterprise Linux 5.2 (64-bit)", ORACLE_LINUX);
+		_ovmMap.put("Oracle Enterprise Linux 5.3 (32-bit)", ORACLE_LINUX);
+		_ovmMap.put("Oracle Enterprise Linux 5.3 (64-bit)", ORACLE_LINUX);
+		_ovmMap.put("Oracle Enterprise Linux 5.4 (32-bit)", ORACLE_LINUX);
+		_ovmMap.put("Oracle Enterprise Linux 5.4 (64-bit)", ORACLE_LINUX);
+		_ovmMap.put("Oracle Enterprise Linux 5.5 (32-bit)", ORACLE_LINUX);
+		_ovmMap.put("Oracle Enterprise Linux 5.5 (64-bit)", ORACLE_LINUX);
+		_ovmMap.put("Windows 7 (32-bit)", WINDOWS);
+		_ovmMap.put("Windows 7 (64-bit)", WINDOWS);
+		_ovmMap.put("Windows Server 2003 (32-bit)", WINDOWS);
+		_ovmMap.put("Windows Server 2003 (64-bit)", WINDOWS);
+		_ovmMap.put("Windows Server 2008 (32-bit)", WINDOWS);
+		_ovmMap.put("Windows Server 2008 (64-bit)", WINDOWS);
+		_ovmMap.put("Windows Server 2008 R2 (64-bit)", WINDOWS);
+		_ovmMap.put("Windows 2000 SP4 (32-bit)", WINDOWS);
+		_ovmMap.put("Windows Vista (32-bit)", WINDOWS);
+		_ovmMap.put("Windows XP SP2 (32-bit)", WINDOWS);
+		_ovmMap.put("Windows XP SP3 (32-bit)", WINDOWS);
+		_ovmMap.put("Sun Solaris 10(32-bit)", ORACLE_SOLARIS);
+		_ovmMap.put("Sun Solaris 10(64-bit)", ORACLE_SOLARIS);
+		_ovmMap.put("Sun Solaris 9(Experimental)", ORACLE_SOLARIS);
+		_ovmMap.put("Sun Solaris 8(Experimental)", ORACLE_SOLARIS);
+		_ovmMap.put("Sun Solaris 11 (32-bit)", ORACLE_SOLARIS);
+		_ovmMap.put("Sun Solaris 11 (64-bit)", ORACLE_SOLARIS);
+	}
+	 
+	public static String getOvmGuestType(String stdType) {
+		return _ovmMap.get(stdType);
+	}
+}