You are viewing a plain text version of this content. The canonical link for it is here.
Posted to tashi-commits@incubator.apache.org by st...@apache.org on 2009/05/04 21:18:19 UTC
svn commit: r771448 - in /incubator/tashi/trunk: etc/TashiDefaults.cfg
src/tashi/nodemanager/nodemanagerservice.py
src/tashi/nodemanager/vmcontrol/qemu.py
src/tashi/nodemanager/vmcontrol/vmcontrolinterface.py
src/tashi/nodemanager/vmcontrol/xenpv.py
Author: stroucki
Date: Mon May 4 21:18:19 2009
New Revision: 771448
URL: http://svn.apache.org/viewvc?rev=771448&view=rev
Log:
Patches from mryan to update xen defaults and support querying hosts from the vmm.
Modified:
incubator/tashi/trunk/etc/TashiDefaults.cfg
incubator/tashi/trunk/src/tashi/nodemanager/nodemanagerservice.py
incubator/tashi/trunk/src/tashi/nodemanager/vmcontrol/qemu.py
incubator/tashi/trunk/src/tashi/nodemanager/vmcontrol/vmcontrolinterface.py
incubator/tashi/trunk/src/tashi/nodemanager/vmcontrol/xenpv.py
Modified: incubator/tashi/trunk/etc/TashiDefaults.cfg
URL: http://svn.apache.org/viewvc/incubator/tashi/trunk/etc/TashiDefaults.cfg?rev=771448&r1=771447&r2=771448&view=diff
==============================================================================
--- incubator/tashi/trunk/etc/TashiDefaults.cfg (original)
+++ incubator/tashi/trunk/etc/TashiDefaults.cfg Mon May 4 21:18:19 2009
@@ -78,6 +78,9 @@
[XenPV]
vmNamePrefix = tashi
transientdir = /tmp
+defaultVmType = kernel
+defaultKernel = /boot/vmlinuz-xen
+defaultRamdisk = /boot/initrd-xen
[Vfs]
prefix = /var/tmp/
Modified: incubator/tashi/trunk/src/tashi/nodemanager/nodemanagerservice.py
URL: http://svn.apache.org/viewvc/incubator/tashi/trunk/src/tashi/nodemanager/nodemanagerservice.py?rev=771448&r1=771447&r2=771448&view=diff
==============================================================================
--- incubator/tashi/trunk/src/tashi/nodemanager/nodemanagerservice.py (original)
+++ incubator/tashi/trunk/src/tashi/nodemanager/nodemanagerservice.py Mon May 4 21:18:19 2009
@@ -29,7 +29,7 @@
from tashi.services.ttypes import ResumeVmRes, Host, HostState, InstanceState, TashiException, Errors, Instance
from tashi.services import clustermanagerservice
from tashi.nodemanager import RPC
-from tashi import boolean, vmStates, logged, ConnectionManager, timed, version
+from tashi import boolean, vmStates, logged, ConnectionManager, timed
class NodeManagerService(object):
"""RPC handler for the NodeManager
@@ -99,28 +99,6 @@
success()
return True
- def getHostInfo(self):
- host = Host()
- host.id = self.id
- host.name = socket.gethostname()
- memoryStr = os.popen2("head -n 1 /proc/meminfo | awk '{print $2 \" \" $3}'")[1].read().strip()
- if (memoryStr[-2:] == "kB"):
- host.memory = int(memoryStr[:-2])/1024
- elif (memoryStr[-2:] == "mB"):
- host.memory = int(memoryStr[:-2])
- elif (memoryStr[-2:] == "gB"):
- host.memory = int(memoryStr[:-2])*1024
- elif (memoryStr[-2:] == " B"):
- host.memory = int(memoryStr[:-2])/(1024*1024)
- else:
- self.log.warning('Unable to determine amount of physical memory - reporting 0')
- host.memory = 0
- host.cores = os.sysconf("SC_NPROCESSORS_ONLN")
- host.up = True
- host.decayed = False
- host.version = version
- return host
-
def backupVmInfoAndFlushNotifyCM(self):
cm = ConnectionManager(clustermanagerservice.Client, self.cmPort)[self.cmHost]
while True:
@@ -158,7 +136,7 @@
while True:
start = time.time()
try:
- host = self.getHostInfo()
+ host = self.vmm.getHostInfo(self)
instances = self.instances.values()
self.id = cm.registerNodeManager(host, instances)
except Exception, e:
Modified: incubator/tashi/trunk/src/tashi/nodemanager/vmcontrol/qemu.py
URL: http://svn.apache.org/viewvc/incubator/tashi/trunk/src/tashi/nodemanager/vmcontrol/qemu.py?rev=771448&r1=771447&r2=771448&view=diff
==============================================================================
--- incubator/tashi/trunk/src/tashi/nodemanager/vmcontrol/qemu.py (original)
+++ incubator/tashi/trunk/src/tashi/nodemanager/vmcontrol/qemu.py Mon May 4 21:18:19 2009
@@ -29,6 +29,7 @@
from tashi.services.ttypes import *
from tashi.util import broken, logged, scrubString, boolean
+from tashi import version
from vmcontrolinterface import VmControlInterface
log = logging.getLogger(__file__)
@@ -289,7 +290,29 @@
info = open(self.INFO_DIR + "/%d"%(child.pid), "w")
cPickle.dump((child.instance, child.pid, child.ptyFile), info)
info.close()
-
+
+ def getHostInfo(self, service):
+ host = Host()
+ host.id = service.id
+ host.name = socket.gethostname()
+ memoryStr = os.popen2("head -n 1 /proc/meminfo | awk '{print $2 \" \" $3}'")[1].read().strip()
+ if (memoryStr[-2:] == "kB"):
+ host.memory = int(memoryStr[:-2])/1024
+ elif (memoryStr[-2:] == "mB"):
+ host.memory = int(memoryStr[:-2])
+ elif (memoryStr[-2:] == "gB"):
+ host.memory = int(memoryStr[:-2])*1024
+ elif (memoryStr[-2:] == " B"):
+ host.memory = int(memoryStr[:-2])/(1024*1024)
+ else:
+ log.warning('Unable to determine amount of physical memory - reporting 0')
+ host.memory = 0
+ host.cores = os.sysconf("SC_NPROCESSORS_ONLN")
+ host.up = True
+ host.decayed = False
+ host.version = version
+ return host
+
def startVm(self, instance, source):
"""Universal function to start a VM -- used by instantiateVM, resumeVM, and prepReceiveVM"""
clockString = instance.hints.get("clock", "dynticks")
Modified: incubator/tashi/trunk/src/tashi/nodemanager/vmcontrol/vmcontrolinterface.py
URL: http://svn.apache.org/viewvc/incubator/tashi/trunk/src/tashi/nodemanager/vmcontrol/vmcontrolinterface.py?rev=771448&r1=771447&r2=771448&view=diff
==============================================================================
--- incubator/tashi/trunk/src/tashi/nodemanager/vmcontrol/vmcontrolinterface.py (original)
+++ incubator/tashi/trunk/src/tashi/nodemanager/vmcontrol/vmcontrolinterface.py Mon May 4 21:18:19 2009
@@ -79,3 +79,7 @@
def listVms(self):
"""Returns a list of vmIds to the caller"""
raise NotImplementedError
+
+ def getHostInfo(self, service):
+ """Returns a Host object for the current host"""
+ raise NotImplementedError
Modified: incubator/tashi/trunk/src/tashi/nodemanager/vmcontrol/xenpv.py
URL: http://svn.apache.org/viewvc/incubator/tashi/trunk/src/tashi/nodemanager/vmcontrol/xenpv.py?rev=771448&r1=771447&r2=771448&view=diff
==============================================================================
--- incubator/tashi/trunk/src/tashi/nodemanager/vmcontrol/xenpv.py (original)
+++ incubator/tashi/trunk/src/tashi/nodemanager/vmcontrol/xenpv.py Mon May 4 21:18:19 2009
@@ -22,11 +22,12 @@
import time
import threading
import logging
+import socket
from vmcontrolinterface import VmControlInterface
from tashi.services.ttypes import Errors, InstanceState, TashiException
from tashi.services.ttypes import Instance, Host
-from tashi import boolean, convertExceptions, ConnectionManager
+from tashi import boolean, convertExceptions, ConnectionManager, version
from tashi.util import isolatedRPC
import tashi.parallel
@@ -90,7 +91,6 @@
instance.memory = int(vminfo['memory'])
instance.cores = int(vminfo['cores'])
instance.disks = []
-
r[instance.vmId] = instance
return r
@@ -101,14 +101,13 @@
class XenPV(VmControlInterface, threading.Thread):
def __init__(self, config, dfs, cm):
threading.Thread.__init__(self)
- if self.__class__ is VmControlInterface:
- raise NotImplementedError
self.config = config
self.dfs = dfs
self.cm = cm
self.vmNamePrefix = self.config.get("XenPV", "vmNamePrefix")
self.transientDir = self.config.get('XenPV', 'transientDir')
+ self.defaultVmType = self.config.get('XenPV', 'defaultVmType')
self.newvms = listVms(self.vmNamePrefix)
self.hostId = -1
@@ -153,24 +152,61 @@
# a lot easier
########################################
def createXenConfig(self, vmName,
- image, macAddr, memory, cores):
+ image, macAddr, memory, cores, hints):
fn = os.path.join("/tmp", vmName)
- cfgstr = """
-# kernel="/boot/vmlinuz-2.6.24-17-xen"
-# ramdisk="/boot/initrd.img-2.6.24-17-xen"
+ vmType = hints.get('vmtype', self.defaultVmType)
+ print 'starting vm with type: ', vmType
+ bootstr = ''
+ if vmType == 'pvgrub':
+ # FIXME: untested, requires Xen 3.3
+ bootstr = '''
+kernel = '/usr/lib/xen/boot/pv-grub-x86_64.gz'
+extra = '(hd0,0)/grub/menu.lst'
+'''
+ elif vmType == 'pygrub':
+ bootstr = '''
bootloader="/usr/bin/pygrub"
+'''
+ elif vmType == 'kernel':
+ kernel = hints.get('kernel', None)
+ ramdisk = hints.get('ramdisk', None)
+ if kernel == None:
+ try:
+ kernel = self.config.get('XenPV', 'defaultKernel')
+ except:
+ raise Exception, "vmtype=kernel requires kernel= argument"
+ bootstr = "kernel=\"%s\"\n"%kernel
+ if ramdisk == None:
+ try:
+ ramdisk = self.config.get('XenPV', 'defaultRamdisk')
+ except:
+ ramdisk = None
+ if ramdisk != None:
+ bootstr = bootstr + "ramdisk = \"%s\"\n"%ramdisk
+ elif vmType == 'hvm':
+ # FIXME: untested, I don't have any hvm domains set up
+ bootstr = '''
+import os, re
+arch = os.uname()[4]
+if re.search('64', arch):
+ arch_libdir = 'lib64'
+else:
+ arch_libdir = 'lib'
+kernel = '/usr/lib/xen/boot/hvmloader'
+builder = 'hvm'
+'''
+ else:
+ raise Exception, "Unknown vmType in hints: %s"%vmType
+ cfgstr = """
disk=['tap:qcow:%s,xvda1,w']
vif = [ 'mac=%s' ]
-# vif = ['ip=172.19.158.1']
-# vif = ['']
memory=%i
-#cpus is a list of cpus for pinning, this is not what we want
-#cpus=%i
+vcpus=%i
root="/dev/xvda1"
extra='xencons=tty'
"""%(image, macAddr, memory, cores)
f = open(fn, "w")
- f.write(cfgstr)
+ f.write(bootstr+cfgstr)
f.close()
return fn
def deleteXenConfig(self, vmName):
@@ -213,7 +249,8 @@
instance.disks[0].local,
instance.nics[0].mac,
instance.memory,
- instance.cores)
+ instance.cores,
+ instance.hints)
cmd = "xm create %s"%fn
r = os.system(cmd)
# self.deleteXenConfig(name)
@@ -349,8 +386,10 @@
@synchronizedmethod
- def getHostInfo(self):
+ def getHostInfo(self, service):
host = Host()
+ host.id = service.id
+ host.name = socket.gethostname()
memp = subprocess.Popen("xm info | awk '/^total_memory/ { print $3 }' ",
shell = True,
stdout = subprocess.PIPE)
@@ -361,6 +400,9 @@
stdout = subprocess.PIPE)
cores = corep.stdout.readline()
host.cores = int(cores)
+ host.up = True
+ host.decayed = False
+ host.version = version
return host