Posted to tashi-commits@incubator.apache.org by st...@apache.org on 2012/02/29 03:23:18 UTC
svn commit: r1294960 [3/3] - in /incubator/tashi/branches:
stroucki-accounting/ stroucki-accounting/src/tashi/
stroucki-accounting/src/tashi/accounting/
stroucki-accounting/src/tashi/agents/ stroucki-accounting/src/tashi/client/
stroucki-accounting/src...
Modified: incubator/tashi/branches/stroucki-rpyc/src/tashi/agents/locality-server.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-rpyc/src/tashi/agents/locality-server.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-rpyc/src/tashi/agents/locality-server.py (original)
+++ incubator/tashi/branches/stroucki-rpyc/src/tashi/agents/locality-server.py Wed Feb 29 03:23:15 2012
@@ -6,15 +6,15 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
-# under the License.
+# under the License.
# this module provides a service to locate servers that are close
# to a VM. Uses all-pairs shortest path algorithm. Need to provide
@@ -45,184 +45,184 @@ from scipy import *
cnames = {}
def cannonicalName(hn):
- try:
- if cnames.has_key(hn):
- return cnames[hn]
- r = socket.gethostbyname_ex(hn)[0]
- cnames[hn] = r
- return r
- except:
- return hn
+ try:
+ if cnames.has_key(hn):
+ return cnames[hn]
+ r = socket.gethostbyname_ex(hn)[0]
+ cnames[hn] = r
+ return r
+ except:
+ return hn
# define matrix multiplication that can be used to calculate a min-plus
# distance product
def genMul(A, B, add, mult):
- '''generalized matrix multiplication'''
- C = zeros((shape(A)[0], shape(B)[1]))
- for i in range(shape(C)[0]):
- for j in range(shape(C)[1]):
- C[i,j] = add(mult(A[i,:], B[:,j]))
- return C
+ '''generalized matrix multiplication'''
+ C = zeros((shape(A)[0], shape(B)[1]))
+ for i in range(shape(C)[0]):
+ for j in range(shape(C)[1]):
+ C[i,j] = add(mult(A[i,:], B[:,j]))
+ return C
def addHost(graph, hostVals, host):
- if not graph.has_key(host):
- graph[host] = []
- if not hostVals.has_key(host):
- hostVals[host] = len(hostVals)
+ if not graph.has_key(host):
+ graph[host] = []
+ if not hostVals.has_key(host):
+ hostVals[host] = len(hostVals)
def graphConnect(graph, h1, h2):
- if not h1 in graph[h2]:
- graph[h2].append(h1)
- if not h2 in graph[h1]:
- graph[h1].append(h2)
+ if not h1 in graph[h2]:
+ graph[h2].append(h1)
+ if not h2 in graph[h1]:
+ graph[h1].append(h2)
def graphFromFile(fn = 'serverLayout', graph = {}, hostVals = {}):
- f = open(fn)
- for line in f.readlines():
- line = line.split()
- if len(line) < 1:
- continue
- server = cannonicalName(line[0].strip())
-
- addHost(graph, hostVals, server)
- for peer in line[1:]:
- peer = cannonicalName(peer.strip())
- addHost(graph, hostVals, peer)
- graphConnect(graph, server, peer)
- return graph, hostVals
+ f = open(fn)
+ for line in f.readlines():
+ line = line.split()
+ if len(line) < 1:
+ continue
+ server = cannonicalName(line[0].strip())
+
+ addHost(graph, hostVals, server)
+ for peer in line[1:]:
+ peer = cannonicalName(peer.strip())
+ addHost(graph, hostVals, peer)
+ graphConnect(graph, server, peer)
+ return graph, hostVals
def graphFromTashi(client, transport, graph={}, hostVals={}):
- print 'getting graph'
- if not transport.isOpen():
- transport.open()
- hosts = client.getHosts()
- instances = client.getInstances()
- for instance in instances:
- host = [cannonicalName(h.name) for h in hosts if h.id == instance.hostId]
- if len(host) <1 :
- print 'cant find vm host'
- continue
- host = host[0]
- print 'host is ', host
- addHost(graph, hostVals, host)
- print 'added host'
- vmhost = cannonicalName(instance.name)
- addHost(graph, hostVals, vmhost)
- print 'added vm'
- graphConnect(graph, host, vmhost)
- print 'connected'
- print 'returning from graphFromTashi'
- return graph, hostVals
+ print 'getting graph'
+ if not transport.isOpen():
+ transport.open()
+ hosts = client.getHosts()
+ instances = client.getInstances()
+ for instance in instances:
+ host = [cannonicalName(h.name) for h in hosts if h.id == instance.hostId]
+ if len(host) <1 :
+ print 'cant find vm host'
+ continue
+ host = host[0]
+ print 'host is ', host
+ addHost(graph, hostVals, host)
+ print 'added host'
+ vmhost = cannonicalName(instance.name)
+ addHost(graph, hostVals, vmhost)
+ print 'added vm'
+ graphConnect(graph, host, vmhost)
+ print 'connected'
+ print 'returning from graphFromTashi'
+ return graph, hostVals
def graphToArray(graph, hostVals):
- a = zeros((len(hostVals), len(hostVals)))
- for host in graph.keys():
- if not hostVals.has_key(host):
- continue
- a[hostVals[host], hostVals[host]] = 1
- for peer in graph[host]:
- if not hostVals.has_key(peer):
- continue
- a[hostVals[host], hostVals[peer]] = 1
- a[a==0] = inf
- for i in range(shape(a)[0]):
- a[i,i]=0
- return a
+ a = zeros((len(hostVals), len(hostVals)))
+ for host in graph.keys():
+ if not hostVals.has_key(host):
+ continue
+ a[hostVals[host], hostVals[host]] = 1
+ for peer in graph[host]:
+ if not hostVals.has_key(peer):
+ continue
+ a[hostVals[host], hostVals[peer]] = 1
+ a[a==0] = inf
+ for i in range(shape(a)[0]):
+ a[i,i]=0
+ return a
def shortestPaths(graphArray):
- a = graphArray
- for i in range(math.ceil(math.log(shape(a)[0],2))):
- a = genMul(a,a,min,plus)
- return a
+ a = graphArray
+ for i in range(math.ceil(math.log(shape(a)[0],2))):
+ a = genMul(a,a,min,plus)
+ return a
def plus(A, B):
- return A + B
+ return A + B
def getHopCountMatrix(sourceHosts, destHosts, array, hostVals):
- a = zeros((len(sourceHosts), len(destHosts)))
- a[a==0] = inf
- for i in range(len(sourceHosts)):
- sh = cannonicalName(sourceHosts[i])
- shv = None
- if hostVals.has_key(sh):
- shv = hostVals[sh]
- else:
- print 'host not found', sh
- continue
- for j in range(len(destHosts)):
- dh = cannonicalName(destHosts[j])
- dhv = None
- if hostVals.has_key(dh):
- dhv = hostVals[dh]
- else:
- print 'dest not found', dh
- continue
- print sh, dh, i,j, shv, dhv, array[shv, dhv]
- a[i,j] = array[shv, dhv]
- return a
+ a = zeros((len(sourceHosts), len(destHosts)))
+ a[a==0] = inf
+ for i in range(len(sourceHosts)):
+ sh = cannonicalName(sourceHosts[i])
+ shv = None
+ if hostVals.has_key(sh):
+ shv = hostVals[sh]
+ else:
+ print 'host not found', sh
+ continue
+ for j in range(len(destHosts)):
+ dh = cannonicalName(destHosts[j])
+ dhv = None
+ if hostVals.has_key(dh):
+ dhv = hostVals[dh]
+ else:
+ print 'dest not found', dh
+ continue
+ print sh, dh, i,j, shv, dhv, array[shv, dhv]
+ a[i,j] = array[shv, dhv]
+ return a
class LocalityService:
- def __init__(self):
- (config, configFiles) = getConfig(["Agent"])
- self.port = int(config.get('LocalityService', 'port'))
- print 'Locality service on port %i' % self.port
- self.processor = localityservice.Processor(self)
- self.transport = TSocket.TServerSocket(self.port)
- self.tfactory = TTransport.TBufferedTransportFactory()
- self.pfactory = TBinaryProtocol.TBinaryProtocolFactory()
- self.server = TServer.TThreadedServer(self.processor,
- self.transport,
- self.tfactory,
- self.pfactory)
-
- self.hostVals =[]
- self.array = array([[]])
- self.rtime = 0
-
-
- self.fileName = os.path.expanduser(config.get("LocalityService", "staticLayout"))
- (self.client, self.transport) = createClient(config)
-
- self.server.serve()
-
- @synchronizedmethod
- def refresh(self):
- if time.time() - self.rtime < 10:
- return
- g, self.hostVals = graphFromFile(self.fileName)
- try:
- g, self.hostVals = graphFromTashi(self.client, self.transport, g, self.hostVals)
- except e:
- print e
- print 'could not get instance list from cluster manager'
- print 'graph to array'
- a = graphToArray(g, self.hostVals)
- print 'calling shortest paths ', a.shape
- self.array = shortestPaths(a)
- print 'computed shortest paths'
- print self.array
- print self.hostVals
- @synchronizedmethod
- def getHopCountMatrix(self, sourceHosts, destHosts):
- self.refresh()
- print 'getting hop count matrix for', sourceHosts, destHosts
- hcm = getHopCountMatrix(sourceHosts, destHosts, self.array, self.hostVals)
- print hcm
- return hcm
+ def __init__(self):
+ (config, configFiles) = getConfig(["Agent"])
+ self.port = int(config.get('LocalityService', 'port'))
+ print 'Locality service on port %i' % self.port
+ self.processor = localityservice.Processor(self)
+ self.transport = TSocket.TServerSocket(self.port)
+ self.tfactory = TTransport.TBufferedTransportFactory()
+ self.pfactory = TBinaryProtocol.TBinaryProtocolFactory()
+ self.server = TServer.TThreadedServer(self.processor,
+ self.transport,
+ self.tfactory,
+ self.pfactory)
+
+ self.hostVals =[]
+ self.array = array([[]])
+ self.rtime = 0
+
+
+ self.fileName = os.path.expanduser(config.get("LocalityService", "staticLayout"))
+ (self.client, self.transport) = createClient(config)
+
+ self.server.serve()
+
+ @synchronizedmethod
+ def refresh(self):
+ if time.time() - self.rtime < 10:
+ return
+ g, self.hostVals = graphFromFile(self.fileName)
+ try:
+ g, self.hostVals = graphFromTashi(self.client, self.transport, g, self.hostVals)
+ except e:
+ print e
+ print 'could not get instance list from cluster manager'
+ print 'graph to array'
+ a = graphToArray(g, self.hostVals)
+ print 'calling shortest paths ', a.shape
+ self.array = shortestPaths(a)
+ print 'computed shortest paths'
+ print self.array
+ print self.hostVals
+ @synchronizedmethod
+ def getHopCountMatrix(self, sourceHosts, destHosts):
+ self.refresh()
+ print 'getting hop count matrix for', sourceHosts, destHosts
+ hcm = getHopCountMatrix(sourceHosts, destHosts, self.array, self.hostVals)
+ print hcm
+ return hcm
def main():
- #XXXstroucki This code has not been updated for several years.
- # It may still be useful as an example.
- import sys
- sys.exit(0);
+ #XXXstroucki This code has not been updated for several years.
+ # It may still be useful as an example.
+ import sys
+ sys.exit(0);
- ls = LocalityService()
+ ls = LocalityService()
if __name__ == "__main__":
- main()
+ main()
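For reference, the shortest-path computation in this file is an all-pairs min-plus (tropical) matrix product: genMul replaces the usual sum-of-products with min-of-sums, and ceil(log2(n)) repeated squarings suffice because a shortest path visits at most n-1 edges. A minimal standalone sketch of the same idea (plain numpy, hypothetical three-host graph; note that shortestPaths passes math.ceil's float result straight to range(), which newer Python releases reject, hence the int() here):

    import math
    import numpy as np

    def gen_mul(A, B, add, mult):
        # generalized product: C[i,j] = add(mult(A[i,:], B[:,j]))
        C = np.zeros((A.shape[0], B.shape[1]))
        for i in range(C.shape[0]):
            for j in range(C.shape[1]):
                C[i, j] = add(mult(A[i, :], B[:, j]))
        return C

    # adjacency: 0 on the diagonal, 1 per link, inf where unconnected
    a = np.array([[0., 1., np.inf],
                  [1., 0., 1.],
                  [np.inf, 1., 0.]])

    for _ in range(int(math.ceil(math.log(a.shape[0], 2)))):
        a = gen_mul(a, a, min, lambda x, y: x + y)   # min-plus squaring

    print a[0, 2]   # 2.0: two hops from host 0 to host 2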
Modified: incubator/tashi/branches/stroucki-rpyc/src/tashi/agents/primitive.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-rpyc/src/tashi/agents/primitive.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-rpyc/src/tashi/agents/primitive.py (original)
+++ incubator/tashi/branches/stroucki-rpyc/src/tashi/agents/primitive.py Wed Feb 29 03:23:15 2012
@@ -43,7 +43,7 @@ class Primitive(object):
self.hooks.append(instantiateImplementation(value, config, cmclient, False))
except:
self.log.exception("Failed to load hook %s" % (value))
- self.hosts = {}
+ self.hosts = {}
self.load = {}
self.instances = {}
self.muffle = {}
@@ -62,9 +62,9 @@ class Primitive(object):
for h in self.cm.getHosts():
#XXXstroucki get all hosts here?
#if (h.up == True and h.state == HostState.Normal):
- hosts[ctr] = h
- ctr = ctr + 1
- load[h.id] = []
+ hosts[ctr] = h
+ ctr = ctr + 1
+ load[h.id] = []
load[None] = []
_instances = self.cm.getInstances()
@@ -199,7 +199,7 @@ class Primitive(object):
if myDisk == i.disks[0].uri and i.disks[0].persistent == True:
count += 1
if count > 1:
- minMaxHost = None
+ minMaxHost = None
if (minMaxHost):
# found a host
Modified: incubator/tashi/branches/stroucki-rpyc/src/tashi/client/tashi-client.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-rpyc/src/tashi/client/tashi-client.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-rpyc/src/tashi/client/tashi-client.py (original)
+++ incubator/tashi/branches/stroucki-rpyc/src/tashi/client/tashi-client.py Wed Feb 29 03:23:15 2012
@@ -166,7 +166,7 @@ def getSlots(cores, memory):
continue
countbycores = int((h.cores - h.usedCores) / cores)
countbymemory = int((h.memory - h.usedMemory) / memory)
- count += min(countbycores, countbymemory)
+ count += max(0, min(countbycores, countbymemory))
print "%d" % (count),
print (lambda:"instances", lambda:"instance")[count == 1](),
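The max(0, ...) guard matters when a host is oversubscribed: either quotient can go negative and previously subtracted from the running total. A small worked case (hypothetical numbers):

    # host with 4 cores (6 in use) and 16384 MB (8192 in use)
    cores, memory = 2, 4096
    countbycores = int((4 - 6) / cores)               # -1 on an oversubscribed host
    countbymemory = int((16384 - 8192) / memory)      # 2
    count = max(0, min(countbycores, countbymemory))  # contributes 0, not -1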
@@ -186,12 +186,26 @@ def createMany(instance, count):
instances.append(client.createVm(instance))
return instances
+def shutdownMany(basename):
+ return __shutdownOrDestroyMany("shutdown", basename)
+
def destroyMany(basename):
+ return __shutdownOrDestroyMany("destroy", basename)
+
+def __shutdownOrDestroyMany(method, basename):
instances = client.getInstances()
count = 0
for i in instances:
if (i.name.startswith(basename + "-") and i.name[len(basename)+1].isdigit()):
- client.destroyVm(i.id)
+ if method == "shutdown":
+ client.shutdownVm(i.id)
+
+ elif method == "destroy":
+ client.destroyVm(i.id)
+
+ else:
+ raise ValueError("Unknown method")
+
count = count + 1
if (count == 0):
raise ValueError("That is an unused basename")
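Both group operations now share the basename filter (names starting with basename + "-" followed by a digit), differing only in the per-instance call: shutdownVm asks the guest to power off, destroyVm kills it outright. Assuming the client is invoked as tashi-client, usage mirrors the examples table further down:

    tashi-client createMany --basename foobar --disks i386-hardy.qcow2 --count 4
    tashi-client shutdownMany --basename foobar   # graceful shutdown of the group
    tashi-client destroyMany --basename foobar    # immediate destruction, as before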
@@ -213,6 +227,7 @@ extraViews = {
'copyImage': (None, None),
'createVm': (None, ['id', 'hostId', 'name', 'user', 'state', 'disk', 'memory', 'cores']),
'createMany': (createMany, ['id', 'hostId', 'name', 'user', 'state', 'disk', 'memory', 'cores']),
+'shutdownMany': (shutdownMany, None),
'destroyMany': (destroyMany, None),
'getVmLayout': (getVmLayout, ['id', 'name', 'state', 'instances', 'usedMemory', 'memory', 'usedCores', 'cores']),
'getInstances': (None, ['id', 'hostId', 'name', 'user', 'state', 'disk', 'memory', 'cores']),
@@ -225,6 +240,7 @@ argLists = {
'createMany': [('userId', int, getUser, False), ('basename', str, lambda: requiredArg('basename'), True), ('cores', int, lambda: 1, False), ('memory', int, lambda: 128, False), ('disks', parseDisks, lambda: requiredArg('disks'), True), ('nics', parseNics, randomNetwork, False), ('hints', parseHints, lambda: {}, False), ('count', int, lambda: requiredArg('count'), True)],
'shutdownVm': [('instance', checkIid, lambda: requiredArg('instance'), True)],
'destroyVm': [('instance', checkIid, lambda: requiredArg('instance'), True)],
+'shutdownMany': [('basename', str, lambda: requiredArg('basename'), True)],
'destroyMany': [('basename', str, lambda: requiredArg('basename'), True)],
'suspendVm': [('instance', checkIid, lambda: requiredArg('instance'), True)],
'resumeVm': [('instance', checkIid, lambda: requiredArg('instance'), True)],
@@ -250,6 +266,7 @@ convertArgs = {
'createMany': '[Instance(d={"userId":userId,"name":basename,"cores":cores,"memory":memory,"disks":disks,"nics":nics,"hints":hints}), count]',
'shutdownVm': '[instance]',
'destroyVm': '[instance]',
+'shutdownMany': '[basename]',
'destroyMany': '[basename]',
'suspendVm': '[instance]',
'resumeVm': '[instance]',
@@ -268,6 +285,7 @@ description = {
'createMany': 'Utility function that creates many VMs with the same set of parameters',
'shutdownVm': 'Attempts to shutdown a VM nicely',
'destroyVm': 'Immediately destroys a VM -- it is the same as unplugging a physical machine and should be used for non-persistent VMs or when all else fails',
+'shutdownMany': 'Attempts to gracefully shut down a group of VMs created with createMany',
'destroyMany': 'Destroys a group of VMs created with createMany',
'suspendVm': 'Suspends a running VM to disk',
'resumeVm': 'Resumes a suspended VM from disk',
@@ -293,6 +311,7 @@ examples = {
'createMany': ['--basename foobar --disks i386-hardy.qcow2 --count 4'],
'shutdownVm': ['--instance 12345', '--instance foobar'],
'destroyVm': ['--instance 12345', '--instance foobar'],
+'shutdownMany': ['--basename foobar'],
'destroyMany': ['--basename foobar'],
'suspendVm': ['--instance 12345', '--instance foobar'],
'resumeVm': ['--instance 12345', '--instance foobar'],
@@ -563,18 +582,26 @@ def main():
f = getattr(client, function, None)
- if (f is None):
- f = extraViews[function][0]
- if (function in convertArgs):
- fargs = eval(convertArgs[function], globals(), vals)
- else:
- fargs = []
+ try:
+ if (f is None):
+ f = extraViews[function][0]
+ if (function in convertArgs):
+ fargs = eval(convertArgs[function], globals(), vals)
+ else:
+ fargs = []
+ except NameError, e:
+ print e
+ print "Please run tashi-client --examples for syntax information"
+ sys.exit(-1)
+
res = f(*fargs)
if (res != None):
keys = extraViews.get(function, (None, None))[1]
try:
if (type(res) == types.ListType):
makeTable(res, keys)
+ elif (type(res) == types.StringType):
+ print res
else:
makeTable([res], keys)
Modified: incubator/tashi/branches/stroucki-rpyc/src/tashi/clustermanager/clustermanagerservice.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-rpyc/src/tashi/clustermanager/clustermanagerservice.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-rpyc/src/tashi/clustermanager/clustermanagerservice.py (original)
+++ incubator/tashi/branches/stroucki-rpyc/src/tashi/clustermanager/clustermanagerservice.py Wed Feb 29 03:23:15 2012
@@ -19,7 +19,7 @@ import logging
import threading
import time
-from tashi.rpycservices import rpycservices
+from tashi.rpycservices import rpycservices
from tashi.rpycservices.rpyctypes import Errors, InstanceState, HostState, TashiException
from tashi import boolean, ConnectionManager, vmStates, version, scrubString
@@ -126,7 +126,7 @@ class ClusterManagerService(object):
except:
self.log.exception("Invalid host data")
- secondary = ','.join(filter(None, (hostText, instanceText)))
+ secondary = ','.join(filter(None, (hostText, instanceText)))
line = "%s|%s|%s" % (now, text, secondary)
@@ -269,18 +269,19 @@ class ClusterManagerService(object):
# iterate through all VMs I believe are active
for instanceId in self.instanceLastContactTime.keys():
- # Don't query non-running VMs. eg. if a VM
- # is suspended, and has no host, then there's
- # no one to ask
- if instance.state != InstanceState.Running and \
- instance.state != InstanceState.Activating and \
- instance.state != InstanceState.Orphaned:
- continue
# XXXstroucki should lock instance here?
if (self.instanceLastContactTime[instanceId] < (self.__now() - self.allowDecayed)):
try:
instance = self.data.acquireInstance(instanceId)
+ # Don't query non-running VMs. eg. if a VM
+ # is suspended, and has no host, then there's
+ # no one to ask
+ if instance.state != InstanceState.Running and \
+ instance.state != InstanceState.Activating and \
+ instance.state != InstanceState.Orphaned:
+ self.data.releaseInstance(instance)
+ continue
except:
continue
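The reordering in this hunk fixes a use-before-assignment: the state check used to run before instance was fetched inside the loop, so it tested a value left over from a previous iteration (or nothing at all on the first pass). The corrected shape, with the lock released before skipping:

    instance = self.data.acquireInstance(instanceId)   # takes the per-instance lock
    if instance.state not in (InstanceState.Running,
                              InstanceState.Activating,
                              InstanceState.Orphaned):
        self.data.releaseInstance(instance)            # pair every acquire with a release
        continue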
@@ -678,12 +679,12 @@ class ClusterManagerService(object):
self.data.releaseInstance(instance)
return "success"
- def registerHost(self, hostname, memory, cores, version):
- hostId, alreadyRegistered = self.data.registerHost(hostname, memory, cores, version)
- if alreadyRegistered:
- self.log.info("Host %s is already registered, it was updated now" % hostname)
- else:
- self.log.info("A host was registered - hostname: %s, version: %s, memory: %s, cores: %s" % (hostname, version, memory, cores))
+ def registerHost(self, hostname, memory, cores, version):
+ hostId, alreadyRegistered = self.data.registerHost(hostname, memory, cores, version)
+ if alreadyRegistered:
+ self.log.info("Host %s is already registered, it was updated now" % hostname)
+ else:
+ self.log.info("A host was registered - hostname: %s, version: %s, memory: %s, cores: %s" % (hostname, version, memory, cores))
try:
host = self.data.getHost(hostId)
@@ -691,9 +692,9 @@ class ClusterManagerService(object):
except:
self.log.warning("Failed to lookup host %s" % hostId)
- return hostId
+ return hostId
- def unregisterHost(self, hostId):
+ def unregisterHost(self, hostId):
try:
host = self.data.getHost(hostId)
self.__ACCOUNT("CM HOST UNREGISTER", host=host)
@@ -701,9 +702,9 @@ class ClusterManagerService(object):
self.log.warning("Failed to lookup host %s" % hostId)
return
- self.data.unregisterHost(hostId)
- self.log.info("Host %s was unregistered" % hostId)
- return
+ self.data.unregisterHost(hostId)
+ self.log.info("Host %s was unregistered" % hostId)
+ return
# service thread
def __monitorCluster(self):
Modified: incubator/tashi/branches/stroucki-rpyc/src/tashi/clustermanager/data/datainterface.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-rpyc/src/tashi/clustermanager/data/datainterface.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-rpyc/src/tashi/clustermanager/data/datainterface.py (original)
+++ incubator/tashi/branches/stroucki-rpyc/src/tashi/clustermanager/data/datainterface.py Wed Feb 29 03:23:15 2012
@@ -45,6 +45,9 @@ class DataInterface(object):
def getHost(self, id):
raise NotImplementedError
+
+ def getImages(self):
+ raise NotImplementedError
def getInstances(self):
raise NotImplementedError
Modified: incubator/tashi/branches/stroucki-rpyc/src/tashi/clustermanager/data/ldapoverride.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-rpyc/src/tashi/clustermanager/data/ldapoverride.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-rpyc/src/tashi/clustermanager/data/ldapoverride.py (original)
+++ incubator/tashi/branches/stroucki-rpyc/src/tashi/clustermanager/data/ldapoverride.py Wed Feb 29 03:23:15 2012
@@ -17,9 +17,12 @@
import subprocess
import time
+#XXXstroucki getImages requires os?
+import os
+from tashi.rpycservices.rpyctypes import Errors, Network, Host, User, Instance, TashiException, LocalImages, DiskConfiguration, NetworkConfiguration
+from tashi.util import stringPartition, boolean, instantiateImplementation, humanReadable
from tashi.rpycservices.rpyctypes import User
from tashi.clustermanager.data import DataInterface
-from tashi.util import instantiateImplementation
class LdapOverride(DataInterface):
def __init__(self, config):
@@ -31,6 +34,7 @@ class LdapOverride(DataInterface):
self.nameKey = config.get("LdapOverride", "nameKey")
self.idKey = config.get("LdapOverride", "idKey")
self.ldapCommand = config.get("LdapOverride", "ldapCommand")
+ self.dfs = instantiateImplementation(config.get("ClusterManager", "dfs"), config)
def registerInstance(self, instance):
return self.baseDataObject.registerInstance(instance)
@@ -68,6 +72,17 @@ class LdapOverride(DataInterface):
def getNetwork(self, id):
return self.baseDataObject.getNetwork(id)
+ def getImages(self):
+ count = 0
+ myList = []
+ for i in self.dfs.list("images"):
+ myFile = self.dfs.getLocalHandle("images/" + i)
+ if os.path.isfile(myFile):
+ image = LocalImages(d={'id':count, 'imageName':i, 'imageSize':humanReadable(self.dfs.stat(myFile)[6])})
+ myList.append(image)
+ count += 1
+ return myList
+
def fetchFromLdap(self):
now = time.time()
if (now - self.lastUserUpdate > self.fetchThreshold):
Modified: incubator/tashi/branches/stroucki-rpyc/src/tashi/clustermanager/data/pickled.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-rpyc/src/tashi/clustermanager/data/pickled.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-rpyc/src/tashi/clustermanager/data/pickled.py (original)
+++ incubator/tashi/branches/stroucki-rpyc/src/tashi/clustermanager/data/pickled.py Wed Feb 29 03:23:15 2012
@@ -39,22 +39,27 @@ class Pickled(FromConfig):
def cleanInstances(self):
ci = {}
- for i in self.instances.itervalues():
+ for ignore, i in self.instances.items():
i2 = Instance(d=i.__dict__)
ci[i2.id] = i2
return ci
def cleanHosts(self):
ch = {}
- for h in self.hosts.itervalues():
+ for ignore, h in self.hosts.items():
h2 = Host(d=h.__dict__)
ch[h2.id] = h2
return ch
def save(self):
- file = open(self.file, "w")
+ filename = self.file
+ # XXXstroucki could be better
+ tempfile = "%s.new" % filename
+
+ file = open(tempfile, "w")
cPickle.dump((self.cleanHosts(), self.cleanInstances(), self.networks, self.users), file)
file.close()
+ os.rename(tempfile, filename)
def load(self):
if (os.access(self.file, os.F_OK)):
@@ -67,11 +72,11 @@ class Pickled(FromConfig):
self.instances = instances
self.networks = networks
self.users = users
- for i in self.instances.itervalues():
+ for ignore, i in self.instances.items():
if (i.id >= self.maxInstanceId):
self.maxInstanceId = i.id + 1
i._lock = threading.Lock()
self.lockNames[i._lock] = "i%d" % (i.id)
- for h in self.hosts.itervalues():
+ for ignore, h in self.hosts.items():
h._lock = threading.Lock()
self.lockNames[h._lock] = "h%d" % (h.id)
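Writing to a sibling temp file and renaming it over the original makes save() crash-safe: on POSIX, rename() within one filesystem is atomic, so a reader sees either the old pickle or the complete new one, never a truncated file. A hedged general-purpose form of the pattern (adding the fsync the commit omits, for durability across power loss):

    import os

    def atomic_write(filename, data):
        tmp = "%s.new" % filename      # same directory, hence same filesystem
        f = open(tmp, "w")
        try:
            f.write(data)
            f.flush()
            os.fsync(f.fileno())       # ensure bytes hit disk before the rename
        finally:
            f.close()
        os.rename(tmp, filename)       # atomically replaces the old file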
Modified: incubator/tashi/branches/stroucki-rpyc/src/tashi/clustermanager/data/sql.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-rpyc/src/tashi/clustermanager/data/sql.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-rpyc/src/tashi/clustermanager/data/sql.py (original)
+++ incubator/tashi/branches/stroucki-rpyc/src/tashi/clustermanager/data/sql.py Wed Feb 29 03:23:15 2012
@@ -284,16 +284,17 @@ class SQL(DataInterface):
network = Network(d={'id':r[0], 'name':r[1]})
return network
- def getImages(self):
- count = 0
- myList = []
- for i in self.dfs.list("images"):
- myFile = self.dfs.getLocalHandle("images/" + i)
- if os.path.isfile(myFile):
- image = LocalImages(d={'id':count, 'imageName':i, 'imageSize':humanReadable(self.dfs.stat(myFile)[6])})
- myList.append(image)
- count += 1
- return myList
+ def getImages(self):
+ count = 0
+ myList = []
+ for i in self.dfs.list("images"):
+ myFile = self.dfs.getLocalHandle("images/" + i)
+ if os.path.isfile(myFile):
+ image = LocalImages(d={'id':count, 'imageName':i, 'imageSize':humanReadable(self.dfs.stat(myFile)[6])})
+ myList.append(image)
+ count += 1
+
+ return myList
def getUsers(self):
cur = self.executeStatement("SELECT * from users")
Modified: incubator/tashi/branches/stroucki-rpyc/src/tashi/nodemanager/nodemanagerservice.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-rpyc/src/tashi/nodemanager/nodemanagerservice.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-rpyc/src/tashi/nodemanager/nodemanagerservice.py (original)
+++ incubator/tashi/branches/stroucki-rpyc/src/tashi/nodemanager/nodemanagerservice.py Wed Feb 29 03:23:15 2012
@@ -5,15 +5,15 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
-# under the License.
+# under the License.
import logging
import socket
@@ -28,10 +28,10 @@ import tashi
class NodeManagerService(object):
"""RPC handler for the NodeManager
-
- Perhaps in the future I can hide the dfs from the
+
+ Perhaps in the future I can hide the dfs from the
VmControlInterface and do all dfs operations here?"""
-
+
def __init__(self, config, vmm):
self.config = config
self.vmm = vmm
@@ -83,18 +83,18 @@ class NodeManagerService(object):
# start service threads
threading.Thread(target=self.__registerWithClusterManager).start()
threading.Thread(target=self.__statsThread).start()
-
+
def __initAccounting(self):
- self.accountBuffer = []
- self.accountLines = 0
- self.accountingClient = None
- try:
- if (self.accountingHost is not None) and \
- (self.accountingPort is not None):
- self.accountingClient=rpycservices.client(self.accountingHost, self.accountingPort)
- except:
- self.log.exception("Could not init accounting")
-
+ self.accountBuffer = []
+ self.accountLines = 0
+ self.accountingClient = None
+ try:
+ if (self.accountingHost is not None) and \
+ (self.accountingPort is not None):
+ self.accountingClient=rpycservices.client(self.accountingHost, self.accountingPort)
+ except:
+ self.log.exception("Could not init accounting")
+
def __loadVmInfo(self):
try:
self.instances = self.vmm.getInstances()
@@ -135,7 +135,7 @@ class NodeManagerService(object):
#if (toSleep > 0):
#time.sleep(toSleep)
- def __ACCOUNTFLUSH(self):
+ def __ACCOUNTFLUSH(self):
try:
if (self.accountingClient is not None):
self.accountingClient.record(self.accountBuffer)
@@ -145,33 +145,33 @@ class NodeManagerService(object):
self.log.exception("Failed to flush accounting data")
- def __ACCOUNT(self, text, instance=None, host=None):
- now = time.time()
- instanceText = None
- hostText = None
+ def __ACCOUNT(self, text, instance=None, host=None):
+ now = time.time()
+ instanceText = None
+ hostText = None
- if instance is not None:
+ if instance is not None:
try:
- instanceText = 'Instance(%s)' % (instance)
+ instanceText = 'Instance(%s)' % (instance)
except:
self.log.exception("Invalid instance data")
- if host is not None:
+ if host is not None:
try:
- hostText = "Host(%s)" % (host)
+ hostText = "Host(%s)" % (host)
except:
self.log.exception("Invalid host data")
- secondary = ','.join(filter(None, (hostText, instanceText)))
+ secondary = ','.join(filter(None, (hostText, instanceText)))
- line = "%s|%s|%s" % (now, text, secondary)
+ line = "%s|%s|%s" % (now, text, secondary)
- self.accountBuffer.append(line)
- self.accountLines += 1
+ self.accountBuffer.append(line)
+ self.accountLines += 1
# XXXstroucki think about force flush every so often
- if (self.accountLines > 0):
- self.__ACCOUNTFLUSH()
+ if (self.accountLines > 0):
+ self.__ACCOUNTFLUSH()
# service thread function
@@ -213,14 +213,14 @@ class NodeManagerService(object):
self.log.exception('statsThread threw an exception')
time.sleep(self.statsInterval)
- def __registerHost(self):
- hostname = socket.gethostname()
+ def __registerHost(self):
+ hostname = socket.gethostname()
# populate some defaults
# XXXstroucki: I think it's better if the nodemanager fills these in properly when registering with the clustermanager
memory = 0
cores = 0
version = "empty"
- #self.cm.registerHost(hostname, memory, cores, version)
+ #self.cm.registerHost(hostname, memory, cores, version)
def __getInstance(self, vmId):
instance = self.instances.get(vmId, None)
@@ -235,7 +235,7 @@ class NodeManagerService(object):
raise TashiException(d={'errno':Errors.NoSuchVmId,'msg':"There is no vmId %d on this host" % (vmId)})
-
+
# remote
# Called from VMM to update self.instances
# but only changes are Exited, MigrateTrans and Running
@@ -252,11 +252,11 @@ class NodeManagerService(object):
# make a note of mismatch, but go on.
# the VMM should know best
self.log.warning('VM state was %s, call indicated %s' % (vmStates[instance.state], vmStates[old]))
-
+
instance.state = cur
self.__ACCOUNT("NM VM STATE CHANGE", instance=instance)
-
+
newInst = Instance(d={'state':cur})
success = lambda: None
# send the state change up to the CM
@@ -278,8 +278,8 @@ class NodeManagerService(object):
def createInstance(self, instance):
vmId = instance.vmId
self.instances[vmId] = instance
-
-
+
+
# remote
def instantiateVm(self, instance):
self.__ACCOUNT("NM VM INSTANTIATE", instance=instance)
@@ -291,7 +291,7 @@ class NodeManagerService(object):
return vmId
except:
self.log.exception("Failed to start instance")
-
+
# remote
def suspendVm(self, vmId, destination):
instance = self.__getInstance(vmId)
@@ -300,7 +300,7 @@ class NodeManagerService(object):
instance.state = InstanceState.Suspending
self.instances[vmId] = instance
threading.Thread(target=self.vmm.suspendVm, args=(vmId, destination)).start()
-
+
# called by resumeVm as thread
def __resumeVmHelper(self, instance, name):
self.vmm.resumeVmHelper(instance, name)
@@ -323,7 +323,7 @@ class NodeManagerService(object):
self.log.exception('resumeVm failed')
raise TashiException(d={'errno':Errors.UnableToResume,'msg':"resumeVm failed on the node manager"})
return instance.vmId
-
+
# remote
def prepReceiveVm(self, instance, source):
self.__ACCOUNT("NM VM MIGRATE RECEIVE PREP")
@@ -353,7 +353,7 @@ class NodeManagerService(object):
self.instances[vmId] = instance
threading.Thread(target=self.__migrateVmHelper, args=(instance, target, transportCookie)).start()
return
-
+
# called by receiveVm as thread
# XXXstroucki migrate in?
def __receiveVmHelper(self, instance, transportCookie):
@@ -429,4 +429,3 @@ class NodeManagerService(object):
# remote
def liveCheck(self):
return "alive"
-
Modified: incubator/tashi/branches/stroucki-rpyc/src/tashi/nodemanager/vmcontrol/qemu.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-rpyc/src/tashi/nodemanager/vmcontrol/qemu.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-rpyc/src/tashi/nodemanager/vmcontrol/qemu.py (original)
+++ incubator/tashi/branches/stroucki-rpyc/src/tashi/nodemanager/vmcontrol/qemu.py Wed Feb 29 03:23:15 2012
@@ -132,10 +132,14 @@ class Qemu(VmControlInterface):
def __getHostPids(self):
"""Utility function to get a list of system PIDs that match the QEMU_BIN specified (/proc/nnn/exe)"""
pids = []
+ real_bin = self.QEMU_BIN
+ while os.path.islink(real_bin):
+ real_bin = os.readlink(self.QEMU_BIN)
+
for f in os.listdir("/proc"):
try:
bin = os.readlink("/proc/%s/exe" % (f))
- if (bin.find(self.QEMU_BIN) != -1):
+ if (bin.find(real_bin) != -1):
pids.append(int(f))
except Exception:
pass
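Resolving QEMU_BIN through its symlink makes the /proc/<pid>/exe comparison work when the configured path is a link, since the kernel reports the fully resolved target. One caveat: the loop rereads self.QEMU_BIN on every pass, so a chain of two or more links would never terminate; os.path.realpath follows whole chains (and relative link targets) in one call. A hedged equivalent:

    import os

    def qemu_pids(qemu_bin):
        """PIDs whose /proc/<pid>/exe matches the resolved QEMU binary."""
        real_bin = os.path.realpath(qemu_bin)   # resolves arbitrary link chains
        pids = []
        for f in os.listdir("/proc"):
            try:
                if os.readlink("/proc/%s/exe" % f).find(real_bin) != -1:
                    pids.append(int(f))
            except (OSError, ValueError):       # non-PID entries, e.g. /proc/sys, /proc/self
                pass
        return pids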
@@ -206,7 +210,7 @@ class Qemu(VmControlInterface):
if self.scratchVg is not None:
log.info("Removing any scratch for %s" % (name))
cmd = "/sbin/lvremove --quiet -f %s" % self.scratchVg
- result = subprocess.Popen(cmd.split(), executable=cmd.split()[0], stdout=subprocess.PIPE, stderr=open(os.devnull, "w"), close_fds=True).wait()
+ result = subprocess.Popen(cmd.split(), executable=cmd.split()[0], stdout=subprocess.PIPE, stderr=open(os.devnull, "w"), close_fds=True).wait()
except:
log.warning("Problem cleaning scratch volumes")
pass
@@ -500,8 +504,14 @@ class Qemu(VmControlInterface):
nicModel = self.__stripSpace(nicModel)
nicString = ""
+ nicNetworks = {}
for i in range(0, len(instance.nics)):
+ # Don't allow more than one interface per vlan
nic = instance.nics[i]
+ if nicNetworks.has_key(nic.network):
+ continue
+ nicNetworks[nic.network] = True
+
nicString = nicString + "-net nic,macaddr=%s,model=%s,vlan=%d -net tap,ifname=%s%d.%d,vlan=%d,script=/etc/qemu-ifup.%d " % (nic.mac, nicModel, nic.network, self.ifPrefix, instance.id, i, nic.network, nic.network)
# ACPI
Modified: incubator/tashi/branches/stroucki-rpyc/src/tashi/nodemanager/vmcontrol/vmcontrolinterface.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-rpyc/src/tashi/nodemanager/vmcontrol/vmcontrolinterface.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-rpyc/src/tashi/nodemanager/vmcontrol/vmcontrolinterface.py (original)
+++ incubator/tashi/branches/stroucki-rpyc/src/tashi/nodemanager/vmcontrol/vmcontrolinterface.py Wed Feb 29 03:23:15 2012
@@ -28,8 +28,8 @@ class VmControlInterface(object):
self.dfs = dfs
self.nm = nm
- def getInstances(self):
- """Will return a dict of instances by vmId to the caller"""
+ def getInstances(self):
+ """Will return a dict of instances by vmId to the caller"""
raise NotImplementedError
def instantiateVm(self, instance):
Modified: incubator/tashi/branches/stroucki-rpyc/src/tashi/nodemanager/vmcontrol/xenpv.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-rpyc/src/tashi/nodemanager/vmcontrol/xenpv.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-rpyc/src/tashi/nodemanager/vmcontrol/xenpv.py (original)
+++ incubator/tashi/branches/stroucki-rpyc/src/tashi/nodemanager/vmcontrol/xenpv.py Wed Feb 29 03:23:15 2012
@@ -168,7 +168,7 @@ class XenPV(VmControlInterface, threadin
vmType = hints.get('vmtype', self.defaultVmType)
print 'starting vm with type: ', vmType
- disk0 = 'tap:%s' % self.disktype
+ disk0 = 'tap:%s' % self.disktype
diskU = 'xvda1'
try:
@@ -313,10 +313,10 @@ extra='xencons=tty'
@synchronizedmethod
def instantiateVm(self, instance):
- try:
- disktype = self.config.get('XenPV', 'defaultDiskType')
- except:
- disktype = 'vhd'
+ try:
+ disktype = self.config.get('XenPV', 'defaultDiskType')
+ except:
+ disktype = 'vhd'
# FIXME: this is NOT the right way to get out hostId
self.hostId = instance.hostId
@@ -346,6 +346,8 @@ extra='xencons=tty'
instance.disks[i].local = newdisk
+ # XXXstroucki if ever supporting multiple nics,
+ # ensure more than one isn't put on the same network.
fn = self.createXenConfig(name,
instance.disks[0].local,
instance.nics[0].mac,
Modified: incubator/tashi/branches/stroucki-rpyc/src/tashi/util.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-rpyc/src/tashi/util.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-rpyc/src/tashi/util.py (original)
+++ incubator/tashi/branches/stroucki-rpyc/src/tashi/util.py Wed Feb 29 03:23:15 2012
@@ -218,17 +218,25 @@ def getConfig(additionalNames=[], additi
raise Exception("No config file could be found: %s" % (str(allLocations)))
return (config, configFiles)
+def __getShellFn():
+ if sys.version_info < (2, 6, 1):
+ from IPython.Shell import IPShellEmbed
+ return IPShellEmbed()
+ else:
+ import IPython
+ return IPython.embed()
+
def debugConsole(globalDict):
"""A debugging console that optionally uses pysh"""
def realDebugConsole(globalDict):
try :
import atexit
- from IPython.Shell import IPShellEmbed
+ shellfn = __getShellFn()
def resetConsole():
# XXXpipe: make input window sane
(stdin, stdout) = os.popen2("reset")
stdout.read()
- dbgshell = IPShellEmbed()
+ dbgshell = shellfn()
atexit.register(resetConsole)
dbgshell(local_ns=globalDict, global_ns=globalDict)
except Exception:
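The version switch above keys on the interpreter, but the relevant change was IPython's: IPython.Shell disappeared in IPython 0.11, independent of the Python version. Note also that IPython.embed() opens the shell immediately rather than returning something callable, while the caller still does dbgshell = shellfn() followed by dbgshell(...). A hedged variant that returns a callable either way (argument signatures still differ between the two APIs, so the call site needs matching care):

    def __getShellFn():
        try:
            # IPython < 0.11
            from IPython.Shell import IPShellEmbed
            return IPShellEmbed()
        except ImportError:
            # IPython >= 0.11: embed is itself the entry point; return it uncalled
            import IPython
            return IPython.embed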
Modified: incubator/tashi/branches/stroucki-rpyc/src/utils/nmd.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-rpyc/src/utils/nmd.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-rpyc/src/utils/nmd.py (original)
+++ incubator/tashi/branches/stroucki-rpyc/src/utils/nmd.py Wed Feb 29 03:23:15 2012
@@ -16,9 +16,10 @@
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
- * under the License.
+ * under the License.
*/
"""
+# XXXstroucki: why not use something like supervise instead?
import os
import sys
@@ -36,81 +37,81 @@ LOG_FILE="/var/log/nodemanager.log"
*/
"""
def make_invincible():
- # dependent on linux
- try:
- oom_adj_fd = os.open("/proc/self/oom_adj", os.O_WRONLY)
- except IOError:
- pass
- else:
- os.write(oom_adj_fd, "-17\n")
- os.close(oom_adj_fd)
+ # dependent on linux
+ try:
+ oom_adj_fd = os.open("/proc/self/oom_adj", os.O_WRONLY)
+ except IOError:
+ pass
+ else:
+ os.write(oom_adj_fd, "-17\n")
+ os.close(oom_adj_fd)
"""
/* This function resets (on Linux!) its oom scoring to default
*/
"""
def make_vulnerable():
- # dependent on linux
- try:
- oom_adj_fd = os.open("/proc/self/oom_adj", os.O_WRONLY)
- except IOError:
- pass
- else:
- os.write(oom_adj_fd, "0\n")
- os.close(oom_adj_fd)
+ # dependent on linux
+ try:
+ oom_adj_fd = os.open("/proc/self/oom_adj", os.O_WRONLY)
+ except IOError:
+ pass
+ else:
+ os.write(oom_adj_fd, "0\n")
+ os.close(oom_adj_fd)
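These two helpers toggle the legacy Linux OOM knob: -17 in /proc/<pid>/oom_adj marked a process unkillable, 0 restores the default. Two hedged notes: os.open raises OSError rather than IOError, so the except clauses as written may not catch a missing knob, and kernels since 2.6.36 prefer oom_score_adj (range -1000..1000). A sketch covering both:

    import os

    def set_oom_preference(modern, legacy):
        # try the modern knob first, fall back to the legacy one
        for path, value in (("/proc/self/oom_score_adj", modern),
                            ("/proc/self/oom_adj", legacy)):
            try:
                fd = os.open(path, os.O_WRONLY)
            except OSError:            # os.open raises OSError, not IOError
                continue
            os.write(fd, value + "\n")
            os.close(fd)
            return

    set_oom_preference("-1000", "-17")   # invincible
    set_oom_preference("0", "0")         # vulnerable again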
def main(argv=None):
- if argv is None:
- argv = sys.argv
- try:
- opts, args = getopt.getopt(argv[1:], "f", ["foreground"])
- except getopt.GetoptError, err:
- # print help information and exit:
- print str(err) # will print something like "option -a not recognized"
- # usage()
- return 2
- foreground = False
- for o, a in opts:
- if o in ("-f", "--foreground"):
- foreground = True
- else:
- assert False, "unhandled option"
- if foreground == False:
- pid = os.fork();
- if pid != 0:
- os._exit(0)
- os.close(0)
- os.close(1)
- os.close(2)
-
- # adjust oom preference
- make_invincible()
-
- # configure environment of children
- env = {"PYTHONPATH":TASHI_PATH+"/src"}
- while True:
- pid = os.fork();
- if pid == 0:
- # child
- # nodemanagers are vulnerable, not the supervisor
- make_vulnerable()
- if foreground == False:
- try:
- lfd = os.open(LOG_FILE, os.O_APPEND|os.O_CREAT|os.O_WRONLY)
- except IOError:
- lfd = os.open("/dev/null", os.O_WRONLY)
- # make this fd stdout and stderr
- os.dup2(lfd, 1)
- os.dup2(lfd, 2)
- # close stdin
- os.close(0)
- os.chdir(TASHI_PATH)
- os.execle("./bin/nodemanager.py", "./bin/nodemanager.py", env)
- os._exit(-1)
- # sleep before checking child status
- time.sleep(SLEEP_INTERVAL)
- os.waitpid(pid, 0)
- return 0
+ if argv is None:
+ argv = sys.argv
+ try:
+ opts, args = getopt.getopt(argv[1:], "f", ["foreground"])
+ except getopt.GetoptError, err:
+ # print help information and exit:
+ print str(err) # will print something like "option -a not recognized"
+ # usage()
+ return 2
+ foreground = False
+ for o, a in opts:
+ if o in ("-f", "--foreground"):
+ foreground = True
+ else:
+ assert False, "unhandled option"
+ if foreground == False:
+ pid = os.fork();
+ if pid != 0:
+ os._exit(0)
+ os.close(0)
+ os.close(1)
+ os.close(2)
+
+ # adjust oom preference
+ make_invincible()
+
+ # configure environment of children
+ env = {"PYTHONPATH":TASHI_PATH+"/src"}
+ while True:
+ pid = os.fork();
+ if pid == 0:
+ # child
+ # nodemanagers are vulnerable, not the supervisor
+ make_vulnerable()
+ if foreground == False:
+ try:
+ lfd = os.open(LOG_FILE, os.O_APPEND|os.O_CREAT|os.O_WRONLY)
+ except IOError:
+ lfd = os.open("/dev/null", os.O_WRONLY)
+ # make this fd stdout and stderr
+ os.dup2(lfd, 1)
+ os.dup2(lfd, 2)
+ # close stdin
+ os.close(0)
+ os.chdir(TASHI_PATH)
+ os.execle("./bin/nodemanager.py", "./bin/nodemanager.py", env)
+ os._exit(-1)
+ # sleep before checking child status
+ time.sleep(SLEEP_INTERVAL)
+ os.waitpid(pid, 0)
+ return 0
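For debugging, the daemonizing fork can be skipped so the supervisor and the nodemanager it respawns stay attached to the terminal (path per the Modified: header above; adjust to your checkout):

    python src/utils/nmd.py --foreground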
if __name__ == "__main__":
- sys.exit(main())
+ sys.exit(main())
Modified: incubator/tashi/branches/stroucki-rpyc/src/zoni/client/zoni-cli.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-rpyc/src/zoni/client/zoni-cli.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-rpyc/src/zoni/client/zoni-cli.py (original)
+++ incubator/tashi/branches/stroucki-rpyc/src/zoni/client/zoni-cli.py Wed Feb 29 03:23:15 2012
@@ -327,11 +327,11 @@ def main():
if (options.nodeName):
cmdargs["sys_id"] = options.nodeName
- if (options.numCores or options.clockSpeed or options.numMemory or options.numProcs or options.cpuFlags) and not options.showResources:
- usage = "MISSING OPTION: When specifying hardware parameters, you need the -s or --showResources switch"
- print usage
- parser.print_help()
- exit()
+ if (options.numCores or options.clockSpeed or options.numMemory or options.numProcs or options.cpuFlags) and not options.showResources:
+ usage = "MISSING OPTION: When specifying hardware parameters, you need the -s or --showResources switch"
+ print usage
+ parser.print_help()
+ exit()
if options.getResources:
print "ALL resources"
Modified: incubator/tashi/branches/stroucki-rpyc/src/zoni/extra/util.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-rpyc/src/zoni/extra/util.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-rpyc/src/zoni/extra/util.py (original)
+++ incubator/tashi/branches/stroucki-rpyc/src/zoni/extra/util.py Wed Feb 29 03:23:15 2012
@@ -19,6 +19,7 @@
#
import os
+import sys
import string
import ConfigParser
import time
@@ -218,19 +219,25 @@ def createKey(name):
return val
-
+def __getShellFn():
+ if sys.version_info < (2, 6, 1):
+ from IPython.Shell import IPShellEmbed
+ return IPShellEmbed()
+ else:
+ import IPython
+ return IPython.embed()
def debugConsole(globalDict):
"""A debugging console that optionally uses pysh"""
def realDebugConsole(globalDict):
try :
import atexit
- from IPython.Shell import IPShellEmbed
+ shellfn = __getShellFn()
def resetConsole():
# XXXpipe: make input window sane
(stdin, stdout) = os.popen2("reset")
stdout.read()
- dbgshell = IPShellEmbed()
+ dbgshell = shellfn()
atexit.register(resetConsole)
dbgshell(local_ns=globalDict, global_ns=globalDict)
except Exception:
Modified: incubator/tashi/branches/stroucki-rpyc/src/zoni/hardware/delldrac.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-rpyc/src/zoni/hardware/delldrac.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-rpyc/src/zoni/hardware/delldrac.py (original)
+++ incubator/tashi/branches/stroucki-rpyc/src/zoni/hardware/delldrac.py Wed Feb 29 03:23:15 2012
@@ -147,7 +147,7 @@ class dellDrac(SystemManagementInterface
for val in fout.readlines():
if "OK" in val:
code = 1
- if "CURRENTLY POWER-OFF" in val:
+ if "CURRENTLY POWER-OFF" in val:
self.log.info("Hardware already power off : %s", self.hostname)
code = 1
if code < 1:
@@ -171,7 +171,7 @@ class dellDrac(SystemManagementInterface
for val in fout.readlines():
if "OK" in val:
code = 1
- if "CURRENTLY POWER-OFF" in val:
+ if "CURRENTLY POWER-OFF" in val:
self.log.info("Hardware already power off : %s", self.hostname)
code = 1
if code < 1:
Modified: incubator/tashi/branches/stroucki-rpyc/src/zoni/hardware/dellswitch.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-rpyc/src/zoni/hardware/dellswitch.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-rpyc/src/zoni/hardware/dellswitch.py (original)
+++ incubator/tashi/branches/stroucki-rpyc/src/zoni/hardware/dellswitch.py Wed Feb 29 03:23:15 2012
@@ -54,7 +54,7 @@ class HwDellSwitch(HwSwitchInterface):
pass
- def setVerbose(self, verbose):
+ def setVerbose(self, verbose):
self.verbose = verbose
def __login(self):
Modified: incubator/tashi/branches/stroucki-rpyc/src/zoni/hardware/hpswitch.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-rpyc/src/zoni/hardware/hpswitch.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-rpyc/src/zoni/hardware/hpswitch.py (original)
+++ incubator/tashi/branches/stroucki-rpyc/src/zoni/hardware/hpswitch.py Wed Feb 29 03:23:15 2012
@@ -74,10 +74,10 @@ class HwHPSwitch(HwSwitchInterface):
child.sendline(cmd)
opt = child.expect(["Confirm(.*)", "No save(.*)", pexpect.EOF, pexpect.TIMEOUT])
if opt == 0:
- print "saving to flash"
- child.sendline("y\n")
+ print "saving to flash"
+ child.sendline("y\n")
if opt == 1:
- print "no save needed"
+ print "no save needed"
child.sendline('exit')
child.terminate()
Modified: incubator/tashi/branches/stroucki-rpyc/src/zoni/install/db/zoniDbSetup.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-rpyc/src/zoni/install/db/zoniDbSetup.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-rpyc/src/zoni/install/db/zoniDbSetup.py (original)
+++ incubator/tashi/branches/stroucki-rpyc/src/zoni/install/db/zoniDbSetup.py Wed Feb 29 03:23:15 2012
@@ -27,8 +27,8 @@ try:
import optparse
import getpass
except ImportError, e:
- print "Module not installed : %s" % e
- exit()
+ print "Module not installed : %s" % e
+ exit()
a = os.path.join("../")
@@ -406,7 +406,7 @@ def execQuery(conn, query):
def entryExists(conn, table, col, checkVal):
query = "select * from " + table + " where " + col + " = '" + checkVal + "'"
- r = execQuery(conn, query)
+ r = execQuery(conn, query)
res = r.fetchall()
if len(res) > 0:
return (1, res)
@@ -415,5 +415,5 @@ def entryExists(conn, table, col, checkV
if __name__ == "__main__":
- main()
+ main()
Modified: incubator/tashi/branches/stroucki-rpyc/src/zoni/install/dnsdhcp/zoniDnsDhcpSetup.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-rpyc/src/zoni/install/dnsdhcp/zoniDnsDhcpSetup.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-rpyc/src/zoni/install/dnsdhcp/zoniDnsDhcpSetup.py (original)
+++ incubator/tashi/branches/stroucki-rpyc/src/zoni/install/dnsdhcp/zoniDnsDhcpSetup.py Wed Feb 29 03:23:15 2012
@@ -79,5 +79,5 @@ def main():
if __name__ == "__main__":
- main()
+ main()