Posted to tashi-commits@incubator.apache.org by st...@apache.org on 2012/02/29 03:23:18 UTC

svn commit: r1294960 [2/3] - in /incubator/tashi/branches: stroucki-accounting/ stroucki-accounting/src/tashi/ stroucki-accounting/src/tashi/accounting/ stroucki-accounting/src/tashi/agents/ stroucki-accounting/src/tashi/client/ stroucki-accounting/src...

Modified: incubator/tashi/branches/stroucki-dropthrift/src/tashi/nodemanager/nodemanagerservice.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-dropthrift/src/tashi/nodemanager/nodemanagerservice.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-dropthrift/src/tashi/nodemanager/nodemanagerservice.py (original)
+++ incubator/tashi/branches/stroucki-dropthrift/src/tashi/nodemanager/nodemanagerservice.py Wed Feb 29 03:23:15 2012
@@ -5,15 +5,15 @@
 # to you under the Apache License, Version 2.0 (the
 # "License"); you may not use this file except in compliance
 # with the License.  You may obtain a copy of the License at
-# 
+#
 #   http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing,
 # software distributed under the License is distributed on an
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
-# under the License.    
+# under the License.
 
 import logging
 import socket
@@ -28,10 +28,10 @@ import tashi
 
 class NodeManagerService(object):
 	"""RPC handler for the NodeManager
-	   
-	   Perhaps in the future I can hide the dfs from the 
+
+	   Perhaps in the future I can hide the dfs from the
 	   VmControlInterface and do all dfs operations here?"""
-	
+
 	def __init__(self, config, vmm):
 		self.config = config
 		self.vmm = vmm
@@ -83,18 +83,18 @@ class NodeManagerService(object):
 		# start service threads
 		threading.Thread(target=self.__registerWithClusterManager).start()
 		threading.Thread(target=self.__statsThread).start()
-	
+
 	def __initAccounting(self):
-                self.accountBuffer = []
-                self.accountLines = 0
-                self.accountingClient = None
-                try:
-                        if (self.accountingHost is not None) and \
-                                    (self.accountingPort is not None):
-                                self.accountingClient=rpycservices.client(self.accountingHost, self.accountingPort)
-                except:
-                        self.log.exception("Could not init accounting")
-			
+		self.accountBuffer = []
+		self.accountLines = 0
+		self.accountingClient = None
+		try:
+			if (self.accountingHost is not None) and \
+						(self.accountingPort is not None):
+				self.accountingClient=rpycservices.client(self.accountingHost, self.accountingPort)
+		except:
+			self.log.exception("Could not init accounting")
+
 	def __loadVmInfo(self):
 		try:
 			self.instances = self.vmm.getInstances()
@@ -135,7 +135,7 @@ class NodeManagerService(object):
 		#if (toSleep > 0):
 			#time.sleep(toSleep)
 
-        def __ACCOUNTFLUSH(self):
+	def __ACCOUNTFLUSH(self):
 		try:
 			if (self.accountingClient is not None):
 				self.accountingClient.record(self.accountBuffer)
@@ -145,33 +145,33 @@ class NodeManagerService(object):
 			self.log.exception("Failed to flush accounting data")
 
 
-        def __ACCOUNT(self, text, instance=None, host=None):
-                now = time.time()
-                instanceText = None
-                hostText = None
+	def __ACCOUNT(self, text, instance=None, host=None):
+		now = time.time()
+		instanceText = None
+		hostText = None
 
-                if instance is not None:
+		if instance is not None:
 			try:
-                        	instanceText = 'Instance(%s)' % (instance)
+				instanceText = 'Instance(%s)' % (instance)
 			except:
 				self.log.exception("Invalid instance data")
 
-                if host is not None:
+		if host is not None:
 			try:
-                        	hostText = "Host(%s)" % (host)
+				hostText = "Host(%s)" % (host)
 			except:
 				self.log.exception("Invalid host data")
 
-                secondary = ','.join(filter(None, (hostText, instanceText)))
+		secondary = ','.join(filter(None, (hostText, instanceText)))
 
-                line = "%s|%s|%s" % (now, text, secondary)
+		line = "%s|%s|%s" % (now, text, secondary)
 
-                self.accountBuffer.append(line)
-                self.accountLines += 1
+		self.accountBuffer.append(line)
+		self.accountLines += 1
 
 		# XXXstroucki think about force flush every so often
-                if (self.accountLines > 0):
-                        self.__ACCOUNTFLUSH()
+		if (self.accountLines > 0):
+			self.__ACCOUNTFLUSH()
 
 
 	# service thread function
@@ -213,14 +213,14 @@ class NodeManagerService(object):
 				self.log.exception('statsThread threw an exception')
 			time.sleep(self.statsInterval)
 
-        def __registerHost(self):
-                hostname = socket.gethostname()
+	def __registerHost(self):
+		hostname = socket.gethostname()
 		# populate some defaults
 		# XXXstroucki: I think it's better if the nodemanager fills these in properly when registering with the clustermanager
 		memory = 0
 		cores = 0
 		version = "empty"
-                #self.cm.registerHost(hostname, memory, cores, version)
+		#self.cm.registerHost(hostname, memory, cores, version)
 
 	def __getInstance(self, vmId):
 		instance = self.instances.get(vmId, None)
@@ -235,7 +235,7 @@ class NodeManagerService(object):
 
 
 		raise TashiException(d={'errno':Errors.NoSuchVmId,'msg':"There is no vmId %d on this host" % (vmId)})
-	
+
 	# remote
 	# Called from VMM to update self.instances
 	# but only changes are Exited, MigrateTrans and Running
@@ -252,11 +252,11 @@ class NodeManagerService(object):
 			# make a note of mismatch, but go on.
 			# the VMM should know best
 			self.log.warning('VM state was %s, call indicated %s' % (vmStates[instance.state], vmStates[old]))
-                        
+
 		instance.state = cur
 
 		self.__ACCOUNT("NM VM STATE CHANGE", instance=instance)
-			      
+
 		newInst = Instance(d={'state':cur})
 		success = lambda: None
 		# send the state change up to the CM
@@ -278,8 +278,8 @@ class NodeManagerService(object):
 	def createInstance(self, instance):
 		vmId = instance.vmId
 		self.instances[vmId] = instance
-		
-	
+
+
 	# remote
 	def instantiateVm(self, instance):
 		self.__ACCOUNT("NM VM INSTANTIATE", instance=instance)
@@ -291,7 +291,7 @@ class NodeManagerService(object):
 			return vmId
 		except:
 			self.log.exception("Failed to start instance")
-	
+
 	# remote
 	def suspendVm(self, vmId, destination):
 		instance = self.__getInstance(vmId)
@@ -300,7 +300,7 @@ class NodeManagerService(object):
 		instance.state = InstanceState.Suspending
 		self.instances[vmId] = instance
 		threading.Thread(target=self.vmm.suspendVm, args=(vmId, destination)).start()
-	
+
 	# called by resumeVm as thread
 	def __resumeVmHelper(self, instance, name):
 		self.vmm.resumeVmHelper(instance, name)
@@ -323,7 +323,7 @@ class NodeManagerService(object):
 			self.log.exception('resumeVm failed')
 			raise TashiException(d={'errno':Errors.UnableToResume,'msg':"resumeVm failed on the node manager"})
 		return instance.vmId
-	
+
 	# remote
 	def prepReceiveVm(self, instance, source):
 		self.__ACCOUNT("NM VM MIGRATE RECEIVE PREP")
@@ -353,7 +353,7 @@ class NodeManagerService(object):
 		self.instances[vmId] = instance
 		threading.Thread(target=self.__migrateVmHelper, args=(instance, target, transportCookie)).start()
 		return
-	
+
 	# called by receiveVm as thread
 	# XXXstroucki migrate in?
 	def __receiveVmHelper(self, instance, transportCookie):
@@ -429,4 +429,3 @@ class NodeManagerService(object):
 	# remote
 	def liveCheck(self):
 		return "alive"
-	
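
A minimal sketch of the buffer-and-flush accounting pattern the re-indented __ACCOUNT/__ACCOUNTFLUSH methods above implement; the record() call and the pipe-delimited line format come from this hunk, while the wrapper class and its names below are illustrative only:

import time

class AccountBuffer(object):
	"""Collect pipe-delimited accounting lines and push them to a client."""
	def __init__(self, client=None):
		# client is e.g. rpycservices.client(host, port); None disables flushing
		self.client = client
		self.lines = []

	def account(self, text, secondary=""):
		self.lines.append("%s|%s|%s" % (time.time(), text, secondary))
		self.flush()  # the node manager currently flushes after every line

	def flush(self):
		if self.client is None:
			return
		try:
			self.client.record(self.lines)
			self.lines = []
		except Exception:
			pass  # keep buffering; the next flush will retry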

Modified: incubator/tashi/branches/stroucki-dropthrift/src/tashi/nodemanager/vmcontrol/qemu.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-dropthrift/src/tashi/nodemanager/vmcontrol/qemu.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-dropthrift/src/tashi/nodemanager/vmcontrol/qemu.py (original)
+++ incubator/tashi/branches/stroucki-dropthrift/src/tashi/nodemanager/vmcontrol/qemu.py Wed Feb 29 03:23:15 2012
@@ -210,7 +210,7 @@ class Qemu(VmControlInterface):
 					if self.scratchVg is not None:
 						log.info("Removing any scratch for %s" % (name))
 						cmd = "/sbin/lvremove --quiet -f %s" % self.scratchVg
-    						result = subprocess.Popen(cmd.split(), executable=cmd.split()[0], stdout=subprocess.PIPE, stderr=open(os.devnull, "w"), close_fds=True).wait()
+						result = subprocess.Popen(cmd.split(), executable=cmd.split()[0], stdout=subprocess.PIPE, stderr=open(os.devnull, "w"), close_fds=True).wait()
 				except:
 					log.warning("Problem cleaning scratch volumes")
 					pass

Modified: incubator/tashi/branches/stroucki-dropthrift/src/tashi/nodemanager/vmcontrol/vmcontrolinterface.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-dropthrift/src/tashi/nodemanager/vmcontrol/vmcontrolinterface.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-dropthrift/src/tashi/nodemanager/vmcontrol/vmcontrolinterface.py (original)
+++ incubator/tashi/branches/stroucki-dropthrift/src/tashi/nodemanager/vmcontrol/vmcontrolinterface.py Wed Feb 29 03:23:15 2012
@@ -28,8 +28,8 @@ class VmControlInterface(object):
 		self.dfs = dfs
 		self.nm = nm
 
-        def getInstances(self):
-                """Will return a dict of instances by vmId to the caller"""
+	def getInstances(self):
+		"""Will return a dict of instances by vmId to the caller"""
 		raise NotImplementedError
 	
 	def instantiateVm(self, instance):

Modified: incubator/tashi/branches/stroucki-dropthrift/src/tashi/nodemanager/vmcontrol/xenpv.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-dropthrift/src/tashi/nodemanager/vmcontrol/xenpv.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-dropthrift/src/tashi/nodemanager/vmcontrol/xenpv.py (original)
+++ incubator/tashi/branches/stroucki-dropthrift/src/tashi/nodemanager/vmcontrol/xenpv.py Wed Feb 29 03:23:15 2012
@@ -168,7 +168,7 @@ class XenPV(VmControlInterface, threadin
 		vmType = hints.get('vmtype', self.defaultVmType)
 		print 'starting vm with type: ', vmType
 
-                disk0 = 'tap:%s' % self.disktype
+		disk0 = 'tap:%s' % self.disktype
 		diskU = 'xvda1'
 
 		try:
@@ -313,10 +313,10 @@ extra='xencons=tty'
 	@synchronizedmethod
 	def instantiateVm(self, instance):
 
-                try:
-                   disktype = self.config.get('XenPV', 'defaultDiskType')
-                except:
-                   disktype = 'vhd'
+		try:
+			disktype = self.config.get('XenPV', 'defaultDiskType')
+		except:
+			disktype = 'vhd'
 
 		# FIXME: this is NOT the right way to get out hostId
 		self.hostId = instance.hostId

Modified: incubator/tashi/branches/stroucki-dropthrift/src/tashi/util.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-dropthrift/src/tashi/util.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-dropthrift/src/tashi/util.py (original)
+++ incubator/tashi/branches/stroucki-dropthrift/src/tashi/util.py Wed Feb 29 03:23:15 2012
@@ -210,17 +210,25 @@ def getConfig(additionalNames=[], additi
 		raise Exception("No config file could be found: %s" % (str(allLocations)))
 	return (config, configFiles)
 
+def __getShellFn():
+	if sys.version_info < (2, 6, 1):
+		from IPython.Shell import IPShellEmbed
+		return IPShellEmbed()
+	else:
+		import IPython
+		return IPython.embed()
+
 def debugConsole(globalDict):
 	"""A debugging console that optionally uses pysh"""
 	def realDebugConsole(globalDict):
 		try :
 			import atexit
-			from IPython.Shell import IPShellEmbed
+			shellfn = __getShellFn()
 			def resetConsole():
 # XXXpipe: make input window sane
 				(stdin, stdout) = os.popen2("reset")
 				stdout.read()
-			dbgshell = IPShellEmbed()
+			dbgshell = shellfn()
 			atexit.register(resetConsole)
 			dbgshell(local_ns=globalDict, global_ns=globalDict)
 		except Exception:
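
For context, a minimal standalone sketch of the IPython compatibility shim the new __getShellFn() provides: IPython releases bundled with older Python installs expose IPython.Shell.IPShellEmbed, while newer releases expose IPython.embed. The helper name and the import-based fallback below are illustrative assumptions, as an alternative to the interpreter-version test used in the hunk:

def getShellFn():
	# Return a callable that drops into an interactive shell when invoked.
	try:
		from IPython.Shell import IPShellEmbed  # old IPython (< 0.11)
		return IPShellEmbed()  # the instance itself is callable
	except ImportError:
		import IPython  # newer IPython (>= 0.11)
		return IPython.embed  # return the function; call it later

# e.g. getShellFn()() drops into an interactive shell at the call site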

Modified: incubator/tashi/branches/stroucki-dropthrift/src/utils/nmd.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-dropthrift/src/utils/nmd.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-dropthrift/src/utils/nmd.py (original)
+++ incubator/tashi/branches/stroucki-dropthrift/src/utils/nmd.py Wed Feb 29 03:23:15 2012
@@ -16,9 +16,10 @@
  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
  * KIND, either express or implied.  See the License for the
  * specific language governing permissions and limitations
- * under the License.    
+ * under the License.	 
  */
 """
+# XXXstroucki: why not use something like supervise instead?
 
 import os
 import sys
@@ -36,81 +37,81 @@ LOG_FILE="/var/log/nodemanager.log"
  */
 """
 def make_invincible():
-   # dependent on linux
-   try:
-      oom_adj_fd = os.open("/proc/self/oom_adj", os.O_WRONLY)
-   except IOError:
-      pass
-   else:
-      os.write(oom_adj_fd, "-17\n")
-      os.close(oom_adj_fd)
+	# dependent on linux
+	try:
+		oom_adj_fd = os.open("/proc/self/oom_adj", os.O_WRONLY)
+	except IOError:
+		pass
+	else:
+		os.write(oom_adj_fd, "-17\n")
+		os.close(oom_adj_fd)
 
 """
 /* This function resets (on Linux!) its oom scoring to default
  */
 """
 def make_vulnerable():
-   # dependent on linux
-   try:
-      oom_adj_fd = os.open("/proc/self/oom_adj", os.O_WRONLY)
-   except IOError:
-      pass
-   else:
-      os.write(oom_adj_fd, "0\n")
-      os.close(oom_adj_fd)
+	# dependent on linux
+	try:
+		oom_adj_fd = os.open("/proc/self/oom_adj", os.O_WRONLY)
+	except IOError:
+		pass
+	else:
+		os.write(oom_adj_fd, "0\n")
+		os.close(oom_adj_fd)
 
 def main(argv=None):
-   if argv is None:
-      argv = sys.argv
-   try:
-      opts, args = getopt.getopt(argv[1:], "f", ["foreground"])
-   except getopt.GetoptError, err:
-      # print help information and exit:
-      print str(err) # will print something like "option -a not recognized"
-      # usage()
-      return 2
-   foreground = False
-   for o, a in opts:
-      if o in ("-f", "--foreground"):
-         foreground = True
-      else:
-         assert False, "unhandled option"
-   if foreground == False:
-      pid = os.fork();
-      if pid != 0:
-         os._exit(0)
-      os.close(0)
-      os.close(1)
-      os.close(2)
-
-   # adjust oom preference
-   make_invincible()
-
-   # configure environment of children
-   env = {"PYTHONPATH":TASHI_PATH+"/src"}
-   while True:
-      pid = os.fork();
-      if pid == 0:
-         # child
-         # nodemanagers are vulnerable, not the supervisor
-         make_vulnerable()
-         if foreground == False:
-            try:
-               lfd = os.open(LOG_FILE, os.O_APPEND|os.O_CREAT|os.O_WRONLY)
-            except IOError:
-               lfd = os.open("/dev/null", os.O_WRONLY)
-            # make this fd stdout and stderr
-            os.dup2(lfd, 1)
-            os.dup2(lfd, 2)
-            # close stdin
-            os.close(0)
-         os.chdir(TASHI_PATH)
-         os.execle("./bin/nodemanager.py", "./bin/nodemanager.py", env)
-         os._exit(-1)
-      # sleep before checking child status
-      time.sleep(SLEEP_INTERVAL)
-      os.waitpid(pid, 0)
-   return 0
+	if argv is None:
+		argv = sys.argv
+	try:
+		opts, args = getopt.getopt(argv[1:], "f", ["foreground"])
+	except getopt.GetoptError, err:
+		# print help information and exit:
+		print str(err) # will print something like "option -a not recognized"
+		# usage()
+		return 2
+	foreground = False
+	for o, a in opts:
+		if o in ("-f", "--foreground"):
+			foreground = True
+		else:
+			assert False, "unhandled option"
+	if foreground == False:
+		pid = os.fork();
+		if pid != 0:
+			os._exit(0)
+		os.close(0)
+		os.close(1)
+		os.close(2)
+
+	# adjust oom preference
+	make_invincible()
+
+	# configure environment of children
+	env = {"PYTHONPATH":TASHI_PATH+"/src"}
+	while True:
+		pid = os.fork();
+		if pid == 0:
+			# child
+			# nodemanagers are vulnerable, not the supervisor
+			make_vulnerable()
+			if foreground == False:
+				try:
+					lfd = os.open(LOG_FILE, os.O_APPEND|os.O_CREAT|os.O_WRONLY)
+				except IOError:
+					lfd = os.open("/dev/null", os.O_WRONLY)
+				# make this fd stdout and stderr
+				os.dup2(lfd, 1)
+				os.dup2(lfd, 2)
+				# close stdin
+				os.close(0)
+			os.chdir(TASHI_PATH)
+			os.execle("./bin/nodemanager.py", "./bin/nodemanager.py", env)
+			os._exit(-1)
+		# sleep before checking child status
+		time.sleep(SLEEP_INTERVAL)
+		os.waitpid(pid, 0)
+	return 0
 
 if __name__ == "__main__":
-   sys.exit(main())
+	sys.exit(main())
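
A small aside on the OOM handling that make_invincible()/make_vulnerable() perform: they write an adjustment to /proc/self/oom_adj, which newer Linux kernels deprecate in favour of /proc/self/oom_score_adj. A hedged sketch of a helper that prefers the newer file and falls back to the legacy one (the function name and the -1000/-17 "disable" pair are illustrative; only the oom_adj write appears in this commit):

import os

def protect_from_oom():
	# Try the current interface first, then the legacy one used in nmd.py.
	for path, value in (("/proc/self/oom_score_adj", "-1000\n"),
			("/proc/self/oom_adj", "-17\n")):
		try:
			fd = os.open(path, os.O_WRONLY)
		except (OSError, IOError):
			continue
		os.write(fd, value)
		os.close(fd)
		return True
	return False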

Modified: incubator/tashi/branches/stroucki-dropthrift/src/zoni/client/zoni-cli.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-dropthrift/src/zoni/client/zoni-cli.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-dropthrift/src/zoni/client/zoni-cli.py (original)
+++ incubator/tashi/branches/stroucki-dropthrift/src/zoni/client/zoni-cli.py Wed Feb 29 03:23:15 2012
@@ -327,11 +327,11 @@ def main():
 	if (options.nodeName):
 		cmdargs["sys_id"] = options.nodeName
 
-	if 	(options.numCores or options.clockSpeed or options.numMemory or options.numProcs or options.cpuFlags) and not options.showResources:
-			usage = "MISSING OPTION: When specifying hardware parameters, you need the -s or --showResources switch"
-			print usage
-			parser.print_help()	
-			exit()
+	if (options.numCores or options.clockSpeed or options.numMemory or options.numProcs or options.cpuFlags) and not options.showResources:
+		usage = "MISSING OPTION: When specifying hardware parameters, you need the -s or --showResources switch"
+		print usage
+		parser.print_help()	
+		exit()
 
 	if options.getResources:
 		print "ALL resources"

Modified: incubator/tashi/branches/stroucki-dropthrift/src/zoni/extra/util.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-dropthrift/src/zoni/extra/util.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-dropthrift/src/zoni/extra/util.py (original)
+++ incubator/tashi/branches/stroucki-dropthrift/src/zoni/extra/util.py Wed Feb 29 03:23:15 2012
@@ -19,6 +19,7 @@
 #
 
 import os
+import sys
 import string
 import ConfigParser
 import time
@@ -218,19 +219,25 @@ def createKey(name):
 	return val
 	
 
-
+def __getShellFn():
+	if sys.version_info < (2, 6, 1):
+		from IPython.Shell import IPShellEmbed
+		return IPShellEmbed()
+	else:
+		import IPython
+		return IPython.embed()
 
 def debugConsole(globalDict):
 	"""A debugging console that optionally uses pysh"""
 	def realDebugConsole(globalDict):
 		try :
 			import atexit
-			from IPython.Shell import IPShellEmbed
+			shellfn = __getShellFn()
 			def resetConsole():
 # XXXpipe: make input window sane
 				(stdin, stdout) = os.popen2("reset")
 				stdout.read()
-			dbgshell = IPShellEmbed()
+			dbgshell = shellfn()
 			atexit.register(resetConsole)
 			dbgshell(local_ns=globalDict, global_ns=globalDict)
 		except Exception:

Modified: incubator/tashi/branches/stroucki-dropthrift/src/zoni/hardware/delldrac.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-dropthrift/src/zoni/hardware/delldrac.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-dropthrift/src/zoni/hardware/delldrac.py (original)
+++ incubator/tashi/branches/stroucki-dropthrift/src/zoni/hardware/delldrac.py Wed Feb 29 03:23:15 2012
@@ -147,7 +147,7 @@ class dellDrac(SystemManagementInterface
 		for val in fout.readlines():
 			if "OK" in val:
 				code = 1
- 			if "CURRENTLY POWER-OFF" in val:
+			if "CURRENTLY POWER-OFF" in val:
 				self.log.info("Hardware already power off : %s", self.hostname)
 				code = 1
 		if code < 1:
@@ -171,7 +171,7 @@ class dellDrac(SystemManagementInterface
 		for val in fout.readlines():
 			if "OK" in val:
 				code = 1
- 			if "CURRENTLY POWER-OFF" in val:
+			if "CURRENTLY POWER-OFF" in val:
 				self.log.info("Hardware already power off : %s", self.hostname)
 				code = 1
 		if code < 1:

Modified: incubator/tashi/branches/stroucki-dropthrift/src/zoni/hardware/dellswitch.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-dropthrift/src/zoni/hardware/dellswitch.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-dropthrift/src/zoni/hardware/dellswitch.py (original)
+++ incubator/tashi/branches/stroucki-dropthrift/src/zoni/hardware/dellswitch.py Wed Feb 29 03:23:15 2012
@@ -54,7 +54,7 @@ class HwDellSwitch(HwSwitchInterface):
 			pass
 
 
- 	def setVerbose(self, verbose):
+	def setVerbose(self, verbose):
 		self.verbose = verbose
 
 	def __login(self):

Modified: incubator/tashi/branches/stroucki-dropthrift/src/zoni/hardware/hpswitch.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-dropthrift/src/zoni/hardware/hpswitch.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-dropthrift/src/zoni/hardware/hpswitch.py (original)
+++ incubator/tashi/branches/stroucki-dropthrift/src/zoni/hardware/hpswitch.py Wed Feb 29 03:23:15 2012
@@ -74,10 +74,10 @@ class HwHPSwitch(HwSwitchInterface):
 		child.sendline(cmd)
 		opt = child.expect(["Confirm(.*)", "No save(.*)", pexpect.EOF, pexpect.TIMEOUT])
 		if opt == 0:
-				print "saving to flash"
-				child.sendline("y\n")
+			print "saving to flash"
+			child.sendline("y\n")
 		if opt == 1:
-				print "no save needed"
+			print "no save needed"
 		child.sendline('exit')
 		child.terminate()
 

Modified: incubator/tashi/branches/stroucki-dropthrift/src/zoni/install/db/zoniDbSetup.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-dropthrift/src/zoni/install/db/zoniDbSetup.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-dropthrift/src/zoni/install/db/zoniDbSetup.py (original)
+++ incubator/tashi/branches/stroucki-dropthrift/src/zoni/install/db/zoniDbSetup.py Wed Feb 29 03:23:15 2012
@@ -27,8 +27,8 @@ try:
 	import optparse
 	import getpass
 except ImportError, e:
-        print "Module not installed : %s" % e
-        exit()
+	print "Module not installed : %s" % e
+	exit()
 
 
 a = os.path.join("../")
@@ -406,7 +406,7 @@ def execQuery(conn, query):
 
 def entryExists(conn, table, col, checkVal):
 	query = "select * from " + table + " where " + col + " = '" + checkVal + "'"
- 	r = execQuery(conn, query)
+	r = execQuery(conn, query)
 	res = r.fetchall()
 	if len(res) > 0:
 		return (1, res)
@@ -415,5 +415,5 @@ def entryExists(conn, table, col, checkV
 
 
 if __name__ == "__main__":
-    main()
+	main()
 

Modified: incubator/tashi/branches/stroucki-dropthrift/src/zoni/install/dnsdhcp/zoniDnsDhcpSetup.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-dropthrift/src/zoni/install/dnsdhcp/zoniDnsDhcpSetup.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-dropthrift/src/zoni/install/dnsdhcp/zoniDnsDhcpSetup.py (original)
+++ incubator/tashi/branches/stroucki-dropthrift/src/zoni/install/dnsdhcp/zoniDnsDhcpSetup.py Wed Feb 29 03:23:15 2012
@@ -79,5 +79,5 @@ def main():
 
 
 if __name__ == "__main__":
-    main()
+	main()
 

Propchange: incubator/tashi/branches/stroucki-registration/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Feb 29 03:23:15 2012
@@ -7,6 +7,7 @@
 /incubator/tashi/branches/stroucki-slotsbug:1244839-1245041
 /incubator/tashi/branches/stroucki-tashi10:1294393-1294727
 /incubator/tashi/branches/stroucki-tashi11:1294393-1294415
+/incubator/tashi/branches/stroucki-tashi2:1294935-1294944
 /incubator/tashi/branches/stroucki-tashi8:1294393-1294427
 /incubator/tashi/branches/zoni-dev/trunk:1034098-1177646
-/incubator/tashi/trunk:1241775-1294840
+/incubator/tashi/trunk:1241775-1294945

Modified: incubator/tashi/branches/stroucki-registration/Makefile
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-registration/Makefile?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-registration/Makefile (original)
+++ incubator/tashi/branches/stroucki-registration/Makefile Wed Feb 29 03:23:15 2012
@@ -128,11 +128,11 @@ rmzoni-cli:
 	if test -e /usr/local/bin/zoni; then echo Removing zoni...; rm /usr/local/bin/zoni; fi
 
 ## for now only print warnings having to do with bad indentation. pylint doesn't make it easy to enable only 1,2 checks
-disabled_warnings=$(shell pylint --list-msgs|grep :W0| awk -F: '{ORS=","; if ($$2 != "W0311" && $$2 != "W0312"){ print $$2}}')
+disabled_warnings=$(shell pylint --list-msgs|grep :W0| awk -F: '{ORS=","; if ($$2 != "W0311" && $$2 != "W0312"){ print $$2}}')",F0401"
 pysrc=$(shell find . \! -path '*gen-py*' \! -path '*services*' \! -path '*messagingthrift*' \! -name '__init__.py' -name "*.py")
 tidy: $(addprefix tidyfile/,$(pysrc))
-	@echo Insuring .py files are nice and tidy!
+	@echo Ensured .py files are nice and tidy!
 
 tidyfile/%: %
 	@echo Checking tidy for $*
-	pylint --report=no --disable-msg-cat=R,C,E --disable-msg=$(disabled_warnings) --indent-string="\t" $* 2> /dev/null; 
+	pylint --report=no --disable=R,C,E --disable=$(disabled_warnings) --indent-string="\t" $* 2> /dev/null; 

Modified: incubator/tashi/branches/stroucki-registration/src/tashi/accounting/accountingservice.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-registration/src/tashi/accounting/accountingservice.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-registration/src/tashi/accounting/accountingservice.py (original)
+++ incubator/tashi/branches/stroucki-registration/src/tashi/accounting/accountingservice.py Wed Feb 29 03:23:15 2012
@@ -5,15 +5,15 @@
 # to you under the Apache License, Version 2.0 (the
 # "License"); you may not use this file except in compliance
 # with the License.  You may obtain a copy of the License at
-# 
+#
 #   http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing,
 # software distributed under the License is distributed on an
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
-# under the License.    
+# under the License.
 
 import logging
 import threading
@@ -22,43 +22,43 @@ import time
 from tashi import createClient
 
 class AccountingService(object):
-        """RPC service for the Accounting service"""
-        
-        def __init__(self, config):
-            self.log = logging.getLogger(__name__)
-            self.log.setLevel(logging.INFO)
-
-	    self.config = config
-
-	    self.pollSleep = None
-
-	    # XXXstroucki new python has fallback values
-	    try:
-		    self.pollSleep = self.config.getint("AccountingService", "pollSleep")
-	    except:
-		    pass
+	"""RPC service for the Accounting service"""
 
-	    if self.pollSleep is None:
-		    self.pollSleep = 600
+	def __init__(self, config):
+		self.log = logging.getLogger(__name__)
+		self.log.setLevel(logging.INFO)
 
-            self.cm = createClient(config)
-            threading.Thread(target=self.__start).start()
+		self.config = config
+
+		self.pollSleep = None
+
+		# XXXstroucki new python has fallback values
+		try:
+			self.pollSleep = self.config.getint("AccountingService", "pollSleep")
+		except:
+			pass
+
+		if self.pollSleep is None:
+			self.pollSleep = 600
+
+		self.cm = createClient(config)
+		threading.Thread(target=self.__start).start()
 
 	# remote
-        def record(self, strings):
-            for string in strings:
-                self.log.info("Remote: %s" % (string))
-
-        def __start(self):
-            while True:
-                try:
-                    instances = self.cm.getInstances()
-                    for instance in instances:
-                        # XXXstroucki this currently duplicates what the CM was doing.
-                        self.log.info('Accounting: id %d host %d vmId %d user %d cores %d memory %d' % (instance.id, instance.hostId, instance.vmId, instance.userId, instance.cores, instance.memory))
-                except:
-                    self.log.warning("Accounting iteration failed")
-
-                        
-                # wait to do the next iteration
-                time.sleep(self.pollSleep)
+	def record(self, strings):
+		for string in strings:
+			self.log.info("Remote: %s" % (string))
+
+	def __start(self):
+		while True:
+			try:
+				instances = self.cm.getInstances()
+				for instance in instances:
+					# XXXstroucki this currently duplicates what the CM was doing.
+					self.log.info('Accounting: id %d host %d vmId %d user %d cores %d memory %d' % (instance.id, instance.hostId, instance.vmId, instance.userId, instance.cores, instance.memory))
+			except:
+				self.log.warning("Accounting iteration failed")
+
+
+			# wait to do the next iteration
+			time.sleep(self.pollSleep)
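
The XXXstroucki note about fallback values refers to the newer configparser API: on Python 3 the try/except around getint() collapses to a single call with a fallback keyword. A one-line sketch (config being the parser object used above; this code still targets the older ConfigParser module, hence the try/except in the hunk):

pollSleep = config.getint("AccountingService", "pollSleep", fallback=600)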

Modified: incubator/tashi/branches/stroucki-registration/src/tashi/agents/locality-server.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-registration/src/tashi/agents/locality-server.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-registration/src/tashi/agents/locality-server.py (original)
+++ incubator/tashi/branches/stroucki-registration/src/tashi/agents/locality-server.py Wed Feb 29 03:23:15 2012
@@ -6,15 +6,15 @@
 # to you under the Apache License, Version 2.0 (the
 # "License"); you may not use this file except in compliance
 # with the License.  You may obtain a copy of the License at
-# 
+#
 #   http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing,
 # software distributed under the License is distributed on an
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
-# under the License. 
+# under the License.
 
 # this module provides a service to locate servers that are close
 # to a VM. Uses all-pairs shortest path algorithm. Need to provide
@@ -45,184 +45,184 @@ from scipy import *
 
 cnames = {}
 def cannonicalName(hn):
-       try:
-               if cnames.has_key(hn):
-                       return cnames[hn]
-               r = socket.gethostbyname_ex(hn)[0]
-               cnames[hn] = r
-               return r
-       except:
-               return hn
+	try:
+		if cnames.has_key(hn):
+			return cnames[hn]
+		r = socket.gethostbyname_ex(hn)[0]
+		cnames[hn] = r
+		return r
+	except:
+		return hn
 
 # define matrix multiplication that can be used to calculate a min-plus
 # distance product
 def genMul(A, B, add, mult):
-       '''generalized matrix multiplication'''
-       C = zeros((shape(A)[0], shape(B)[1]))
-       for i in range(shape(C)[0]):
-               for j in range(shape(C)[1]):
-                       C[i,j] = add(mult(A[i,:], B[:,j]))
-       return C
+	'''generalized matrix multiplication'''
+	C = zeros((shape(A)[0], shape(B)[1]))
+	for i in range(shape(C)[0]):
+		for j in range(shape(C)[1]):
+			C[i,j] = add(mult(A[i,:], B[:,j]))
+	return C
 
 def addHost(graph, hostVals, host):
-       if not graph.has_key(host):
-               graph[host] = []
-       if not hostVals.has_key(host):
-               hostVals[host] = len(hostVals)
+	if not graph.has_key(host):
+		graph[host] = []
+	if not hostVals.has_key(host):
+		hostVals[host] = len(hostVals)
 
 def graphConnect(graph, h1, h2):
-       if not h1 in graph[h2]:
-               graph[h2].append(h1)
-       if not h2 in graph[h1]:
-               graph[h1].append(h2)
+	if not h1 in graph[h2]:
+		graph[h2].append(h1)
+	if not h2 in graph[h1]:
+		graph[h1].append(h2)
 
 def graphFromFile(fn = 'serverLayout', graph = {}, hostVals = {}):
-       f = open(fn)
-       for line in f.readlines():
-               line = line.split()
-               if len(line) < 1:
-                       continue
-               server = cannonicalName(line[0].strip())
-
-               addHost(graph, hostVals, server)
-               for peer in line[1:]:
-                       peer = cannonicalName(peer.strip())
-                       addHost(graph, hostVals, peer)
-                       graphConnect(graph, server, peer)
-       return graph, hostVals
+	f = open(fn)
+	for line in f.readlines():
+		line = line.split()
+		if len(line) < 1:
+			continue
+		server = cannonicalName(line[0].strip())
+
+		addHost(graph, hostVals, server)
+		for peer in line[1:]:
+			peer = cannonicalName(peer.strip())
+			addHost(graph, hostVals, peer)
+			graphConnect(graph, server, peer)
+	return graph, hostVals
 
 def graphFromTashi(client, transport, graph={}, hostVals={}):
-       print 'getting graph'
-       if not transport.isOpen():
-               transport.open()
-       hosts = client.getHosts()
-       instances = client.getInstances()
-       for instance in instances:
-               host = [cannonicalName(h.name) for h in hosts if h.id == instance.hostId]
-               if len(host) <1 :
-                       print 'cant find vm host'
-                       continue
-               host = host[0]
-               print 'host is ', host
-               addHost(graph, hostVals, host)
-               print 'added host'
-               vmhost = cannonicalName(instance.name)
-               addHost(graph, hostVals, vmhost)
-               print 'added vm'
-               graphConnect(graph, host, vmhost)
-               print 'connected'
-       print 'returning from graphFromTashi'
-       return graph, hostVals
+	print 'getting graph'
+	if not transport.isOpen():
+		transport.open()
+	hosts = client.getHosts()
+	instances = client.getInstances()
+	for instance in instances:
+		host = [cannonicalName(h.name) for h in hosts if h.id == instance.hostId]
+		if len(host) <1 :
+			print 'cant find vm host'
+			continue
+		host = host[0]
+		print 'host is ', host
+		addHost(graph, hostVals, host)
+		print 'added host'
+		vmhost = cannonicalName(instance.name)
+		addHost(graph, hostVals, vmhost)
+		print 'added vm'
+		graphConnect(graph, host, vmhost)
+		print 'connected'
+	print 'returning from graphFromTashi'
+	return graph, hostVals
 
 
 
 def graphToArray(graph, hostVals):
-       a = zeros((len(hostVals), len(hostVals)))
-       for host in graph.keys():
-               if not hostVals.has_key(host):
-                       continue
-               a[hostVals[host], hostVals[host]] = 1
-               for peer in graph[host]:
-                       if not hostVals.has_key(peer):
-                               continue
-                       a[hostVals[host], hostVals[peer]] = 1
-       a[a==0] = inf
-       for i in range(shape(a)[0]):
-               a[i,i]=0
-       return a
+	a = zeros((len(hostVals), len(hostVals)))
+	for host in graph.keys():
+		if not hostVals.has_key(host):
+			continue
+		a[hostVals[host], hostVals[host]] = 1
+		for peer in graph[host]:
+			if not hostVals.has_key(peer):
+				continue
+			a[hostVals[host], hostVals[peer]] = 1
+	a[a==0] = inf
+	for i in range(shape(a)[0]):
+		a[i,i]=0
+	return a
 
 def shortestPaths(graphArray):
-       a = graphArray
-       for i in range(math.ceil(math.log(shape(a)[0],2))):
-               a = genMul(a,a,min,plus)
-       return a
+	a = graphArray
+	for i in range(math.ceil(math.log(shape(a)[0],2))):
+		a = genMul(a,a,min,plus)
+	return a
 
 def plus(A, B):
-       return A + B
+	return A + B
 
 
 def getHopCountMatrix(sourceHosts, destHosts, array, hostVals):
-       a = zeros((len(sourceHosts), len(destHosts)))
-       a[a==0] = inf
-       for i in range(len(sourceHosts)):
-               sh = cannonicalName(sourceHosts[i])
-               shv = None
-               if hostVals.has_key(sh):
-                       shv = hostVals[sh]
-               else:
-                       print 'host not found', sh
-                       continue
-               for j in range(len(destHosts)):
-                       dh = cannonicalName(destHosts[j])
-                       dhv = None
-                       if hostVals.has_key(dh):
-                               dhv = hostVals[dh]
-                       else:
-                               print 'dest not found', dh
-                               continue
-                       print sh, dh, i,j, shv, dhv, array[shv, dhv]
-                       a[i,j] = array[shv, dhv]
-       return a
+	a = zeros((len(sourceHosts), len(destHosts)))
+	a[a==0] = inf
+	for i in range(len(sourceHosts)):
+		sh = cannonicalName(sourceHosts[i])
+		shv = None
+		if hostVals.has_key(sh):
+			shv = hostVals[sh]
+		else:
+			print 'host not found', sh
+			continue
+		for j in range(len(destHosts)):
+			dh = cannonicalName(destHosts[j])
+			dhv = None
+			if hostVals.has_key(dh):
+				dhv = hostVals[dh]
+			else:
+				print 'dest not found', dh
+				continue
+			print sh, dh, i,j, shv, dhv, array[shv, dhv]
+			a[i,j] = array[shv, dhv]
+	return a
 
 
 class LocalityService:
-       def __init__(self):
-               (config, configFiles) = getConfig(["Agent"])
-               self.port = int(config.get('LocalityService', 'port'))
-               print 'Locality service on port %i' % self.port
-               self.processor = localityservice.Processor(self)
-               self.transport = TSocket.TServerSocket(self.port)
-               self.tfactory = TTransport.TBufferedTransportFactory()
-               self.pfactory = TBinaryProtocol.TBinaryProtocolFactory()
-               self.server = TServer.TThreadedServer(self.processor,
-                                                     self.transport,
-                                                     self.tfactory,
-                                                     self.pfactory)
-
-               self.hostVals =[]
-               self.array = array([[]])
-               self.rtime = 0
-
-
-               self.fileName = os.path.expanduser(config.get("LocalityService", "staticLayout"))
-               (self.client, self.transport) = createClient(config)
-
-               self.server.serve()
-
-       @synchronizedmethod
-       def refresh(self):
-               if time.time() - self.rtime < 10:
-                       return
-               g, self.hostVals = graphFromFile(self.fileName)
-               try:
-                       g, self.hostVals = graphFromTashi(self.client, self.transport, g, self.hostVals)
-               except e:
-                       print e
-                       print 'could not get instance list from cluster manager'
-               print 'graph to array'
-               a = graphToArray(g, self.hostVals)
-               print 'calling shortest paths ', a.shape
-               self.array = shortestPaths(a)
-               print 'computed shortest paths'
-               print self.array
-               print self.hostVals
-       @synchronizedmethod
-       def getHopCountMatrix(self, sourceHosts, destHosts):
-               self.refresh()
-               print 'getting hop count matrix for', sourceHosts, destHosts
-               hcm =  getHopCountMatrix(sourceHosts, destHosts, self.array, self.hostVals)
-               print hcm
-               return hcm
+	def __init__(self):
+		(config, configFiles) = getConfig(["Agent"])
+		self.port = int(config.get('LocalityService', 'port'))
+		print 'Locality service on port %i' % self.port
+		self.processor = localityservice.Processor(self)
+		self.transport = TSocket.TServerSocket(self.port)
+		self.tfactory = TTransport.TBufferedTransportFactory()
+		self.pfactory = TBinaryProtocol.TBinaryProtocolFactory()
+		self.server = TServer.TThreadedServer(self.processor,
+							self.transport,
+							self.tfactory,
+							self.pfactory)
+                
+		self.hostVals =[]
+		self.array = array([[]])
+		self.rtime = 0
+
+
+		self.fileName = os.path.expanduser(config.get("LocalityService", "staticLayout"))
+		(self.client, self.transport) = createClient(config)
+
+		self.server.serve()
+
+	@synchronizedmethod
+	def refresh(self):
+		if time.time() - self.rtime < 10:
+			return
+		g, self.hostVals = graphFromFile(self.fileName)
+		try:
+			g, self.hostVals = graphFromTashi(self.client, self.transport, g, self.hostVals)
+		except e:
+			print e
+			print 'could not get instance list from cluster manager'
+		print 'graph to array'
+		a = graphToArray(g, self.hostVals)
+		print 'calling shortest paths ', a.shape
+		self.array = shortestPaths(a)
+		print 'computed shortest paths'
+		print self.array
+		print self.hostVals
+	@synchronizedmethod
+	def getHopCountMatrix(self, sourceHosts, destHosts):
+		self.refresh()
+		print 'getting hop count matrix for', sourceHosts, destHosts
+		hcm =  getHopCountMatrix(sourceHosts, destHosts, self.array, self.hostVals)
+		print hcm
+		return hcm
 
 
 def main():
 
-       #XXXstroucki This code has not been updated for several years.
-       # It may still be useful as an example.
-       import sys
-       sys.exit(0);
+	#XXXstroucki This code has not been updated for several years.
+	# It may still be useful as an example.
+	import sys
+	sys.exit(0);
 
-       ls = LocalityService()
+	ls = LocalityService()
 
 if __name__ == "__main__":
-       main()
+	main()
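
For reference, the genMul()/shortestPaths() pair above computes all-pairs shortest hop counts by repeatedly squaring the adjacency matrix under the min-plus (tropical) product, C[i,j] = min over k of (A[i,k] + B[k,j]), so ceil(log2(n)) squarings suffice. A tiny self-contained numpy sketch (names are illustrative; the agent itself uses "from scipy import *"):

import numpy as np

def min_plus(A, B):
	# C[i, j] = min_k (A[i, k] + B[k, j]), i.e. what genMul(A, B, min, plus) computes
	n, m = A.shape[0], B.shape[1]
	C = np.full((n, m), np.inf)
	for i in range(n):
		for j in range(m):
			C[i, j] = np.min(A[i, :] + B[:, j])
	return C

# 3-node path graph 0 - 1 - 2: inf marks "no edge", 0 on the diagonal
D = np.array([[0.0, 1.0, np.inf],
		[1.0, 0.0, 1.0],
		[np.inf, 1.0, 0.0]])
D = min_plus(D, D)  # one squaring suffices here; D[0, 2] is now 2.0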

Modified: incubator/tashi/branches/stroucki-registration/src/tashi/agents/primitive.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-registration/src/tashi/agents/primitive.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-registration/src/tashi/agents/primitive.py (original)
+++ incubator/tashi/branches/stroucki-registration/src/tashi/agents/primitive.py Wed Feb 29 03:23:15 2012
@@ -43,7 +43,7 @@ class Primitive(object):
 					self.hooks.append(instantiateImplementation(value, config, cmclient, False))
 				except:
 					self.log.exception("Failed to load hook %s" % (value))
-	        self.hosts = {}
+		self.hosts = {}
 		self.load = {}
 		self.instances = {}
 		self.muffle = {}
@@ -62,9 +62,9 @@ class Primitive(object):
 		for h in self.cm.getHosts():
 			#XXXstroucki get all hosts here?
 			#if (h.up == True and h.state == HostState.Normal):
-				hosts[ctr] = h
-				ctr = ctr + 1
-				load[h.id] = []
+			hosts[ctr] = h
+			ctr = ctr + 1
+			load[h.id] = []
 			
 		load[None] = []
 		_instances = self.cm.getInstances()
@@ -199,7 +199,7 @@ class Primitive(object):
 							if myDisk == i.disks[0].uri and i.disks[0].persistent == True:
 								count += 1
 						if count > 1:
-								minMaxHost = None
+							minMaxHost = None
 
 			if (minMaxHost):
 				# found a host

Modified: incubator/tashi/branches/stroucki-registration/src/tashi/clustermanager/clustermanagerservice.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-registration/src/tashi/clustermanager/clustermanagerservice.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-registration/src/tashi/clustermanager/clustermanagerservice.py (original)
+++ incubator/tashi/branches/stroucki-registration/src/tashi/clustermanager/clustermanagerservice.py Wed Feb 29 03:23:15 2012
@@ -19,7 +19,7 @@ import logging
 import threading
 import time
 
-from tashi.rpycservices import rpycservices             
+from tashi.rpycservices import rpycservices	     
 from tashi.rpycservices.rpyctypes import Errors, InstanceState, HostState, TashiException
 from tashi import boolean, ConnectionManager, vmStates, version, scrubString
 
@@ -126,7 +126,7 @@ class ClusterManagerService(object):
 			except:
 				self.log.exception("Invalid host data")
 
-                secondary = ','.join(filter(None, (hostText, instanceText)))
+		secondary = ','.join(filter(None, (hostText, instanceText)))
 
 		line = "%s|%s|%s" % (now, text, secondary)
 
@@ -756,12 +756,12 @@ class ClusterManagerService(object):
 		self.data.releaseInstance(instance)
 		return "success"
 
-        def registerHost(self, hostname, memory, cores, version):
-                hostId, alreadyRegistered = self.data.registerHost(hostname, memory, cores, version)
-                if alreadyRegistered:
-                        self.log.info("Host %s is already registered, it was updated now" % hostname)
-                else:
-                        self.log.info("A host was registered - hostname: %s, version: %s, memory: %s, cores: %s" % (hostname, version, memory, cores))
+	def registerHost(self, hostname, memory, cores, version):
+		hostId, alreadyRegistered = self.data.registerHost(hostname, memory, cores, version)
+		if alreadyRegistered:
+			self.log.info("Host %s is already registered, it was updated now" % hostname)
+		else:
+			self.log.info("A host was registered - hostname: %s, version: %s, memory: %s, cores: %s" % (hostname, version, memory, cores))
 
 		try:
 			host = self.data.getHost(hostId)
@@ -769,9 +769,9 @@ class ClusterManagerService(object):
 		except:
 			self.log.warning("Failed to lookup host %s" % hostId)
 
-                return hostId
+		return hostId
 
-        def unregisterHost(self, hostId):
+	def unregisterHost(self, hostId):
 		try:
 			host = self.data.getHost(hostId)
 			self.__ACCOUNT("CM HOST UNREGISTER", host=host)
@@ -779,9 +779,9 @@ class ClusterManagerService(object):
 			self.log.warning("Failed to lookup host %s" % hostId)
 			return
 
-                self.data.unregisterHost(hostId)
-                self.log.info("Host %s was unregistered" % hostId)
-                return
+		self.data.unregisterHost(hostId)
+		self.log.info("Host %s was unregistered" % hostId)
+		return
 
 	# service thread
 	def __monitorCluster(self):
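
For context, the registration handshake these re-indented methods expose: the node manager calls registerHost() with its hostname and (eventually) real capacity figures and receives a hostId back; unregisterHost() reverses it. A hedged sketch of a caller, using only the signature visible above; the cm client proxy and the placeholder capacity values mirror the commented-out call in nodemanagerservice.py and are assumptions here:

import socket

hostname = socket.gethostname()
# cm is assumed to be a ClusterManager client proxy, e.g. obtained via createClient(config)
hostId = cm.registerHost(hostname, 0, 0, "empty")
# ... later, to remove the host from the cluster:
# cm.unregisterHost(hostId)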

Modified: incubator/tashi/branches/stroucki-registration/src/tashi/clustermanager/data/ldapoverride.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-registration/src/tashi/clustermanager/data/ldapoverride.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-registration/src/tashi/clustermanager/data/ldapoverride.py (original)
+++ incubator/tashi/branches/stroucki-registration/src/tashi/clustermanager/data/ldapoverride.py Wed Feb 29 03:23:15 2012
@@ -72,16 +72,16 @@ class LdapOverride(DataInterface):
 	def getNetwork(self, id):
 		return self.baseDataObject.getNetwork(id)
 
-        def getImages(self):
-                count = 0
-                myList = []
-                for i in self.dfs.list("images"):
-                        myFile = self.dfs.getLocalHandle("images/" + i)
-                        if os.path.isfile(myFile):
-                                image = LocalImages(d={'id':count, 'imageName':i, 'imageSize':humanReadable(self.dfs.stat(myFile)[6])})
-                                myList.append(image)
-                                count += 1
-                return myList
+	def getImages(self):
+		count = 0
+		myList = []
+		for i in self.dfs.list("images"):
+			myFile = self.dfs.getLocalHandle("images/" + i)
+			if os.path.isfile(myFile):
+				image = LocalImages(d={'id':count, 'imageName':i, 'imageSize':humanReadable(self.dfs.stat(myFile)[6])})
+				myList.append(image)
+				count += 1
+		return myList
 
 	def fetchFromLdap(self):
 		now = time.time()

Modified: incubator/tashi/branches/stroucki-registration/src/tashi/clustermanager/data/sql.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-registration/src/tashi/clustermanager/data/sql.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-registration/src/tashi/clustermanager/data/sql.py (original)
+++ incubator/tashi/branches/stroucki-registration/src/tashi/clustermanager/data/sql.py Wed Feb 29 03:23:15 2012
@@ -284,16 +284,17 @@ class SQL(DataInterface):
 		network = Network(d={'id':r[0], 'name':r[1]})
 		return network
 
-        def getImages(self):
-                count = 0
-                myList = []
-                for i in self.dfs.list("images"):
-                        myFile = self.dfs.getLocalHandle("images/" + i)
-                        if os.path.isfile(myFile):
-                                image = LocalImages(d={'id':count, 'imageName':i, 'imageSize':humanReadable(self.dfs.stat(myFile)[6])})
-                                myList.append(image)
-                                count += 1
-                return myList
+	def getImages(self):
+		count = 0
+		myList = []
+		for i in self.dfs.list("images"):
+			myFile = self.dfs.getLocalHandle("images/" + i)
+			if os.path.isfile(myFile):
+				image = LocalImages(d={'id':count, 'imageName':i, 'imageSize':humanReadable(self.dfs.stat(myFile)[6])})
+				myList.append(image)
+				count += 1
+
+		return myList
 	
 	def getUsers(self):
 		cur = self.executeStatement("SELECT * from users")

Modified: incubator/tashi/branches/stroucki-registration/src/tashi/nodemanager/nodemanagerservice.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-registration/src/tashi/nodemanager/nodemanagerservice.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-registration/src/tashi/nodemanager/nodemanagerservice.py (original)
+++ incubator/tashi/branches/stroucki-registration/src/tashi/nodemanager/nodemanagerservice.py Wed Feb 29 03:23:15 2012
@@ -5,15 +5,15 @@
 # to you under the Apache License, Version 2.0 (the
 # "License"); you may not use this file except in compliance
 # with the License.  You may obtain a copy of the License at
-# 
+#
 #   http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing,
 # software distributed under the License is distributed on an
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
-# under the License.    
+# under the License.
 
 import logging
 import socket
@@ -28,10 +28,10 @@ import tashi
 
 class NodeManagerService(object):
 	"""RPC handler for the NodeManager
-	   
-	   Perhaps in the future I can hide the dfs from the 
+
+	   Perhaps in the future I can hide the dfs from the
 	   VmControlInterface and do all dfs operations here?"""
-	
+
 	def __init__(self, config, vmm):
 		self.config = config
 		self.vmm = vmm
@@ -83,18 +83,18 @@ class NodeManagerService(object):
 		# start service threads
 		threading.Thread(target=self.__registerWithClusterManager).start()
 		threading.Thread(target=self.__statsThread).start()
-	
+
 	def __initAccounting(self):
-                self.accountBuffer = []
-                self.accountLines = 0
-                self.accountingClient = None
-                try:
-                        if (self.accountingHost is not None) and \
-                                    (self.accountingPort is not None):
-                                self.accountingClient=rpycservices.client(self.accountingHost, self.accountingPort)
-                except:
-                        self.log.exception("Could not init accounting")
-			
+		self.accountBuffer = []
+		self.accountLines = 0
+		self.accountingClient = None
+		try:
+			if (self.accountingHost is not None) and \
+						(self.accountingPort is not None):
+				self.accountingClient=rpycservices.client(self.accountingHost, self.accountingPort)
+		except:
+			self.log.exception("Could not init accounting")
+
 	def __loadVmInfo(self):
 		try:
 			self.instances = self.vmm.getInstances()
@@ -135,7 +135,7 @@ class NodeManagerService(object):
 		#if (toSleep > 0):
 			#time.sleep(toSleep)
 
-        def __ACCOUNTFLUSH(self):
+	def __ACCOUNTFLUSH(self):
 		try:
 			if (self.accountingClient is not None):
 				self.accountingClient.record(self.accountBuffer)
@@ -145,33 +145,33 @@ class NodeManagerService(object):
 			self.log.exception("Failed to flush accounting data")
 
 
-        def __ACCOUNT(self, text, instance=None, host=None):
-                now = time.time()
-                instanceText = None
-                hostText = None
+	def __ACCOUNT(self, text, instance=None, host=None):
+		now = time.time()
+		instanceText = None
+		hostText = None
 
-                if instance is not None:
+		if instance is not None:
 			try:
-                        	instanceText = 'Instance(%s)' % (instance)
+				instanceText = 'Instance(%s)' % (instance)
 			except:
 				self.log.exception("Invalid instance data")
 
-                if host is not None:
+		if host is not None:
 			try:
-                        	hostText = "Host(%s)" % (host)
+				hostText = "Host(%s)" % (host)
 			except:
 				self.log.exception("Invalid host data")
 
-                secondary = ','.join(filter(None, (hostText, instanceText)))
+		secondary = ','.join(filter(None, (hostText, instanceText)))
 
-                line = "%s|%s|%s" % (now, text, secondary)
+		line = "%s|%s|%s" % (now, text, secondary)
 
-                self.accountBuffer.append(line)
-                self.accountLines += 1
+		self.accountBuffer.append(line)
+		self.accountLines += 1
 
 		# XXXstroucki think about force flush every so often
-                if (self.accountLines > 0):
-                        self.__ACCOUNTFLUSH()
+		if (self.accountLines > 0):
+			self.__ACCOUNTFLUSH()
 
 
 	# service thread function
@@ -213,14 +213,14 @@ class NodeManagerService(object):
 				self.log.exception('statsThread threw an exception')
 			time.sleep(self.statsInterval)
 
-        def __registerHost(self):
-                hostname = socket.gethostname()
+	def __registerHost(self):
+		hostname = socket.gethostname()
 		# populate some defaults
 		# XXXstroucki: I think it's better if the nodemanager fills these in properly when registering with the clustermanager
 		memory = 0
 		cores = 0
 		version = "empty"
-                #self.cm.registerHost(hostname, memory, cores, version)
+		#self.cm.registerHost(hostname, memory, cores, version)
 
 	def __getInstance(self, vmId):
 		instance = self.instances.get(vmId, None)
@@ -235,7 +235,7 @@ class NodeManagerService(object):
 
 
 		raise TashiException(d={'errno':Errors.NoSuchVmId,'msg':"There is no vmId %d on this host" % (vmId)})
-	
+
 	# remote
 	# Called from VMM to update self.instances
 	# but only changes are Exited, MigrateTrans and Running
@@ -252,11 +252,11 @@ class NodeManagerService(object):
 			# make a note of mismatch, but go on.
 			# the VMM should know best
 			self.log.warning('VM state was %s, call indicated %s' % (vmStates[instance.state], vmStates[old]))
-                        
+
 		instance.state = cur
 
 		self.__ACCOUNT("NM VM STATE CHANGE", instance=instance)
-			      
+
 		newInst = Instance(d={'state':cur})
 		success = lambda: None
 		# send the state change up to the CM
@@ -278,8 +278,8 @@ class NodeManagerService(object):
 	def createInstance(self, instance):
 		vmId = instance.vmId
 		self.instances[vmId] = instance
-		
-	
+
+
 	# remote
 	def instantiateVm(self, instance):
 		self.__ACCOUNT("NM VM INSTANTIATE", instance=instance)
@@ -291,7 +291,7 @@ class NodeManagerService(object):
 			return vmId
 		except:
 			self.log.exception("Failed to start instance")
-	
+
 	# remote
 	def suspendVm(self, vmId, destination):
 		instance = self.__getInstance(vmId)
@@ -300,7 +300,7 @@ class NodeManagerService(object):
 		instance.state = InstanceState.Suspending
 		self.instances[vmId] = instance
 		threading.Thread(target=self.vmm.suspendVm, args=(vmId, destination)).start()
-	
+
 	# called by resumeVm as thread
 	def __resumeVmHelper(self, instance, name):
 		self.vmm.resumeVmHelper(instance, name)
@@ -323,7 +323,7 @@ class NodeManagerService(object):
 			self.log.exception('resumeVm failed')
 			raise TashiException(d={'errno':Errors.UnableToResume,'msg':"resumeVm failed on the node manager"})
 		return instance.vmId
-	
+
 	# remote
 	def prepReceiveVm(self, instance, source):
 		self.__ACCOUNT("NM VM MIGRATE RECEIVE PREP")
@@ -353,7 +353,7 @@ class NodeManagerService(object):
 		self.instances[vmId] = instance
 		threading.Thread(target=self.__migrateVmHelper, args=(instance, target, transportCookie)).start()
 		return
-	
+
 	# called by receiveVm as thread
 	# XXXstroucki migrate in?
 	def __receiveVmHelper(self, instance, transportCookie):
@@ -429,4 +429,3 @@ class NodeManagerService(object):
 	# remote
 	def liveCheck(self):
 		return "alive"
-	

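As context for the accounting calls above: __ACCOUNT builds one pipe-delimited record per event ("timestamp|event text|Host(...),Instance(...)") and __ACCOUNTFLUSH ships the buffered records to the accounting service in a single RPC. A minimal standalone sketch of that buffer-and-flush pattern (the client object and the flushThreshold knob are illustrative names, not part of the patch):

import time

class AccountingBuffer(object):
	"""Sketch of the __ACCOUNT/__ACCOUNTFLUSH pattern.  'client' stands in
	for the rpyc accounting client; any object with a record(list_of_lines)
	method works here."""

	def __init__(self, client=None, flushThreshold=1):
		self.client = client
		self.flushThreshold = flushThreshold
		self.buffer = []

	def account(self, text, instance=None, host=None):
		hostText = "Host(%s)" % (host) if host is not None else None
		instanceText = "Instance(%s)" % (instance) if instance is not None else None
		secondary = ','.join(filter(None, (hostText, instanceText)))
		# record format: epoch-timestamp|event-text|comma-separated context
		self.buffer.append("%s|%s|%s" % (time.time(), text, secondary))
		if len(self.buffer) >= self.flushThreshold:
			self.flush()

	def flush(self):
		if self.client is not None:
			# ship the whole buffer in one RPC, mirroring accountingClient.record()
			self.client.record(self.buffer)
		self.buffer = []
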
Modified: incubator/tashi/branches/stroucki-registration/src/tashi/nodemanager/vmcontrol/qemu.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-registration/src/tashi/nodemanager/vmcontrol/qemu.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-registration/src/tashi/nodemanager/vmcontrol/qemu.py (original)
+++ incubator/tashi/branches/stroucki-registration/src/tashi/nodemanager/vmcontrol/qemu.py Wed Feb 29 03:23:15 2012
@@ -210,7 +210,7 @@ class Qemu(VmControlInterface):
 					if self.scratchVg is not None:
 						log.info("Removing any scratch for %s" % (name))
 						cmd = "/sbin/lvremove --quiet -f %s" % self.scratchVg
-    						result = subprocess.Popen(cmd.split(), executable=cmd.split()[0], stdout=subprocess.PIPE, stderr=open(os.devnull, "w"), close_fds=True).wait()
+						result = subprocess.Popen(cmd.split(), executable=cmd.split()[0], stdout=subprocess.PIPE, stderr=open(os.devnull, "w"), close_fds=True).wait()
 				except:
 					log.warning("Problem cleaning scratch volumes")
 					pass

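The scratch-volume cleanup above is the usual quiet-external-command pattern: build the lvremove command line, hand it to subprocess, discard the output, and look only at the exit status. A hedged sketch of the same idea as a helper (removeScratch is an illustrative name; output is discarded entirely here rather than piped):

import os
import subprocess

def removeScratch(scratchVg):
	"""Run lvremove for the given volume group, silence its output, and
	return the exit status.  Sketch only; the real code lives in Qemu."""
	cmd = "/sbin/lvremove --quiet -f %s" % (scratchVg)
	devnull = open(os.devnull, "w")
	try:
		proc = subprocess.Popen(cmd.split(), stdout=devnull, stderr=devnull, close_fds=True)
		return proc.wait()
	finally:
		devnull.close()
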
Modified: incubator/tashi/branches/stroucki-registration/src/tashi/nodemanager/vmcontrol/vmcontrolinterface.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-registration/src/tashi/nodemanager/vmcontrol/vmcontrolinterface.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-registration/src/tashi/nodemanager/vmcontrol/vmcontrolinterface.py (original)
+++ incubator/tashi/branches/stroucki-registration/src/tashi/nodemanager/vmcontrol/vmcontrolinterface.py Wed Feb 29 03:23:15 2012
@@ -28,8 +28,8 @@ class VmControlInterface(object):
 		self.dfs = dfs
 		self.nm = nm
 
-        def getInstances(self):
-                """Will return a dict of instances by vmId to the caller"""
+	def getInstances(self):
+		"""Will return a dict of instances by vmId to the caller"""
 		raise NotImplementedError
 	
 	def instantiateVm(self, instance):

Modified: incubator/tashi/branches/stroucki-registration/src/tashi/nodemanager/vmcontrol/xenpv.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-registration/src/tashi/nodemanager/vmcontrol/xenpv.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-registration/src/tashi/nodemanager/vmcontrol/xenpv.py (original)
+++ incubator/tashi/branches/stroucki-registration/src/tashi/nodemanager/vmcontrol/xenpv.py Wed Feb 29 03:23:15 2012
@@ -168,7 +168,7 @@ class XenPV(VmControlInterface, threadin
 		vmType = hints.get('vmtype', self.defaultVmType)
 		print 'starting vm with type: ', vmType
 
-                disk0 = 'tap:%s' % self.disktype
+		disk0 = 'tap:%s' % self.disktype
 		diskU = 'xvda1'
 
 		try:
@@ -313,10 +313,10 @@ extra='xencons=tty'
 	@synchronizedmethod
 	def instantiateVm(self, instance):
 
-                try:
-                   disktype = self.config.get('XenPV', 'defaultDiskType')
-                except:
-                   disktype = 'vhd'
+		try:
+			disktype = self.config.get('XenPV', 'defaultDiskType')
+		except:
+			disktype = 'vhd'
 
 		# FIXME: this is NOT the right way to get out hostId
 		self.hostId = instance.hostId

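The defaultDiskType lookup above is the fallback pattern this codebase uses wherever a config option may be absent: old ConfigParser has no per-call default, so the lookup is wrapped and a hard-coded default is used on failure. As a generic sketch (getWithDefault is an illustrative helper, not part of the patch):

import ConfigParser

def getWithDefault(config, section, option, default):
	"""Return config.get(section, option), or 'default' if the section or
	option is missing."""
	try:
		return config.get(section, option)
	except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
		return default

# e.g. disktype = getWithDefault(config, 'XenPV', 'defaultDiskType', 'vhd')
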
Modified: incubator/tashi/branches/stroucki-registration/src/tashi/util.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-registration/src/tashi/util.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-registration/src/tashi/util.py (original)
+++ incubator/tashi/branches/stroucki-registration/src/tashi/util.py Wed Feb 29 03:23:15 2012
@@ -218,17 +218,25 @@ def getConfig(additionalNames=[], additi
 		raise Exception("No config file could be found: %s" % (str(allLocations)))
 	return (config, configFiles)
 
+def __getShellFn():
+	if sys.version_info < (2, 6, 1):
+		from IPython.Shell import IPShellEmbed
+		return IPShellEmbed()
+	else:
+		import IPython
+		return IPython.embed()
+
 def debugConsole(globalDict):
 	"""A debugging console that optionally uses pysh"""
 	def realDebugConsole(globalDict):
 		try :
 			import atexit
-			from IPython.Shell import IPShellEmbed
+			shellfn = __getShellFn()
 			def resetConsole():
 # XXXpipe: make input window sane
 				(stdin, stdout) = os.popen2("reset")
 				stdout.read()
-			dbgshell = IPShellEmbed()
+			dbgshell = shellfn()
 			atexit.register(resetConsole)
 			dbgshell(local_ns=globalDict, global_ns=globalDict)
 		except Exception:

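__getShellFn above keys the IPython embedding API off the Python version: pre-0.11 IPython ships IPython.Shell.IPShellEmbed, while newer releases expose IPython.embed. An equivalent sketch that keys off the import instead of the interpreter version (getShellSketch is an illustrative name; both branches return something callable with no arguments):

def getShellSketch():
	"""Return a callable that drops the caller into an interactive shell,
	working with both old and new IPython releases."""
	try:
		from IPython.Shell import IPShellEmbed	# old IPython API (<= 0.10)
		return IPShellEmbed()			# the instance is callable
	except ImportError:
		import IPython
		return IPython.embed			# call with no args to embed
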
Modified: incubator/tashi/branches/stroucki-registration/src/utils/nmd.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-registration/src/utils/nmd.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-registration/src/utils/nmd.py (original)
+++ incubator/tashi/branches/stroucki-registration/src/utils/nmd.py Wed Feb 29 03:23:15 2012
@@ -16,9 +16,10 @@
  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
  * KIND, either express or implied.  See the License for the
  * specific language governing permissions and limitations
- * under the License.    
+ * under the License.	 
  */
 """
+# XXXstroucki: why not use something like supervise instead?
 
 import os
 import sys
@@ -36,81 +37,81 @@ LOG_FILE="/var/log/nodemanager.log"
  */
 """
 def make_invincible():
-   # dependent on linux
-   try:
-      oom_adj_fd = os.open("/proc/self/oom_adj", os.O_WRONLY)
-   except IOError:
-      pass
-   else:
-      os.write(oom_adj_fd, "-17\n")
-      os.close(oom_adj_fd)
+	# dependent on linux
+	try:
+		oom_adj_fd = os.open("/proc/self/oom_adj", os.O_WRONLY)
+	except IOError:
+		pass
+	else:
+		os.write(oom_adj_fd, "-17\n")
+		os.close(oom_adj_fd)
 
 """
 /* This function resets (on Linux!) its oom scoring to default
  */
 """
 def make_vulnerable():
-   # dependent on linux
-   try:
-      oom_adj_fd = os.open("/proc/self/oom_adj", os.O_WRONLY)
-   except IOError:
-      pass
-   else:
-      os.write(oom_adj_fd, "0\n")
-      os.close(oom_adj_fd)
+	# dependent on linux
+	try:
+		oom_adj_fd = os.open("/proc/self/oom_adj", os.O_WRONLY)
+	except IOError:
+		pass
+	else:
+		os.write(oom_adj_fd, "0\n")
+		os.close(oom_adj_fd)
 
 def main(argv=None):
-   if argv is None:
-      argv = sys.argv
-   try:
-      opts, args = getopt.getopt(argv[1:], "f", ["foreground"])
-   except getopt.GetoptError, err:
-      # print help information and exit:
-      print str(err) # will print something like "option -a not recognized"
-      # usage()
-      return 2
-   foreground = False
-   for o, a in opts:
-      if o in ("-f", "--foreground"):
-         foreground = True
-      else:
-         assert False, "unhandled option"
-   if foreground == False:
-      pid = os.fork();
-      if pid != 0:
-         os._exit(0)
-      os.close(0)
-      os.close(1)
-      os.close(2)
-
-   # adjust oom preference
-   make_invincible()
-
-   # configure environment of children
-   env = {"PYTHONPATH":TASHI_PATH+"/src"}
-   while True:
-      pid = os.fork();
-      if pid == 0:
-         # child
-         # nodemanagers are vulnerable, not the supervisor
-         make_vulnerable()
-         if foreground == False:
-            try:
-               lfd = os.open(LOG_FILE, os.O_APPEND|os.O_CREAT|os.O_WRONLY)
-            except IOError:
-               lfd = os.open("/dev/null", os.O_WRONLY)
-            # make this fd stdout and stderr
-            os.dup2(lfd, 1)
-            os.dup2(lfd, 2)
-            # close stdin
-            os.close(0)
-         os.chdir(TASHI_PATH)
-         os.execle("./bin/nodemanager.py", "./bin/nodemanager.py", env)
-         os._exit(-1)
-      # sleep before checking child status
-      time.sleep(SLEEP_INTERVAL)
-      os.waitpid(pid, 0)
-   return 0
+	if argv is None:
+		argv = sys.argv
+	try:
+		opts, args = getopt.getopt(argv[1:], "f", ["foreground"])
+	except getopt.GetoptError, err:
+		# print help information and exit:
+		print str(err) # will print something like "option -a not recognized"
+		# usage()
+		return 2
+	foreground = False
+	for o, a in opts:
+		if o in ("-f", "--foreground"):
+			foreground = True
+		else:
+			assert False, "unhandled option"
+	if foreground == False:
+		pid = os.fork();
+		if pid != 0:
+			os._exit(0)
+		os.close(0)
+		os.close(1)
+		os.close(2)
+
+	# adjust oom preference
+	make_invincible()
+
+	# configure environment of children
+	env = {"PYTHONPATH":TASHI_PATH+"/src"}
+	while True:
+		pid = os.fork();
+		if pid == 0:
+			# child
+			# nodemanagers are vulnerable, not the supervisor
+			make_vulnerable()
+			if foreground == False:
+				try:
+					lfd = os.open(LOG_FILE, os.O_APPEND|os.O_CREAT|os.O_WRONLY)
+				except IOError:
+					lfd = os.open("/dev/null", os.O_WRONLY)
+				# make this fd stdout and stderr
+				os.dup2(lfd, 1)
+				os.dup2(lfd, 2)
+				# close stdin
+				os.close(0)
+			os.chdir(TASHI_PATH)
+			os.execle("./bin/nodemanager.py", "./bin/nodemanager.py", env)
+			os._exit(-1)
+		# sleep before checking child status
+		time.sleep(SLEEP_INTERVAL)
+		os.waitpid(pid, 0)
+	return 0
 
 if __name__ == "__main__":
-   sys.exit(main())
+	sys.exit(main())

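nmd.py is essentially a tiny supervisor: fork a child, exec the node manager in it, wait for it to die, pause, and respawn, with the supervisor itself exempted from the OOM killer. Stripped of the daemonizing and logging details, the respawn loop looks like this sketch (superviseSketch and its parameters are illustrative, not nmd.py's actual signature):

import os
import time

def superviseSketch(command, args, env, sleepInterval=10):
	"""Fork/exec the managed daemon and restart it whenever it exits."""
	while True:
		pid = os.fork()
		if pid == 0:
			# child: replace this process with the managed daemon
			os.execve(command, args, env)
			os._exit(-1)
		# parent: block until the child dies, then respawn after a pause
		os.waitpid(pid, 0)
		time.sleep(sleepInterval)

# e.g. superviseSketch("./bin/nodemanager.py", ["./bin/nodemanager.py"],
#                      {"PYTHONPATH": "/path/to/tashi/src"})
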
Modified: incubator/tashi/branches/stroucki-registration/src/zoni/client/zoni-cli.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-registration/src/zoni/client/zoni-cli.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-registration/src/zoni/client/zoni-cli.py (original)
+++ incubator/tashi/branches/stroucki-registration/src/zoni/client/zoni-cli.py Wed Feb 29 03:23:15 2012
@@ -327,11 +327,11 @@ def main():
 	if (options.nodeName):
 		cmdargs["sys_id"] = options.nodeName
 
-	if 	(options.numCores or options.clockSpeed or options.numMemory or options.numProcs or options.cpuFlags) and not options.showResources:
-			usage = "MISSING OPTION: When specifying hardware parameters, you need the -s or --showResources switch"
-			print usage
-			parser.print_help()	
-			exit()
+	if (options.numCores or options.clockSpeed or options.numMemory or options.numProcs or options.cpuFlags) and not options.showResources:
+		usage = "MISSING OPTION: When specifying hardware parameters, you need the -s or --showResources switch"
+		print usage
+		parser.print_help()	
+		exit()
 
 	if options.getResources:
 		print "ALL resources"

Modified: incubator/tashi/branches/stroucki-registration/src/zoni/extra/util.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-registration/src/zoni/extra/util.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-registration/src/zoni/extra/util.py (original)
+++ incubator/tashi/branches/stroucki-registration/src/zoni/extra/util.py Wed Feb 29 03:23:15 2012
@@ -19,6 +19,7 @@
 #
 
 import os
+import sys
 import string
 import ConfigParser
 import time
@@ -218,19 +219,25 @@ def createKey(name):
 	return val
 	
 
-
+def __getShellFn():
+	if sys.version_info < (2, 6, 1):
+		from IPython.Shell import IPShellEmbed
+		return IPShellEmbed()
+	else:
+		import IPython
+		return IPython.embed()
 
 def debugConsole(globalDict):
 	"""A debugging console that optionally uses pysh"""
 	def realDebugConsole(globalDict):
 		try :
 			import atexit
-			from IPython.Shell import IPShellEmbed
+			shellfn = __getShellFn()
 			def resetConsole():
 # XXXpipe: make input window sane
 				(stdin, stdout) = os.popen2("reset")
 				stdout.read()
-			dbgshell = IPShellEmbed()
+			dbgshell = shellfn()
 			atexit.register(resetConsole)
 			dbgshell(local_ns=globalDict, global_ns=globalDict)
 		except Exception:

Modified: incubator/tashi/branches/stroucki-registration/src/zoni/hardware/delldrac.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-registration/src/zoni/hardware/delldrac.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-registration/src/zoni/hardware/delldrac.py (original)
+++ incubator/tashi/branches/stroucki-registration/src/zoni/hardware/delldrac.py Wed Feb 29 03:23:15 2012
@@ -147,7 +147,7 @@ class dellDrac(SystemManagementInterface
 		for val in fout.readlines():
 			if "OK" in val:
 				code = 1
- 			if "CURRENTLY POWER-OFF" in val:
+			if "CURRENTLY POWER-OFF" in val:
 				self.log.info("Hardware already power off : %s", self.hostname)
 				code = 1
 		if code < 1:
@@ -171,7 +171,7 @@ class dellDrac(SystemManagementInterface
 		for val in fout.readlines():
 			if "OK" in val:
 				code = 1
- 			if "CURRENTLY POWER-OFF" in val:
+			if "CURRENTLY POWER-OFF" in val:
 				self.log.info("Hardware already power off : %s", self.hostname)
 				code = 1
 		if code < 1:

Modified: incubator/tashi/branches/stroucki-registration/src/zoni/hardware/dellswitch.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-registration/src/zoni/hardware/dellswitch.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-registration/src/zoni/hardware/dellswitch.py (original)
+++ incubator/tashi/branches/stroucki-registration/src/zoni/hardware/dellswitch.py Wed Feb 29 03:23:15 2012
@@ -54,7 +54,7 @@ class HwDellSwitch(HwSwitchInterface):
 			pass
 
 
- 	def setVerbose(self, verbose):
+	def setVerbose(self, verbose):
 		self.verbose = verbose
 
 	def __login(self):

Modified: incubator/tashi/branches/stroucki-registration/src/zoni/hardware/hpswitch.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-registration/src/zoni/hardware/hpswitch.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-registration/src/zoni/hardware/hpswitch.py (original)
+++ incubator/tashi/branches/stroucki-registration/src/zoni/hardware/hpswitch.py Wed Feb 29 03:23:15 2012
@@ -74,10 +74,10 @@ class HwHPSwitch(HwSwitchInterface):
 		child.sendline(cmd)
 		opt = child.expect(["Confirm(.*)", "No save(.*)", pexpect.EOF, pexpect.TIMEOUT])
 		if opt == 0:
-				print "saving to flash"
-				child.sendline("y\n")
+			print "saving to flash"
+			child.sendline("y\n")
 		if opt == 1:
-				print "no save needed"
+			print "no save needed"
 		child.sendline('exit')
 		child.terminate()
 

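The hunk above is the standard pexpect branching idiom: expect() takes a list of patterns and returns the index of whichever matched first, so each outcome gets its own branch. A hedged sketch against an already logged-in pexpect session (the "write memory" command and the saveConfigSketch name are illustrative guesses, not taken from the driver):

import pexpect

def saveConfigSketch(child):
	"""Ask the switch to save its config and branch on its response.
	'child' is assumed to be a logged-in pexpect.spawn session."""
	child.sendline("write memory")
	opt = child.expect(["Confirm(.*)", "No save(.*)", pexpect.EOF, pexpect.TIMEOUT])
	if opt == 0:
		child.sendline("y")		# switch asked for confirmation
	elif opt == 1:
		pass				# nothing to save
	else:
		raise RuntimeError("switch session ended unexpectedly")
	child.sendline("exit")
	child.terminate()
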
Modified: incubator/tashi/branches/stroucki-registration/src/zoni/install/db/zoniDbSetup.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-registration/src/zoni/install/db/zoniDbSetup.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-registration/src/zoni/install/db/zoniDbSetup.py (original)
+++ incubator/tashi/branches/stroucki-registration/src/zoni/install/db/zoniDbSetup.py Wed Feb 29 03:23:15 2012
@@ -27,8 +27,8 @@ try:
 	import optparse
 	import getpass
 except ImportError, e:
-        print "Module not installed : %s" % e
-        exit()
+	print "Module not installed : %s" % e
+	exit()
 
 
 a = os.path.join("../")
@@ -406,7 +406,7 @@ def execQuery(conn, query):
 
 def entryExists(conn, table, col, checkVal):
 	query = "select * from " + table + " where " + col + " = '" + checkVal + "'"
- 	r = execQuery(conn, query)
+	r = execQuery(conn, query)
 	res = r.fetchall()
 	if len(res) > 0:
 		return (1, res)
@@ -415,5 +415,5 @@ def entryExists(conn, table, col, checkV
 
 
 if __name__ == "__main__":
-    main()
+	main()
 

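entryExists above builds its SELECT by string concatenation; the same existence check can be written with the value passed as a bound parameter. Sketched here against sqlite3 purely because it is in the standard library; the real script goes through its own execQuery() helper (entryExistsSketch is an illustrative name):

import sqlite3

def entryExistsSketch(conn, table, col, checkVal):
	"""Return (1, rows) if a matching row exists, else (0, None).  Only the
	value can be a bound parameter; table and column names cannot."""
	cur = conn.cursor()
	cur.execute("select * from %s where %s = ?" % (table, col), (checkVal,))
	res = cur.fetchall()
	if len(res) > 0:
		return (1, res)
	return (0, None)
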
Modified: incubator/tashi/branches/stroucki-registration/src/zoni/install/dnsdhcp/zoniDnsDhcpSetup.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-registration/src/zoni/install/dnsdhcp/zoniDnsDhcpSetup.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-registration/src/zoni/install/dnsdhcp/zoniDnsDhcpSetup.py (original)
+++ incubator/tashi/branches/stroucki-registration/src/zoni/install/dnsdhcp/zoniDnsDhcpSetup.py Wed Feb 29 03:23:15 2012
@@ -79,5 +79,5 @@ def main():
 
 
 if __name__ == "__main__":
-    main()
+	main()
 

Propchange: incubator/tashi/branches/stroucki-rpyc/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Feb 29 03:23:15 2012
@@ -1,4 +1,13 @@
 /incubator/tashi/branches/cmu:1178106-1187632
+/incubator/tashi/branches/stable:1241774-1245856
+/incubator/tashi/branches/stablefix:1203848-1241770
 /incubator/tashi/branches/stroucki-accounting:1221525-1241770
 /incubator/tashi/branches/stroucki-accounting/branches/stroucki-accounting:1221525-1235607
+/incubator/tashi/branches/stroucki-irpbugs:1245857-1292894
+/incubator/tashi/branches/stroucki-slotsbug:1244839-1245041
+/incubator/tashi/branches/stroucki-tashi10:1294393-1294727
+/incubator/tashi/branches/stroucki-tashi11:1294393-1294415
+/incubator/tashi/branches/stroucki-tashi2:1294935-1294944
+/incubator/tashi/branches/stroucki-tashi8:1294393-1294427
 /incubator/tashi/branches/zoni-dev/trunk:1034098-1177646
+/incubator/tashi/trunk:1241775-1294945

Modified: incubator/tashi/branches/stroucki-rpyc/Makefile
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-rpyc/Makefile?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-rpyc/Makefile (original)
+++ incubator/tashi/branches/stroucki-rpyc/Makefile Wed Feb 29 03:23:15 2012
@@ -33,6 +33,7 @@ package: src DISCLAIMER INSTALL LICENSE 
 	mkdir apache-tashi
 	cp -rp doc etc Makefile src DISCLAIMER INSTALL LICENSE NOTICE README apache-tashi/
 	find apache-tashi -type d -name ".svn"|xargs rm -rf
+	-chgrp -R incubator apache-tashi
 	tar zcf apache-tashi.tar.gz apache-tashi
 	rm -rf apache-tashi
 
@@ -117,7 +118,7 @@ rmdoc:
 #  Zoni 
 bin/zoni-cli:
 	@echo Symlinking in zoni-cli...
-	(cd bin; ln -s ../src/zoni/client/zoni-cli .)
+	(cd bin; ln -s ../src/zoni/client/zoni-cli.py zoni-client)
 # why necessarily put this in /usr/local/bin like nothing else?
 usr/local/bin/zoni:
 	@echo Creating /usr/local/bin/zoni
@@ -127,11 +128,11 @@ rmzoni-cli:
 	if test -e /usr/local/bin/zoni; then echo Removing zoni...; rm /usr/local/bin/zoni; fi
 
 ## for now only print warnings having to do with bad indentation. pylint doesn't make it easy to enable only 1,2 checks
-disabled_warnings=$(shell pylint --list-msgs|grep :W0| awk -F: '{ORS=","; if ($$2 != "W0311" && $$2 != "W0312"){ print $$2}}')
+disabled_warnings=$(shell pylint --list-msgs|grep :W0| awk -F: '{ORS=","; if ($$2 != "W0311" && $$2 != "W0312"){ print $$2}}')",F0401"
 pysrc=$(shell find . \! -path '*gen-py*' \! -path '*services*' \! -path '*messagingthrift*' \! -name '__init__.py' -name "*.py")
 tidy: $(addprefix tidyfile/,$(pysrc))
-	@echo Insuring .py files are nice and tidy!
+	@echo Ensured .py files are nice and tidy!
 
 tidyfile/%: %
 	@echo Checking tidy for $*
-	pylint --report=no --disable-msg-cat=R,C,E --disable-msg=$(disabled_warnings) --indent-string="\t" $* 2> /dev/null; 
+	pylint --report=no --disable=R,C,E --disable=$(disabled_warnings) --indent-string="\t" $* 2> /dev/null; 

Modified: incubator/tashi/branches/stroucki-rpyc/src/tashi/accounting/accountingservice.py
URL: http://svn.apache.org/viewvc/incubator/tashi/branches/stroucki-rpyc/src/tashi/accounting/accountingservice.py?rev=1294960&r1=1294959&r2=1294960&view=diff
==============================================================================
--- incubator/tashi/branches/stroucki-rpyc/src/tashi/accounting/accountingservice.py (original)
+++ incubator/tashi/branches/stroucki-rpyc/src/tashi/accounting/accountingservice.py Wed Feb 29 03:23:15 2012
@@ -5,15 +5,15 @@
 # to you under the Apache License, Version 2.0 (the
 # "License"); you may not use this file except in compliance
 # with the License.  You may obtain a copy of the License at
-# 
+#
 #   http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing,
 # software distributed under the License is distributed on an
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
-# under the License.    
+# under the License.
 
 import logging
 import threading
@@ -22,43 +22,43 @@ import time
 from tashi import createClient
 
 class AccountingService(object):
-        """RPC service for the Accounting service"""
-        
-        def __init__(self, config):
-            self.log = logging.getLogger(__name__)
-            self.log.setLevel(logging.INFO)
-
-	    self.config = config
-
-	    self.pollSleep = None
-
-	    # XXXstroucki new python has fallback values
-	    try:
-		    self.pollSleep = self.config.getint("AccountingService", "pollSleep")
-	    except:
-		    pass
+	"""RPC service for the Accounting service"""
 
-	    if self.pollSleep is None:
-		    self.pollSleep = 600
+	def __init__(self, config):
+		self.log = logging.getLogger(__name__)
+		self.log.setLevel(logging.INFO)
 
-            self.cm = createClient(config)
-            threading.Thread(target=self.__start).start()
+		self.config = config
+
+		self.pollSleep = None
+
+		# XXXstroucki new python has fallback values
+		try:
+			self.pollSleep = self.config.getint("AccountingService", "pollSleep")
+		except:
+			pass
+
+		if self.pollSleep is None:
+			self.pollSleep = 600
+
+		self.cm = createClient(config)
+		threading.Thread(target=self.__start).start()
 
 	# remote
-        def record(self, strings):
-            for string in strings:
-                self.log.info("Remote: %s" % (string))
-
-        def __start(self):
-            while True:
-                try:
-                    instances = self.cm.getInstances()
-                    for instance in instances:
-                        # XXXstroucki this currently duplicates what the CM was doing.
-                        self.log.info('Accounting: id %d host %d vmId %d user %d cores %d memory %d' % (instance.id, instance.hostId, instance.vmId, instance.userId, instance.cores, instance.memory))
-                except:
-                    self.log.warning("Accounting iteration failed")
-
-                        
-                # wait to do the next iteration
-                time.sleep(self.pollSleep)
+	def record(self, strings):
+		for string in strings:
+			self.log.info("Remote: %s" % (string))
+
+	def __start(self):
+		while True:
+			try:
+				instances = self.cm.getInstances()
+				for instance in instances:
+					# XXXstroucki this currently duplicates what the CM was doing.
+					self.log.info('Accounting: id %d host %d vmId %d user %d cores %d memory %d' % (instance.id, instance.hostId, instance.vmId, instance.userId, instance.cores, instance.memory))
+			except:
+				self.log.warning("Accounting iteration failed")
+
+
+			# wait to do the next iteration
+			time.sleep(self.pollSleep)
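
The reformatted AccountingService keeps its existing shape: read pollSleep from the config with a hard-coded 600-second fallback, then poll the cluster manager on a background thread and keep going when an iteration fails. The same shape as a generic sketch (PollerSketch and workFn are illustrative names, not part of the patch):

import threading
import time

class PollerSketch(object):
	"""Poll workFn() on a background thread at a configurable interval."""

	def __init__(self, config, workFn, defaultSleep=600):
		try:
			self.pollSleep = config.getint("AccountingService", "pollSleep")
		except:
			self.pollSleep = defaultSleep
		self.workFn = workFn
		threading.Thread(target=self.__run).start()

	def __run(self):
		while True:
			try:
				self.workFn()
			except:
				pass	# keep polling even if one iteration fails
			time.sleep(self.pollSleep)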