Posted to commits@airavata.apache.org by ma...@apache.org on 2017/08/07 14:01:18 UTC

[42/51] [abbrv] [partial] airavata git commit: AIRAVATA-2505 Upgrade Airavata to Thrift 0.10.0

http://git-wip-us.apache.org/repos/asf/airavata/blob/14ff0916/airavata-api/airavata-client-sdks/airavata-python-sdk/src/main/resources/lib/apache/airavata/model/appcatalog/computeresource/ttypes.py
----------------------------------------------------------------------
diff --git a/airavata-api/airavata-client-sdks/airavata-python-sdk/src/main/resources/lib/apache/airavata/model/appcatalog/computeresource/ttypes.py b/airavata-api/airavata-client-sdks/airavata-python-sdk/src/main/resources/lib/apache/airavata/model/appcatalog/computeresource/ttypes.py
index 9d9aa0d..cb5e083 100644
--- a/airavata-api/airavata-client-sdks/airavata-python-sdk/src/main/resources/lib/apache/airavata/model/appcatalog/computeresource/ttypes.py
+++ b/airavata-api/airavata-client-sdks/airavata-python-sdk/src/main/resources/lib/apache/airavata/model/appcatalog/computeresource/ttypes.py
@@ -1,1784 +1,1687 @@
 #
-# Autogenerated by Thrift Compiler (0.9.3)
+# Autogenerated by Thrift Compiler (0.10.0)
 #
 # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
 #
 #  options string: py
 #
 
-from thrift.Thrift import TType, TMessageType, TException, TApplicationException
+from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
+from thrift.protocol.TProtocol import TProtocolException
+import sys
 import apache.airavata.model.commons.ttypes
 import apache.airavata.model.appcatalog.parallelism.ttypes
 import apache.airavata.model.data.movement.ttypes
 
-
 from thrift.transport import TTransport
-from thrift.protocol import TBinaryProtocol, TProtocol
-try:
-  from thrift.protocol import fastbinary
-except:
-  fastbinary = None
-
-
-class ResourceJobManagerType:
-  """
-  * Enumeration of local resource job manager types supported by Airavata
-  *
-  * FORK:
-  *  Forking of commands without any job manager
-  *
-  * PBS:
-  *  Job manager supporting the Portal Batch System (PBS) protocol. Some examples include TORQUE, PBSPro, Grid Engine.
-  *
-  * SLURM:
-  *  The Simple Linux Utility for Resource Management is a open source workload manager.
-   *
-   * UGE:
-   *  Univa Grid Engine, a variation of PBS implementation.
-   *
-   * LSF:
-   *  IBM Platform Load Sharing Facility is dominantly installed on IBM clusters.
-  *
-  """
-  FORK = 0
-  PBS = 1
-  SLURM = 2
-  LSF = 3
-  UGE = 4
-  CLOUD = 5
-  AIRAVATA_CUSTOM = 6
-
-  _VALUES_TO_NAMES = {
-    0: "FORK",
-    1: "PBS",
-    2: "SLURM",
-    3: "LSF",
-    4: "UGE",
-    5: "CLOUD",
-    6: "AIRAVATA_CUSTOM",
-  }
-
-  _NAMES_TO_VALUES = {
-    "FORK": 0,
-    "PBS": 1,
-    "SLURM": 2,
-    "LSF": 3,
-    "UGE": 4,
-    "CLOUD": 5,
-    "AIRAVATA_CUSTOM": 6,
-  }
-
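# A minimal usage sketch (illustrative, not part of the generated file): Thrift
# enums are plain integers in Python, and the generated _VALUES_TO_NAMES /
# _NAMES_TO_VALUES dicts translate between wire values and symbolic names.
# Assumes the generated package above is importable (e.g. lib/ on sys.path).
from apache.airavata.model.appcatalog.computeresource.ttypes import ResourceJobManagerType

rjm_type = ResourceJobManagerType.SLURM                      # stored and sent as the int 2
print(ResourceJobManagerType._VALUES_TO_NAMES[rjm_type])     # "SLURM"
print(ResourceJobManagerType._NAMES_TO_VALUES["PBS"])        # 1
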
-class JobManagerCommand:
-  """
-  Enumeration of resource job manager commands
-
-  SUBMISSION:
-   Ex: qsub, sbatch
-
-  JOB_MONITORING:
-   Ex: qstat, squeue
-
-  DELETION:
-   Ex: qdel, scancel
-
-  CHECK_JOB:
-   Detailed Status about the Job. Ex: checkjob
-
-  SHOW_QUEUE:
-   List of jobs queued by the scheduler. Ex: showq
-
-  SHOW_RESERVATION:
-   List all reservations. Ex: showres, show_res
-
-  SHOW_START:
-   Display the start time of the specified job. Ex: showstart
-
-  """
-  SUBMISSION = 0
-  JOB_MONITORING = 1
-  DELETION = 2
-  CHECK_JOB = 3
-  SHOW_QUEUE = 4
-  SHOW_RESERVATION = 5
-  SHOW_START = 6
-
-  _VALUES_TO_NAMES = {
-    0: "SUBMISSION",
-    1: "JOB_MONITORING",
-    2: "DELETION",
-    3: "CHECK_JOB",
-    4: "SHOW_QUEUE",
-    5: "SHOW_RESERVATION",
-    6: "SHOW_START",
-  }
-
-  _NAMES_TO_VALUES = {
-    "SUBMISSION": 0,
-    "JOB_MONITORING": 1,
-    "DELETION": 2,
-    "CHECK_JOB": 3,
-    "SHOW_QUEUE": 4,
-    "SHOW_RESERVATION": 5,
-    "SHOW_START": 6,
-  }
-
-class FileSystems:
-  """
-  Enumeration of file systems on the resource
-
-  HOME:
-   The user's home directory.
-
-  WORK:
-   A working directory for job input and output.
-
-  LOCALTMP:
-   Node-local temporary space.
-
-  SCRATCH:
-   Shared scratch space.
-
-  ARCHIVE:
-   Long-term archival storage.
-
-  """
-  HOME = 0
-  WORK = 1
-  LOCALTMP = 2
-  SCRATCH = 3
-  ARCHIVE = 4
-
-  _VALUES_TO_NAMES = {
-    0: "HOME",
-    1: "WORK",
-    2: "LOCALTMP",
-    3: "SCRATCH",
-    4: "ARCHIVE",
-  }
-
-  _NAMES_TO_VALUES = {
-    "HOME": 0,
-    "WORK": 1,
-    "LOCALTMP": 2,
-    "SCRATCH": 3,
-    "ARCHIVE": 4,
-  }
-
-class JobSubmissionProtocol:
-  """
-  Enumeration of Airavata supported Job Submission Mechanisms for High Performance Computing Clusters.
-
-  SSH:
-   Execute remote job submission commands via the secure shell (SSH) protocol.
-
-  GRAM:
-   Execute remote jobs via Globus GRAM service.
-
-  UNICORE:
-   Execute remote jobs via Unicore services
-
-  """
-  LOCAL = 0
-  SSH = 1
-  GLOBUS = 2
-  UNICORE = 3
-  CLOUD = 4
-  SSH_FORK = 5
-  LOCAL_FORK = 6
-
-  _VALUES_TO_NAMES = {
-    0: "LOCAL",
-    1: "SSH",
-    2: "GLOBUS",
-    3: "UNICORE",
-    4: "CLOUD",
-    5: "SSH_FORK",
-    6: "LOCAL_FORK",
-  }
-
-  _NAMES_TO_VALUES = {
-    "LOCAL": 0,
-    "SSH": 1,
-    "GLOBUS": 2,
-    "UNICORE": 3,
-    "CLOUD": 4,
-    "SSH_FORK": 5,
-    "LOCAL_FORK": 6,
-  }
-
-class MonitorMode:
-  """
-  Monitoring modes
-
-  POLL_JOB_MANAGER:
-  GFac needs to poll for job status changes.
-
-  XSEDE_AMQP_SUBSCRIBE:
-  The server will publish job status changes to an AMQP server.
-
-
-  """
-  POLL_JOB_MANAGER = 0
-  CLOUD_JOB_MONITOR = 1
-  JOB_EMAIL_NOTIFICATION_MONITOR = 2
-  XSEDE_AMQP_SUBSCRIBE = 3
-  FORK = 4
-  LOCAL = 5
-
-  _VALUES_TO_NAMES = {
-    0: "POLL_JOB_MANAGER",
-    1: "CLOUD_JOB_MONITOR",
-    2: "JOB_EMAIL_NOTIFICATION_MONITOR",
-    3: "XSEDE_AMQP_SUBSCRIBE",
-    4: "FORK",
-    5: "LOCAL",
-  }
-
-  _NAMES_TO_VALUES = {
-    "POLL_JOB_MANAGER": 0,
-    "CLOUD_JOB_MONITOR": 1,
-    "JOB_EMAIL_NOTIFICATION_MONITOR": 2,
-    "XSEDE_AMQP_SUBSCRIBE": 3,
-    "FORK": 4,
-    "LOCAL": 5,
-  }
-
-class DMType:
-  COMPUTE_RESOURCE = 0
-  STORAGE_RESOURCE = 1
-
-  _VALUES_TO_NAMES = {
-    0: "COMPUTE_RESOURCE",
-    1: "STORAGE_RESOURCE",
-  }
-
-  _NAMES_TO_VALUES = {
-    "COMPUTE_RESOURCE": 0,
-    "STORAGE_RESOURCE": 1,
-  }
-
-class ProviderName:
-  """
-  Provider name
-
-  """
-  EC2 = 0
-  AWSEC2 = 1
-  RACKSPACE = 2
-
-  _VALUES_TO_NAMES = {
-    0: "EC2",
-    1: "AWSEC2",
-    2: "RACKSPACE",
-  }
-
-  _NAMES_TO_VALUES = {
-    "EC2": 0,
-    "AWSEC2": 1,
-    "RACKSPACE": 2,
-  }
-
-
-class ResourceJobManager:
-  """
-  Resource Job Manager Information
-
-  resourceJobManagerType:
-   A typical HPC cluster has a single Job Manager to manage the resources.
-
-  pushMonitoringEndpoint:
-   If the job manager pushes out state changes to a database or bus, specify the service endpoint.
-    Ex: Moab Web Service, Moab MongoDB URL, AMQP (GLUE2) Broker
-
-  jobManagerBinPath:
-   Path to the Job Manager Installation Binary directory.
-
-  jobManagerCommands:
-   An enumeration of commonly used manager commands.
-
-
-  Attributes:
-   - resourceJobManagerId
-   - resourceJobManagerType
-   - pushMonitoringEndpoint
-   - jobManagerBinPath
-   - jobManagerCommands
-   - parallelismPrefix
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'resourceJobManagerId', None, "DO_NOT_SET_AT_CLIENTS", ), # 1
-    (2, TType.I32, 'resourceJobManagerType', None, None, ), # 2
-    (3, TType.STRING, 'pushMonitoringEndpoint', None, None, ), # 3
-    (4, TType.STRING, 'jobManagerBinPath', None, None, ), # 4
-    (5, TType.MAP, 'jobManagerCommands', (TType.I32,None,TType.STRING,None), None, ), # 5
-    (6, TType.MAP, 'parallelismPrefix', (TType.I32,None,TType.STRING,None), None, ), # 6
-  )
-
-  def __init__(self, resourceJobManagerId=thrift_spec[1][4], resourceJobManagerType=None, pushMonitoringEndpoint=None, jobManagerBinPath=None, jobManagerCommands=None, parallelismPrefix=None,):
-    self.resourceJobManagerId = resourceJobManagerId
-    self.resourceJobManagerType = resourceJobManagerType
-    self.pushMonitoringEndpoint = pushMonitoringEndpoint
-    self.jobManagerBinPath = jobManagerBinPath
-    self.jobManagerCommands = jobManagerCommands
-    self.parallelismPrefix = parallelismPrefix
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.resourceJobManagerId = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.I32:
-          self.resourceJobManagerType = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.STRING:
-          self.pushMonitoringEndpoint = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 4:
-        if ftype == TType.STRING:
-          self.jobManagerBinPath = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 5:
-        if ftype == TType.MAP:
-          self.jobManagerCommands = {}
-          (_ktype1, _vtype2, _size0 ) = iprot.readMapBegin()
-          for _i4 in xrange(_size0):
-            _key5 = iprot.readI32()
-            _val6 = iprot.readString()
-            self.jobManagerCommands[_key5] = _val6
-          iprot.readMapEnd()
-        else:
-          iprot.skip(ftype)
-      elif fid == 6:
-        if ftype == TType.MAP:
-          self.parallelismPrefix = {}
-          (_ktype8, _vtype9, _size7 ) = iprot.readMapBegin()
-          for _i11 in xrange(_size7):
-            _key12 = iprot.readI32()
-            _val13 = iprot.readString()
-            self.parallelismPrefix[_key12] = _val13
-          iprot.readMapEnd()
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('ResourceJobManager')
-    if self.resourceJobManagerId is not None:
-      oprot.writeFieldBegin('resourceJobManagerId', TType.STRING, 1)
-      oprot.writeString(self.resourceJobManagerId)
-      oprot.writeFieldEnd()
-    if self.resourceJobManagerType is not None:
-      oprot.writeFieldBegin('resourceJobManagerType', TType.I32, 2)
-      oprot.writeI32(self.resourceJobManagerType)
-      oprot.writeFieldEnd()
-    if self.pushMonitoringEndpoint is not None:
-      oprot.writeFieldBegin('pushMonitoringEndpoint', TType.STRING, 3)
-      oprot.writeString(self.pushMonitoringEndpoint)
-      oprot.writeFieldEnd()
-    if self.jobManagerBinPath is not None:
-      oprot.writeFieldBegin('jobManagerBinPath', TType.STRING, 4)
-      oprot.writeString(self.jobManagerBinPath)
-      oprot.writeFieldEnd()
-    if self.jobManagerCommands is not None:
-      oprot.writeFieldBegin('jobManagerCommands', TType.MAP, 5)
-      oprot.writeMapBegin(TType.I32, TType.STRING, len(self.jobManagerCommands))
-      for kiter14,viter15 in self.jobManagerCommands.items():
-        oprot.writeI32(kiter14)
-        oprot.writeString(viter15)
-      oprot.writeMapEnd()
-      oprot.writeFieldEnd()
-    if self.parallelismPrefix is not None:
-      oprot.writeFieldBegin('parallelismPrefix', TType.MAP, 6)
-      oprot.writeMapBegin(TType.I32, TType.STRING, len(self.parallelismPrefix))
-      for kiter16,viter17 in self.parallelismPrefix.items():
-        oprot.writeI32(kiter16)
-        oprot.writeString(viter17)
-      oprot.writeMapEnd()
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.resourceJobManagerId is None:
-      raise TProtocol.TProtocolException(message='Required field resourceJobManagerId is unset!')
-    if self.resourceJobManagerType is None:
-      raise TProtocol.TProtocolException(message='Required field resourceJobManagerType is unset!')
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.resourceJobManagerId)
-    value = (value * 31) ^ hash(self.resourceJobManagerType)
-    value = (value * 31) ^ hash(self.pushMonitoringEndpoint)
-    value = (value * 31) ^ hash(self.jobManagerBinPath)
-    value = (value * 31) ^ hash(self.jobManagerCommands)
-    value = (value * 31) ^ hash(self.parallelismPrefix)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
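# Illustrative sketch (not part of the generated file): building a
# ResourceJobManager and round-tripping it through TBinaryProtocol. The
# jobManagerCommands map is keyed by the JobManagerCommand enum (i32 on the
# wire) with the site-specific command string as the value; the command
# strings below are examples, not defaults shipped with Airavata.
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from apache.airavata.model.appcatalog.computeresource.ttypes import (
    ResourceJobManager, ResourceJobManagerType, JobManagerCommand)

rjm = ResourceJobManager(
    resourceJobManagerType=ResourceJobManagerType.SLURM,
    jobManagerBinPath="/usr/bin",
    jobManagerCommands={JobManagerCommand.SUBMISSION: "sbatch",
                        JobManagerCommand.JOB_MONITORING: "squeue"})
rjm.validate()   # resourceJobManagerId defaults to "DO_NOT_SET_AT_CLIENTS", so this passes

# Serialize to bytes and read back into a fresh instance.
out = TTransport.TMemoryBuffer()
rjm.write(TBinaryProtocol.TBinaryProtocol(out))
copy = ResourceJobManager()
copy.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(out.getvalue())))
assert copy == rjm
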
-class BatchQueue:
-  """
-  Batch Queue Information on SuperComputers
-
-  maxRunTime:
-   Maximum allowed run time in hours.
-
-  Attributes:
-   - queueName
-   - queueDescription
-   - maxRunTime
-   - maxNodes
-   - maxProcessors
-   - maxJobsInQueue
-   - maxMemory
-   - cpuPerNode
-   - defaultNodeCount
-   - defaultCPUCount
-   - defaultWalltime
-   - queueSpecificMacros
-   - isDefaultQueue
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'queueName', None, None, ), # 1
-    (2, TType.STRING, 'queueDescription', None, None, ), # 2
-    (3, TType.I32, 'maxRunTime', None, None, ), # 3
-    (4, TType.I32, 'maxNodes', None, None, ), # 4
-    (5, TType.I32, 'maxProcessors', None, None, ), # 5
-    (6, TType.I32, 'maxJobsInQueue', None, None, ), # 6
-    (7, TType.I32, 'maxMemory', None, None, ), # 7
-    (8, TType.I32, 'cpuPerNode', None, None, ), # 8
-    (9, TType.I32, 'defaultNodeCount', None, None, ), # 9
-    (10, TType.I32, 'defaultCPUCount', None, None, ), # 10
-    (11, TType.I32, 'defaultWalltime', None, None, ), # 11
-    (12, TType.STRING, 'queueSpecificMacros', None, None, ), # 12
-    (13, TType.BOOL, 'isDefaultQueue', None, None, ), # 13
-  )
-
-  def __init__(self, queueName=None, queueDescription=None, maxRunTime=None, maxNodes=None, maxProcessors=None, maxJobsInQueue=None, maxMemory=None, cpuPerNode=None, defaultNodeCount=None, defaultCPUCount=None, defaultWalltime=None, queueSpecificMacros=None, isDefaultQueue=None,):
-    self.queueName = queueName
-    self.queueDescription = queueDescription
-    self.maxRunTime = maxRunTime
-    self.maxNodes = maxNodes
-    self.maxProcessors = maxProcessors
-    self.maxJobsInQueue = maxJobsInQueue
-    self.maxMemory = maxMemory
-    self.cpuPerNode = cpuPerNode
-    self.defaultNodeCount = defaultNodeCount
-    self.defaultCPUCount = defaultCPUCount
-    self.defaultWalltime = defaultWalltime
-    self.queueSpecificMacros = queueSpecificMacros
-    self.isDefaultQueue = isDefaultQueue
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.queueName = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.STRING:
-          self.queueDescription = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.I32:
-          self.maxRunTime = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      elif fid == 4:
-        if ftype == TType.I32:
-          self.maxNodes = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      elif fid == 5:
-        if ftype == TType.I32:
-          self.maxProcessors = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      elif fid == 6:
-        if ftype == TType.I32:
-          self.maxJobsInQueue = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      elif fid == 7:
-        if ftype == TType.I32:
-          self.maxMemory = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      elif fid == 8:
-        if ftype == TType.I32:
-          self.cpuPerNode = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      elif fid == 9:
-        if ftype == TType.I32:
-          self.defaultNodeCount = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      elif fid == 10:
-        if ftype == TType.I32:
-          self.defaultCPUCount = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      elif fid == 11:
-        if ftype == TType.I32:
-          self.defaultWalltime = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      elif fid == 12:
-        if ftype == TType.STRING:
-          self.queueSpecificMacros = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 13:
-        if ftype == TType.BOOL:
-          self.isDefaultQueue = iprot.readBool()
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('BatchQueue')
-    if self.queueName is not None:
-      oprot.writeFieldBegin('queueName', TType.STRING, 1)
-      oprot.writeString(self.queueName)
-      oprot.writeFieldEnd()
-    if self.queueDescription is not None:
-      oprot.writeFieldBegin('queueDescription', TType.STRING, 2)
-      oprot.writeString(self.queueDescription)
-      oprot.writeFieldEnd()
-    if self.maxRunTime is not None:
-      oprot.writeFieldBegin('maxRunTime', TType.I32, 3)
-      oprot.writeI32(self.maxRunTime)
-      oprot.writeFieldEnd()
-    if self.maxNodes is not None:
-      oprot.writeFieldBegin('maxNodes', TType.I32, 4)
-      oprot.writeI32(self.maxNodes)
-      oprot.writeFieldEnd()
-    if self.maxProcessors is not None:
-      oprot.writeFieldBegin('maxProcessors', TType.I32, 5)
-      oprot.writeI32(self.maxProcessors)
-      oprot.writeFieldEnd()
-    if self.maxJobsInQueue is not None:
-      oprot.writeFieldBegin('maxJobsInQueue', TType.I32, 6)
-      oprot.writeI32(self.maxJobsInQueue)
-      oprot.writeFieldEnd()
-    if self.maxMemory is not None:
-      oprot.writeFieldBegin('maxMemory', TType.I32, 7)
-      oprot.writeI32(self.maxMemory)
-      oprot.writeFieldEnd()
-    if self.cpuPerNode is not None:
-      oprot.writeFieldBegin('cpuPerNode', TType.I32, 8)
-      oprot.writeI32(self.cpuPerNode)
-      oprot.writeFieldEnd()
-    if self.defaultNodeCount is not None:
-      oprot.writeFieldBegin('defaultNodeCount', TType.I32, 9)
-      oprot.writeI32(self.defaultNodeCount)
-      oprot.writeFieldEnd()
-    if self.defaultCPUCount is not None:
-      oprot.writeFieldBegin('defaultCPUCount', TType.I32, 10)
-      oprot.writeI32(self.defaultCPUCount)
-      oprot.writeFieldEnd()
-    if self.defaultWalltime is not None:
-      oprot.writeFieldBegin('defaultWalltime', TType.I32, 11)
-      oprot.writeI32(self.defaultWalltime)
-      oprot.writeFieldEnd()
-    if self.queueSpecificMacros is not None:
-      oprot.writeFieldBegin('queueSpecificMacros', TType.STRING, 12)
-      oprot.writeString(self.queueSpecificMacros)
-      oprot.writeFieldEnd()
-    if self.isDefaultQueue is not None:
-      oprot.writeFieldBegin('isDefaultQueue', TType.BOOL, 13)
-      oprot.writeBool(self.isDefaultQueue)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.queueName is None:
-      raise TProtocol.TProtocolException(message='Required field queueName is unset!')
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.queueName)
-    value = (value * 31) ^ hash(self.queueDescription)
-    value = (value * 31) ^ hash(self.maxRunTime)
-    value = (value * 31) ^ hash(self.maxNodes)
-    value = (value * 31) ^ hash(self.maxProcessors)
-    value = (value * 31) ^ hash(self.maxJobsInQueue)
-    value = (value * 31) ^ hash(self.maxMemory)
-    value = (value * 31) ^ hash(self.cpuPerNode)
-    value = (value * 31) ^ hash(self.defaultNodeCount)
-    value = (value * 31) ^ hash(self.defaultCPUCount)
-    value = (value * 31) ^ hash(self.defaultWalltime)
-    value = (value * 31) ^ hash(self.queueSpecificMacros)
-    value = (value * 31) ^ hash(self.isDefaultQueue)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
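# Illustrative sketch (not part of the generated file): describing a batch
# queue. Only queueName is required (see validate()); per the docstring above,
# maxRunTime is in hours. All numbers here are made-up example limits.
from apache.airavata.model.appcatalog.computeresource.ttypes import BatchQueue

queue = BatchQueue(queueName="normal",
                   queueDescription="General purpose production queue",
                   maxRunTime=48, maxNodes=256, maxProcessors=4096,
                   cpuPerNode=16, defaultNodeCount=1, defaultCPUCount=16,
                   isDefaultQueue=True)
queue.validate()   # would raise TProtocolException if queueName were unset
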
-class LOCALSubmission:
-  """
-  Locally fork jobs as OS processes
-
-  Attributes:
-   - jobSubmissionInterfaceId
-   - resourceJobManager
-   - securityProtocol
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'jobSubmissionInterfaceId', None, "DO_NOT_SET_AT_CLIENTS", ), # 1
-    (2, TType.STRUCT, 'resourceJobManager', (ResourceJobManager, ResourceJobManager.thrift_spec), None, ), # 2
-    (3, TType.I32, 'securityProtocol', None, None, ), # 3
-  )
-
-  def __init__(self, jobSubmissionInterfaceId=thrift_spec[1][4], resourceJobManager=None, securityProtocol=None,):
-    self.jobSubmissionInterfaceId = jobSubmissionInterfaceId
-    self.resourceJobManager = resourceJobManager
-    self.securityProtocol = securityProtocol
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.jobSubmissionInterfaceId = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.STRUCT:
-          self.resourceJobManager = ResourceJobManager()
-          self.resourceJobManager.read(iprot)
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.I32:
-          self.securityProtocol = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('LOCALSubmission')
-    if self.jobSubmissionInterfaceId is not None:
-      oprot.writeFieldBegin('jobSubmissionInterfaceId', TType.STRING, 1)
-      oprot.writeString(self.jobSubmissionInterfaceId)
-      oprot.writeFieldEnd()
-    if self.resourceJobManager is not None:
-      oprot.writeFieldBegin('resourceJobManager', TType.STRUCT, 2)
-      self.resourceJobManager.write(oprot)
-      oprot.writeFieldEnd()
-    if self.securityProtocol is not None:
-      oprot.writeFieldBegin('securityProtocol', TType.I32, 3)
-      oprot.writeI32(self.securityProtocol)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.jobSubmissionInterfaceId is None:
-      raise TProtocol.TProtocolException(message='Required field jobSubmissionInterfaceId is unset!')
-    if self.resourceJobManager is None:
-      raise TProtocol.TProtocolException(message='Required field resourceJobManager is unset!')
-    if self.securityProtocol is None:
-      raise TProtocol.TProtocolException(message='Required field securityProtocol is unset!')
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.jobSubmissionInterfaceId)
-    value = (value * 31) ^ hash(self.resourceJobManager)
-    value = (value * 31) ^ hash(self.securityProtocol)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class SSHJobSubmission:
-  """
-  Authenticate using Secure Shell (SSH)
-
-  alternativeSSHHostName:
-   If the SSH login host differs from the hostname itself, specify it here
-
-  sshPort:
-   If a non-default port needs to be used, specify it.
-
-  batchQueueEmailSenders:
-   If a resource always sends the monitoring emails from a specific address, specify the
-    full email address. If a resource sends emails from multiple addresses
-     (for example, based on the login node used for submission) then use the wildcard * to
-     indicate the same. Example: *@*.example.com or *@example.com
-
-
-  Attributes:
-   - jobSubmissionInterfaceId
-   - securityProtocol
-   - resourceJobManager
-   - alternativeSSHHostName
-   - sshPort
-   - monitorMode
-   - batchQueueEmailSenders
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'jobSubmissionInterfaceId', None, "DO_NOT_SET_AT_CLIENTS", ), # 1
-    (2, TType.I32, 'securityProtocol', None, None, ), # 2
-    (3, TType.STRUCT, 'resourceJobManager', (ResourceJobManager, ResourceJobManager.thrift_spec), None, ), # 3
-    (4, TType.STRING, 'alternativeSSHHostName', None, None, ), # 4
-    (5, TType.I32, 'sshPort', None, 22, ), # 5
-    (6, TType.I32, 'monitorMode', None, None, ), # 6
-    (7, TType.LIST, 'batchQueueEmailSenders', (TType.STRING,None), None, ), # 7
-  )
-
-  def __init__(self, jobSubmissionInterfaceId=thrift_spec[1][4], securityProtocol=None, resourceJobManager=None, alternativeSSHHostName=None, sshPort=thrift_spec[5][4], monitorMode=None, batchQueueEmailSenders=None,):
-    self.jobSubmissionInterfaceId = jobSubmissionInterfaceId
-    self.securityProtocol = securityProtocol
-    self.resourceJobManager = resourceJobManager
-    self.alternativeSSHHostName = alternativeSSHHostName
-    self.sshPort = sshPort
-    self.monitorMode = monitorMode
-    self.batchQueueEmailSenders = batchQueueEmailSenders
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.jobSubmissionInterfaceId = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.I32:
-          self.securityProtocol = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.STRUCT:
-          self.resourceJobManager = ResourceJobManager()
-          self.resourceJobManager.read(iprot)
-        else:
-          iprot.skip(ftype)
-      elif fid == 4:
-        if ftype == TType.STRING:
-          self.alternativeSSHHostName = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 5:
-        if ftype == TType.I32:
-          self.sshPort = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      elif fid == 6:
-        if ftype == TType.I32:
-          self.monitorMode = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      elif fid == 7:
-        if ftype == TType.LIST:
-          self.batchQueueEmailSenders = []
-          (_etype21, _size18) = iprot.readListBegin()
-          for _i22 in xrange(_size18):
-            _elem23 = iprot.readString()
-            self.batchQueueEmailSenders.append(_elem23)
-          iprot.readListEnd()
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('SSHJobSubmission')
-    if self.jobSubmissionInterfaceId is not None:
-      oprot.writeFieldBegin('jobSubmissionInterfaceId', TType.STRING, 1)
-      oprot.writeString(self.jobSubmissionInterfaceId)
-      oprot.writeFieldEnd()
-    if self.securityProtocol is not None:
-      oprot.writeFieldBegin('securityProtocol', TType.I32, 2)
-      oprot.writeI32(self.securityProtocol)
-      oprot.writeFieldEnd()
-    if self.resourceJobManager is not None:
-      oprot.writeFieldBegin('resourceJobManager', TType.STRUCT, 3)
-      self.resourceJobManager.write(oprot)
-      oprot.writeFieldEnd()
-    if self.alternativeSSHHostName is not None:
-      oprot.writeFieldBegin('alternativeSSHHostName', TType.STRING, 4)
-      oprot.writeString(self.alternativeSSHHostName)
-      oprot.writeFieldEnd()
-    if self.sshPort is not None:
-      oprot.writeFieldBegin('sshPort', TType.I32, 5)
-      oprot.writeI32(self.sshPort)
-      oprot.writeFieldEnd()
-    if self.monitorMode is not None:
-      oprot.writeFieldBegin('monitorMode', TType.I32, 6)
-      oprot.writeI32(self.monitorMode)
-      oprot.writeFieldEnd()
-    if self.batchQueueEmailSenders is not None:
-      oprot.writeFieldBegin('batchQueueEmailSenders', TType.LIST, 7)
-      oprot.writeListBegin(TType.STRING, len(self.batchQueueEmailSenders))
-      for iter24 in self.batchQueueEmailSenders:
-        oprot.writeString(iter24)
-      oprot.writeListEnd()
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.jobSubmissionInterfaceId is None:
-      raise TProtocol.TProtocolException(message='Required field jobSubmissionInterfaceId is unset!')
-    if self.securityProtocol is None:
-      raise TProtocol.TProtocolException(message='Required field securityProtocol is unset!')
-    if self.resourceJobManager is None:
-      raise TProtocol.TProtocolException(message='Required field resourceJobManager is unset!')
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.jobSubmissionInterfaceId)
-    value = (value * 31) ^ hash(self.securityProtocol)
-    value = (value * 31) ^ hash(self.resourceJobManager)
-    value = (value * 31) ^ hash(self.alternativeSSHHostName)
-    value = (value * 31) ^ hash(self.sshPort)
-    value = (value * 31) ^ hash(self.monitorMode)
-    value = (value * 31) ^ hash(self.batchQueueEmailSenders)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
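# Illustrative sketch (not part of the generated file): an SSH submission
# interface monitored via batch-queue status emails, using the wildcard sender
# form described in the docstring above. securityProtocol is an i32 drawn from
# the SecurityProtocol enum defined in another model module; 0 is only a
# placeholder here, and the host name is fictitious.
from apache.airavata.model.appcatalog.computeresource.ttypes import (
    SSHJobSubmission, MonitorMode, ResourceJobManager, ResourceJobManagerType)

ssh = SSHJobSubmission(
    securityProtocol=0,   # substitute the appropriate SecurityProtocol value
    resourceJobManager=ResourceJobManager(resourceJobManagerType=ResourceJobManagerType.SLURM),
    alternativeSSHHostName="login.cluster.example.org",
    sshPort=22,
    monitorMode=MonitorMode.JOB_EMAIL_NOTIFICATION_MONITOR,
    batchQueueEmailSenders=["*@example.com"])
ssh.validate()
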
-class GlobusJobSubmission:
-  """
-  Attributes:
-   - jobSubmissionInterfaceId
-   - securityProtocol
-   - globusGateKeeperEndPoint
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'jobSubmissionInterfaceId', None, "DO_NOT_SET_AT_CLIENTS", ), # 1
-    (2, TType.I32, 'securityProtocol', None, None, ), # 2
-    (3, TType.LIST, 'globusGateKeeperEndPoint', (TType.STRING,None), None, ), # 3
-  )
-
-  def __init__(self, jobSubmissionInterfaceId=thrift_spec[1][4], securityProtocol=None, globusGateKeeperEndPoint=None,):
-    self.jobSubmissionInterfaceId = jobSubmissionInterfaceId
-    self.securityProtocol = securityProtocol
-    self.globusGateKeeperEndPoint = globusGateKeeperEndPoint
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.jobSubmissionInterfaceId = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.I32:
-          self.securityProtocol = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.LIST:
-          self.globusGateKeeperEndPoint = []
-          (_etype28, _size25) = iprot.readListBegin()
-          for _i29 in xrange(_size25):
-            _elem30 = iprot.readString()
-            self.globusGateKeeperEndPoint.append(_elem30)
-          iprot.readListEnd()
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('GlobusJobSubmission')
-    if self.jobSubmissionInterfaceId is not None:
-      oprot.writeFieldBegin('jobSubmissionInterfaceId', TType.STRING, 1)
-      oprot.writeString(self.jobSubmissionInterfaceId)
-      oprot.writeFieldEnd()
-    if self.securityProtocol is not None:
-      oprot.writeFieldBegin('securityProtocol', TType.I32, 2)
-      oprot.writeI32(self.securityProtocol)
-      oprot.writeFieldEnd()
-    if self.globusGateKeeperEndPoint is not None:
-      oprot.writeFieldBegin('globusGateKeeperEndPoint', TType.LIST, 3)
-      oprot.writeListBegin(TType.STRING, len(self.globusGateKeeperEndPoint))
-      for iter31 in self.globusGateKeeperEndPoint:
-        oprot.writeString(iter31)
-      oprot.writeListEnd()
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.jobSubmissionInterfaceId is None:
-      raise TProtocol.TProtocolException(message='Required field jobSubmissionInterfaceId is unset!')
-    if self.securityProtocol is None:
-      raise TProtocol.TProtocolException(message='Required field securityProtocol is unset!')
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.jobSubmissionInterfaceId)
-    value = (value * 31) ^ hash(self.securityProtocol)
-    value = (value * 31) ^ hash(self.globusGateKeeperEndPoint)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class UnicoreJobSubmission:
-  """
-  Unicore Job Submission
-
-  unicoreEndPointURL:
-   Unicore gateway endpoint. The provider will query this service to fetch the required service endpoints.
-  authenticationMode:
-   The authenticationMode defines the way the certificate is fetched.
-
-  Attributes:
-   - jobSubmissionInterfaceId
-   - securityProtocol
-   - unicoreEndPointURL
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'jobSubmissionInterfaceId', None, "DO_NOT_SET_AT_CLIENTS", ), # 1
-    (2, TType.I32, 'securityProtocol', None, None, ), # 2
-    (3, TType.STRING, 'unicoreEndPointURL', None, None, ), # 3
-  )
-
-  def __init__(self, jobSubmissionInterfaceId=thrift_spec[1][4], securityProtocol=None, unicoreEndPointURL=None,):
-    self.jobSubmissionInterfaceId = jobSubmissionInterfaceId
-    self.securityProtocol = securityProtocol
-    self.unicoreEndPointURL = unicoreEndPointURL
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.jobSubmissionInterfaceId = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.I32:
-          self.securityProtocol = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.STRING:
-          self.unicoreEndPointURL = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('UnicoreJobSubmission')
-    if self.jobSubmissionInterfaceId is not None:
-      oprot.writeFieldBegin('jobSubmissionInterfaceId', TType.STRING, 1)
-      oprot.writeString(self.jobSubmissionInterfaceId)
-      oprot.writeFieldEnd()
-    if self.securityProtocol is not None:
-      oprot.writeFieldBegin('securityProtocol', TType.I32, 2)
-      oprot.writeI32(self.securityProtocol)
-      oprot.writeFieldEnd()
-    if self.unicoreEndPointURL is not None:
-      oprot.writeFieldBegin('unicoreEndPointURL', TType.STRING, 3)
-      oprot.writeString(self.unicoreEndPointURL)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.jobSubmissionInterfaceId is None:
-      raise TProtocol.TProtocolException(message='Required field jobSubmissionInterfaceId is unset!')
-    if self.securityProtocol is None:
-      raise TProtocol.TProtocolException(message='Required field securityProtocol is unset!')
-    if self.unicoreEndPointURL is None:
-      raise TProtocol.TProtocolException(message='Required field unicoreEndPointURL is unset!')
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.jobSubmissionInterfaceId)
-    value = (value * 31) ^ hash(self.securityProtocol)
-    value = (value * 31) ^ hash(self.unicoreEndPointURL)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class CloudJobSubmission:
-  """
-  Cloud Job Submission
-
-
-
-  Attributes:
-   - jobSubmissionInterfaceId
-   - securityProtocol
-   - nodeId
-   - executableType
-   - providerName
-   - userAccountName
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'jobSubmissionInterfaceId', None, "DO_NOT_SET_AT_CLIENTS", ), # 1
-    (2, TType.I32, 'securityProtocol', None, None, ), # 2
-    (3, TType.STRING, 'nodeId', None, None, ), # 3
-    (4, TType.STRING, 'executableType', None, None, ), # 4
-    (5, TType.I32, 'providerName', None, None, ), # 5
-    (6, TType.STRING, 'userAccountName', None, None, ), # 6
-  )
-
-  def __init__(self, jobSubmissionInterfaceId=thrift_spec[1][4], securityProtocol=None, nodeId=None, executableType=None, providerName=None, userAccountName=None,):
-    self.jobSubmissionInterfaceId = jobSubmissionInterfaceId
-    self.securityProtocol = securityProtocol
-    self.nodeId = nodeId
-    self.executableType = executableType
-    self.providerName = providerName
-    self.userAccountName = userAccountName
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.jobSubmissionInterfaceId = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.I32:
-          self.securityProtocol = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.STRING:
-          self.nodeId = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 4:
-        if ftype == TType.STRING:
-          self.executableType = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 5:
-        if ftype == TType.I32:
-          self.providerName = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      elif fid == 6:
-        if ftype == TType.STRING:
-          self.userAccountName = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('CloudJobSubmission')
-    if self.jobSubmissionInterfaceId is not None:
-      oprot.writeFieldBegin('jobSubmissionInterfaceId', TType.STRING, 1)
-      oprot.writeString(self.jobSubmissionInterfaceId)
-      oprot.writeFieldEnd()
-    if self.securityProtocol is not None:
-      oprot.writeFieldBegin('securityProtocol', TType.I32, 2)
-      oprot.writeI32(self.securityProtocol)
-      oprot.writeFieldEnd()
-    if self.nodeId is not None:
-      oprot.writeFieldBegin('nodeId', TType.STRING, 3)
-      oprot.writeString(self.nodeId)
-      oprot.writeFieldEnd()
-    if self.executableType is not None:
-      oprot.writeFieldBegin('executableType', TType.STRING, 4)
-      oprot.writeString(self.executableType)
-      oprot.writeFieldEnd()
-    if self.providerName is not None:
-      oprot.writeFieldBegin('providerName', TType.I32, 5)
-      oprot.writeI32(self.providerName)
-      oprot.writeFieldEnd()
-    if self.userAccountName is not None:
-      oprot.writeFieldBegin('userAccountName', TType.STRING, 6)
-      oprot.writeString(self.userAccountName)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.jobSubmissionInterfaceId is None:
-      raise TProtocol.TProtocolException(message='Required field jobSubmissionInterfaceId is unset!')
-    if self.securityProtocol is None:
-      raise TProtocol.TProtocolException(message='Required field securityProtocol is unset!')
-    if self.nodeId is None:
-      raise TProtocol.TProtocolException(message='Required field nodeId is unset!')
-    if self.executableType is None:
-      raise TProtocol.TProtocolException(message='Required field executableType is unset!')
-    if self.providerName is None:
-      raise TProtocol.TProtocolException(message='Required field providerName is unset!')
-    if self.userAccountName is None:
-      raise TProtocol.TProtocolException(message='Required field userAccountName is unset!')
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.jobSubmissionInterfaceId)
-    value = (value * 31) ^ hash(self.securityProtocol)
-    value = (value * 31) ^ hash(self.nodeId)
-    value = (value * 31) ^ hash(self.executableType)
-    value = (value * 31) ^ hash(self.providerName)
-    value = (value * 31) ^ hash(self.userAccountName)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class JobSubmissionInterface:
-  """
-  Job Submission Interfaces
-
-  jobSubmissionInterfaceId: The Job Submission Interface has to be previously registered and referenced here.
-
-  priorityOrder:
-   For resources with multiple interfaces, the priority order should be selected.
-    The lower the number, the higher the priority.
-
-
-  Attributes:
-   - jobSubmissionInterfaceId
-   - jobSubmissionProtocol
-   - priorityOrder
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'jobSubmissionInterfaceId', None, None, ), # 1
-    (2, TType.I32, 'jobSubmissionProtocol', None, None, ), # 2
-    (3, TType.I32, 'priorityOrder', None, 0, ), # 3
-  )
-
-  def __init__(self, jobSubmissionInterfaceId=None, jobSubmissionProtocol=None, priorityOrder=thrift_spec[3][4],):
-    self.jobSubmissionInterfaceId = jobSubmissionInterfaceId
-    self.jobSubmissionProtocol = jobSubmissionProtocol
-    self.priorityOrder = priorityOrder
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.jobSubmissionInterfaceId = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.I32:
-          self.jobSubmissionProtocol = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.I32:
-          self.priorityOrder = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('JobSubmissionInterface')
-    if self.jobSubmissionInterfaceId is not None:
-      oprot.writeFieldBegin('jobSubmissionInterfaceId', TType.STRING, 1)
-      oprot.writeString(self.jobSubmissionInterfaceId)
-      oprot.writeFieldEnd()
-    if self.jobSubmissionProtocol is not None:
-      oprot.writeFieldBegin('jobSubmissionProtocol', TType.I32, 2)
-      oprot.writeI32(self.jobSubmissionProtocol)
-      oprot.writeFieldEnd()
-    if self.priorityOrder is not None:
-      oprot.writeFieldBegin('priorityOrder', TType.I32, 3)
-      oprot.writeI32(self.priorityOrder)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.jobSubmissionInterfaceId is None:
-      raise TProtocol.TProtocolException(message='Required field jobSubmissionInterfaceId is unset!')
-    if self.jobSubmissionProtocol is None:
-      raise TProtocol.TProtocolException(message='Required field jobSubmissionProtocol is unset!')
-    if self.priorityOrder is None:
-      raise TProtocol.TProtocolException(message='Required field priorityOrder is unset!')
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.jobSubmissionInterfaceId)
-    value = (value * 31) ^ hash(self.jobSubmissionProtocol)
-    value = (value * 31) ^ hash(self.priorityOrder)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
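# Illustrative sketch (not part of the generated file): when a resource exposes
# several submission interfaces, the lowest priorityOrder wins, so choosing the
# preferred interface is a simple min() over that field. The interface ids are
# made-up placeholders.
from apache.airavata.model.appcatalog.computeresource.ttypes import (
    JobSubmissionInterface, JobSubmissionProtocol)

interfaces = [
    JobSubmissionInterface(jobSubmissionInterfaceId="iface-ssh",
                           jobSubmissionProtocol=JobSubmissionProtocol.SSH,
                           priorityOrder=1),
    JobSubmissionInterface(jobSubmissionInterfaceId="iface-local",
                           jobSubmissionProtocol=JobSubmissionProtocol.LOCAL,
                           priorityOrder=2),
]
preferred = min(interfaces, key=lambda i: i.priorityOrder)   # the SSH interface
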
-class ComputeResourceDescription:
-  """
-  Computational Resource Description
-
-  computeResourceId: Airavata Internal Unique Identifier to distinguish Compute Resource.
-
-  hostName:
-    Fully Qualified Host Name.
-
-  hostAliases:
-    Aliases if any.
-
-  ipAddress:
-    IP Addresses of the Resource.
-
-  resourceDescription:
-   A user friendly description of the resource.
-
-  JobSubmissionProtocols:
-   A computational resource may have one or more ways of submitting jobs. This structure
-     will hold all available mechanisms to interact with the resource.
-   The key is the priority
-
-  DataMovementProtocol:
-   Option to specify a preferred data movement mechanism from the available options.
-
-  fileSystems:
-   Map of file systems type and the path.
-
-
-  Attributes:
-   - computeResourceId
-   - hostName
-   - hostAliases
-   - ipAddresses
-   - resourceDescription
-   - enabled
-   - batchQueues
-   - fileSystems
-   - jobSubmissionInterfaces
-   - dataMovementInterfaces
-   - maxMemoryPerNode
-   - gatewayUsageReporting
-   - gatewayUsageModuleLoadCommand
-   - gatewayUsageExecutable
-   - cpusPerNode
-   - defaultNodeCount
-   - defaultCPUCount
-   - defaultWalltime
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'computeResourceId', None, "DO_NOT_SET_AT_CLIENTS", ), # 1
-    (2, TType.STRING, 'hostName', None, None, ), # 2
-    (3, TType.LIST, 'hostAliases', (TType.STRING,None), None, ), # 3
-    (4, TType.LIST, 'ipAddresses', (TType.STRING,None), None, ), # 4
-    (5, TType.STRING, 'resourceDescription', None, None, ), # 5
-    (6, TType.BOOL, 'enabled', None, None, ), # 6
-    (7, TType.LIST, 'batchQueues', (TType.STRUCT,(BatchQueue, BatchQueue.thrift_spec)), None, ), # 7
-    (8, TType.MAP, 'fileSystems', (TType.I32,None,TType.STRING,None), None, ), # 8
-    (9, TType.LIST, 'jobSubmissionInterfaces', (TType.STRUCT,(JobSubmissionInterface, JobSubmissionInterface.thrift_spec)), None, ), # 9
-    (10, TType.LIST, 'dataMovementInterfaces', (TType.STRUCT,(apache.airavata.model.data.movement.ttypes.DataMovementInterface, apache.airavata.model.data.movement.ttypes.DataMovementInterface.thrift_spec)), None, ), # 10
-    (11, TType.I32, 'maxMemoryPerNode', None, None, ), # 11
-    (12, TType.BOOL, 'gatewayUsageReporting', None, None, ), # 12
-    (13, TType.STRING, 'gatewayUsageModuleLoadCommand', None, None, ), # 13
-    (14, TType.STRING, 'gatewayUsageExecutable', None, None, ), # 14
-    (15, TType.I32, 'cpusPerNode', None, None, ), # 15
-    (16, TType.I32, 'defaultNodeCount', None, None, ), # 16
-    (17, TType.I32, 'defaultCPUCount', None, None, ), # 17
-    (18, TType.I32, 'defaultWalltime', None, None, ), # 18
-  )
-
-  def __init__(self, computeResourceId=thrift_spec[1][4], hostName=None, hostAliases=None, ipAddresses=None, resourceDescription=None, enabled=None, batchQueues=None, fileSystems=None, jobSubmissionInterfaces=None, dataMovementInterfaces=None, maxMemoryPerNode=None, gatewayUsageReporting=None, gatewayUsageModuleLoadCommand=None, gatewayUsageExecutable=None, cpusPerNode=None, defaultNodeCount=None, defaultCPUCount=None, defaultWalltime=None,):
-    self.computeResourceId = computeResourceId
-    self.hostName = hostName
-    self.hostAliases = hostAliases
-    self.ipAddresses = ipAddresses
-    self.resourceDescription = resourceDescription
-    self.enabled = enabled
-    self.batchQueues = batchQueues
-    self.fileSystems = fileSystems
-    self.jobSubmissionInterfaces = jobSubmissionInterfaces
-    self.dataMovementInterfaces = dataMovementInterfaces
-    self.maxMemoryPerNode = maxMemoryPerNode
-    self.gatewayUsageReporting = gatewayUsageReporting
-    self.gatewayUsageModuleLoadCommand = gatewayUsageModuleLoadCommand
-    self.gatewayUsageExecutable = gatewayUsageExecutable
-    self.cpusPerNode = cpusPerNode
-    self.defaultNodeCount = defaultNodeCount
-    self.defaultCPUCount = defaultCPUCount
-    self.defaultWalltime = defaultWalltime
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.computeResourceId = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.STRING:
-          self.hostName = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.LIST:
-          self.hostAliases = []
-          (_etype35, _size32) = iprot.readListBegin()
-          for _i36 in xrange(_size32):
-            _elem37 = iprot.readString()
-            self.hostAliases.append(_elem37)
-          iprot.readListEnd()
-        else:
-          iprot.skip(ftype)
-      elif fid == 4:
-        if ftype == TType.LIST:
-          self.ipAddresses = []
-          (_etype41, _size38) = iprot.readListBegin()
-          for _i42 in xrange(_size38):
-            _elem43 = iprot.readString()
-            self.ipAddresses.append(_elem43)
-          iprot.readListEnd()
-        else:
-          iprot.skip(ftype)
-      elif fid == 5:
-        if ftype == TType.STRING:
-          self.resourceDescription = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 6:
-        if ftype == TType.BOOL:
-          self.enabled = iprot.readBool()
-        else:
-          iprot.skip(ftype)
-      elif fid == 7:
-        if ftype == TType.LIST:
-          self.batchQueues = []
-          (_etype47, _size44) = iprot.readListBegin()
-          for _i48 in xrange(_size44):
-            _elem49 = BatchQueue()
-            _elem49.read(iprot)
-            self.batchQueues.append(_elem49)
-          iprot.readListEnd()
-        else:
-          iprot.skip(ftype)
-      elif fid == 8:
-        if ftype == TType.MAP:
-          self.fileSystems = {}
-          (_ktype51, _vtype52, _size50 ) = iprot.readMapBegin()
-          for _i54 in xrange(_size50):
-            _key55 = iprot.readI32()
-            _val56 = iprot.readString()
-            self.fileSystems[_key55] = _val56
-          iprot.readMapEnd()
-        else:
-          iprot.skip(ftype)
-      elif fid == 9:
-        if ftype == TType.LIST:
-          self.jobSubmissionInterfaces = []
-          (_etype60, _size57) = iprot.readListBegin()
-          for _i61 in xrange(_size57):
-            _elem62 = JobSubmissionInterface()
-            _elem62.read(iprot)
-            self.jobSubmissionInterfaces.append(_elem62)
-          iprot.readListEnd()
-        else:
-          iprot.skip(ftype)
-      elif fid == 10:
-        if ftype == TType.LIST:
-          self.dataMovementInterfaces = []
-          (_etype66, _size63) = iprot.readListBegin()
-          for _i67 in xrange(_size63):
-            _elem68 = apache.airavata.model.data.movement.ttypes.DataMovementInterface()
-            _elem68.read(iprot)
-            self.dataMovementInterfaces.append(_elem68)
-          iprot.readListEnd()
-        else:
-          iprot.skip(ftype)
-      elif fid == 11:
-        if ftype == TType.I32:
-          self.maxMemoryPerNode = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      elif fid == 12:
-        if ftype == TType.BOOL:
-          self.gatewayUsageReporting = iprot.readBool()
-        else:
-          iprot.skip(ftype)
-      elif fid == 13:
-        if ftype == TType.STRING:
-          self.gatewayUsageModuleLoadCommand = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 14:
-        if ftype == TType.STRING:
-          self.gatewayUsageExecutable = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 15:
-        if ftype == TType.I32:
-          self.cpusPerNode = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      elif fid == 16:
-        if ftype == TType.I32:
-          self.defaultNodeCount = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      elif fid == 17:
-        if ftype == TType.I32:
-          self.defaultCPUCount = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      elif fid == 18:
-        if ftype == TType.I32:
-          self.defaultWalltime = iprot.readI32()
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('ComputeResourceDescription')
-    if self.computeResourceId is not None:
-      oprot.writeFieldBegin('computeResourceId', TType.STRING, 1)
-      oprot.writeString(self.computeResourceId)
-      oprot.writeFieldEnd()
-    if self.hostName is not None:
-      oprot.writeFieldBegin('hostName', TType.STRING, 2)
-      oprot.writeString(self.hostName)
-      oprot.writeFieldEnd()
-    if self.hostAliases is not None:
-      oprot.writeFieldBegin('hostAliases', TType.LIST, 3)
-      oprot.writeListBegin(TType.STRING, len(self.hostAliases))
-      for iter69 in self.hostAliases:
-        oprot.writeString(iter69)
-      oprot.writeListEnd()
-      oprot.writeFieldEnd()
-    if self.ipAddresses is not None:
-      oprot.writeFieldBegin('ipAddresses', TType.LIST, 4)
-      oprot.writeListBegin(TType.STRING, len(self.ipAddresses))
-      for iter70 in self.ipAddresses:
-        oprot.writeString(iter70)
-      oprot.writeListEnd()
-      oprot.writeFieldEnd()
-    if self.resourceDescription is not None:
-      oprot.writeFieldBegin('resourceDescription', TType.STRING, 5)
-      oprot.writeString(self.resourceDescription)
-      oprot.writeFieldEnd()
-    if self.enabled is not None:
-      oprot.writeFieldBegin('enabled', TType.BOOL, 6)
-      oprot.writeBool(self.enabled)
-      oprot.writeFieldEnd()
-    if self.batchQueues is not None:
-      oprot.writeFieldBegin('batchQueues', TType.LIST, 7)
-      oprot.writeListBegin(TType.STRUCT, len(self.batchQueues))
-      for iter71 in self.batchQueues:
-        iter71.write(oprot)
-      oprot.writeListEnd()
-      oprot.writeFieldEnd()
-    if self.fileSystems is not None:
-      oprot.writeFieldBegin('fileSystems', TType.MAP, 8)
-      oprot.writeMapBegin(TType.I32, TType.STRING, len(self.fileSystems))
-      for kiter72,viter73 in self.fileSystems.items():
-        oprot.writeI32(kiter72)
-        oprot.writeString(viter73)
-      oprot.writeMapEnd()
-      oprot.writeFieldEnd()
-    if self.jobSubmissionInterfaces is not None:
-      oprot.writeFieldBegin('jobSubmissionInterfaces', TType.LIST, 9)
-      oprot.writeListBegin(TType.STRUCT, len(self.jobSubmissionInterfaces))
-      for iter74 in self.jobSubmissionInterfaces:
-        iter74.write(oprot)
-      oprot.writeListEnd()
-      oprot.writeFieldEnd()
-    if self.dataMovementInterfaces is not None:
-      oprot.writeFieldBegin('dataMovementInterfaces', TType.LIST, 10)
-      oprot.writeListBegin(TType.STRUCT, len(self.dataMovementInterfaces))
-      for iter75 in self.dataMovementInterfaces:
-        iter75.write(oprot)
-      oprot.writeListEnd()
-      oprot.writeFieldEnd()
-    if self.maxMemoryPerNode is not None:
-      oprot.writeFieldBegin('maxMemoryPerNode', TType.I32, 11)
-      oprot.writeI32(self.maxMemoryPerNode)
-      oprot.writeFieldEnd()
-    if self.gatewayUsageReporting is not None:
-      oprot.writeFieldBegin('gatewayUsageReporting', TType.BOOL, 12)
-      oprot.writeBool(self.gatewayUsageReporting)
-      oprot.writeFieldEnd()
-    if self.gatewayUsageModuleLoadCommand is not None:
-      oprot.writeFieldBegin('gatewayUsageModuleLoadCommand', TType.STRING, 13)
-      oprot.writeString(self.gatewayUsageModuleLoadCommand)
-      oprot.writeFieldEnd()
-    if self.gatewayUsageExecutable is not None:
-      oprot.writeFieldBegin('gatewayUsageExecutable', TType.STRING, 14)
-      oprot.writeString(self.gatewayUsageExecutable)
-      oprot.writeFieldEnd()
-    if self.cpusPerNode is not None:
-      oprot.writeFieldBegin('cpusPerNode', TType.I32, 15)
-      oprot.writeI32(self.cpusPerNode)
-      oprot.writeFieldEnd()
-    if self.defaultNodeCount is not None:
-      oprot.writeFieldBegin('defaultNodeCount', TType.I32, 16)
-      oprot.writeI32(self.defaultNodeCount)
-      oprot.writeFieldEnd()
-    if self.defaultCPUCount is not None:
-      oprot.writeFieldBegin('defaultCPUCount', TType.I32, 17)
-      oprot.writeI32(self.defaultCPUCount)
-      oprot.writeFieldEnd()
-    if self.defaultWalltime is not None:
-      oprot.writeFieldBegin('defaultWalltime', TType.I32, 18)
-      oprot.writeI32(self.defaultWalltime)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    if self.computeResourceId is None:
-      raise TProtocol.TProtocolException(message='Required field computeResourceId is unset!')
-    if self.hostName is None:
-      raise TProtocol.TProtocolException(message='Required field hostName is unset!')
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.computeResourceId)
-    value = (value * 31) ^ hash(self.hostName)
-    value = (value * 31) ^ hash(self.hostAliases)
-    value = (value * 31) ^ hash(self.ipAddresses)
-    value = (value * 31) ^ hash(self.resourceDescription)
-    value = (value * 31) ^ hash(self.enabled)
-    value = (value * 31) ^ hash(self.batchQueues)
-    value = (value * 31) ^ hash(self.fileSystems)
-    value = (value * 31) ^ hash(self.jobSubmissionInterfaces)
-    value = (value * 31) ^ hash(self.dataMovementInterfaces)
-    value = (value * 31) ^ hash(self.maxMemoryPerNode)
-    value = (value * 31) ^ hash(self.gatewayUsageReporting)
-    value = (value * 31) ^ hash(self.gatewayUsageModuleLoadCommand)
-    value = (value * 31) ^ hash(self.gatewayUsageExecutable)
-    value = (value * 31) ^ hash(self.cpusPerNode)
-    value = (value * 31) ^ hash(self.defaultNodeCount)
-    value = (value * 31) ^ hash(self.defaultCPUCount)
-    value = (value * 31) ^ hash(self.defaultWalltime)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
+
+
+class ResourceJobManagerType(object):
+    """
+    * Enumeration of local resource job manager types supported by Airavata
+    *
+    * FORK:
+    *  Forking of commands without any job manager
+    *
+    * PBS:
+    *  Job manager supporting the Portal Batch System (PBS) protocol. Some examples include TORQUE, PBSPro, Grid Engine.
+    *
+    * SLURM:
+    *  The Simple Linux Utility for Resource Management is an open-source workload manager.
+    *
+    * UGE:
+    *  Univa Grid Engine, a variation of the PBS implementation.
+    *
+    * LSF:
+    *  IBM Platform Load Sharing Facility, predominantly installed on IBM clusters.
+    *
+    """
+    FORK = 0
+    PBS = 1
+    SLURM = 2
+    LSF = 3
+    UGE = 4
+    CLOUD = 5
+    AIRAVATA_CUSTOM = 6
+
+    _VALUES_TO_NAMES = {
+        0: "FORK",
+        1: "PBS",
+        2: "SLURM",
+        3: "LSF",
+        4: "UGE",
+        5: "CLOUD",
+        6: "AIRAVATA_CUSTOM",
+    }
+
+    _NAMES_TO_VALUES = {
+        "FORK": 0,
+        "PBS": 1,
+        "SLURM": 2,
+        "LSF": 3,
+        "UGE": 4,
+        "CLOUD": 5,
+        "AIRAVATA_CUSTOM": 6,
+    }
+
+
+class JobManagerCommand(object):
+    """
+    Enumeration of resource job manager commands
+
+    SUBMISSION:
+     Ex: qsub, sbatch
+
+    JOB_MONITORING:
+     Ex: qstat, squeue
+
+    DELETION:
+     Ex: qdel, scancel
+
+    CHECK_JOB:
+     Detailed status of the job. Ex: checkjob
+
+    SHOW_QUEUE:
+     List of jobs queued by the scheduler. Ex: showq
+
+    SHOW_RESERVATION:
+     List all reservations. Ex: showres, show_res
+
+    SHOW_START:
+     Display the start time of the specified job. Ex: showstart
+
+    """
+    SUBMISSION = 0
+    JOB_MONITORING = 1
+    DELETION = 2
+    CHECK_JOB = 3
+    SHOW_QUEUE = 4
+    SHOW_RESERVATION = 5
+    SHOW_START = 6
+
+    _VALUES_TO_NAMES = {
+        0: "SUBMISSION",
+        1: "JOB_MONITORING",
+        2: "DELETION",
+        3: "CHECK_JOB",
+        4: "SHOW_QUEUE",
+        5: "SHOW_RESERVATION",
+        6: "SHOW_START",
+    }
+
+    _NAMES_TO_VALUES = {
+        "SUBMISSION": 0,
+        "JOB_MONITORING": 1,
+        "DELETION": 2,
+        "CHECK_JOB": 3,
+        "SHOW_QUEUE": 4,
+        "SHOW_RESERVATION": 5,
+        "SHOW_START": 6,
+    }
+
+
+class FileSystems(object):
+    """
+    Enumeration of File Systems on the resource
+
+    FORK:
+     Forking of commands without any job manager
+
+    PBS:
+     Job manager supporting the Portal Batch System (PBS) protocol. Some examples include TORQUE, PBSPro, Grid Engine.
+
+    UGE:
+     Univa Grid Engine, a variation of PBS implementation.
+
+    SLURM:
+     The Simple Linux Utility for Resource Management is a open source workload manager.
+
+    """
+    HOME = 0
+    WORK = 1
+    LOCALTMP = 2
+    SCRATCH = 3
+    ARCHIVE = 4
+
+    _VALUES_TO_NAMES = {
+        0: "HOME",
+        1: "WORK",
+        2: "LOCALTMP",
+        3: "SCRATCH",
+        4: "ARCHIVE",
+    }
+
+    _NAMES_TO_VALUES = {
+        "HOME": 0,
+        "WORK": 1,
+        "LOCALTMP": 2,
+        "SCRATCH": 3,
+        "ARCHIVE": 4,
+    }
+
+
+class JobSubmissionProtocol(object):
+    """
+    Enumeration of Airavata-supported job submission mechanisms for High Performance Computing clusters.
+
+    SSH:
+     Execute remote job submission commands via the secure shell (SSH) protocol.
+
+    GLOBUS:
+     Execute remote jobs via the Globus GRAM service.
+
+    UNICORE:
+     Execute remote jobs via UNICORE services.
+
+    """
+    LOCAL = 0
+    SSH = 1
+    GLOBUS = 2
+    UNICORE = 3
+    CLOUD = 4
+    SSH_FORK = 5
+    LOCAL_FORK = 6
+
+    _VALUES_TO_NAMES = {
+        0: "LOCAL",
+        1: "SSH",
+        2: "GLOBUS",
+        3: "UNICORE",
+        4: "CLOUD",
+        5: "SSH_FORK",
+        6: "LOCAL_FORK",
+    }
+
+    _NAMES_TO_VALUES = {
+        "LOCAL": 0,
+        "SSH": 1,
+        "GLOBUS": 2,
+        "UNICORE": 3,
+        "CLOUD": 4,
+        "SSH_FORK": 5,
+        "LOCAL_FORK": 6,
+    }
+
+
+class MonitorMode(object):
+    """
+    Monitoring modes
+
+    POLL_JOB_MANAGER:
+    GFac needs to poll the job manager for job status changes.
+
+    XSEDE_AMQP_SUBSCRIBE:
+    The server will publish job status changes to an AMQP server.
+
+    """
+    POLL_JOB_MANAGER = 0
+    CLOUD_JOB_MONITOR = 1
+    JOB_EMAIL_NOTIFICATION_MONITOR = 2
+    XSEDE_AMQP_SUBSCRIBE = 3
+    FORK = 4
+    LOCAL = 5
+
+    _VALUES_TO_NAMES = {
+        0: "POLL_JOB_MANAGER",
+        1: "CLOUD_JOB_MONITOR",
+        2: "JOB_EMAIL_NOTIFICATION_MONITOR",
+        3: "XSEDE_AMQP_SUBSCRIBE",
+        4: "FORK",
+        5: "LOCAL",
+    }
+
+    _NAMES_TO_VALUES = {
+        "POLL_JOB_MANAGER": 0,
+        "CLOUD_JOB_MONITOR": 1,
+        "JOB_EMAIL_NOTIFICATION_MONITOR": 2,
+        "XSEDE_AMQP_SUBSCRIBE": 3,
+        "FORK": 4,
+        "LOCAL": 5,
+    }
+
+
+class DMType(object):
+    COMPUTE_RESOURCE = 0
+    STORAGE_RESOURCE = 1
+
+    _VALUES_TO_NAMES = {
+        0: "COMPUTE_RESOURCE",
+        1: "STORAGE_RESOURCE",
+    }
+
+    _NAMES_TO_VALUES = {
+        "COMPUTE_RESOURCE": 0,
+        "STORAGE_RESOURCE": 1,
+    }
+
+
+class ProviderName(object):
+    """
+    Provider name
+
+    """
+    EC2 = 0
+    AWSEC2 = 1
+    RACKSPACE = 2
+
+    _VALUES_TO_NAMES = {
+        0: "EC2",
+        1: "AWSEC2",
+        2: "RACKSPACE",
+    }
+
+    _NAMES_TO_VALUES = {
+        "EC2": 0,
+        "AWSEC2": 1,
+        "RACKSPACE": 2,
+    }
+
+
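[Editor's note, not part of the generated file or this diff: the enum classes above are plain classes of integer constants plus the two lookup dicts emitted by the Thrift compiler. A minimal usage sketch, using only names defined in this module:]

# Illustrative only -- not generated code.
from apache.airavata.model.appcatalog.computeresource.ttypes import (
    ResourceJobManagerType, JobManagerCommand)

# Enum members are plain ints ...
assert ResourceJobManagerType.SLURM == 2

# ... and the generated lookup tables convert between names and values.
assert ResourceJobManagerType._VALUES_TO_NAMES[2] == "SLURM"
assert JobManagerCommand._NAMES_TO_VALUES["SUBMISSION"] == JobManagerCommand.SUBMISSION
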
+class ResourceJobManager(object):
+    """
+    Resource Job Manager Information
+
+    resourceJobManagerType:
+     A typical HPC cluster has a single Job Manager to manage the resources.
+
+    pushMonitoringEndpoint:
+     If the job manager pushes out state changes to a database or bus, specify the service endpoint.
+      Ex: Moab Web Service, Moab MongoDB URL, AMQP (GLUE2) Broker
+
+    jobManagerBinPath:
+     Path to the Job Manager Installation Binary directory.
+
+    jobManagerCommands:
+     An enumeration of commonly used manager commands.
+
+
+    Attributes:
+     - resourceJobManagerId
+     - resourceJobManagerType
+     - pushMonitoringEndpoint
+     - jobManagerBinPath
+     - jobManagerCommands
+     - parallelismPrefix
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.STRING, 'resourceJobManagerId', 'UTF8', "DO_NOT_SET_AT_CLIENTS", ),  # 1
+        (2, TType.I32, 'resourceJobManagerType', None, None, ),  # 2
+        (3, TType.STRING, 'pushMonitoringEndpoint', 'UTF8', None, ),  # 3
+        (4, TType.STRING, 'jobManagerBinPath', 'UTF8', None, ),  # 4
+        (5, TType.MAP, 'jobManagerCommands', (TType.I32, None, TType.STRING, 'UTF8', False), None, ),  # 5
+        (6, TType.MAP, 'parallelismPrefix', (TType.I32, None, TType.STRING, 'UTF8', False), None, ),  # 6
+    )
+
+    def __init__(self, resourceJobManagerId=thrift_spec[1][4], resourceJobManagerType=None, pushMonitoringEndpoint=None, jobManagerBinPath=None, jobManagerCommands=None, parallelismPrefix=None,):
+        self.resourceJobManagerId = resourceJobManagerId
+        self.resourceJobManagerType = resourceJobManagerType
+        self.pushMonitoringEndpoint = pushMonitoringEndpoint
+        self.jobManagerBinPath = jobManagerBinPath
+        self.jobManagerCommands = jobManagerCommands
+        self.parallelismPrefix = parallelismPrefix
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.STRING:
+                    self.resourceJobManagerId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.I32:
+                    self.resourceJobManagerType = iprot.readI32()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 3:
+                if ftype == TType.STRING:
+                    self.pushMonitoringEndpoint = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 4:
+                if ftype == TType.STRING:
+                    self.jobManagerBinPath = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 5:
+                if ftype == TType.MAP:
+                    self.jobManagerCommands = {}
+                    (_ktype1, _vtype2, _size0) = iprot.readMapBegin()
+                    for _i4 in range(_size0):
+                        _key5 = iprot.readI32()
+                        _val6 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
+                        self.jobManagerCommands[_key5] = _val6
+                    iprot.readMapEnd()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 6:
+                if ftype == TType.MAP:
+                    self.parallelismPrefix = {}
+                    (_ktype8, _vtype9, _size7) = iprot.readMapBegin()
+                    for _i11 in range(_size7):
+                        _key12 = iprot.readI32()
+                        _val13 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
+                        self.parallelismPrefix[_key12] = _val13
+                    iprot.readMapEnd()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('ResourceJobManager')
+        if self.resourceJobManagerId is not None:
+            oprot.writeFieldBegin('resourceJobManagerId', TType.STRING, 1)
+            oprot.writeString(self.resourceJobManagerId.encode('utf-8') if sys.version_info[0] == 2 else self.resourceJobManagerId)
+            oprot.writeFieldEnd()
+        if self.resourceJobManagerType is not None:
+            oprot.writeFieldBegin('resourceJobManagerType', TType.I32, 2)
+            oprot.writeI32(self.resourceJobManagerType)
+            oprot.writeFieldEnd()
+        if self.pushMonitoringEndpoint is not None:
+            oprot.writeFieldBegin('pushMonitoringEndpoint', TType.STRING, 3)
+            oprot.writeString(self.pushMonitoringEndpoint.encode('utf-8') if sys.version_info[0] == 2 else self.pushMonitoringEndpoint)
+            oprot.writeFieldEnd()
+        if self.jobManagerBinPath is not None:
+            oprot.writeFieldBegin('jobManagerBinPath', TType.STRING, 4)
+            oprot.writeString(self.jobManagerBinPath.encode('utf-8') if sys.version_info[0] == 2 else self.jobManagerBinPath)
+            oprot.writeFieldEnd()
+        if self.jobManagerCommands is not None:
+            oprot.writeFieldBegin('jobManagerCommands', TType.MAP, 5)
+            oprot.writeMapBegin(TType.I32, TType.STRING, len(self.jobManagerCommands))
+            for kiter14, viter15 in self.jobManagerCommands.items():
+                oprot.writeI32(kiter14)
+                oprot.writeString(viter15.encode('utf-8') if sys.version_info[0] == 2 else viter15)
+            oprot.writeMapEnd()
+            oprot.writeFieldEnd()
+        if self.parallelismPrefix is not None:
+            oprot.writeFieldBegin('parallelismPrefix', TType.MAP, 6)
+            oprot.writeMapBegin(TType.I32, TType.STRING, len(self.parallelismPrefix))
+            for kiter16, viter17 in self.parallelismPrefix.items():
+                oprot.writeI32(kiter16)
+                oprot.writeString(viter17.encode('utf-8') if sys.version_info[0] == 2 else viter17)
+            oprot.writeMapEnd()
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        if self.resourceJobManagerId is None:
+            raise TProtocolException(message='Required field resourceJobManagerId is unset!')
+        if self.resourceJobManagerType is None:
+            raise TProtocolException(message='Required field resourceJobManagerType is unset!')
+        return
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
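[Editor's note, not part of the generated file or this diff: as a usage illustration of the ResourceJobManager struct defined above, a SLURM resource might be described as follows. The bin path and command strings are placeholder values, not Airavata defaults.]

# Illustrative sketch only; the path and command strings are placeholders.
from apache.airavata.model.appcatalog.computeresource.ttypes import (
    ResourceJobManager, ResourceJobManagerType, JobManagerCommand)

rjm = ResourceJobManager(
    resourceJobManagerType=ResourceJobManagerType.SLURM,
    jobManagerBinPath="/usr/bin",  # hypothetical install location
    jobManagerCommands={
        JobManagerCommand.SUBMISSION: "sbatch",
        JobManagerCommand.JOB_MONITORING: "squeue",
        JobManagerCommand.DELETION: "scancel",
    },
)
# resourceJobManagerId keeps its generated default ("DO_NOT_SET_AT_CLIENTS")
# until the registry assigns a real id, so validate() passes as-is.
rjm.validate()
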
+class BatchQueue(object):
+    """
+    Batch queue information on supercomputers
+
+    maxRunTime:
+     Maximum allowed run time in hours.
+
+    Attributes:
+     - queueName
+     - queueDescription
+     - maxRunTime
+     - maxNodes
+     - maxProcessors
+     - maxJobsInQueue
+     - maxMemory
+     - cpuPerNode
+     - defaultNodeCount
+     - defaultCPUCount
+     - defaultWalltime
+     - queueSpecificMacros
+     - isDefaultQueue
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.STRING, 'queueName', 'UTF8', None, ),  # 1
+        (2, TType.STRING, 'queueDescription', 'UTF8', None, ),  # 2
+        (3, TType.I32, 'maxRunTime', None, None, ),  # 3
+        (4, TType.I32, 'maxNodes', None, None, ),  # 4
+        (5, TType.I32, 'maxProcessors', None, None, ),  # 5
+        (6, TType.I32, 'maxJobsInQueue', None, None, ),  # 6
+        (7, TType.I32, 'maxMemory', None, None, ),  # 7
+        (8, TType.I32, 'cpuPerNode', None, None, ),  # 8
+        (9, TType.I32, 'defaultNodeCount', None, None, ),  # 9
+        (10, TType.I32, 'defaultCPUCount', None, None, ),  # 10
+        (11, TType.I32, 'defaultWalltime', None, None, ),  # 11
+        (12, TType.STRING, 'queueSpecificMacros', 'UTF8', None, ),  # 12
+        (13, TType.BOOL, 'isDefaultQueue', None, None, ),  # 13
+    )
+
+    def __init__(self, queueName=None, queueDescription=None, maxRunTime=None, maxNodes=None, maxProcessors=None, maxJobsInQueue=None, maxMemory=None, cpuPerNode=None, defaultNodeCount=None, defaultCPUCount=None, defaultWalltime=None, queueSpecificMacros=None, isDefaultQueue=None,):
+        self.queueName = queueName
+        self.queueDescription = queueDescription
+        self.maxRunTime = maxRunTime
+        self.maxNodes = maxNodes
+        self.maxProcessors = maxProcessors
+        self.maxJobsInQueue = maxJobsInQueue
+        self.maxMemory = maxMemory
+        self.cpuPerNode = cpuPerNode
+        self.defaultNodeCount = defaultNodeCount
+        self.defaultCPUCount = defaultCPUCount
+        self.defaultWalltime = defaultWalltime
+        self.queueSpecificMacros = queueSpecificMacros
+        self.isDefaultQueue = isDefaultQueue
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.STRING:
+                    self.queueName = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.STRING:
+                    self.queueDescription = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 3:
+                if ftype == TType.I32:
+                    self.maxRunTime = iprot.readI32()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 4:
+                if ftype == TType.I32:
+                    self.maxNodes = iprot.readI32()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 5:
+                if ftype == TType.I32:
+                    self.maxProcessors = iprot.readI32()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 6:
+                if ftype == TType.I32:
+                    self.maxJobsInQueue = iprot.readI32()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 7:
+                if ftype == TType.I32:
+                    self.maxMemory = iprot.readI32()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 8:
+                if ftype == TType.I32:
+                    self.cpuPerNode = iprot.readI32()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 9:
+                if ftype == TType.I32:
+                    self.defaultNodeCount = iprot.readI32()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 10:
+                if ftype == TType.I32:
+                    self.defaultCPUCount = iprot.readI32()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 11:
+                if ftype == TType.I32:
+                    self.defaultWalltime = iprot.readI32()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 12:
+                if ftype == TType.STRING:
+                    self.queueSpecificMacros = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 13:
+                if ftype == TType.BOOL:
+                    self.isDefaultQueue = iprot.readBool()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('BatchQueue')
+        if self.queueName is not None:
+            oprot.writeFieldBegin('queueName', TType.STRING, 1)
+            oprot.writeString(self.queueName.encode('utf-8') if sys.version_info[0] == 2 else self.queueName)
+            oprot.writeFieldEnd()
+        if self.queueDescription is not None:
+            oprot.writeFieldBegin('queueDescription', TType.STRING, 2)
+            oprot.writeString(self.queueDescription.encode('utf-8') if sys.version_info[0] == 2 else self.queueDescription)
+            oprot.writeFieldEnd()
+        if self.maxRunTime is not None:
+            oprot.writeFieldBegin('maxRunTime', TType.I32, 3)
+            oprot.writeI32(self.maxRunTime)
+            oprot.writeFieldEnd()
+        if self.maxNodes is not None:
+            oprot.writeFieldBegin('maxNodes', TType.I32, 4)
+            oprot.writeI32(self.maxNodes)
+            oprot.writeFieldEnd()
+        if self.maxProcessors is not None:
+            oprot.writeFieldBegin('maxProcessors', TType.I32, 5)
+            oprot.writeI32(self.maxProcessors)
+            oprot.writeFieldEnd()
+        if self.maxJobsInQueue is not None:
+            oprot.writeFieldBegin('maxJobsInQueue', TType.I32, 6)
+            oprot.writeI32(self.maxJobsInQueue)
+            oprot.writeFieldEnd()
+        if self.maxMemory is not None:
+            oprot.writeFieldBegin('maxMemory', TType.I32, 7)
+            oprot.writeI32(self.maxMemory)
+            oprot.writeFieldEnd()
+        if self.cpuPerNode is not None:
+            oprot.writeFieldBegin('cpuPerNode', TType.I32, 8)
+            oprot.writeI32(self.cpuPerNode)
+            oprot.writeFieldEnd()
+        if self.defaultNodeCount is not None:
+            oprot.writeFieldBegin('defaultNodeCount', TType.I32, 9)
+            oprot.writeI32(self.defaultNodeCount)
+            oprot.writeFieldEnd()
+        if self.defaultCPUCount is not None:
+            oprot.writeFieldBegin('defaultCPUCount', TType.I32, 10)
+            oprot.writeI32(self.defaultCPUCount)
+            oprot.writeFieldEnd()
+        if self.defaultWalltime is not None:
+            oprot.writeFieldBegin('defaultWalltime', TType.I32, 11)
+            oprot.writeI32(self.defaultWalltime)
+            oprot.writeFieldEnd()
+        if self.queueSpecificMacros is not None:
+            oprot.writeFieldBegin('queueSpecificMacros', TType.STRING, 12)
+            oprot.writeString(self.queueSpecificMacros.encode('utf-8') if sys.version_info[0] == 2 else self.queueSpecificMacros)
+            oprot.writeFieldEnd()
+        if self.isDefaultQueue is not None:
+            oprot.writeFieldBegin('isDefaultQueue', TType.BOOL, 13)
+            oprot.writeBool(self.isDefaultQueue)
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        if self.queueName is None:
+            raise TProtocolException(message='Required field queueName is unset!')
+        return
+
+    def __repr__(self):
+        L = ['%s=%r' % (key, value)
+             for key, value in self.__dict__.items()]
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
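[Editor's note, not part of the generated file or this diff: to make the generated read/write plumbing concrete, here is a hedged round-trip sketch that serializes a BatchQueue with the binary protocol into an in-memory buffer and reads it back. It uses only standard Thrift runtime classes (TMemoryBuffer, TBinaryProtocol) and fields declared above; the queue values are invented for illustration.]

# Illustrative round-trip; queue values are made up.
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from apache.airavata.model.appcatalog.computeresource.ttypes import BatchQueue

queue = BatchQueue(queueName="normal", maxRunTime=48, maxNodes=128,
                   cpuPerNode=24, isDefaultQueue=True)
queue.validate()  # only queueName is required

# write() walks the fields set above and emits binary Thrift
out = TTransport.TMemoryBuffer()
queue.write(TBinaryProtocol.TBinaryProtocol(out))
payload = out.getvalue()

# read() repopulates a fresh instance from the same bytes
restored = BatchQueue()
restored.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(payload)))
assert restored == queue
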
+class LOCALSubmission(object):
+    """
+    Locally fork jobs as OS processes
+
+    Attributes:
+     - jobSubmissionInterfaceId
+     - resourceJobManager
+     - securityProtocol
+    """
+
+    thrift_spec = (
+        None,  # 0
+        (1, TType.STRING, 'jobSubmissionInterfaceId', 'UTF8', "DO_NOT_SET_AT_CLIENTS", ),  # 1
+        (2, TType.STRUCT, 'resourceJobManager', (ResourceJobManager, ResourceJobManager.thrift_spec), None, ),  # 2
+        (3, TType.I32, 'securityProtocol', None, None, ),  # 3
+    )
+
+    def __init__(self, jobSubmissionInterfaceId=thrift_spec[1][4], resourceJobManager=None, securityProtocol=None,):
+        self.jobSubmissionInterfaceId = jobSubmissionInterfaceId
+        self.resourceJobManager = resourceJobManager
+        self.securityProtocol = securityProtocol
+
+    def read(self, iprot):
+        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.STRING:
+                    self.jobSubmissionInterfaceId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.STRUCT:
+                    self.resourceJobManager = ResourceJobManager()
+                    self.resourceJobManager.read(iprot)
+                else:
+                    iprot.skip(ftype)
+            elif fid == 3:
+                if ftype == TType.I32:
+                    self.securityProtocol = iprot.readI32()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
+            return
+        oprot.writeStructBegin('LOCALSubmission')
+        if self.jobSubmissionInterfaceId is not None:
+            oprot.writeFieldBegin('jobSubmissionInterfaceId', TType.STRING, 1)
+            oprot

<TRUNCATED>