Posted to commits@uima.apache.org by ch...@apache.org on 2013/11/04 19:26:02 UTC

svn commit: r1538703 - in /uima/sandbox/uima-ducc/trunk: src/main/config/activemq-ducc.xml src/main/config/activemq-nojournal5.xml src/main/resources/ducc.properties uima-ducc-duccdocs/src/site/tex/duccbook/part4/admin/ducc-properties.tex

Author: challngr
Date: Mon Nov  4 18:26:01 2013
New Revision: 1538703

URL: http://svn.apache.org/r1538703
Log:
UIMA-3402 Updates for ducc.properties.

Added:
    uima/sandbox/uima-ducc/trunk/src/main/config/activemq-ducc.xml
      - copied, changed from r1537505, uima/sandbox/uima-ducc/trunk/src/main/config/activemq-nojournal5.xml
Removed:
    uima/sandbox/uima-ducc/trunk/src/main/config/activemq-nojournal5.xml
Modified:
    uima/sandbox/uima-ducc/trunk/src/main/resources/ducc.properties
    uima/sandbox/uima-ducc/trunk/uima-ducc-duccdocs/src/site/tex/duccbook/part4/admin/ducc-properties.tex

Copied: uima/sandbox/uima-ducc/trunk/src/main/config/activemq-ducc.xml (from r1537505, uima/sandbox/uima-ducc/trunk/src/main/config/activemq-nojournal5.xml)
URL: http://svn.apache.org/viewvc/uima/sandbox/uima-ducc/trunk/src/main/config/activemq-ducc.xml?p2=uima/sandbox/uima-ducc/trunk/src/main/config/activemq-ducc.xml&p1=uima/sandbox/uima-ducc/trunk/src/main/config/activemq-nojournal5.xml&r1=1537505&r2=1538703&rev=1538703&view=diff
==============================================================================
--- uima/sandbox/uima-ducc/trunk/src/main/config/activemq-nojournal5.xml (original)
+++ uima/sandbox/uima-ducc/trunk/src/main/config/activemq-ducc.xml Mon Nov  4 18:26:01 2013
@@ -83,7 +83,6 @@
 		  
 		  
         <transportConnectors>
-          <!-- transportConnector name="openwire" uri="tcp://0.0.0.0:61616?transport.soWriteTimeout=45000"/ -->
             <transportConnector name="openwire" uri="tcp://0.0.0.0:${DUCC_AMQ_PORT}?${DUCC_AMQ_DECORATION}"/>
         </transportConnectors>
 

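For illustration only (not taken from this commit): if the broker start script fills DUCC_AMQ_PORT from ducc.broker.port and DUCC_AMQ_DECORATION from ducc.broker.server.url.decoration (an assumption about the launch scripts, not shown here), the substituted connector would read roughly:

    <transportConnector name="openwire" uri="tcp://0.0.0.0:61617?transport.soWriteTimeout=45000"/>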
Modified: uima/sandbox/uima-ducc/trunk/src/main/resources/ducc.properties
URL: http://svn.apache.org/viewvc/uima/sandbox/uima-ducc/trunk/src/main/resources/ducc.properties?rev=1538703&r1=1538702&r2=1538703&view=diff
==============================================================================
--- uima/sandbox/uima-ducc/trunk/src/main/resources/ducc.properties (original)
+++ uima/sandbox/uima-ducc/trunk/src/main/resources/ducc.properties Mon Nov  4 18:26:01 2013
@@ -13,12 +13,16 @@
 #
 # ====================================================================================
 
-# Identify the node where DUCC runs
+
+# Identify the node where DUCC runs. Updated by ducc_post_install. 
 ducc.head = <head-node>
 
-# Name the JVM
+# Name the JVM.  Updated by ducc_post_install
 ducc.jvm  = <full-path-to-java-command>
 
+# The name of the cluster as shown by the Web Server
+ducc.cluster.name=Apache UIMA-DUCC
+
 # Name any site-local jars
 #ducc.local.jars = 
 
@@ -44,11 +48,19 @@ ducc.jms.provider=activemq
 #
 ducc.broker.protocol=tcp
 ducc.broker.hostname=${ducc.head}
-ducc.broker.port=61616
-# >>> Explain these!
+ducc.broker.port=61617
+# Broker decorations: JERRY JERRY JERRY
+#    maxInactivityDuration: 0 disables the ActiveMQ inactivity monitor so idle connections are not dropped
+#    useCompression: true compresses message bodies on the wire
+#    closeAsync: false closes the transport synchronously rather than in a background thread
 ducc.broker.url.decoration=wireFormat.maxInactivityDuration=0&jms.useCompression=true&closeAsync=false
 ducc.broker.name=localhost
 ducc.broker.jmx.port=1100
+#
+# ActiveMQ credentials file used to authenticate DUCC daemons with the broker.
+# 
+ducc.broker.credentials.file=${DUCC_HOME}/activemq/conf/ducc-broker-credentials.properties
+
 # ActiveMQ Auto-management configuration
 #    ducc.broker.automanage    - if true, DUCC will start and stop the broker as needed.  
 #                                Otherwise, the installation is responsible for
@@ -58,27 +70,22 @@ ducc.broker.jmx.port=1100
 #    ducc.broker.home          - If automanaged, the location of the ActiveMQ installation
 #    ducc.broker.server.url.decoration - If automanaged, the broker URL decoration
 #
+
 ducc.broker.automanage = true
-ducc.broker.memory.options = -Xmx2G
-ducc.broker.configuration = conf/activemq-nojournal5.xml
+ducc.broker.memory.options = -Xmx1G
+# JERRY JERRY JERRY please verify the systemUsage part of activemq-ducc.xml
+ducc.broker.configuration = conf/activemq-ducc.xml
 ducc.broker.home = ${DUCC_HOME}/activemq
-ducc.broker.server.url.decoration = transport.soWriteTimeout=45000
-#
-# ActiveMQ credentials file used to authenticate DUCC daemons with the broker.
-# 
-ducc.broker.credentials.file=${DUCC_HOME}/activemq/conf/ducc-broker-credentials.properties
-
 
+# JERRY JERRY JERRY check this out
+ducc.broker.server.url.decoration = transport.soWriteTimeout=45000
 
 # To enable tracing of RM messages arriving in OR and NodeMetrics arriving in WS.
 #ducc.transport.trace = orchestrator:RmStateDuccEvent webserver:NodeMetricsUpdateDuccEvent
 
-ducc.cluster.name=Apache UIMA-DUCC
-
-#ducc.authentication.implementer=an.authentication.Manager
-
-# ducc.runmode=Test
+# ducc.authentication.implementer=an.authentication.Manager
 
+# Access UIMA message catalogs
 ducc.locale.language=en
 ducc.locale.country=us
 
@@ -88,24 +95,27 @@ ducc.locale.country=us
 # inventory and kill it. The value of the parameter below is expressed
 # in Megabytes.
 # Initially disabled by setting the threshold at 0.
+# JERRY JERRY JERRY Check units - MB or other
+#   Set this to 1GB equiv
 ducc.node.min.swap.threshold=0
 
 # administrative endpoint for all ducc components
 ducc.admin.endpoint=ducc.admin.channel
-# endpoint type choices[vm,queue,topic]
+# endpoint type choices[topic]
 ducc.admin.endpoint.type=topic
 
+
 # jmx port number for Ducc process. Each Ducc process will attempt
 # to use this port for its JMX Connector. If the port is not available
 # port+1 will be used until an available port is found.
 ducc.jmx.port=2099
 
-ducc.agent.jvm.args        = -Xmx100M
+ducc.agent.jvm.args        = -Xmx500M
 ducc.orchestrator.jvm.args = -Xmx1G
 ducc.rm.jvm.args           = -Xmx1G
 ducc.pm.jvm.args           = -Xmx1G
 ducc.sm.jvm.args           = -Xmx1G
-ducc.ws.jvm.args           = -Xmx2G
+ducc.ws.jvm.args           = -Xmx2G -Djava.util.Arrays.useLegacyMergeSort=true
 
 # ========== CLI Configuration block ==========
 # These environment values are included on job/service/AP submissions
@@ -121,15 +131,16 @@ ducc.signature.required=on
 
 # ========== Web Server Configuration block ==========
 ducc.ws.configuration.class=org.apache.uima.ducc.ws.config.WebServerConfiguration
-# Optionally configure the webserver node
-#ducc.ws.node = my.node.com
+# Optionally configure the webserver to run on a non-head node
+# ducc.ws.node = my.node.com
 # Optionally configure the webserver IP address
-#ducc.ws.ipaddress = 192.168.3.77
+# ducc.ws.ipaddress = <fill in an IP address>
 # Optionally configure the webserver IP port for HTTP requests, default is 42133
 ducc.ws.port = 42133
 # Optionally configure the webserver IP port for HTTPS requests, default is 42155
 ducc.ws.port.ssl = 42155
 # Optionally configure the webserver ssl pw for  HTTPS requests, default is quackquack
+# LOU LOU LOU change this for bluej
 ducc.ws.port.ssl.pw = quackquack
 # Optionally configure the webserver login session timeout, in minutes, default is 60
 ducc.ws.session.minutes = 60
@@ -146,17 +157,19 @@ ducc.ws.jsp.compilation.directory = /tmp
 # Specify login enabled (default is true)
 ducc.ws.login.enabled = true
 # <for Apache only>
-#uncomment this line and set pw for login to webserver, otherwise no pw required to login.
-#ducc.ws.authentication.pw = ducksoup
+# Uncomment this line and set a pw for login to the webserver; otherwise no pw is required to log in.
+# LOU LOU LOU check this out
+# ducc.ws.authentication.pw = ducksoup
 # </for Apache only>
 # ========== Web Server Configuration block ==========
 
 # ========== Job Driver Configuration block ==========
 ducc.jd.configuration.class=org.apache.uima.ducc.jd.config.JobDriverConfiguration
 ducc.jd.state.update.endpoint=ducc.jd.state
-# endpoint type choices[vm,queue,topic]
+# endpoint type choices[topic]
 ducc.jd.state.update.endpoint.type=topic
 ducc.jd.state.publish.rate=15000
+# LOU LOU LOU describe this
 ducc.jd.queue.prefix=ducc.jd.queue.
 # After dispatching a work item to UIMA-AS client for processing, the number of minutes that the Job Driver will
 # wait for two callbacks (queued and assigned) before considering the work item lost. The elapsed time for the 
@@ -167,13 +180,15 @@ ducc.jd.queue.timeout.minutes=5
 #   allocation from the DUCC Resource Manager for Job Driver use. The values given below are the defaults.
 ducc.jd.host.class=JobDriver
 ducc.jd.host.description=Job Driver
-ducc.jd.host.memory.size=8GB
+#LOU LOU LOU Make your default match this
+ducc.jd.host.memory.size=2GB
 ducc.jd.host.number.of.machines=1
 ducc.jd.host.user=System
 #   The next 3 values are related - each JD is assigned a piece of the Job Driver host memory which,
 #   along with the size of the CR's type system, limits the number of active work-item CASes in a job.
 #   To avoid swapping, the max heap size should also be restricted.
 # Memory size in MB allocated for each JD (default 300)
+# LOU LOU LOU BURN BURN BURN fix this jazz
 ducc.jd.share.quantum = 400
 # Max number of work-item CASes for each job (unlimited?)
 ducc.submit.threads.limit = 500
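A rough sketch of the arithmetic behind the two values above, assuming each JD consumes one ducc.jd.share.quantum slice of the JobDriver host allocation (an assumption based on the comments, not stated explicitly here):

    2GB host memory = 2048 MB; 2048 / 400 ~= 5 concurrent Job Drivers per JobDriver allocation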
@@ -181,52 +196,61 @@ ducc.submit.threads.limit = 500
 # less than the quantum size, but the user's request may override this.
 ducc.submit.driver.jvm.args=-Xmx300M
 
-# Extra JVM args to be appended to every JP
-#ducc.submit.process.jvm.args=-XX:+HeapDumpOnOutOfMemoryError
+# BURN BURN BURN Comment this
+# ducc.submit.process.jvm.args=-XX:+HeapDumpOnOutOfMemoryError
 # ========== Job Driver Configuration block ==========
 
-
 # ========== Service Manager Configuration block ========== 
+# Class used to implement the SM - internals only, don't change.
 ducc.sm.configuration.class=org.apache.uima.ducc.sm.config.ServiceManagerConfiguration
+# Name of the AMQ topic used to communicate with the SM - internals, don't change
 ducc.sm.state.update.endpoint=ducc.sm.state
+# AMQ protocol to use - internals, don't change
 ducc.sm.state.update.endpoint.type=topic
+# How often services are monitored to ensure they're responding, in milliseconds
 ducc.sm.meta.ping.rate = 60000
+# How many consecutive missed pings are required to mark a service unresponsive
 ducc.sm.meta.ping.stability =  10
-ducc.sm.meta.ping.timeout =  5000
+# How long to wait for a ping to respond, in milliseconds
+ducc.sm.meta.ping.timeout =  15000
+# Port the CLI uses to contact SM
 ducc.sm.http.port=19989
+# Node where SM resides 
 ducc.sm.http.node=${ducc.head}
+# How long to keep a reference-started service alive after the last referencing job exits
 ducc.sm.default.linger=300000
 # === END == Service Manager Configuration block ========== 
 
 # ========== Orchestrator Configuration block ==========
 ducc.orchestrator.configuration.class=org.apache.uima.ducc.orchestrator.config.OrchestratorConfiguration
 #ducc.orchestrator.checkpoint=off
+# LOU LOU LOU comment these
+#   LOU Consider pulling this one LOU
 ducc.orchestrator.checkpoint=on
 #------------------------------------------------------------------------------
-#	cold, // Recover: All is lost					JD host: employ new
-#	warm, // Recover: Reservations only (default) 	JD host: employ new
-#	hot , // Recover: Reservations and Jobs, 		JD host: employ current
+#   cold, // Recover: All is lost                               JD host: employ new
+#   warm, // Recover: Unmanaged Reservations only (default)     JD host: employ new
 ducc.orchestrator.start.type=warm
 #------------------------------------------------------------------------------
 ducc.orchestrator.state.update.endpoint=ducc.orchestrator.state
-# endpoint type choices[vm,queue,topic]
+# endpoint type choices[topic]
 ducc.orchestrator.state.update.endpoint.type=topic
-ducc.orchestrator.state.publish.rate=15000
+ducc.orchestrator.state.publish.rate=10000
 #------------------------------------------------------------------------------
 ducc.orchestrator.abbreviated.state.update.endpoint=ducc.orchestrator.abbreviated.state
-# endpoint type choices[vm,queue,topic]
+# endpoint type choices[topic]
 ducc.orchestrator.abbreviated.state.update.endpoint.type=topic
-ducc.orchestrator.abbreviated.state.publish.rate=15000
+ducc.orchestrator.abbreviated.state.publish.rate=10000
 #------------------------------------------------------------------------------
 ducc.orchestrator.maintenance.rate=60000
 #------------------------------------------------------------------------------
 # orchestrator's jetty http port
 ducc.orchestrator.http.port=19988
-# !!!!!!!! Node where OR is running. This is needed by CLI
-# to compose a URL to access OR jetty server
+# Node where OR is running. This is needed by the CLI to compose a URL to access the OR jetty server
 ducc.orchestrator.http.node=${ducc.head}
 #------------------------------------------------------------------------------
 #ducc.orchestrator.unmanaged.reservations.accepted=true
+# EAE EAE EAE comment this
 ducc.orchestrator.unmanaged.reservations.accepted=true
 # ========== Orchestrator Configuration block ==========
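A rough sketch of what the SM ping settings above imply, assuming the stability count is applied once per ping interval (an assumption; the exact SM bookkeeping is not shown here):

    ping rate 60000 ms x stability 10 ~= 10 minutes of missed pings before a service is marked unresponsive,
    with each individual ping abandoned after 15000 ms (15 seconds)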
 
@@ -237,22 +261,17 @@ ducc.rm.state.update.endpoint=ducc.rm.st
 # If enabled, RM tries to start as soon as it recovers state from an OR publication,
 # instead of waiting for init.stability for nodes to check in. 
 ducc.rm.state.update.endpoint.type=topic
-# This is the scheduling epoch in milliseconds. We publish at the end of each epoch.
-ducc.rm.fast.recovery = false
-# endpoint type choices[vm,queue,topic]
-ducc.rm.state.publish.rate = 60000
+# Frequency, in milliseconds, at which the RM publishes its state.  Increase to 60000 for larger
+# systems to avoid thrashing.
+ducc.rm.state.publish.rate = 10000
 # Base size of DRAM quantum in GB
 ducc.rm.share.quantum = 1
 # Implementation class for actual scheduling algorithm
 ducc.rm.scheduler = org.apache.uima.ducc.rm.scheduler.NodepoolScheduler
 # File defining the scheduler classes - found in DUCC_HOME/resources
 ducc.rm.class.definitions = ducc.classes
-# default number of questions if not specified in job
-ducc.rm.default.tasks = 10
-# default memory, in GB, if not specified in job
-ducc.rm.default.memory = 15
-#default number of threads, if not specified in job
-ducc.rm.default.threads = 4
+# default memory, in GB, if not specified 
+ducc.rm.default.memory = 4
 # number of node metrics heartbeats to wait for before rm starts up
 ducc.rm.init.stability = 2
 # number of missed node metrics updates to consider node down
@@ -260,7 +279,7 @@ ducc.rm.node.stability = 5
 # which policy to use when shrinking/evicting shares - alternatively, SHRINK_BY_MACHINE
 ducc.rm.eviction.policy = SHRINK_BY_INVESTMENT
 # max nodes to initially allocate until init is complete
-ducc.rm.initialization.cap = 2
+ducc.rm.initialization.cap = 1
 # When true, jobs expand not all at once after init, but a bit slower, doubling each epoch
 # until max fair-share is set.  If false, jobs increase immediately to their fair share,
 # at the cost of mass evictions.
@@ -269,23 +288,19 @@ ducc.rm.expand.by.doubling = true
 ducc.rm.prediction = true
 # Add this fudge factor (milliseconds) to the expansion target when using prediction
 ducc.rm.prediction.fudge = 120000
-# If enabled, RM insures every job has a "foot in the door", some minimum number of processes, and if not
-# attempts to find space for under-provisioned jobs by taking shares from "rich" jobs.  Note this may
-# not always be possible if the cluster itseslf is under-provisioned for the load.
-ducc.rm.defragmentation = true
 # What is the minimum number of processes for a job before we do defrag? If a job has fewer than this amount,
 # RM may attempt defragmentation to bring the processes up to this value.
-ducc.rm.fragmentation.threshold = 2
+ducc.rm.fragmentation.threshold = 8
 
 # Agent Configuration block
 ducc.agent.configuration.class=org.apache.uima.ducc.agent.config.AgentConfiguration
 ducc.agent.request.endpoint=ducc.agent
-# endpoint type choices[vm,queue,topic]
+# endpoint type choices[topic]
 ducc.agent.request.endpoint.type=topic
 
 # Endpoint where uima as service wrapper reports status
 ducc.agent.managed.process.state.update.endpoint=ducc.managed.process.state.update
-# endpoint type choices[vm,queue,topic,socket]
+# endpoint type choices[socket]
 ducc.agent.managed.process.state.update.endpoint.type=socket
 # endpoint parameters which are transport specific. For socket
 # transport params are:
@@ -293,9 +308,9 @@ ducc.agent.managed.process.state.update.
 #  - sync=false - use socket transport for one-way messaging (no replies needed)
 ducc.agent.managed.process.state.update.endpoint.params=transferExchange=true&sync=false
 
-ducc.agent.node.metrics.publish.rate=60000
+ducc.agent.node.metrics.publish.rate=30000
 ducc.agent.node.metrics.endpoint=ducc.node.metrics
-# endpoint type choices[vm,queue,topic]
+# endpoint type choices[topic]
 ducc.agent.node.metrics.endpoint.type=topic
 # Rate at which an agent publishes its process inventory
 # The agent will only publish at this rate if there are
@@ -304,17 +319,18 @@ ducc.agent.node.inventory.publish.rate=1
 # If no changes in inventory, publish every 30 intervals defined by ducc.agent.node.inventory.publish.rate
 ducc.agent.node.inventory.publish.rate.skip=30
 ducc.agent.node.inventory.endpoint=ducc.node.inventory
-# endpoint type choices[vm,queue,topic]
+# endpoint type choices[topic]
 ducc.agent.node.inventory.endpoint.type=topic
+# JERRY JERRY JERRY Check this out
 ducc.agent.launcher.thread.pool.size=10
 # enable/disable use of ducc_ling
+# JERRY JERRY JERRY what is the default
 ducc.agent.launcher.use.ducc_spawn=true
 # specify location of ducc_ling in the filesystem
 ducc.agent.launcher.ducc_spawn_path=${DUCC_HOME}/admin/ducc_ling
-#ducc.agent.simulation.machine.config = resources/hw4.config
 # Max amount of time (in millis) agent allows the process to stop before issuing kill -9
 ducc.agent.launcher.process.stop.timeout=60000
-# Max tim in millis allowed for AE initialization. Default 2 hours 7200000.                                                                 
+# Max time in millis allowed for AE initialization. Default 2 hours 7200000.                                                                 
 ducc.agent.launcher.process.init.timeout=7200000
 # exclude the following user ids while detecting rogue processes
 ducc.agent.rogue.process.user.exclusion.filter=root,postfix,ntp,nobody,daemon,100
@@ -334,7 +350,7 @@ ducc.agent.launcher.cgroups.enable=true
 ducc.agent.launcher.cgroups.utils.dir=/usr/bin,/bin
 # exclusion file to enable node based exclusion for cgroups and aps
 # syntax:  <node>=cgroups,ap
-# the above will exclude node from using cgroups and prevent deployment of APs
+# the above will exclude the node from using cgroups and/or prevent deployment of APs
 ducc.agent.exclusion.file=${DUCC_HOME}/resources/exclusion.nodes
 #
 # Uncomment the following line to support auto reaping of rogue processes by Ducc's Agent
@@ -343,18 +359,18 @@ ducc.agent.exclusion.file=${DUCC_HOME}/r
 # Process Manager Configuration block
 ducc.pm.configuration.class=org.apache.uima.ducc.pm.config.ProcessManagerConfiguration
 ducc.pm.request.endpoint=ducc.pm
-# endpoint type choices[vm,queue,topic]
+# endpoint type choices[queue]
 ducc.pm.request.endpoint.type=queue
 #------------------------------------------------------------------------------
 ducc.pm.state.update.endpoint=ducc.pm.state
-# endpoint type choices[vm,queue,topic]
+# endpoint type choices[topic]
 ducc.pm.state.update.endpoint.type=topic
-ducc.pm.state.publish.rate=25000
+ducc.pm.state.publish.rate=15000
 
 # UIMA AS Managed Process Configuration block
 ducc.uima-as.configuration.class=org.apache.uima.ducc.agent.deploy.uima.UimaAsServiceConfiguration
 ducc.uima-as.endpoint=ducc.job.managed.service
-# endpoint type choices[vm,queue,topic,socket]
+# endpoint type choices[socket]
 ducc.uima-as.endpoint.type=socket
 # endpoint parameters which are transport specific. For socket
 # transport params are:
@@ -362,10 +378,12 @@ ducc.uima-as.endpoint.type=socket
 #  - sync=false - use socket transport for one-way messaging (no replies needed)
 ducc.uima-as.endpoint.params=transferExchange=true&sync=false
 
-
+# JERRY JERRY JERRY Document these
 ducc.uima-as.saxon.jar.path=file:${DUCC_HOME}/lib/saxon/saxon8.jar
 ducc.uima-as.dd2spring.xsl.path=${DUCC_HOME}/resources/dd2spring.xsl
-# custom Flow Controller to use for Ducc Job Processes
+
+# Custom Flow Controller to use for Ducc Job Processes
+# LOU LOU LOU Rename this to ducc.flow-controller.specifier 
 ducc.uima-as.flow-controller.specifier=org.apache.uima.ducc.common.uima.DuccJobProcessFC
 
 

Modified: uima/sandbox/uima-ducc/trunk/uima-ducc-duccdocs/src/site/tex/duccbook/part4/admin/ducc-properties.tex
URL: http://svn.apache.org/viewvc/uima/sandbox/uima-ducc/trunk/uima-ducc-duccdocs/src/site/tex/duccbook/part4/admin/ducc-properties.tex?rev=1538703&r1=1538702&r2=1538703&view=diff
==============================================================================
--- uima/sandbox/uima-ducc/trunk/uima-ducc-duccdocs/src/site/tex/duccbook/part4/admin/ducc-properties.tex (original)
+++ uima/sandbox/uima-ducc/trunk/uima-ducc-duccdocs/src/site/tex/duccbook/part4/admin/ducc-properties.tex Mon Nov  4 18:26:01 2013
@@ -668,11 +668,10 @@
         \end{description}
         
       \item[ducc.sm.instance.failure.max] \hfill \\
-        This is the maximum number of consecutive failures of a service instance permitted before DUCC
-        stops creating new instances.  In the case of submitted services, the instance is no longer
-        restarted and is cleaned up.  In the case of registered services, no more instances are started
-        and the {\em autostart} flag is turned off.  The next manual {\em start} command resets the
-        count to 0.
+        This is the maximum number of consecutive failures of service instance initialization 
+        permitted before DUCC stops creating new instances.  When this cap is hit, the SM
+        will disable autostart for the service.  It may be overridden by the service
+        registration's {\em instance\_failures\_limit} parameter.
         \begin{description}
           \item[Default Value] 5
           \item[Type] Tuning 
@@ -834,20 +833,15 @@
         \item[ducc.rm.state.publish.rate] \hfill \\
           This is the rate, in milliseconds, at which the Resource Manager publishes its state to the 
           Orchestrator. 
-          \begin{description}
-            \item[Default Value] 60000 
-            \item[Type] Tuning
-          \end{description} 
-          
-        \item[ducc.rm.fast.recovery] \hfill \\
-          If enabled, RM tries to start as soon as it recoveres state from an OR publication,
-          instread of waiting for {\em init.stability} for nodes to check in. 
 
+          This can directly affect user response.  For small clusters a value of 10000 is often acceptable
+          and results in faster response.  On larger clusters the value should be raised to around
+          60000 (60 seconds) to avoid thrashing.
           \begin{description}
-            \item[Default Value] false
+            \item[Default Value] 10000 
             \item[Type] Tuning
           \end{description} 
-          
+                    
         \item[ducc.rm.share.quantum] \hfill \\
           The share quantum is the smallest amount of RAM that is schedulable for jobs, in GB. 
           Jobs are scheduled based entirely on their memory requirements. Memory is allocated in 
@@ -875,42 +869,6 @@
             \item[Type] Tuning 
           \end{description}
           
-        \item[ducc.rm.default.tasks] \hfill \\
-          In order to calculate the number of processes to allocate to a job, the scheduler must know 
-          how many tasks or work items the job will execute. If the job does not declare that number, 
-          default.tasks is used. 
-          \begin{description}
-            \item[Default Value] 10 
-            \item[Type] Tuning 
-          \end{description}
-          
-        \item[ducc.rm.default.memory] \hfill \\
-          If a job does not declare the amount of memory each process requires, the scheduler uses 
-          default.memory for scheduling. The unit is GB. 
-
-          Note that the Agents enforce the declared memory, so if a process understates its 
-          requirements it will generally be killed. 
-          \begin{description}
-            \item[Default Value] 15 
-            \item[Type] Tuning 
-          \end{description}
-          
-        \item[ducc.rm.default.threads] \hfill \\
-          Each job process will be dispatched with some number of threads such that DUCC will 
-          dispatch work items to these threads. The scheduler uses this number to calculate the 
-          number of processes that must be allocated. 
-
-          The maximum number of processes a job requites is determined by the formula:           
-          $num\_processes = ciel(num\_work\_items / num\_threads)$ 
-          
-          Thus, a job that declares 100 work items and 4 threads is assigned a maximum of          
-          $ciel(100/4) = 25 processes$
-          
-          \begin{description}
-            \item[Default Value] 4 
-            \item[Type] Tuning 
-          \end{description}
-          
         \item[ducc.rm.node.stability] \hfill \\
           The RM receives regular "heartbeats" from the DUCC agents in order to know what 
           nodes are available for scheduling. The node.stability property configures the number of 
@@ -984,7 +942,7 @@
           \hyperref[sec:ducc.classes]{ducc.classes}.
 
           \begin{description}
-            \item[Default Value] 2             
+            \item[Default Value] 1
             \item[Type] Tuning 
           \end{description}
           
@@ -1038,23 +996,7 @@
           \item[Default Value] 120000
           \item[Type] Tuning 
           \end{description}
-          
-        \item[ducc.rm.defragmentation] \hfill \\
-          In certain configurations and under certain loads the resource allocations can get
-          ``fragmented'' so that sufficient resources exists for new work, but only piecemeal, and
-          thus they cannot be allocated.  The Resource Manager will perform a limited defragmentation
-          by searching for ``rich'' jobs (jobs with lots of resources) and evicting one or two
-          procsses in order to make spece for new jobs.  Sufficient space is cleared only to
-          allow as much new work as possible to ``get a foot in the door'' and get an initial
-          resource allocation.
-
-          Local installations may override this behaviour and prevent defragmentation altogether
-          with this property.
-          \begin{description}
-          \item[Default Value] true
-          \item[Type] Tuning 
-          \end{description}
-          
+                    
 
         \item[ducc.rm.defragmentation.threshold] \hfill \\
          If {\em ducc.rm.defragmentation} is enabled, limited defragmentation of resources is