You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@uima.apache.org by ch...@apache.org on 2014/11/18 20:57:03 UTC
svn commit: r1640415 - in /uima/sandbox/uima-ducc/trunk:
uima-ducc-common/src/main/java/org/apache/uima/ducc/common/
uima-ducc-common/src/test/java/org/apache/uima/ducc/common/test/
uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/
Author: challngr
Date: Tue Nov 18 19:57:03 2014
New Revision: 1640415
URL: http://svn.apache.org/r1640415
Log:
UIMA-4065 Complete nested nodepools first cut.
Modified:
uima/sandbox/uima-ducc/trunk/uima-ducc-common/src/main/java/org/apache/uima/ducc/common/NodeConfiguration.java
uima/sandbox/uima-ducc/trunk/uima-ducc-common/src/test/java/org/apache/uima/ducc/common/test/NodeConfigurationTest.java
uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/IEntity.java
uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/NodePool.java
uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/NodepoolScheduler.java
uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/ResourceClass.java
uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/RmJob.java
uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/Scheduler.java
uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/User.java
Modified: uima/sandbox/uima-ducc/trunk/uima-ducc-common/src/main/java/org/apache/uima/ducc/common/NodeConfiguration.java
URL: http://svn.apache.org/viewvc/uima/sandbox/uima-ducc/trunk/uima-ducc-common/src/main/java/org/apache/uima/ducc/common/NodeConfiguration.java?rev=1640415&r1=1640414&r2=1640415&view=diff
==============================================================================
--- uima/sandbox/uima-ducc/trunk/uima-ducc-common/src/main/java/org/apache/uima/ducc/common/NodeConfiguration.java (original)
+++ uima/sandbox/uima-ducc/trunk/uima-ducc-common/src/main/java/org/apache/uima/ducc/common/NodeConfiguration.java Tue Nov 18 19:57:03 2014
@@ -448,6 +448,7 @@ public class NodeConfiguration
return;
}
+ ArrayList<DuccProperties> independentNodepools = new ArrayList<DuccProperties>();
DuccProperties parseNodepool(String name, String parent)
throws IOException,
IllegalConfigurationException
@@ -491,6 +492,10 @@ public class NodeConfiguration
// duplicates will be checked later
ret.put("nodefile", ducc_nodes);
}
+ if ( ! ret.containsKey("parent") ) {
+ // System.out.println("Add top level nodepool: " + name);
+ independentNodepools.add(ret);
+ }
return ret;
}
@@ -580,7 +585,6 @@ public class NodeConfiguration
}
Map<String, DuccProperties> clmap = new HashMap<String, DuccProperties>();
- ArrayList<DuccProperties> independentNodepools = new ArrayList<DuccProperties>();
ArrayList<String> independentClasses = new ArrayList<String>();
/**
@@ -738,13 +742,8 @@ public class NodeConfiguration
for ( DuccProperties p : nodepools.values() ) {
String parent = p.getStringProperty("parent", null);
String name = p.getStringProperty("name");
- if ( name.equals("npA") ) {
- int stop_here =1 ;
- stop_here++;
- }
- if ( parent == null ) {
- independentNodepools.add(p);
- } else {
+
+ if ( parent != null ) {
DuccProperties par_pool = nodepools.get(parent);
if ( par_pool == null ) {
throw new IllegalConfigurationException("Nodepool " + name+ " parent pool " + parent + " cannot be found.");
@@ -877,24 +876,45 @@ public class NodeConfiguration
* If nothing throws then allNodePools has a map of all node pool files to read and the nodepool props file to attach them to
* @param p
*/
- void checkForDuplicatePoolFiles(DuccProperties p)
+ void checkForDuplicatePoolFiles()
throws IllegalConfigurationException
{
- String npfile = p.getProperty("nodefile");
- if ( poolsByNodefile.containsKey(npfile) ) {
- throw new IllegalConfigurationException("Duplicate nodepool file reference to " + npfile + " from " + p.getProperty("name") + " not allowed "
- + " first reference was from " + poolsByNodefile.get(npfile));
-
+ for ( DuccProperties dp : nodepools.values() ) {
+ String npfile = dp.getProperty("nodefile");
+ if ( poolsByNodefile.containsKey(npfile) ) {
+ throw new IllegalConfigurationException("Duplicate nodepool file reference to " + npfile + " from " + dp.getProperty("name") + " not allowed "
+ + " first reference was from " + poolsByNodefile.get(npfile));
+
+ }
+ if ( npfile != null ) { // pools are not required to have nodes associated, e.g. --default--
+ poolsByNodefile.put(npfile, dp);
+ }
}
- if ( npfile != null ) { // pools are not required to have nodes associated, e.g. --default--
- poolsByNodefile.put(npfile, p);
+ }
+
+ void checkForMissingNodeFile()
+ throws IllegalConfigurationException
+ {
+ List<String> missing = new ArrayList<String>();
+ for ( DuccProperties dp : nodepools.values() ) {
+ if ( ! dp.containsKey("nodefile") ) {
+ missing.add(dp.getProperty("name")); // remember, for possible exception below
+
+ // No nodefile, assign it ducc_nodes
+ // it will crash in a while if this is a conflict
+ if ( ducc_nodes != null ) {
+ dp.setProperty("nodefile", ducc_nodes);
+ }
+ }
}
- @SuppressWarnings("unchecked")
- List<DuccProperties> children = (List<DuccProperties>) p.get("children");
- if ( children != null ) {
- for ( DuccProperties dp : children ) {
- checkForDuplicatePoolFiles(dp);
+
+ if ( missing.size() > 1 ) {
+ StringBuffer sb = new StringBuffer("Multiple nodepools with no associated node file, not allowed: ");
+ for (String s : missing) {
+ sb.append(" ");
+ sb.append(s);
}
+ throw new IllegalConfigurationException(sb.toString());
}
}
@@ -944,48 +964,19 @@ public class NodeConfiguration
throws IllegalConfigurationException
{
- for (DuccProperties p : independentNodepools) { // walk the tree and read the node files
- checkForDuplicatePoolFiles(p);
- }
-
+ checkForMissingNodeFile(); // only one nodepool may be missing its node file
+ // also fills in default nodefile if needed
+ checkForDuplicatePoolFiles(); // only one pool may reference any single file
+
// if we get here without crash the node pool files are not inconsistent
for ( String k : poolsByNodefile.keySet() ) {
readNodepoolNodes(k, (DuccProperties) poolsByNodefile.get(k), domain);
}
+ // TODO: Test above procedures
+ // Assign ducc.nodes to the one allowable top level np with no pool file
-
- // depth-first search, required to deal correctly with nesting
- // @SuppressWarnings("unchecked")
-// List<DuccProperties> children = (List<DuccProperties>) p.get("children");
-// if ( children != null ) {
-// for ( DuccProperties dp : children ) {
-// readNpNodes(dp, domain);
-// }
-// }
-
-// String npfile = p.getProperty("nodefile");
-// if ( npfile != null ) {
-// p.put("nodes", readNodepoolFile(npfile, p.getProperty("name"), domain));
-// }
}
-// void readNodefile(DuccProperties p, String domain)
-// throws IllegalConfigurationException
-// {
-// String npfile = p.getProperty("nodefile");
-// if ( npfile != null ) {
-// p.put("nodes", readNodepoolFile(npfile, domain, false));
-// }
-
-// @SuppressWarnings("unchecked")
-// List<DuccProperties> children = (List<DuccProperties>) p.get("children");
-// if ( children != null ) {
-// for ( DuccProperties pc : children ) {
-// readNodefile(pc, domain);
-// }
-// }
-// }
-
/**
* Read the complete node configuration as defined in. Intended for use from command line, not
* usually elsewhere.
@@ -1090,6 +1081,7 @@ public class NodeConfiguration
sb.append(leader);
for ( String s : nodes.keySet() ) {
+ if ( s.indexOf(".") >= 0 ) continue; // skip domains to make it more readable
if ( cur_width + s.length() + 1 > MAX_WIDTH) {
sb.append("\n");
sb.append(leader);
Modified: uima/sandbox/uima-ducc/trunk/uima-ducc-common/src/test/java/org/apache/uima/ducc/common/test/NodeConfigurationTest.java
URL: http://svn.apache.org/viewvc/uima/sandbox/uima-ducc/trunk/uima-ducc-common/src/test/java/org/apache/uima/ducc/common/test/NodeConfigurationTest.java?rev=1640415&r1=1640414&r2=1640415&view=diff
==============================================================================
--- uima/sandbox/uima-ducc/trunk/uima-ducc-common/src/test/java/org/apache/uima/ducc/common/test/NodeConfigurationTest.java (original)
+++ uima/sandbox/uima-ducc/trunk/uima-ducc-common/src/test/java/org/apache/uima/ducc/common/test/NodeConfigurationTest.java Tue Nov 18 19:57:03 2014
@@ -32,14 +32,14 @@ public class NodeConfigurationTest
// test name, short description, expected rc
String[] configurations = {
- "test1", "Basic configuration test" , "0",
- "test2", "Test Illegal Nodepool confuration" , "0",
- "test3", "Circular references" , "1",
- "test4", "Duplicate Nodepools" , "1",
- "test5", "Missing parent" , "1",
- "test6", "Toplevel NP, parent is not --default--", "0",
- "test7", "Class references non-existent NP" , "1",
- "test8", "Two NPs with no node file specified" , "1",
+ "test1", "Basic configuration test" , "0", // pass
+ "test2", "Multiple children" , "0", // pass
+ "test3", "Circular references" , "1", // fail
+ "test4", "Duplicate Nodepools" , "1", // fail
+ "test5", "Missing parent" , "1", // fail
+ "test6", "Toplevel NP, parent is not --default--" , "0", // pass
+ "test7", "Class references non-existent NP" , "1", // fail
+ "test8", "Two NPs with no node file specified" , "1", // fail
};
List<String> successes = new ArrayList<String>();
@@ -92,10 +92,6 @@ public class NodeConfigurationTest
int rc = 0;
System.out.println("-------------------------------------- Run Test " + testid + " -----------------------------------------------------");
- if ( testid.equals("test3")) {
- int stop_here = 0;
- stop_here++;
- }
if ( (rc = runConfiguration(testid)) == expected) {
successes.add(testid + ": " + desc + "; expected rc=" + expected + " actual rc=" + rc);
} else {
Modified: uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/IEntity.java
URL: http://svn.apache.org/viewvc/uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/IEntity.java?rev=1640415&r1=1640414&r2=1640415&view=diff
==============================================================================
--- uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/IEntity.java (original)
+++ uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/IEntity.java Tue Nov 18 19:57:03 2014
@@ -54,6 +54,8 @@ interface IEntity
void setGivenByOrder(int[] gbo); // the scheduler uses this to set the allocation after each
// scheduling round
+ boolean canUseBonus(int order); // can I use one more share of this size
+
int calculateCap(int order, int basis); // The entity must work out any caps that may restrict the counts
long getTimestamp(); // for tiebreaks
Modified: uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/NodePool.java
URL: http://svn.apache.org/viewvc/uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/NodePool.java?rev=1640415&r1=1640414&r2=1640415&view=diff
==============================================================================
--- uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/NodePool.java (original)
+++ uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/NodePool.java Tue Nov 18 19:57:03 2014
@@ -61,6 +61,7 @@ class NodePool
HashMap<Node, Machine> preemptables = new HashMap<Node, Machine>(); // candidates for preemption for reservations
int total_shares = 0;
+ Map<ResourceClass, ResourceClass> allClasses = new HashMap<ResourceClass, ResourceClass>(); // all the classes directly serviced by me
//
// There are "theoretical" shares based on actual capacities of
// the machines. They are used for the "how much" part of the
@@ -116,6 +117,11 @@ class NodePool
}
+ void addResourceClass(ResourceClass cl)
+ { // UIMA-4065
+ allClasses.put(cl, cl);
+ }
+
NodePool getParent()
{
return this.parent;
@@ -145,6 +151,31 @@ class NodePool
return count;
}
+ //
+ // Note, this will only be accurate AFTER reset, but before actual allocation of
+ // shares begins. After allocation, and before the next reset this will return junk.
+ //
+ // It is intended to be called from ResourceClass.canUseBonus()
+ // UIMA-4065
+ int countAssignableShares(int order)
+ {
+ String methodName = "countAssignableShares";
+ // first calculate my contribution
+ int ret = nSharesByOrder[order];
+ for (ResourceClass rc : allClasses.values() ) {
+ int[] gbo = rc.getGivenByOrder();
+ if ( gbo != null ) {
+ ret -= gbo[order];
+ }
+ }
+ logger.info(methodName, null, "Shares available for", id, ":", ret);
+ // now accumulate the kid's contribution
+ for ( NodePool np : children.values() ) {
+ ret += np.countAssignableShares(order);
+ }
+ return ret;
+ }
+
void removeShare(Share s)
{
allShares.remove(s);
@@ -281,7 +312,7 @@ class NodePool
}
/**
- * Returns N-Shares
+ * Returns N-Shares, recursing down
*/
int countNSharesByOrder(int o)
{
@@ -293,6 +324,14 @@ class NodePool
}
/**
+ * Returns N-Shares, local
+ */
+ int countLocalNSharesByOrder(int o)
+ {
+ return nSharesByOrder[o];
+ }
+
+ /**
* Returns number of N-shares that are still busy but pending eviction.
*/
int countPendingSharesByOrder(int o)
@@ -653,7 +692,15 @@ class NodePool
void resetPreemptables()
{
+ String methodName = "resetPreemptables";
+ logger.info(methodName, null, "Resetting preemptables in nodepool", id);
+
+ // UIMA-4065 Need to do this recursively
preemptables.clear();
+ for ( NodePool np : children.values() ) {
+ np.resetPreemptables();
+ }
+
}
@@ -667,7 +714,8 @@ class NodePool
}
for ( NodePool np : children.values() ) {
- if (np.getSubpool(name) != null) return np;
+ NodePool ret = np.getSubpool(name);
+ if (ret != null) return ret;
}
return null;
}
@@ -962,7 +1010,9 @@ class NodePool
*/
int countFreeableMachines(IRmJob j, boolean enforce)
{
+ String methodName = "countFreeableMachines";
+ logger.info(methodName, j.getId(), "Enter nodepool", id, "with enforce", enforce, "preemptables.size() =", preemptables.size());
int needed = j.countInstances();
int order = j.getShareOrder();
@@ -976,29 +1026,44 @@ class NodePool
} else {
machs.addAll(allMachines.values());
}
+ StringBuffer sb = new StringBuffer("Machines to search:");
+ for ( Machine m : machs ) {
+ sb.append(" ");
+ sb.append(m.getId());
+ }
+ logger.info(methodName, j.getId(), sb.toString());
+
Collections.sort(machs, new MachineByAscendingOrderSorter());
int given = 0; // total to give, free or freeable
Iterator<Machine> iter = machs.iterator();
ArrayList<Machine> pables = new ArrayList<Machine>();
+
while ( iter.hasNext() && (given < needed) ) {
Machine m = iter.next();
+ logger.info(methodName, j.getId(), "Examining", m.getId());
if ( preemptables.containsKey(m.key()) ) { // already counted, don't count twice
+ logger.info(methodName, j.getId(), "Bypass because machine", m.getId(), "already counted.");
continue;
}
if ( m.getShareOrder() < order ) {
+ logger.info(methodName, j.getId(), "Bypass because machine", m.getId(), "order", m.getShareOrder(), "less than required", order);
continue;
}
if ( m.isFree() ) {
+ logger.info(methodName, j.getId(), "Giving", m.getId(), "because it is free");
given++;
continue;
}
if ( m.isFreeable() ) {
+ logger.info(methodName, j.getId(), "Giving", m.getId(), "because it is freeable");
given++;
pables.add(m);
+ } else {
+ logger.info(methodName, j.getId(), "Bypass because machine", m.getId(), "is not freeable");
}
}
@@ -1014,6 +1079,7 @@ class NodePool
}
for ( Machine m : pables ) {
+ logger.info(methodName, j.getId(), "Setting up", m.getId(), "to clear for reservation");
preemptables.put(m.key(), m);
nMachinesByOrder[m.getShareOrder()]--;
}
@@ -1115,13 +1181,17 @@ class NodePool
j.shrinkByOne(s);
nPendingByOrder[order]++;
} else {
- // This is 99.44% caused by fragmentation. We could force the issue here, but instead will
- // defer to the defrag code which will try to find better candidates for eviction.
- logger.warn(methodName, null, "Found non-preemptable share on machine that should be clearable (possible fragmentation):", s);
+ // if the share was evicted or purged we don't care. otherwise, it SHOULD be evictable so we
+ // log its state to try to figure out why it didn't evict
+ if ( ! (s.isEvicted() || s.isPurged() ) ) {
+ IRmJob j = s.getJob();
+ logger.warn(methodName, j.getId(), "Found non-preemptable share", s.getId(), "fixed:", s.isFixed(), "j.NShares", j.countNShares(), "j.NSharesGiven", j.countNSharesGiven());
+ }
}
}
given++;
iter.remove();
+ logger.info(methodName, null, "Remove", m.getId(), "from preemptables list");
}
return given;
@@ -1323,7 +1393,9 @@ class NodePool
logger.debug(methodName, null, getId(), "NeededByOrder", type, "on entrance eviction", Arrays.toString(neededByOrder));
for ( NodePool np : getChildrenDescending() ) {
+ logger.info(methodName, null, "Recurse to", np.getId(), "from", getId(), "force:", force);
np.doEvictionsByMachine(neededByOrder, force);
+ logger.info(methodName, null, "Recurse from", np.getId(), "proceed with logic for", getId(), "force", force);
}
//
@@ -1331,7 +1403,12 @@ class NodePool
// number of shares that already are free
//
for ( int nbo = maxorder; nbo > 0; nbo-- ) {
- int needed = Math.max(0, neededByOrder[nbo] - countNSharesByOrder(nbo) - countPendingSharesByOrder(nbo));
+ // UIMA-4065 - I think that subtracting countPendingSharesByOrder() amounts to double counting because it
+ // will reflect any evictions from the depth-first recursion. Instead, we would subtract only
+ // our own shares.
+ //
+ // int needed = Math.max(0, neededByOrder[nbo] - countNSharesByOrder(nbo) - countPendingSharesByOrder(nbo));
+ int needed = Math.max(0, neededByOrder[nbo] - countNSharesByOrder(nbo) - nPendingByOrder[nbo]);
neededByOrder[nbo] = needed;
neededByOrder[0] += needed;
}
Modified: uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/NodepoolScheduler.java
URL: http://svn.apache.org/viewvc/uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/NodepoolScheduler.java?rev=1640415&r1=1640414&r2=1640415&view=diff
==============================================================================
--- uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/NodepoolScheduler.java (original)
+++ uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/NodepoolScheduler.java Tue Nov 18 19:57:03 2014
@@ -256,6 +256,7 @@ public class NodepoolScheduler
/**
* Return the nodepool for a class, or the global nodepool if none is explicitly associated with the class.
+ * @deprecated Remove as soon as it is verified correct.
*/
NodePool getNodepool(ResourceClass rc)
{
@@ -527,20 +528,36 @@ public class NodepoolScheduler
while ( (nshares[1] > 0) && (given)) {
given = false;
for ( IEntity e : entities ) {
- int[] wbo = e.getWantedByOrder(); // nshares
+ //int[] wbo = e.getWantedByOrder(); // nshares
int[] gbo = e.getGivenByOrder(); // nshares
for ( int o = maxorder; o > 0; o-- ) {
- int canuse = wbo[o] - gbo[o];
- while ( (canuse > 0 ) && (vshares[o] > 0) ) {
+ // the entity access its wbo, gbo, and entity-specific knowledge to decide whether
+ // the bonus is usable. if so, we give out exactly one in an attempt to spread the wealth.
+ //
+ // An example of where you can't use, is a class over a nodepool whose resources
+ // are exhausted, in which case we'd loop and see if anybody else was game.
+ // UIMA-4065
+ while ( (e.canUseBonus(o) ) && (vshares[o] > 0) ) {
gbo[o]++;
- //bonus++;
- canuse = wbo[o] - gbo[o];
removeSharesByOrder(vshares, nshares, 1, o);
given = true;
break;
}
}
+
+ // UIMA-4065 - old 'bonus' code -- keep for a while as quick reference
+ // for ( int o = maxorder; o > 0; o-- ) {
+ // int canuse = wbo[o] - gbo[o];
+ // while ( (canuse > 0 ) && (vshares[o] > 0) ) {
+ // gbo[o]++;
+ // //bonus++;
+ // canuse = wbo[o] - gbo[o];
+ // removeSharesByOrder(vshares, nshares, 1, o);
+ // given = true;
+ // break;
+ // }
+ // }
}
}
@@ -570,11 +587,23 @@ public class NodepoolScheduler
* Count out shares for only the jobs in the ResouceClasses here, and only from the given
* nodepool.
*/
- protected void countClassShares(NodePool np, ArrayList<ResourceClass> rcs)
+ protected void countClassShares(NodePool np, List<ResourceClass> rcs)
{
String methodName = "countClassShares";
- logger.debug(methodName, null, "Counting for nodepool", np.getId());
+ if ( logger.isDebug() ) {
+ StringBuffer sb = new StringBuffer("Counting for nodepool ");
+ sb.append(np.getId());
+ sb.append(" - classes -");
+ for ( ResourceClass rc : rcs ) {
+ sb.append(" ");
+ sb.append(rc.getName());
+ }
+
+ logger.debug(methodName, null, sb.toString());
+ }
+ // if ( true ) return;
+
// pull the counts. these don't get updated by the counting routines per-se. after doing counting the np's are
// expected to do the 'what-of' calculations that do actual allocation and which update the counts
int[] vshares = np.cloneVMachinesByOrder();
@@ -685,48 +714,106 @@ public class NodepoolScheduler
}
/**
- * Depth-first traversal of the nodepool. Once you get to a leaf, count the shares. This sets an
- * upper-bound on the number of shares a class can have. As you wind back up the tree the counts may
- * be reduced because of competition from jobs in the parent node. By the time we're dont we should have
- * accounted for all jobs and all usable resources.
+ * Find the set of classes from the presented set of eligible classes that have jobs in
+ * the given nodepool. UIMA-4065
+ *
+ * @param np Relevant nodepool
+ * @param eligible (Possibly restricted) set of classes that **might** have jobs in the nodepool
+ * @return List of classes with jobs in the nodepool
+ */
+ private List<ResourceClass> gatherRcs(NodePool np, List<ResourceClass> eligible)
+ {
+ ArrayList<ResourceClass> ret = new ArrayList<ResourceClass>();
+ String npn = np.getId();
+ for ( ResourceClass rc : eligible ) {
+ String rcnpn = rc.getNodepoolName();
+ if ( rcnpn == null ) continue;
+ if ( rc.countJobs() == 0 ) continue;
+ if ( rcnpn.equals(npn) ) ret.add(rc);
+ }
+ return ret;
+ }
+
+ /**
+ * Do a depth-first traversal of the nodepool calculating counts for all the jobs in the nodepool and its children.
*
- * Note how this works:
- * Consider a configuration with two nodepools plus global, A, B, and G. Suppose nodepools A and B have
- * 30 shares each and G only has 10 shares. G can apportion over it's 10, plus the 60 from A and B. So
- * after apporioning over A and B we need to do fair-share over G+A+B to insure that jobs submitted
- * to G are not "cheated" - recall that jobs in this set of classes have the same weight and priority,
- * and thus the same "right" to all the shares. However, allocating a job from class A over the
- * full set of 10+30 shares could over-allocate it. So the cap calculations must be sure never to
- * increase the already-given shares for subpools.
+ * Starting at each leaf NP, gather all the classes that have jobs in the NP, and if there are any, get class counts
+ * for the classes. Pass the classes up to the caller who can do a more global recount if necessary. If there are
+ * no jobs over the nodepool then bypass the count for it, of course. This is a rewrite of the original which did
+ * not properly handle the recursion past 1 level (root + 1). It uses gatherRcs() as a helper to find relevant classes.
+ * jrc 2014-11-05. UIMA-4065
*
- * Therefore we traverse the FULL SET of classes on every recursion. When calculating caps from
- * apportion_shares the resource classes will have to account for multiple traversals and not over-allocate
- * if a class has already been apportioned from a subpool.
+ * Note that this is tricky - please make sure you understand all the logic in countClassShares before changing
+ * anything.
+ *
+ * @param np
+ * @param eligible
+ * @return List of classes with potential counts.
*/
- protected void traverseNodepoolsForCounts(NodePool np, ArrayList<ResourceClass> rcs)
+ protected List<ResourceClass> traverseNodepoolsForCounts(NodePool np, List<ResourceClass> eligible)
{
- //HashMap<String, NodePool> subpools = np.getChildren();
- List<NodePool> subpools = np.getChildrenAscending();
- for ( NodePool subpool : subpools ) {
- ArrayList<ResourceClass> cls = new ArrayList<ResourceClass>();
- String npn = subpool.getId();
- int njobs = 0;
- for ( ResourceClass rc : rcs ) {
- String rcnpn = rc.getNodepoolName();
- if ( rcnpn == null ) continue;
+ //String methodName = "traverseNodepoolsForCounts";
- if ( rc.getNodepoolName().equals(npn) ) {
- cls.add(rc);
- njobs += rc.countJobs();
- }
- }
- if ( njobs > 0 ) {
- traverseNodepoolsForCounts(subpool, cls);
- }
- }
+ List<ResourceClass> myRcs = gatherRcs(np, eligible); // the resource classes for NodePool np
+ boolean hasJobs = (myRcs.size() > 0); // do I have jobs for this np?
- countClassShares(np, rcs);
- }
+ List<NodePool> subpools = np.getChildrenAscending(); // now recurse down to leaves from here
+ for ( NodePool subpool : subpools ) {
+ List<ResourceClass> subrc = traverseNodepoolsForCounts(subpool, eligible);
+ myRcs.addAll(subrc);
+ }
+
+ // now do our fs, if there are jobs resident in this np
+ if ( hasJobs ) countClassShares(np, myRcs);
+ return myRcs; // return aggregated classes to caller
+ }
+
+// /**
+// * Depth-first traversal of the nodepool. Once you get to a leaf, count the shares. This sets an
+// * upper-bound on the number of shares a class can have. As you wind back up the tree the counts may
+// * be reduced because of competition from jobs in the parent node. By the time we're done we should have
+// * accounted for all jobs and all usable resources.
+// *
+// * Note how this works:
+// * Consider a configuration with two nodepools plus global, A, B, and G. Suppose nodepools A and B have
+// * 30 shares each and G only has 10 shares. G can apportion over it's 10, plus the 60 from A and B. So
+// * after apportioning over A and B we need to do fair-share over G+A+B to ensure that jobs submitted
+// * to G are not "cheated" - recall that jobs in this set of classes have the same weight and priority,
+// * and thus the same "right" to all the shares. However, allocating a job from class A over the
+// * full set of 10+30 shares could over-allocate it. So the cap calculations must be sure never to
+// * increase the already-given shares for subpools.
+// *
+// * Therefore we traverse the FULL SET of classes on every recursion. When calculating caps from
+// * apportion_shares the resource classes will have to account for multiple traversals and not over-allocate
+// * if a class has already been apportioned from a subpool.
+// *
+// * Keep for a while for reference. It is wrong but if there are still bugs in the rewrite we
+// * want easy reference to the original. jrc 2014-11-05 UIMA-4065
+// */
+// protected void traverseNodepoolsForCounts(NodePool np, List<ResourceClass> rcs)
+// {
+// //HashMap<String, NodePool> subpools = np.getChildren();
+// List<NodePool> subpools = np.getChildrenAscending();
+// for ( NodePool subpool : subpools ) {
+// ArrayList<ResourceClass> cls = new ArrayList<ResourceClass>();
+// String npn = subpool.getId();
+// int njobs = 0;
+// for ( ResourceClass rc : rcs ) {
+// String rcnpn = rc.getNodepoolName();
+// if ( rcnpn == null ) continue;
+
+// if ( rc.getNodepoolName().equals(npn) ) {
+// cls.add(rc);
+// njobs += rc.countJobs();
+// }
+// }
+// if ( njobs > 0 ) {
+// traverseNodepoolsForCounts(subpool, cls);
+// }
+// }
+
+// countClassShares(np, rcs);
+// }
protected void updateNodepools(NodePool np, ArrayList<ResourceClass> rcs)
@@ -807,7 +894,7 @@ public class NodepoolScheduler
//
// First step, figure out how many shares per class.
//
- traverseNodepoolsForCounts(globalNodepool, eligible);
+ traverseNodepoolsForCounts(globalNodepool, eligible); // (only these classes, not the global set)
//
// Everything should be stable - now reduce the counts in the nodepools
@@ -917,7 +1004,7 @@ public class NodepoolScheduler
if ( !jobInClass(rcs, j) ) continue;
- if ( getNodepool(rc) == np ) {
+ if ( rc.getNodepool() == np ) {
switch ( rc.getPolicy()) {
case FAIR_SHARE:
fair_share_jobs.add(j);
@@ -1106,7 +1193,7 @@ public class NodepoolScheduler
j.clearShares();
}
- NodePool np = getNodepool(rc);
+ NodePool np = rc.getNodepool();
int classcap = 0;
classcap = calcCaps(rc.getAbsoluteCap(), rc.getPercentCap(), np.countTotalShares()); // quantum shares
@@ -1196,7 +1283,7 @@ public class NodepoolScheduler
for ( ResourceClass rc : rcs ) {
ArrayList<IRmJob> jobs = rc.getAllJobsSorted(new JobByTimeSorter());
- NodePool np = getNodepool(rc);
+ NodePool np = rc.getNodepool();
for ( IRmJob j : jobs ) {
if ( j.countNShares() > 0 ) { // all or nothing - if we have any, we're fully satisfied
@@ -1238,12 +1325,6 @@ public class NodepoolScheduler
// =========================== REWORKED CODE FOR RESERVATIONS ================================
// ==========================================================================================
// ==========================================================================================
-
- /**
- * TODO: what to do if there are machines that are larger than requested, but
- * not enough of the exact size. For now, refuse, the request will only
- * match exactly.
- */
private void howMuchReserve(ArrayList<ResourceClass> rcs)
{
String methodName = "howMuchToreserve";
@@ -1263,7 +1344,7 @@ public class NodepoolScheduler
ArrayList<IRmJob> jobs = rc.getAllJobsSorted(new JobByTimeSorter());
int machines_given_out = 0;
- NodePool np = getNodepool(rc);
+ NodePool np = rc.getNodepool();
// Find out what is given out already, for class caps. These are already accounted for
// in the global counts.
@@ -1335,7 +1416,7 @@ public class NodepoolScheduler
}
classcap = calcCaps(rc.getAbsoluteCap(), rc.getPercentCap(), np.countLocalMachines());
-
+ logger.info(methodName, j.getId(), "Absolute cap:", rc.getAbsoluteCap(), "PercentCap", rc.getPercentCap(), "np.countLocalMachines", np.countLocalMachines(), "cap", classcap);
//
// Assumption to continue is that this is a new reservation
//
@@ -1391,7 +1472,7 @@ public class NodepoolScheduler
{
String methodName = "whatOfToReserve";
for ( ResourceClass rc : rcs ) {
- NodePool np = getNodepool(rc);
+ NodePool np = rc.getNodepool();
ArrayList<IRmJob> jobs = rc.getAllJobsSorted(new JobByTimeSorter());
for ( IRmJob j: jobs ) {
@@ -1436,7 +1517,7 @@ public class NodepoolScheduler
case FIXED_SHARE:
{
- NodePool np = getNodepool(rc);
+ NodePool np = rc.getNodepool();
HashMap<IRmJob, IRmJob> jobs = rc.getAllJobs();
for ( IRmJob j : jobs.values() ) {
if ( j.countNShares() > 0 ) { // all-or-nothing - if there's anything, it's fully scheduled.
@@ -1449,7 +1530,7 @@ public class NodepoolScheduler
case RESERVE:
{
- NodePool np = getNodepool(rc);
+ NodePool np = rc.getNodepool();
HashMap<IRmJob, IRmJob> jobs = rc.getAllJobs();
for ( IRmJob j : jobs.values() ) {
if ( j.countNShares() > 0 ) { // all-or-nothing - if there's anything, it's fully scheduled.
@@ -1470,7 +1551,13 @@ public class NodepoolScheduler
{
for (ResourceClass rc : resourceClasses.values() ) {
if ( rc.getPolicy() == Policy.FAIR_SHARE ) {
- NodePool np = getNodepool(rc);
+ NodePool np = rc.getNodepool();
+ NodePool check = getNodepool(rc);
+ if ( np != check ) {
+ throw new IllegalStateException("getNodepool is busted.");
+ } else {
+ System.out.println("------------------- np pointer checks out ok. ----------------- rc: " + rc.getName() + " np: " + np.getId() );
+ }
HashMap<IRmJob, IRmJob> jobs = rc.getAllJobs();
for ( IRmJob j : jobs.values() ) {
HashMap<Share, Share> shares = j.getAssignedShares();
@@ -1522,7 +1609,9 @@ public class NodepoolScheduler
// stop_here_de++;
for ( NodePool np : nodepool.getChildrenDescending() ) { // recurse down the tree
- doEvictions(np); // depth-first traversal
+ logger.info(methodName, null, "Recurse to", np.getId(), "from", nodepool.getId());
+ doEvictions(np); // depth-first traversal
+ logger.info(methodName, null, "Return from", np.getId(), "proceeding with logic for", nodepool.getId());
}
int neededByOrder[] = NodePool.makeArray(); // for each order, how many N-shares do I want to add?
@@ -1567,7 +1656,7 @@ public class NodepoolScheduler
{
Machine m = candidate.getMachine();
ResourceClass nrc = needy.getResourceClass();
- NodePool np = getNodepool(nrc);
+ NodePool np = nrc.getNodepool();
return np.containsMachine(m); // can we get to the candidate share from 'needy's np?
}
@@ -1580,8 +1669,8 @@ public class NodepoolScheduler
ResourceClass prc = potential.getResourceClass();
ResourceClass nrc = needy.getResourceClass();
- NodePool np = getNodepool(nrc);
- NodePool pp = getNodepool(prc);
+ NodePool np = nrc.getNodepool();
+ NodePool pp = prc.getNodepool();
return np.containsSubpool(pp) || pp.containsSubpool(np);
}
@@ -1694,13 +1783,15 @@ public class NodepoolScheduler
for ( Machine m : machines.values() ) {
if ( m.getShareOrder() < orderNeeded ) {
- logger.trace(methodName, nj.getId(), "Bypass ", m.getId(), ": too small for request of order", orderNeeded);
+ logger.trace(methodName, nj.getId(), "Bypass ", m.getId(), ": too small for request of order", orderNeeded);
+ logger.info(methodName, nj.getId(), "Bypass ", m.getId(), ": too small for request of order", orderNeeded);
continue;
}
// if the job is a reservation the machine size has to match
if ( nj.isReservation() && ( m.getShareOrder() != orderNeeded )) {
logger.trace(methodName, nj.getId(), "Bypass ", m.getId(), ": reservation requires exact match for order", orderNeeded);
+ logger.info(methodName, nj.getId(), "Bypass ", m.getId(), ": reservation requires exact match for order", orderNeeded);
continue;
}
@@ -1714,11 +1805,13 @@ public class NodepoolScheduler
}
if ( g >= orderNeeded ) {
logger.trace(methodName, nj.getId(), "Candidate machine:", m.getId());
+ logger.info(methodName, nj.getId(), "Candidate machine:", m.getId());
eligibleMachines.put(m, m);
} else {
// (a) the share is not forceable (non-preemptbable, or already being removed), or
// (b) the share is not owned by a rich job
logger.trace(methodName, nj.getId(), "Not a candidate, insufficient rich jobs:", m.getId());
+ logger.info(methodName, nj.getId(), "Not a candidate, insufficient rich jobs:", m.getId());
}
}
@@ -2148,8 +2241,7 @@ public class NodepoolScheduler
to_remove = available;
needed -= to_remove;
}
-
- // TODO TODO TODO Is this loop useful, and if so, for what? Is it old code I forgot to remove?
+
if ( to_remove > 0 ) {
NodePool np = allPools[npi];
for ( NodePool npj = np; npj != null; npj = npj.getParent() ) { // must propogate up because of how these tables work
Modified: uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/ResourceClass.java
URL: http://svn.apache.org/viewvc/uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/ResourceClass.java?rev=1640415&r1=1640414&r2=1640415&view=diff
==============================================================================
--- uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/ResourceClass.java (original)
+++ uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/ResourceClass.java Tue Nov 18 19:57:03 2014
@@ -23,6 +23,7 @@ import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
+import org.apache.uima.ducc.common.utils.DuccLogger;
import org.apache.uima.ducc.common.utils.DuccProperties;
import org.apache.uima.ducc.common.utils.SystemPropertyResolver;
@@ -34,7 +35,7 @@ public class ResourceClass
implements SchedConstants,
IEntity
{
- //private DuccLogger logger = DuccLogger.getLogger(this.getClass(), COMPONENT_NAME);
+ private DuccLogger logger = DuccLogger.getLogger(this.getClass(), COMPONENT_NAME);
private String id;
private Policy policy;
@@ -61,6 +62,8 @@ public class ResourceClass
private HashMap<User, HashMap<IRmJob, IRmJob>> jobsByUser = new HashMap<User, HashMap<IRmJob, IRmJob>>();
private int max_job_order = 0; // largest order of any job still alive in this rc (not necessarily globally though)
+ private NodePool nodepool = null;
+
// private HashMap<Integer, Integer> nSharesByOrder = new HashMap<Integer, Integer>(); // order, N shares of that order
private boolean subpool_counted = false;
@@ -215,6 +218,16 @@ public class ResourceClass
// }
// }
+ public void setNodepool(NodePool np)
+ {
+ this.nodepool = np;
+ }
+
+ public NodePool getNodepool()
+ {
+ return this.nodepool;
+ }
+
public long getTimestamp()
{
return 0;
@@ -401,6 +414,49 @@ public class ResourceClass
}
*/
+ /**
+ * Can I use 1 more share of this size? This is more complex than for Users and Jobs because
+ * in addition to checking if my request is filled, we need to make sure the underlying nodepools
+ * can support the bonus. (This creates an upper bound on apportionment from this class that tends
+ * to trickle down into users and jobs as the counting progresses).
+ * UIMA-4065
+ *
+ * @param order The size of the available share. Must be an exact match because the
+ * offerer has already done all reasonable splitting and will have a better
+ * use for it if I can't take it.
+ *
+ * The decision is based on the wbo/gbo arrays that the offer has been building up
+ * just before asking this question.
+ *
+ * @return True if I can use the share, false otherwise.
+ */
+ public boolean canUseBonus(int order) // UIMA-4065
+ {
+ String methodName = "canUseBonus";
+ int wbo = getWantedByOrder()[order]; // calculated by caller so we don't need to check caps
+ int gbo = getGivenByOrder()[order];
+
+ //
+ // we want to ask the nodepool and its subpools:
+ // how many open shares of "order" will you have after we give away
+ // the ones already counted?
+ //
+ // To do this, we have "our" nodepool recursively gather all their classes
+ // and accumulate this: np.countLocalNSharesByOrder - (foreachrc: gbo[order])
+ //
+ // Then, if gbo < resourcesAvailable we can return true, else return false
+ //
+ int resourcesAvailable = nodepool.countAssignableShares(order); // recurses, covers all relevant rc's
+ logger.info(methodName, null, "Class", id, "nodepool", nodepool.getId(), "order", order, "wbo", wbo, "gbo", gbo, "resourcesAvailable", resourcesAvailable);
+
+ if ( wbo <= 0 ) return false;
+
+ if ( resourcesAvailable <= 0 ) { // if I get another, do I go over?
+ return false; // yep, politely decline
+ }
+ return true;
+ }
+
void updateNodepool(NodePool np)
{
//String methodName = "updateNodepool";
Modified: uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/RmJob.java
URL: http://svn.apache.org/viewvc/uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/RmJob.java?rev=1640415&r1=1640414&r2=1640415&view=diff
==============================================================================
--- uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/RmJob.java (original)
+++ uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/RmJob.java Tue Nov 18 19:57:03 2014
@@ -354,43 +354,24 @@ public class RmJob
return countNShares() - countNSharesGiven();
}
-// /**
-// * Can I use more N-shares; how many?
-// */
-// public int canUseBonus(int bonus, int[] nSharesByOrder)
-// {
-// int cap = getJobCap();
-// int can_use = Math.max(0, cap - shares_given); // what can we actually use?
-
-// if ( can_use > nSharesByOrder[share_order] ) { // can't use more than physicalliy exist
-// return 0;
-// }
-
-// for (int i = share_order; i <= Math.min(bonus, (nSharesByOrder.length - 1)); i++ ) {
-// if ( (nSharesByOrder[i] > 0) && (i <= can_use) ) {
-// return i ;
-// }
-// }
-// return 0;
-// }
-
/**
- * Can I use more N-shares?
-
- public int canUseBonus(int bonus, int[] nSharesByOrder)
+ * Can I use 1 more share of this size?
+ * UIMA-4065
+ *
+ * @param order The size of the available share. Must be an exact match because the
+ * offerer has already done all reasonable splitting and will have a better
+ * use for it if I can't take it.
+ *
+ * The decision is based on the wbo/gbo arrays that the offer has been building up
+ * just before asking this question.
+ *
+ * @return True if I can use the share, false otherwise.
+ */
+ public boolean canUseBonus(int order) // UIMA-4065
{
- int cap = getJobCap();
- int can_use = Math.max(0, cap - countNSharesGiven()); // what can we actually use?
-
- // if ( can_use > nSharesByOrder[share_order] ) { // can't use more than physicalliy exist
- // return 0;
- // }
- if ( can_use == 0 ) {
- return 0;
- }
- return ( nSharesByOrder[share_order] > 0 ) ? share_order : 0;
- }
- */
+ if ( order != share_order) return false;
+ return (getWantedByOrder()[order] > 0); // yep, still want
+ }
/**
* Officially allocated shares assigned to this job which are known to be in use.
Modified: uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/Scheduler.java
URL: http://svn.apache.org/viewvc/uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/Scheduler.java?rev=1640415&r1=1640414&r2=1640415&view=diff
==============================================================================
--- uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/Scheduler.java (original)
+++ uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/Scheduler.java Tue Nov 18 19:57:03 2014
@@ -61,7 +61,7 @@ public class Scheduler
String ducc_home;
// Integer epoch = 5; // scheduling epoch, seconds
- NodePool[] nodepools;
+ NodePool[] nodepools; // top-level nodepools
int max_order = 0;
//
@@ -564,6 +564,17 @@ public class Scheduler
Map<ResourceClass, ResourceClass> classesForNp = new HashMap<ResourceClass, ResourceClass>();
getClassesForNodepool(np, classesForNp); // all classes served by this heirarchy - fills in classesForNp
+ for ( ResourceClass rc: classesForNp.values() ) { // UIMA-4065 tell each class which np serves it
+ String rcid = rc.getNodepoolName();
+ if ( rcid != null ) {
+ // set the two-way pointers between rc and np
+ NodePool subpool = nodepools[i].getSubpool(rcid);
+ rc.setNodepool(subpool); // rc -> nodepool
+ logger.info(methodName, null, "Assign rc", rc.getName(), "to np", subpool.getId());
+ subpool.addResourceClass(rc); // nodepool -> rc
+ }
+ }
+
schedulers[i].setClasses(classesForNp);
}
@@ -1025,7 +1036,7 @@ public class Scheduler
if ( m == null ) {
// allNodes.put(node, node);
- long allocatable_mem = node.getNodeMetrics().getNodeMemory().getMemTotal() - share_free_dram;
+ long allocatable_mem = node.getNodeMetrics().getNodeMemory().getMemFree() - share_free_dram;
if ( dramOverride > 0 ) {
allocatable_mem = dramOverride;
}
Modified: uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/User.java
URL: http://svn.apache.org/viewvc/uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/User.java?rev=1640415&r1=1640414&r2=1640415&view=diff
==============================================================================
--- uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/User.java (original)
+++ uima/sandbox/uima-ducc/trunk/uima-ducc-rm/src/main/java/org/apache/uima/ducc/rm/scheduler/User.java Tue Nov 18 19:57:03 2014
@@ -177,91 +177,23 @@ public class User
return Integer.MAX_VALUE; // no cap for users
}
-// HashMap<IRmJob, IRmJob> getJobs()
-// {
-// return jobs;
-// }
-
-// HashMap<IRmJob, IRmJob> getJobsOfOrder(int order)
-// {
-// return jobsByOrder.get(order);
-// }
-
-// HashMap<Integer, HashMap<IRmJob, IRmJob>> getJobsByOrder()
-// {
-// return jobsByOrder;
-// }
-
-// HashMap<String, Machine> getMachines()
-// {
-// // TODO: fill this in - walk the jobs and return the hash
-// System.out.println("Warning: getMachines() is not implemented and is returning null");
-// return null;
-// }
-
-// public int countJobs()
-// {
-// return jobs.size();
-// }
-
-// public int countJobs(int o)
-// {
-// if ( jobsByOrder.containsKey(o) ) {
-// return jobsByOrder.get(o).size();
-// }
-// return 0;
-// }
-
-// public void clearShares()
-// {
-// user_shares = 0;
-// //System.out.println("**** user " + getId() + "/" + uniqueId + " clearing shares");
-// //sharesByOrder.clear();
-// }
-
-// public void addQShares(int s)
-// {
-// user_shares += s;
-// //System.out.println("***** user " + getId() + "/" + uniqueId + " shares are " + s);
-// }
-
-// /**
-// * Try to find the smallest bonus shares we can use.
-// */
-// public int canUseBonus(int bonus, int[] tmpSharesByOrder)
-// {
-// for ( int i = 1; i <= Math.min(bonus, tmpSharesByOrder.length); i++ ) {
-//
-// if ( jobsByOrder.containsKey(i) && (tmpSharesByOrder[i] > 0) ) {
-// return i;
-// }
-// }
-// return 0;
-// }
-
-// public int countQShares(String x)
-// {
-// //System.out.println(x + " **** user " + getId() + "/" + uniqueId + " returning " + user_shares + " shares");
-// return this.user_shares;
-// }
-
-
-// int countCappedQShares(int physicalCap, int order)
-// {
-// int K = 0;
-// physicalCap = physicalCap * order; // to quantum shares
-// HashMap<IRmJob, IRmJob> jobs = jobsByOrder.get(order);
-//
-// if ( jobs == null ) {
-// return 0;
-// }
-//
-// for ( IRmJob j : jobs.values() ) {
-// K += (Math.min(j.getJobCap(), physicalCap));
-// }
-//
-// return Math.min(K, physicalCap) * order;
-// }
+ /**
+ * Can I use 1 more share of this size?
+ * UIMA-4065
+ *
+ * @param order The size of the available share. Must be an exact match because the
+ * offerer has already done all reasonable splitting and will have a better
+ * use for it if I can't take it.
+ *
+ * The decision is based on the wbo/gbo arrays that the offer has been building up
+ * just before asking this question.
+ *
+ * @return True if I can use the share, false otherwise.
+ */
+ public boolean canUseBonus(int order) // UIMA-4065
+ {
+ return (getWantedByOrder()[order] > 0); // yep, still want
+ }
@Override
public int hashCode()