Posted to commits@joshua.apache.org by mj...@apache.org on 2016/08/21 17:42:28 UTC

[19/22] incubator-joshua git commit: JOSHUA-291 - static analysis based code improvements on adagrad package

JOSHUA-291 - static analysis based code improvements on adagrad package


Project: http://git-wip-us.apache.org/repos/asf/incubator-joshua/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-joshua/commit/233818d6
Tree: http://git-wip-us.apache.org/repos/asf/incubator-joshua/tree/233818d6
Diff: http://git-wip-us.apache.org/repos/asf/incubator-joshua/diff/233818d6

Branch: refs/heads/JOSHUA-284
Commit: 233818d6ce526f3a77b33110d67314b723371743
Parents: 44b1b7b
Author: Tommaso Teofili <to...@apache.org>
Authored: Sun Aug 7 19:21:59 2016 +0200
Committer: Tommaso Teofili <to...@apache.org>
Committed: Thu Aug 18 09:57:05 2016 +0200

----------------------------------------------------------------------
 .../org/apache/joshua/adagrad/AdaGradCore.java  |  79 +++--
 .../org/apache/joshua/adagrad/Optimizer.java    | 348 +++++++++----------
 2 files changed, 207 insertions(+), 220 deletions(-)
----------------------------------------------------------------------
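
Nearly every hunk below applies one of a few mechanical, behavior-preserving rewrites flagged by static analysis: the Java 7 diamond operator, double literals in place of the deprecated Double constructor, enhanced for loops, and single library calls in place of hand-written copy loops. The diamond operator is the most frequent; as a standalone illustration (the class and variable names are invented, not taken from the patch):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class DiamondSketch {
        public static void main(String[] args) {
            // Pre-Java-7 style: the type arguments are written out twice.
            List<Double> lambdaOld = new ArrayList<Double>();

            // Java 7 diamond: the compiler infers <Double> from the
            // declared type, so the rewrite is purely syntactic.
            List<Double> lambdaNew = new ArrayList<>();

            Map<String, String> featHash = new HashMap<>();
            featHash.put("lm_0", "-12.5");
            lambdaOld.add(0d);
            lambdaNew.add(0d);
            System.out.println(featHash + " " + lambdaOld + " " + lambdaNew);
        }
    }
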


http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/233818d6/src/main/java/org/apache/joshua/adagrad/AdaGradCore.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/joshua/adagrad/AdaGradCore.java b/src/main/java/org/apache/joshua/adagrad/AdaGradCore.java
index e51e8a3..9dc81a4 100755
--- a/src/main/java/org/apache/joshua/adagrad/AdaGradCore.java
+++ b/src/main/java/org/apache/joshua/adagrad/AdaGradCore.java
@@ -33,6 +33,7 @@ import java.io.OutputStreamWriter;
 import java.io.PrintWriter;
 import java.text.DecimalFormat;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.Date;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -132,9 +133,9 @@ public class AdaGradCore {
   /* *********************************************************** */
 
   // private double[] lambda;
-  private ArrayList<Double> lambda = new ArrayList<Double>();
+  private ArrayList<Double> lambda = new ArrayList<>();
   // the current weight vector. NOTE: indexing starts at 1.
-  private ArrayList<Double> bestLambda = new ArrayList<Double>();
+  private final ArrayList<Double> bestLambda = new ArrayList<>();
   // the best weight vector across all iterations
 
   private boolean[] isOptimizable;
@@ -341,8 +342,8 @@ public class AdaGradCore {
     // and one line for the normalization method
     // indexing starts at 1 in these arrays
     for (int p = 0; p <= numParams; ++p)
-      lambda.add(new Double(0));
-    bestLambda.add(new Double(0));
+      lambda.add(0d);
+    bestLambda.add(0d);
     // why is only lambda a list? because the size of lambda
     // may increase over time, but other arrays are specified in
     // the param config file, only used for initialization
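
This hunk swaps new Double(0) for the literal 0d. The wrapper constructor always allocates a fresh object (and has been deprecated since Java 9), while a boxed literal compiles to Double.valueOf, which the Javadoc says may reuse cached values. A minimal sketch, independent of the patch:

    import java.util.ArrayList;
    import java.util.List;

    public class BoxingSketch {
        public static void main(String[] args) {
            List<Double> lambda = new ArrayList<>();

            // Old style: unconditionally constructs a new Double
            // (this constructor is deprecated since Java 9).
            lambda.add(new Double(0));

            // New style: 0d is autoboxed via Double.valueOf(0d).
            lambda.add(0d);

            System.out.println(lambda); // [0.0, 0.0]
        }
    }
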
@@ -497,7 +498,7 @@ public class AdaGradCore {
     indicesOfInterest_all = temp_TSA;
 
     for (int i = 0; i < numSentences; ++i) {
-      indicesOfInterest_all[i] = new TreeSet<Integer>();
+      indicesOfInterest_all[i] = new TreeSet<>();
     }
   } // void initialize(...)
 
@@ -522,9 +523,9 @@ public class AdaGradCore {
     if (folder.exists()) {
       File[] listOfFiles = folder.listFiles();
 
-      for (int i = 0; i < listOfFiles.length; i++) {
-        if (listOfFiles[i].isFile()) {
-          files = listOfFiles[i].getName();
+      for (File listOfFile : listOfFiles) {
+        if (listOfFile.isFile()) {
+          files = listOfFile.getName();
           if (files.startsWith("AdaGrad.temp")) {
             deleteFile(files);
           }
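
The indexed loop over listOfFiles becomes an enhanced for statement, which drops the index bookkeeping and reads the same over arrays and Iterables. The same pattern in a self-contained sketch (the directory and the printed action are illustrative only):

    import java.io.File;

    public class ForEachSketch {
        public static void main(String[] args) {
            File folder = new File(".");
            File[] listOfFiles = folder.listFiles();
            if (listOfFiles == null)
                return; // listFiles() returns null if the path is not a directory

            for (File f : listOfFiles) {
                if (f.isFile() && f.getName().startsWith("AdaGrad.temp")) {
                    System.out.println("temp file: " + f.getName());
                }
            }
        }
    }
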
@@ -627,11 +628,11 @@ public class AdaGradCore {
     // save feats and stats for all candidates(old & new)
     HashMap<String, String>[] feat_hash = new HashMap[numSentences];
     for (int i = 0; i < numSentences; i++)
-      feat_hash[i] = new HashMap<String, String>();
+      feat_hash[i] = new HashMap<>();
 
     HashMap<String, String>[] stats_hash = new HashMap[numSentences];
     for (int i = 0; i < numSentences; i++)
-      stats_hash[i] = new HashMap<String, String>();
+      stats_hash[i] = new HashMap<>();
 
     while (!done) { // NOTE: this "loop" will only be carried out once
       println("--- Starting AdaGrad iteration #" + iteration + " @ " + (new Date()) + " ---", 1);
@@ -848,7 +849,7 @@ public class AdaGradCore {
         // (It's not actually a bug, but only because existingCandStats gets
         // cleared before moving to the next source sentence.)
         // FIX: should be made an array, indexed by i
-        HashMap<String, String> existingCandStats = new HashMap<String, String>();
+        HashMap<String, String> existingCandStats = new HashMap<>();
         // VERY IMPORTANT:
        // A CANDIDATE X MAY HAVE APPEARED IN ITER 1, ITER 3
         // BUT IF THE USER SPECIFIED TO CONSIDER ITERATIONS FROM ONLY ITER 2, THEN
@@ -943,7 +944,7 @@ public class AdaGradCore {
 
           String[] sentsCurrIt_currSrcSent = new String[sizeOfNBest + 1];
 
-          Vector<String> unknownCands_V = new Vector<String>();
+          Vector<String> unknownCands_V = new Vector<>();
           // which candidates (of the i'th source sentence) have not been seen before
           // this iteration?
 
@@ -1122,7 +1123,7 @@ public class AdaGradCore {
                   // initialized as zero anyway
                   if (featId > numParams) {
                     ++numParams;
-                    lambda.add(new Double(0));
+                    lambda.add(0d);
                   }
                 }
               }
@@ -1236,7 +1237,7 @@ public class AdaGradCore {
               lambda.set(p, bestLambda.get(p));
             // and set the rest of lambda to be 0
             for (int p = 0; p < lambda.size() - bestLambda.size(); ++p)
-              lambda.set(p + bestLambda.size(), new Double(0));
+              lambda.set(p + bestLambda.size(), 0d);
           }
 
           return null; // this means that the old values should be kept by the caller
@@ -1284,7 +1285,7 @@ public class AdaGradCore {
         }
       }
 
-      Vector<String> output = new Vector<String>();
+      Vector<String> output = new Vector<>();
 
       // note: initialLambda[] has length = numParamsOld
       // augmented with new feature weights, initial values are 0
@@ -1328,8 +1329,8 @@ public class AdaGradCore {
 
       /************* end optimization **************/
 
-      for (int i = 0; i < output.size(); i++)
-        println(output.get(i));
+      for (String anOutput : output)
+        println(anOutput);
 
       // check if any parameter has been updated
       boolean anyParamChanged = false;
@@ -1407,7 +1408,7 @@ public class AdaGradCore {
       // (interpolation with previous wt vector)
       double interCoef = 1.0; // no interpolation for now
       for (int i = 1; i <= numParams; i++)
-        lambda.set(i, interCoef * finalLambda[i] + (1 - interCoef) * lambda.get(i).doubleValue());
+        lambda.set(i, interCoef * finalLambda[i] + (1 - interCoef) * lambda.get(i));
 
       println("Next iteration will decode with lambda: " + lambdaToString(lambda), 1);
       println("", 1);
@@ -1441,9 +1442,9 @@ public class AdaGradCore {
 
     retStr += "(listing the first " + featToPrint + " lambdas)";
     for (int c = 1; c <= featToPrint - 1; ++c) {
-      retStr += "" + String.format("%.4f", lambdaA.get(c).doubleValue()) + ", ";
+      retStr += "" + String.format("%.4f", lambdaA.get(c)) + ", ";
     }
-    retStr += "" + String.format("%.4f", lambdaA.get(numParams).doubleValue()) + "}";
+    retStr += "" + String.format("%.4f", lambdaA.get(numParams)) + "}";
 
     return retStr;
   }
@@ -1476,7 +1477,7 @@ public class AdaGradCore {
       println("Running external decoder...", 1);
 
       try {
-        ArrayList<String> cmd = new ArrayList<String>();
+        ArrayList<String> cmd = new ArrayList<>();
         cmd.add(decoderCommandFileName);
 
         if (passIterationToDecoder)
@@ -1627,7 +1628,7 @@ public class AdaGradCore {
         if (c_match == -1) {
           outFile.println(line);
         } else {
-          if (Math.abs(params.get(c_match).doubleValue()) > 1e-20)
+          if (Math.abs(params.get(c_match)) > 1e-20)
             outFile.println(Vocabulary.word(c_match) + " " + params.get(c_match));
         }
 
@@ -1636,7 +1637,7 @@ public class AdaGradCore {
 
       // now append weights of new features
       for (int c = origFeatNum + 1; c <= numParams; ++c) {
-        if (Math.abs(params.get(c).doubleValue()) > 1e-20)
+        if (Math.abs(params.get(c)) > 1e-20)
           outFile.println(Vocabulary.word(c) + " " + params.get(c));
       }
 
@@ -1667,7 +1668,7 @@ public class AdaGradCore {
 
       // read default value
       lambda.set(c, inFile_init.nextDouble());
-      defaultLambda[c] = lambda.get(c).doubleValue();
+      defaultLambda[c] = lambda.get(c);
 
       // read isOptimizable
       dummy = inFile_init.next();
@@ -1849,7 +1850,7 @@ public class AdaGradCore {
 
           boolean format3 = false;
 
-          HashSet<String> seenStrings = new HashSet<String>();
+          HashSet<String> seenStrings = new HashSet<>();
           BufferedReader inFile = new BufferedReader(new FileReader(docInfoFileName));
           for (int i = 0; i < numSentences; ++i) {
             // set format3 = true if a duplicate is found
@@ -1861,8 +1862,8 @@ public class AdaGradCore {
 
           inFile.close();
 
-          HashSet<String> seenDocNames = new HashSet<String>();
-          HashMap<String, Integer> docOrder = new HashMap<String, Integer>();
+          HashSet<String> seenDocNames = new HashSet<>();
+          HashMap<String, Integer> docOrder = new HashMap<>();
           // maps a document name to the order (0-indexed) in which it was seen
 
           inFile = new BufferedReader(new FileReader(docInfoFileName));
@@ -1989,7 +1990,7 @@ public class AdaGradCore {
       try {
         PrintWriter outFile_lambdas = new PrintWriter(finalLambdaFileName);
         for (int c = 1; c <= numParams; ++c) {
-          outFile_lambdas.println(Vocabulary.word(c) + " ||| " + lambda.get(c).doubleValue());
+          outFile_lambdas.println(Vocabulary.word(c) + " ||| " + lambda.get(c));
         }
         outFile_lambdas.close();
 
@@ -2003,7 +2004,7 @@ public class AdaGradCore {
   private String[] cfgFileToArgsArray(String fileName) {
     checkFile(fileName);
 
-    Vector<String> argsVector = new Vector<String>();
+    Vector<String> argsVector = new Vector<>();
 
     BufferedReader inFile = null;
     try {
@@ -2015,7 +2016,7 @@ public class AdaGradCore {
 
         if (line != null && line.length() > 0 && line.charAt(0) != '#') {
 
-          if (line.indexOf("#") != -1) { // discard comment
+          if (line.contains("#")) { // discard comment
             line = line.substring(0, line.indexOf("#"));
           }
 
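
line.indexOf("#") != -1 becomes line.contains("#"), which states the intent directly; String.contains (since Java 5) is specified in terms of indexOf, so behavior is unchanged. The comment-stripping idiom in a standalone sketch (the config line is invented for illustration):

    public class ContainsSketch {
        public static void main(String[] args) {
            String line = "-decOut nbest.out   # decoder output file";

            if (line.contains("#")) { // same test as indexOf("#") != -1
                line = line.substring(0, line.indexOf("#")).trim();
            }
            System.out.println("[" + line + "]"); // [-decOut nbest.out]
        }
    }
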
@@ -2038,7 +2039,7 @@ public class AdaGradCore {
 
           // cmu modification(from meteor for zmert)
           // Parse args
-          ArrayList<String> argList = new ArrayList<String>();
+          ArrayList<String> argList = new ArrayList<>();
           StringBuilder arg = new StringBuilder();
           boolean quoted = false;
           for (int i = 0; i < line.length(); i++) {
@@ -2071,9 +2072,7 @@ public class AdaGradCore {
             argsVector.add(paramA[1]);
           } else if (paramA.length > 2 && (paramA[0].equals("-m") || paramA[0].equals("-docSet"))) {
            // -m (metricName), -docSet are allowed to have extra options
-            for (int opt = 0; opt < paramA.length; ++opt) {
-              argsVector.add(paramA[opt]);
-            }
+            Collections.addAll(argsVector, paramA);
           } else {
             String msg = "Malformed line in config file:" + origLine;
             throw new RuntimeException(msg);
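
The hand-written copy loop over paramA collapses into Collections.addAll, which appends every array element to a collection in one call; this is also why java.util.Collections joins the imports at the top of the file. A self-contained sketch (argument values are illustrative):

    import java.util.Collections;
    import java.util.Vector;

    public class AddAllSketch {
        public static void main(String[] args) {
            Vector<String> argsVector = new Vector<>();
            String[] paramA = { "-m", "BLEU", "4", "closest" };

            // Replaces: for (int opt = 0; opt < paramA.length; ++opt)
            //               argsVector.add(paramA[opt]);
            Collections.addAll(argsVector, paramA);

            System.out.println(argsVector); // [-m, BLEU, 4, closest]
        }
    }
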
@@ -2413,7 +2412,7 @@ public class AdaGradCore {
         if (val < 0 || val > 1) {
           throw new RuntimeException("passIterationToDecoder should be either 0 or 1");
         }
-        passIterationToDecoder = (val == 1) ? true : false;
+        passIterationToDecoder = (val == 1);
       } else if (option.equals("-decOut")) {
         decoderOutFileName = args[i + 1];
       } else if (option.equals("-decExit")) {
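
(val == 1) ? true : false is a redundant conditional: the comparison already yields the boolean. Sketch:

    public class TernarySketch {
        public static void main(String[] args) {
            int val = 1;

            boolean verbose = (val == 1) ? true : false; // old, redundant
            boolean concise = (val == 1);                // new, equivalent

            System.out.println(verbose == concise); // true
        }
    }
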
@@ -2808,7 +2807,7 @@ public class AdaGradCore {
     str = " " + str + " ";
     str = str.replaceAll("\\s+", " ");
 
-    TreeSet<Integer> splitIndices = new TreeSet<Integer>();
+    TreeSet<Integer> splitIndices = new TreeSet<>();
 
     for (int i = 0; i < str.length(); ++i) {
       char ch = str.charAt(i);
@@ -2855,7 +2854,7 @@ public class AdaGradCore {
     // remove spaces around dashes
     if (normMethod == 2 || normMethod == 4) {
 
-      TreeSet<Integer> skipIndices = new TreeSet<Integer>();
+      TreeSet<Integer> skipIndices = new TreeSet<>();
       str = " " + str + " ";
 
       for (int i = 0; i < str.length(); ++i) {
@@ -3031,7 +3030,7 @@ public class AdaGradCore {
   }
 
   private ArrayList<Double> randomLambda() {
-    ArrayList<Double> retLambda = new ArrayList<Double>(1 + numParams);
+    ArrayList<Double> retLambda = new ArrayList<>(1 + numParams);
 
     for (int c = 1; c <= numParams; ++c) {
       if (isOptimizable[c]) {
@@ -3092,8 +3091,8 @@ public class AdaGradCore {
     // print("discarding: ",4);
 
     int numCandidates = slope.length;
-    HashSet<Integer> discardedIndices = new HashSet<Integer>();
-    HashMap<Double, Integer> indicesOfSlopes = new HashMap<Double, Integer>();
+    HashSet<Integer> discardedIndices = new HashSet<>();
+    HashMap<Double, Integer> indicesOfSlopes = new HashMap<>();
     // maps slope to index of best candidate that has that slope.
     // ("best" as in the one with the highest offset)
 

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/233818d6/src/main/java/org/apache/joshua/adagrad/Optimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/joshua/adagrad/Optimizer.java b/src/main/java/org/apache/joshua/adagrad/Optimizer.java
index 722c593..16c25cd 100755
--- a/src/main/java/org/apache/joshua/adagrad/Optimizer.java
+++ b/src/main/java/org/apache/joshua/adagrad/Optimizer.java
@@ -42,13 +42,12 @@ public class Optimizer {
     feat_hash = _feat_hash; // feature hash table
     stats_hash = _stats_hash; // suff. stats hash table
     finalLambda = new double[initialLambda.length];
-    for(int i = 0; i < finalLambda.length; i++)
-      finalLambda[i] = initialLambda[i];
+	    System.arraycopy(initialLambda, 0, finalLambda, 0, finalLambda.length);
   }
 
   //run AdaGrad for one epoch
   public double[] runOptimizer() {
-      List<Integer> sents = new ArrayList<Integer>();
+      List<Integer> sents = new ArrayList<>();
       for( int i = 0; i < sentNum; ++i )
 	  sents.add(i);
       double[] avgLambda = new double[initialLambda.length]; //only needed if averaging is required
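
In the constructor, the per-element copy into finalLambda becomes a single System.arraycopy call, which copies the whole block natively. A minimal sketch with invented contents; java.util.Arrays.copyOf would be an even shorter alternative when the destination array does not already exist:

    public class ArrayCopySketch {
        public static void main(String[] args) {
            double[] initialLambda = { 0.0, 0.5, -0.3, 1.0 };
            double[] finalLambda = new double[initialLambda.length];

            // Replaces: for (int i = 0; i < finalLambda.length; i++)
            //               finalLambda[i] = initialLambda[i];
            System.arraycopy(initialLambda, 0, finalLambda, 0, finalLambda.length);

            System.out.println(java.util.Arrays.toString(finalLambda));
        }
    }
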
@@ -87,14 +86,14 @@ public class Optimizer {
 	  double Hii = 0;
 	  double gradiiSquare = 0;
 	  int lastUpdateTime = 0;
-	  HashMap<Integer, Integer> lastUpdate = new HashMap<Integer, Integer>();
-	  HashMap<Integer, Double> lastVal = new HashMap<Integer, Double>();
-	  HashMap<Integer, Double> H = new HashMap<Integer, Double>();
+	  HashMap<Integer, Integer> lastUpdate = new HashMap<>();
+	  HashMap<Integer, Double> lastVal = new HashMap<>();
+	  HashMap<Integer, Double> H = new HashMap<>();
 	  while( sentCount < sentNum ) {
 	      loss = 0;
 	      thisBatchSize = batchSize;
 	      ++numBatch;
-	      HashMap<Integer, Double> featDiff = new HashMap<Integer, Double>();
+	      HashMap<Integer, Double> featDiff = new HashMap<>();
 	      for(int b = 0; b < batchSize; ++b ) {
 		  //find out oracle and prediction
 		  s = sents.get(sentCount);
@@ -124,51 +123,48 @@ public class Optimizer {
 
 		  //accumulate difference feature vector
 		  if ( b == 0 ) {
-		      for (int i = 0; i < vecOraFeat.length; i++) {
-			  featInfo = vecOraFeat[i].split("=");
-			  diffFeatId = Integer.parseInt(featInfo[0]);
-			  featDiff.put(diffFeatId, Double.parseDouble(featInfo[1]));
-		      }
-		      for (int i = 0; i < vecPredFeat.length; i++) {
-			  featInfo = vecPredFeat[i].split("=");
-			  diffFeatId = Integer.parseInt(featInfo[0]);
-			  if (featDiff.containsKey(diffFeatId)) { //overlapping features
-			      diff = featDiff.get(diffFeatId)-Double.parseDouble(featInfo[1]);
-			      if ( Math.abs(diff) > 1e-20 )
-				  featDiff.put(diffFeatId, diff);
-			      else
-				  featDiff.remove(diffFeatId);
+			  for (String aVecOraFeat : vecOraFeat) {
+				  featInfo = aVecOraFeat.split("=");
+				  diffFeatId = Integer.parseInt(featInfo[0]);
+				  featDiff.put(diffFeatId, Double.parseDouble(featInfo[1]));
+			  }
+			  for (String aVecPredFeat : vecPredFeat) {
+				  featInfo = aVecPredFeat.split("=");
+				  diffFeatId = Integer.parseInt(featInfo[0]);
+				  if (featDiff.containsKey(diffFeatId)) { //overlapping features
+					  diff = featDiff.get(diffFeatId) - Double.parseDouble(featInfo[1]);
+					  if (Math.abs(diff) > 1e-20)
+						  featDiff.put(diffFeatId, diff);
+					  else
+						  featDiff.remove(diffFeatId);
+				  } else //features only firing in the 2nd feature vector
+					  featDiff.put(diffFeatId, -1.0 * Double.parseDouble(featInfo[1]));
 			  }
-			  else //features only firing in the 2nd feature vector
-			      featDiff.put(diffFeatId, -1.0*Double.parseDouble(featInfo[1]));
-		      }
 		  } else {
-		      for (int i = 0; i < vecOraFeat.length; i++) {
-			  featInfo = vecOraFeat[i].split("=");
-			  diffFeatId = Integer.parseInt(featInfo[0]);
-			  if (featDiff.containsKey(diffFeatId)) { //overlapping features
-			      diff = featDiff.get(diffFeatId)+Double.parseDouble(featInfo[1]);
-			      if ( Math.abs(diff) > 1e-20 )
-				  featDiff.put(diffFeatId, diff);
-			      else
-				  featDiff.remove(diffFeatId);
+			  for (String aVecOraFeat : vecOraFeat) {
+				  featInfo = aVecOraFeat.split("=");
+				  diffFeatId = Integer.parseInt(featInfo[0]);
+				  if (featDiff.containsKey(diffFeatId)) { //overlapping features
+					  diff = featDiff.get(diffFeatId) + Double.parseDouble(featInfo[1]);
+					  if (Math.abs(diff) > 1e-20)
+						  featDiff.put(diffFeatId, diff);
+					  else
+						  featDiff.remove(diffFeatId);
+				  } else //features only firing in the new oracle feature vector
+					  featDiff.put(diffFeatId, Double.parseDouble(featInfo[1]));
 			  }
-			  else //features only firing in the new oracle feature vector
-			      featDiff.put(diffFeatId, Double.parseDouble(featInfo[1]));
-		      }
-		      for (int i = 0; i < vecPredFeat.length; i++) {
-			  featInfo = vecPredFeat[i].split("=");
-			  diffFeatId = Integer.parseInt(featInfo[0]);
-			  if (featDiff.containsKey(diffFeatId)) { //overlapping features
-			      diff = featDiff.get(diffFeatId)-Double.parseDouble(featInfo[1]);
-			      if ( Math.abs(diff) > 1e-20 )
-				  featDiff.put(diffFeatId, diff);
-			      else
-				  featDiff.remove(diffFeatId);
+			  for (String aVecPredFeat : vecPredFeat) {
+				  featInfo = aVecPredFeat.split("=");
+				  diffFeatId = Integer.parseInt(featInfo[0]);
+				  if (featDiff.containsKey(diffFeatId)) { //overlapping features
+					  diff = featDiff.get(diffFeatId) - Double.parseDouble(featInfo[1]);
+					  if (Math.abs(diff) > 1e-20)
+						  featDiff.put(diffFeatId, diff);
+					  else
+						  featDiff.remove(diffFeatId);
+				  } else //features only firing in the new prediction feature vector
+					  featDiff.put(diffFeatId, -1.0 * Double.parseDouble(featInfo[1]));
 			  }
-			  else //features only firing in the new prediction feature vector
-			      featDiff.put(diffFeatId, -1.0*Double.parseDouble(featInfo[1]));
-		      }
 		  }
 
 		  //remember the model scores here are already scaled
@@ -350,7 +346,7 @@ public class Optimizer {
       } //for ( int iter = 0; iter < adagradIter; ++iter ) {
 
       //non-optimizable weights should remain unchanged
-      ArrayList<Double> cpFixWt = new ArrayList<Double>();
+      ArrayList<Double> cpFixWt = new ArrayList<>();
       for ( int i = 1; i < isOptimizable.length; ++i ) {
 	  if ( ! isOptimizable[i] )
 	      cpFixWt.add(finalLambda[i]);
@@ -388,26 +384,25 @@ public class Optimizer {
       // find out the 1-best candidate for each sentence
       // this depends on the training mode
       maxModelScore = NegInf;
-      for (Iterator it = candSet.iterator(); it.hasNext();) {
-        modelScore = 0.0;
-        candStr = it.next().toString();
+	    for (String aCandSet : candSet) {
+		    modelScore = 0.0;
+		    candStr = aCandSet.toString();
 
-        feat_str = feat_hash[i].get(candStr).split("\\s+");
+		    feat_str = feat_hash[i].get(candStr).split("\\s+");
 
-	String[] feat_info;
+		    String[] feat_info;
 
-	for (int f = 0; f < feat_str.length; f++) {
-	    feat_info = feat_str[f].split("=");
-	    modelScore +=
-		Double.parseDouble(feat_info[1]) * finalLambda[Vocabulary.id(feat_info[0])];
-	}
+		    for (String aFeat_str : feat_str) {
+			    feat_info = aFeat_str.split("=");
+			    modelScore += Double.parseDouble(feat_info[1]) * finalLambda[Vocabulary.id(feat_info[0])];
+		    }
 
-        if (maxModelScore < modelScore) {
-          maxModelScore = modelScore;
-          tmpStatsVal = stats_hash[i].get(candStr).split("\\s+"); // save the
-                                                                  // suff stats
-        }
-      }
+		    if (maxModelScore < modelScore) {
+			    maxModelScore = modelScore;
+			    tmpStatsVal = stats_hash[i].get(candStr).split("\\s+"); // save the
+			    // suff stats
+		    }
+	    }
 
       for (int j = 0; j < suffStatsCount; j++)
         corpusStatsVal[j] += Integer.parseInt(tmpStatsVal[j]); // accumulate
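
The raw Iterator loops over candSet become typed enhanced for statements, removing the unchecked it.next().toString() step. One quirk the automated rewrite leaves behind: once the loop variable is already a String, the surviving toString() call simply returns the string itself. A sketch, assuming candSet holds candidate translations keyed as strings, as in the surrounding code:

    import java.util.HashSet;
    import java.util.Set;

    public class TypedLoopSketch {
        public static void main(String[] args) {
            Set<String> candSet = new HashSet<>();
            candSet.add("a small house");
            candSet.add("a little house");

            for (String cand : candSet) {
                // cand.toString() here would be a no-op; the typed loop
                // makes the conversion the raw Iterator needed unnecessary.
                System.out.println(cand);
            }
        }
    }
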
@@ -451,115 +446,108 @@ public class Optimizer {
       else
         worstPredScore = PosInf;
     }
-    
-    for (Iterator it = candSet.iterator(); it.hasNext();) {
-      cand = it.next().toString();
-      candMetric = computeSentMetric(sentId, cand); //compute metric score
-
-      //start to compute model score
-      candScore = 0;
-      featStr = feat_hash[sentId].get(cand).split("\\s+");
-      feats = "";
-
-      for (int i = 0; i < featStr.length; i++) {
-          featInfo = featStr[i].split("=");
-	  actualFeatId = Vocabulary.id(featInfo[0]);
-	  candScore += Double.parseDouble(featInfo[1]) * lambda[actualFeatId];
-	  if ( (actualFeatId < isOptimizable.length && isOptimizable[actualFeatId]) ||
-	       actualFeatId >= isOptimizable.length )
-	      feats += actualFeatId + "=" + Double.parseDouble(featInfo[1]) + " ";
-      }
-      
-      candScore *= featScale;  //scale the model score
-      
-      //is this cand oracle?
-      if(oraSelectMode == 1) {//"hope", b=1, r=1
-        if(evalMetric.getToBeMinimized()) {//if the smaller the metric score, the better
-          if( bestOraScore<=(candScore-candMetric) ) {
-            bestOraScore = candScore-candMetric;
-            oraMetric = candMetric;
-            oraScore = candScore;
-            oraFeat = feats;
-            oraCand = cand;
-          }
-        }
-        else {
-          if( bestOraScore<=(candScore+candMetric) ) {
-            bestOraScore = candScore+candMetric;
-            oraMetric = candMetric;
-            oraScore = candScore;
-            oraFeat = feats;
-            oraCand = cand;
-          }
-        }
-      }
-      else {//best metric score(ex: max BLEU), b=1, r=0
-        if(evalMetric.getToBeMinimized()) {//if the smaller the metric score, the better
-          if( bestOraScore>=candMetric ) {
-            bestOraScore = candMetric;
-            oraMetric = candMetric;
-            oraScore = candScore;
-            oraFeat = feats;
-            oraCand = cand;
-          }
-        }
-        else {
-          if( bestOraScore<=candMetric ) {
-            bestOraScore = candMetric;
-            oraMetric = candMetric;
-            oraScore = candScore;
-            oraFeat = feats;
-            oraCand = cand;
-          }
-        }
-      }
-      
-      //is this cand prediction?
-      if(predSelectMode == 1) {//"fear"
-        if(evalMetric.getToBeMinimized()) {//if the smaller the metric score, the better
-          if( worstPredScore<=(candScore+candMetric) ) {
-            worstPredScore = candScore+candMetric;
-            predMetric = candMetric;
-            predScore = candScore;
-            predFeat = feats;
-          }
-        }
-        else {
-          if( worstPredScore<=(candScore-candMetric) ) {
-            worstPredScore = candScore-candMetric;
-            predMetric = candMetric;
-            predScore = candScore;
-            predFeat = feats;
-          }
-        }
-      }
-      else if(predSelectMode == 2) {//model prediction(max model score)
-        if( worstPredScore<=candScore ) {
-          worstPredScore = candScore;
-          predMetric = candMetric; 
-          predScore = candScore;
-          predFeat = feats;
-        }
-      }
-      else {//worst metric score(ex: min BLEU)
-        if(evalMetric.getToBeMinimized()) {//if the smaller the metric score, the better
-          if( worstPredScore<=candMetric ) {
-            worstPredScore = candMetric;
-            predMetric = candMetric;
-            predScore = candScore;
-            predFeat = feats;
-          }
-        }
-        else {
-          if( worstPredScore>=candMetric ) {
-            worstPredScore = candMetric;
-            predMetric = candMetric;
-            predScore = candScore;
-            predFeat = feats;
-          }
-        }
-      } 
-    }
+
+	  for (String aCandSet : candSet) {
+		  cand = aCandSet.toString();
+		  candMetric = computeSentMetric(sentId, cand); //compute metric score
+
+		  //start to compute model score
+		  candScore = 0;
+		  featStr = feat_hash[sentId].get(cand).split("\\s+");
+		  feats = "";
+
+		  for (String aFeatStr : featStr) {
+			  featInfo = aFeatStr.split("=");
+			  actualFeatId = Vocabulary.id(featInfo[0]);
+			  candScore += Double.parseDouble(featInfo[1]) * lambda[actualFeatId];
+			  if ((actualFeatId < isOptimizable.length && isOptimizable[actualFeatId])
+					  || actualFeatId >= isOptimizable.length)
+				  feats += actualFeatId + "=" + Double.parseDouble(featInfo[1]) + " ";
+		  }
+
+		  candScore *= featScale;  //scale the model score
+
+		  //is this cand oracle?
+		  if (oraSelectMode == 1) {//"hope", b=1, r=1
+			  if (evalMetric.getToBeMinimized()) {//if the smaller the metric score, the better
+				  if (bestOraScore <= (candScore - candMetric)) {
+					  bestOraScore = candScore - candMetric;
+					  oraMetric = candMetric;
+					  oraScore = candScore;
+					  oraFeat = feats;
+					  oraCand = cand;
+				  }
+			  } else {
+				  if (bestOraScore <= (candScore + candMetric)) {
+					  bestOraScore = candScore + candMetric;
+					  oraMetric = candMetric;
+					  oraScore = candScore;
+					  oraFeat = feats;
+					  oraCand = cand;
+				  }
+			  }
+		  } else {//best metric score(ex: max BLEU), b=1, r=0
+			  if (evalMetric.getToBeMinimized()) {//if the smaller the metric score, the better
+				  if (bestOraScore >= candMetric) {
+					  bestOraScore = candMetric;
+					  oraMetric = candMetric;
+					  oraScore = candScore;
+					  oraFeat = feats;
+					  oraCand = cand;
+				  }
+			  } else {
+				  if (bestOraScore <= candMetric) {
+					  bestOraScore = candMetric;
+					  oraMetric = candMetric;
+					  oraScore = candScore;
+					  oraFeat = feats;
+					  oraCand = cand;
+				  }
+			  }
+		  }
+
+		  //is this cand prediction?
+		  if (predSelectMode == 1) {//"fear"
+			  if (evalMetric.getToBeMinimized()) {//if the smaller the metric score, the better
+				  if (worstPredScore <= (candScore + candMetric)) {
+					  worstPredScore = candScore + candMetric;
+					  predMetric = candMetric;
+					  predScore = candScore;
+					  predFeat = feats;
+				  }
+			  } else {
+				  if (worstPredScore <= (candScore - candMetric)) {
+					  worstPredScore = candScore - candMetric;
+					  predMetric = candMetric;
+					  predScore = candScore;
+					  predFeat = feats;
+				  }
+			  }
+		  } else if (predSelectMode == 2) {//model prediction(max model score)
+			  if (worstPredScore <= candScore) {
+				  worstPredScore = candScore;
+				  predMetric = candMetric;
+				  predScore = candScore;
+				  predFeat = feats;
+			  }
+		  } else {//worst metric score(ex: min BLEU)
+			  if (evalMetric.getToBeMinimized()) {//if the smaller the metric score, the better
+				  if (worstPredScore <= candMetric) {
+					  worstPredScore = candMetric;
+					  predMetric = candMetric;
+					  predScore = candScore;
+					  predFeat = feats;
+				  }
+			  } else {
+				  if (worstPredScore >= candMetric) {
+					  worstPredScore = candMetric;
+					  predMetric = candMetric;
+					  predScore = candScore;
+					  predFeat = feats;
+				  }
+			  }
+		  }
+	  }
     
     oraPredScore[0] = oraMetric;
     oraPredScore[1] = oraScore;
@@ -695,14 +683,14 @@ public class Optimizer {
       return finalMetricScore;
   }
   
-  private Vector<String> output;
+  private final Vector<String> output;
   private double[] initialLambda;
-  private double[] finalLambda;
+  private final double[] finalLambda;
   private double finalMetricScore;
-  private HashMap<String, String>[] feat_hash;
-  private HashMap<String, String>[] stats_hash;
-  private int paramDim;
-  private boolean[] isOptimizable;
+  private final HashMap<String, String>[] feat_hash;
+  private final HashMap<String, String>[] stats_hash;
+  private final int paramDim;
+  private final boolean[] isOptimizable;
   public static int sentNum;
   public static int adagradIter; //AdaGrad internal iterations
   public static int oraSelectMode;
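
The closing hunk marks fields that are assigned exactly once, in the constructor, as final; the compiler then rejects any later reassignment, documenting the invariant for free. A minimal sketch with hypothetical names:

    public class FinalFieldSketch {
        private final double[] finalLambda; // must be assigned in every constructor

        public FinalFieldSketch(double[] initialLambda) {
            finalLambda = new double[initialLambda.length];
            System.arraycopy(initialLambda, 0, finalLambda, 0, finalLambda.length);
        }

        public double get(int i) {
            return finalLambda[i];
        }

        public static void main(String[] args) {
            FinalFieldSketch ex = new FinalFieldSketch(new double[] { 0.1, 0.2 });
            System.out.println(ex.get(1)); // 0.2
            // ex.finalLambda = new double[2]; // would not compile: field is final
        }
    }
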