Posted to commits@joshua.apache.org by mj...@apache.org on 2016/08/17 10:32:10 UTC

[29/56] [partial] incubator-joshua git commit: maven multi-module layout 1st commit: moving files into joshua-core

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/5735d9ae/joshua-core/src/main/java/org/apache/joshua/pro/ClassifierSVM.java
----------------------------------------------------------------------
diff --git a/joshua-core/src/main/java/org/apache/joshua/pro/ClassifierSVM.java b/joshua-core/src/main/java/org/apache/joshua/pro/ClassifierSVM.java
new file mode 100755
index 0000000..5c1f4e3
--- /dev/null
+++ b/joshua-core/src/main/java/org/apache/joshua/pro/ClassifierSVM.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.joshua.pro;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.Vector;
+
+import org.apache.joshua.util.StreamGobbler;
+import org.apache.joshua.util.io.LineReader;
+
+public class ClassifierSVM implements ClassifierInterface {
+  @Override
+  public double[] runClassifier(Vector<String> samples, double[] initialLambda, int featDim) {
+    System.out.println("------- SVM training starts ------");
+
+    double[] lambda = new double[featDim + 1];
+    for (int i = 1; i <= featDim; i++)
+      lambda[i] = 0;
+
+    // String root_dir =
+    // "/media/Data/JHU/Research/MT discriminative LM training/joshua_expbleu/PRO_test/";
+    // String root_dir = "/home/ycao/WS11/nist_zh_en_percep/pro_forward/pro_libsvm/";
+
+    try {
+      // prepare the training file for LibSVM
+      PrintWriter prt = new PrintWriter(new FileOutputStream(trainingFilePath));
+
+      for (String line : samples) {
+        String[] feat = line.split("\\s+");
+
+        if (feat[feat.length - 1].equals("1"))
+          prt.print("+1 ");
+        else
+          prt.print("-1 ");
+
+        for (int i = 0; i < feat.length - 1; i++)
+          prt.print((i + 1) + ":" + feat[i] + " "); // feat id starts from 1!
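+        // e.g. the sample "0.5 -1.2 1" is written out as "+1 1:0.5 2:-1.2 "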
+
+        prt.println();
+      }
+      prt.close();
+
+      // start running SVM
+      Runtime rt = Runtime.getRuntime();
+      // String cmd = "/home/yuan/tmp_libsvm_command";
+
+      Process p = rt.exec(commandFilePath); // only linear kernel is used
+
+      StreamGobbler errorGobbler = new StreamGobbler(p.getErrorStream(), 1);
+      StreamGobbler outputGobbler = new StreamGobbler(p.getInputStream(), 1);
+
+      errorGobbler.start();
+      outputGobbler.start();
+
+      int decStatus = p.waitFor();
+      if (decStatus != 0) {
+        throw new RuntimeException("Call to decoder returned " + decStatus + "; was expecting "
+            + 0 + ".");
+      }
+
+      // read the model file
+      boolean sv_start = false;
+      double coef;
+
+      for (String line : new LineReader(modelFilePath)) {
+        if (sv_start) { // reading support vectors and coefficients
+          String[] val = line.split("\\s+");
+          coef = Double.parseDouble(val[0]);
+
+          // System.out.print(coef+" ");
+
+          // only valid for a linear kernel:
+          // W = \sum_{i=1}^{l} y_i alpha_i phi(x_i) = \sum_{i=1}^{l} coef_i x_i
+          for (int i = 1; i < val.length; i++) {
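+            // e.g. a (hypothetical) model line "0.25 3:1.5 7:-2.0" contributes
+            // 0.25*1.5 to lambda[3] and 0.25*(-2.0) to lambda[7]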
+            String[] sv = val[i].split(":"); // feat id
+            lambda[Integer.parseInt(sv[0])] += coef * Double.parseDouble(sv[1]); // index starts
+                                                                                 // from 1
+            // System.out.print(Integer.parseInt(sv[0])+" "+Double.parseDouble(sv[1])+" ");
+          }
+
+          // System.out.println();
+        }
+
+        if (line.equals("SV")) sv_start = true;
+      }
+
+      File file = new File(trainingFilePath);
+      file.delete();
+      file = new File(modelFilePath);
+      file.delete();
+    } catch (IOException | InterruptedException e) {
+      throw new RuntimeException(e);
+    }
+
+    System.out.println("------- SVM training ends ------");
+
+    return lambda;
+  }
+
+  /*
+   * For LibSVM:
+   *   param[0] = LibSVM command file path
+   *   param[1] = LibSVM training data file path (generated on the fly)
+   *   param[2] = LibSVM model file path (generated after training)
+   * Note: the training file path should be consistent with the one specified
+   * in the command file.
+   */
+  @Override
+  public void setClassifierParam(String[] param) {
+    if (param == null) {
+      throw new RuntimeException("ERROR: must provide parameters for LibSVM classifier!");
+    } else {
+      commandFilePath = param[0];
+      trainingFilePath = param[1];
+      modelFilePath = param[2];
+    }
+  }
+
+  String commandFilePath;
+  String trainingFilePath;
+  String modelFilePath;
+}
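
As a side note, here is a minimal sketch of how this classifier is driven
through ClassifierInterface (all three paths are hypothetical placeholders;
per the setClassifierParam comment above, the command file must reference the
same training-file path):

    import java.util.Arrays;
    import java.util.Vector;

    import org.apache.joshua.pro.ClassifierInterface;
    import org.apache.joshua.pro.ClassifierSVM;

    public class ClassifierSVMSketch {
      public static void main(String[] args) {
        int featDim = 2;
        double[] initialLambda = new double[featDim + 1]; // index 0 is unused

        ClassifierInterface svm = new ClassifierSVM();
        svm.setClassifierParam(new String[] {
            "/path/to/libsvm_command", // param[0]: LibSVM command file
            "/path/to/svm_train.dat",  // param[1]: training data, written on the fly
            "/path/to/svm_model"       // param[2]: model file, written by training
        });

        // each sample: whitespace-separated feature values followed by the label
        Vector<String> samples = new Vector<String>();
        samples.add("0.5 -1.2 1");
        samples.add("-0.5 1.2 -1"); // the mirrored pair

        double[] lambda = svm.runClassifier(samples, initialLambda, featDim);
        System.out.println(Arrays.toString(lambda));
      }
    }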

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/5735d9ae/joshua-core/src/main/java/org/apache/joshua/pro/Optimizer.java
----------------------------------------------------------------------
diff --git a/joshua-core/src/main/java/org/apache/joshua/pro/Optimizer.java b/joshua-core/src/main/java/org/apache/joshua/pro/Optimizer.java
new file mode 100755
index 0000000..ad80305
--- /dev/null
+++ b/joshua-core/src/main/java/org/apache/joshua/pro/Optimizer.java
@@ -0,0 +1,454 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.joshua.pro;
+
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.Vector;
+
+import org.apache.joshua.corpus.Vocabulary;
+import org.apache.joshua.metrics.EvaluationMetric;
+
+// This class implements the PRO (Pairwise Ranking Optimization) tuning method
+// of Hopkins & May (2011), "Tuning as Ranking".
+public class Optimizer {
+  public Optimizer(long _seed, boolean[] _isOptimizable, Vector<String> _output,
+      double[] _initialLambda, HashMap<String, String>[] _feat_hash,
+      HashMap<String, String>[] _stats_hash, EvaluationMetric _evalMetric, int _Tau,
+      int _Xi, double _metricDiff, double[] _normalizationOptions, String _classifierAlg,
+      String[] _classifierParam) {
+    sentNum = _feat_hash.length; // total number of training sentences
+    output = _output; // buffer for printable results (see run_Optimizer)
+    initialLambda = _initialLambda;
+    isOptimizable = _isOptimizable;
+    paramDim = initialLambda.length - 1;
+    feat_hash = _feat_hash; // feature hash table
+    stats_hash = _stats_hash; // suff. stats hash table
+    evalMetric = _evalMetric; // evaluation metric
+    Tau = _Tau; // param Tau in PRO
+    Xi = _Xi; // param Xi in PRO
+    metricDiff = _metricDiff; // threshold for sampling acceptance
+    normalizationOptions = _normalizationOptions; // weight normalization option
+    randgen = new Random(_seed); // random number generator
+    classifierAlg = _classifierAlg; // classification algorithm
+    classifierParam = _classifierParam; // params for the specified classifier
+  }
+
+  public double[] run_Optimizer() {
+    // sampling from all candidates
+    Vector<String> allSamples = process_Params();
+
+    try {
+      // create classifier object from the given class name string
+      ClassifierInterface myClassifier =
+          (ClassifierInterface) Class.forName(classifierAlg).newInstance();
+      System.out.println("Total training samples(class +1 & class -1): " + allSamples.size());
+
+      // set classifier parameters
+      myClassifier.setClassifierParam(classifierParam);
+      // run the classifier
+      finalLambda = myClassifier.runClassifier(allSamples, initialLambda, paramDim);
+      normalizeLambda(finalLambda);
+      // parameters that are not optimizable are reset to their initial values
+      for (int i = 1; i < isOptimizable.length; ++i) {
+        if (!isOptimizable[i])
+          finalLambda[i] = initialLambda[i];
+      }
+
+      // compute the initial and final corpus-level metric scores
+      double initMetricScore = computeCorpusMetricScore(initialLambda);
+      finalMetricScore = computeCorpusMetricScore(finalLambda);
+
+      // for( int i=0; i<finalLambda.length; i++ ) System.out.print(finalLambda[i]+" ");
+      // System.out.println(); System.exit(0);
+
+      // prepare the printing info
+      // int numParamToPrint = 0;
+      // String result = "";
+      // numParamToPrint = paramDim > 10 ? 10 : paramDim; // how many parameters to print
+      // result = paramDim > 10 ? "Final lambda (first 10): {" : "Final lambda: {";
+      
+      // for (int i = 1; i <= numParamToPrint; i++)
+      //     result += String.format("%.4f", finalLambda[i]) + " ";
+
+      output.add("Initial "
+		 + evalMetric.get_metricName() + ": " + String.format("%.4f", initMetricScore) + "\nFinal "
+		 + evalMetric.get_metricName() + ": " + String.format("%.4f", finalMetricScore));
+
+      // System.out.println(output);
+
+      return finalLambda;
+    } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  public double computeCorpusMetricScore(double[] finalLambda) {
+    int suffStatsCount = evalMetric.get_suffStatsCount();
+    double modelScore;
+    double maxModelScore;
+    Set<String> candSet;
+    String candStr;
+    String[] feat_str;
+    String[] tmpStatsVal = new String[suffStatsCount];
+    int[] corpusStatsVal = new int[suffStatsCount];
+    for (int i = 0; i < suffStatsCount; i++)
+      corpusStatsVal[i] = 0;
+
+    for (int i = 0; i < sentNum; i++) {
+      candSet = feat_hash[i].keySet();
+
+      // find out the 1-best candidate for each sentence
+      maxModelScore = NegInf;
+      for (Iterator<String> it = candSet.iterator(); it.hasNext();) {
+        modelScore = 0.0;
+        candStr = it.next().toString();
+
+        feat_str = feat_hash[i].get(candStr).split("\\s+");
+
+        for (int f = 0; f < feat_str.length; f++) {
+          String[] feat_info = feat_str[f].split("[=]");
+          modelScore +=
+              Double.parseDouble(feat_info[1]) * finalLambda[Vocabulary.id(feat_info[0])];
+        }
+
+        if (maxModelScore < modelScore) {
+          maxModelScore = modelScore;
+          tmpStatsVal = stats_hash[i].get(candStr).split("\\s+"); // save the suff stats
+        }
+      }
+
+      for (int j = 0; j < suffStatsCount; j++)
+        corpusStatsVal[j] += Integer.parseInt(tmpStatsVal[j]); // accumulate corpus-level suff stats
+    } // for( int i=0; i<sentNum; i++ )
+
+    return evalMetric.score(corpusStatsVal);
+  }
+
+  public Vector<String> process_Params() {
+    Vector<String> allSamples = new Vector<String>(); // to save all sampled pairs
+
+    // sampling
+    Vector<String> sampleVec = new Vector<String>(); // use String to make sparse representation
+                                                     // easy
+    for (int i = 0; i < sentNum; i++) {
+      sampleVec = Sampler(i);
+      allSamples.addAll(sampleVec);
+    }
+
+    return allSamples;
+  }
+
+  private Vector<String> Sampler(int sentId) {
+    int candCount = stats_hash[sentId].size();
+    Vector<String> sampleVec = new Vector<String>();
+    HashMap<String, Double> candScore = new HashMap<String, Double>(); // metric (e.g. BLEU) score of
+                                                                       // all candidates
+
+    // extract all candidates to a string array to save time in computing BLEU score
+    String[] cands = new String[candCount];
+    Set<String> candSet = stats_hash[sentId].keySet();
+    HashMap<Integer, String> candMap = new HashMap<Integer, String>();
+
+    int candId = 0;
+    for (Iterator<String> it = candSet.iterator(); it.hasNext();) {
+      cands[candId] = it.next().toString();
+      candMap.put(candId, cands[candId]); // map an integer to each candidate
+      candId++;
+    }
+    candScore = compute_Score(sentId, cands); // compute BLEU for each candidate
+
+    // start sampling
+    double scoreDiff;
+    double probAccept;
+    boolean accept;
+    HashMap<String, Double> acceptedPair = new HashMap<String, Double>();
+
+    if (Tau < candCount * (candCount - 1)) // otherwise no need to sample
+    {
+      int j1, j2;
+      for (int i = 0; i < Tau; i++) {
+        // here the case in which the same pair is sampled more than once is allowed
+        // otherwise if Tau is almost the same as candCount^2, it might take a lot of time to find
+        // Tau distinct pairs
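+        // e.g. with 100 candidates there are 100*99 = 9900 ordered pairs, so
+        // Tau = 5000 samples with replacement rather than enumerating them all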
+        j1 = randgen.nextInt(candCount);
+        j2 = randgen.nextInt(candCount);
+        while (j1 == j2)
+          j2 = randgen.nextInt(candCount);
+
+        // accept or not?
+        scoreDiff = Math.abs(candScore.get(candMap.get(j1)) - candScore.get(candMap.get(j2)));
+        probAccept = Alpha(scoreDiff);
+        
+//        System.err.println("Diff: " + scoreDiff + " = " + candScore.get(candMap.get(j1)) + " - " 
+//            + candScore.get(candMap.get(j2)));
+
+        accept = randgen.nextDouble() <= probAccept;
+
+        if (accept) acceptedPair.put(j1 + " " + j2, scoreDiff);
+      }
+    } else {
+      for (int i = 0; i < candCount; i++) {
+        for (int j = 0; j < candCount; j++) {
+          if (j != i) {
+            // accept or not?
+            scoreDiff = Math.abs(candScore.get(candMap.get(i)) - candScore.get(candMap.get(j)));
+            probAccept = Alpha(scoreDiff);
+
+            accept = randgen.nextDouble() <= probAccept;
+
+            if (accept) acceptedPair.put(i + " " + j, scoreDiff);
+          }
+        }
+      }
+    }
+
+    //System.out.println("Tau="+Tau+"\nAll possible pair number: "+candCount*(candCount-1));
+    //System.out.println("Number of accepted pairs after random selection: "+acceptedPair.size());
+
+    // sort sampled pairs according to "scoreDiff"
+    ValueComparator comp = new ValueComparator(acceptedPair);
+    TreeMap<String, Double> acceptedPairSort = new TreeMap<String, Double>(comp);
+    acceptedPairSort.putAll(acceptedPair);
+
+    int topCount = 0;
+    int label;
+    String[] pair_str;
+    String[] feat_str_j1, feat_str_j2;
+    String j1Cand, j2Cand;
+    String featDiff, neg_featDiff;
+    HashSet<String> added = new HashSet<String>(); // to avoid symmetric duplicate
+
+    for (String key : acceptedPairSort.keySet()) {
+      if (topCount == Xi) break;
+
+      pair_str = key.split("\\s+");
+      // System.out.println(pair_str[0]+" "+pair_str[1]+" "+acceptedPair.get(key));
+
+      if (!added.contains(key)) {
+        j1Cand = candMap.get(Integer.parseInt(pair_str[0]));
+        j2Cand = candMap.get(Integer.parseInt(pair_str[1]));
+
+        if (evalMetric.getToBeMinimized()) // if a smaller metric score is better (like TER)
+          label = (candScore.get(j1Cand) - candScore.get(j2Cand)) < 0 ? 1 : -1;
+        else
+          // like BLEU
+          label = (candScore.get(j1Cand) - candScore.get(j2Cand)) > 0 ? 1 : -1;
+
+        feat_str_j1 = feat_hash[sentId].get(j1Cand).split("\\s+");
+        feat_str_j2 = feat_hash[sentId].get(j2Cand).split("\\s+");
+
+        featDiff = "";
+        neg_featDiff = "";
+
+        HashMap<Integer, String> feat_diff = new HashMap<Integer, String>();
+        String[] feat_info;
+        int feat_id;
+
+        for (int i = 0; i < feat_str_j1.length; i++) {
+          feat_info = feat_str_j1[i].split("[=]");
+          feat_id = Vocabulary.id(feat_info[0]);
+          // keep features that are optimizable or outside the isOptimizable range
+          if ((feat_id < isOptimizable.length && isOptimizable[feat_id])
+              || feat_id >= isOptimizable.length)
+            feat_diff.put(feat_id, feat_info[1]);
+        }
+        for (int i = 0; i < feat_str_j2.length; i++) {
+          feat_info = feat_str_j2[i].split("[=]");
+          feat_id = Vocabulary.id(feat_info[0]);
+          if ((feat_id < isOptimizable.length && isOptimizable[feat_id])
+              || feat_id >= isOptimizable.length) {
+            if (feat_diff.containsKey(feat_id))
+              feat_diff.put(feat_id, Double.toString(
+                  Double.parseDouble(feat_diff.get(feat_id)) - Double.parseDouble(feat_info[1])));
+            else // the feature only fired in candidate 2
+              feat_diff.put(feat_id, Double.toString(-1.0 * Double.parseDouble(feat_info[1])));
+          }
+        }
+
+        for (Integer id : feat_diff.keySet()) {
+          featDiff += id + ":" + feat_diff.get(id) + " ";
+          neg_featDiff += id + ":" + -1.0 * Double.parseDouble(feat_diff.get(id)) + " ";
+        }
+
+        featDiff += label;
+        neg_featDiff += -label;
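+        // e.g. if feat_diff = {3 -> 0.5, 7 -> -1.2} and label = 1, then
+        // featDiff = "3:0.5 7:-1.2 1" and neg_featDiff = "3:-0.5 7:1.2 -1"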
+
+        // System.out.println(sentId+": "+key);
+        // System.out.println(featDiff + " | " + candScore.get(j1Cand) + " " +
+        //  candScore.get(j2Cand));
+        // System.out.println(neg_featDiff);
+        // System.out.println("-------");
+
+        sampleVec.add(featDiff);
+        sampleVec.add(neg_featDiff);
+
+        // both (j1,j2) and (j2,j1) have been added to training set
+        added.add(key);
+        added.add(pair_str[1] + " " + pair_str[0]);
+
+        topCount++;
+      }
+    }
+
+    // System.out.println("Selected top "+topCount+ "pairs for training");
+
+    return sampleVec;
+  }
+
+  private double Alpha(double x) {
+    return x < metricDiff ? 0 : 1; // default implementation of the paper's method
+    // other functions possible
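+    // e.g. with metricDiff = 0.05, a pair whose sentence scores differ by 0.03
+    // is never accepted, while a difference of 0.08 is always accepted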
+  }
+
+  // compute *sentence-level* metric score
+  private HashMap<String, Double> compute_Score(int sentId, String[] cands) {
+    HashMap<String, Double> candScore = new HashMap<String, Double>();
+    String statString;
+    String[] statVal_str;
+    int[] statVal = new int[evalMetric.get_suffStatsCount()];
+
+    // for all candidates
+    for (int i = 0; i < cands.length; i++) {
+      statString = stats_hash[sentId].get(cands[i]);
+      statVal_str = statString.split("\\s+");
+
+      for (int j = 0; j < evalMetric.get_suffStatsCount(); j++)
+        statVal[j] = Integer.parseInt(statVal_str[j]);
+
+//      System.err.println("Score: " + evalMetric.score(statVal));
+      
+      candScore.put(cands[i], evalMetric.score(statVal));
+    }
+
+    return candScore;
+  }
+
+  // from ZMERT
+  private void normalizeLambda(double[] origLambda) {
+    // private String[] normalizationOptions;
+    // How should a lambda[] vector be normalized (before decoding)?
+    // nO[0] = 0: no normalization
+    // nO[0] = 1: scale so that parameter nO[2] has absolute value nO[1]
+    // nO[0] = 2: scale so that the maximum absolute value is nO[1]
+    // nO[0] = 3: scale so that the minimum absolute value is nO[1]
+    // nO[0] = 4: scale so that the L-nO[1] norm equals nO[2]
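+    // e.g. normalizationOptions = {2, 1.0} rescales lambda so that
+    // max_c |lambda[c]| equals 1.0 (hypothetical setting)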
+
+    int normalizationMethod = (int) normalizationOptions[0];
+    double scalingFactor = 1.0;
+    if (normalizationMethod == 0) {
+      scalingFactor = 1.0;
+    } else if (normalizationMethod == 1) {
+      int c = (int) normalizationOptions[2];
+      scalingFactor = normalizationOptions[1] / Math.abs(origLambda[c]);
+    } else if (normalizationMethod == 2) {
+      double maxAbsVal = -1;
+      int maxAbsVal_c = 0;
+      for (int c = 1; c <= paramDim; ++c) {
+        if (Math.abs(origLambda[c]) > maxAbsVal) {
+          maxAbsVal = Math.abs(origLambda[c]);
+          maxAbsVal_c = c;
+        }
+      }
+      scalingFactor = normalizationOptions[1] / Math.abs(origLambda[maxAbsVal_c]);
+
+    } else if (normalizationMethod == 3) {
+      double minAbsVal = PosInf;
+      int minAbsVal_c = 0;
+
+      for (int c = 1; c <= paramDim; ++c) {
+        if (Math.abs(origLambda[c]) < minAbsVal) {
+          minAbsVal = Math.abs(origLambda[c]);
+          minAbsVal_c = c;
+        }
+      }
+      scalingFactor = normalizationOptions[1] / Math.abs(origLambda[minAbsVal_c]);
+
+    } else if (normalizationMethod == 4) {
+      double pow = normalizationOptions[1];
+      double norm = L_norm(origLambda, pow);
+      scalingFactor = normalizationOptions[2] / norm;
+    }
+
+    for (int c = 1; c <= paramDim; ++c) {
+      origLambda[c] *= scalingFactor;
+    }
+  }
+
+  // from ZMERT
+  private double L_norm(double[] A, double pow) {
+    // calculates the L-pow norm of A[]
+    // NOTE: this calculation ignores A[0]
+    double sum = 0.0;
+    for (int i = 1; i < A.length; ++i)
+      sum += Math.pow(Math.abs(A[i]), pow);
+
+    return Math.pow(sum, 1 / pow);
+  }
+
+  public double getMetricScore() {
+    return finalMetricScore;
+  }
+
+  private EvaluationMetric evalMetric;
+  private Vector<String> output;
+  private boolean[] isOptimizable;
+  private double[] initialLambda;
+  private double[] finalLambda;
+  private double[] normalizationOptions;
+  private double finalMetricScore;
+  private HashMap<String, String>[] feat_hash;
+  private HashMap<String, String>[] stats_hash;
+  private Random randgen;
+  private int paramDim;
+  private int sentNum;
+  private int Tau; // size of sampled candidate set (say 5000)
+  private int Xi; // choose top Xi candidates from sampled set (say 50)
+  private double metricDiff; // metric difference threshold (to select qualified candidates)
+  private String classifierAlg; // classification algorithm
+  private String[] classifierParam;
+
+  private final static double NegInf = (-1.0 / 0.0);
+  private final static double PosInf = (+1.0 / 0.0);
+}
+
+
+class ValueComparator implements Comparator<String> {
+  Map<String, Double> base;
+
+  public ValueComparator(Map<String, Double> base) {
+    this.base = base;
+  }
+
+  @Override
+  public int compare(String a, String b) {
+    // sort keys by descending value; never return 0, so that a TreeMap built
+    // with this comparator keeps distinct keys whose values happen to be equal
+    return base.get(a) <= base.get(b) ? 1 : -1;
+  }
+}
+}
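
Since ValueComparator deliberately never returns 0 (so a TreeMap built on it
keeps distinct pairs even when their score differences are equal), here is a
standalone sketch of the sort-by-value idiom it supports (example keys and
values are hypothetical; the class is package-private, hence the package
declaration):

    package org.apache.joshua.pro;

    import java.util.HashMap;
    import java.util.Map;
    import java.util.TreeMap;

    public class ValueComparatorSketch {
      public static void main(String[] args) {
        Map<String, Double> scoreDiffs = new HashMap<String, Double>();
        scoreDiffs.put("0 1", 0.30);
        scoreDiffs.put("2 5", 0.70);
        scoreDiffs.put("3 4", 0.70); // equal value, but still a distinct key

        // sort pair ids by descending score difference
        TreeMap<String, Double> sorted =
            new TreeMap<String, Double>(new ValueComparator(scoreDiffs));
        sorted.putAll(scoreDiffs);

        // the 0.70 pairs come before the 0.30 pair; key lookups are unreliable
        // with a value-based comparator, so only keySet() iteration is used
        System.out.println(sorted.keySet());
      }
    }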

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/5735d9ae/joshua-core/src/main/java/org/apache/joshua/pro/PRO.java
----------------------------------------------------------------------
diff --git a/joshua-core/src/main/java/org/apache/joshua/pro/PRO.java b/joshua-core/src/main/java/org/apache/joshua/pro/PRO.java
new file mode 100755
index 0000000..fd9a7cb
--- /dev/null
+++ b/joshua-core/src/main/java/org/apache/joshua/pro/PRO.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.joshua.pro;
+
+import org.apache.joshua.decoder.JoshuaConfiguration;
+import org.apache.joshua.util.FileUtility;
+import org.apache.joshua.util.StreamGobbler;
+
+public class PRO {
+  public static void main(String[] args) throws Exception {
+    JoshuaConfiguration joshuaConfiguration = new JoshuaConfiguration();
+    boolean external = false; // should each PRO iteration be launched externally?
+
+    if (args.length == 1) {
+      if (args[0].equals("-h")) {
+        printPROUsage(args.length, true);
+        System.exit(2);
+      } else {
+        external = false;
+      }
+    } else if (args.length == 3) {
+      external = true;
+    } else {
+      printPROUsage(args.length, false);
+      System.exit(1);
+    }
+
+    if (!external) {
+      PROCore myPRO = new PROCore(args[0],joshuaConfiguration);
+      myPRO.run_PRO(); // optimize lambda[]!!!
+      myPRO.finish();
+    } else {
+
+      int maxMem = Integer.parseInt(args[1]);
+      String configFileName = args[2];
+      String stateFileName = FileUtility.dirname(configFileName) + "/PRO.temp.state";
+      String cp = System.getProperty("java.class.path");
+      boolean done = false;
+      int iteration = 0;
+
+      while (!done) {
+        ++iteration;
+        Runtime rt = Runtime.getRuntime();
+        Process p =
+            rt.exec("java -Xmx" + maxMem + "m -cp " + cp + " org.apache.joshua.pro.PROCore " + configFileName
+                + " " + stateFileName + " " + iteration);
+        /*
+         * BufferedReader br_i = new BufferedReader(new InputStreamReader(p.getInputStream()));
+         * BufferedReader br_e = new BufferedReader(new InputStreamReader(p.getErrorStream()));
+         * String dummy_line = null; while ((dummy_line = br_i.readLine()) != null) {
+         * System.out.println(dummy_line); } while ((dummy_line = br_e.readLine()) != null) {
+         * System.out.println(dummy_line); }
+         */
+        StreamGobbler errorGobbler = new StreamGobbler(p.getErrorStream(), 1);
+        StreamGobbler outputGobbler = new StreamGobbler(p.getInputStream(), 1);
+
+        errorGobbler.start();
+        outputGobbler.start();
+
+        int status = p.waitFor();
+
+        if (status == 90) {
+          done = true;
+        } else if (status == 91) {
+          done = false;
+        } else {
+          System.out.println("PRO exiting prematurely (PROCore returned " + status + ")...");
+          break;
+        }
+      }
+    }
+
+    System.exit(0);
+
+  } // main(String[] args)
+
+  public static void printPROUsage(int argsLen, boolean detailed) {
+    if (!detailed) {
+      println("Oops, you provided " + argsLen + " args!");
+      println("");
+      println("Usage:");
+      println("           PRO -maxMem maxMemoryInMB PRO_configFile");
+      println("");
+      println("Where -maxMem specifies the maximum amount of memory (in MB) PRO is");
+      println("allowed to use when performing its calculations (no memroy is needed while");
+      println("the decoder is running),");
+      println("and the config file contains any subset of PRO's 20-some parameters,");
+      println("one per line.  Run   PRO -h   for more details on those parameters.");
+    } else {
+      println("Usage:");
+      println("           PRO -maxMem maxMemoryInMB PRO_configFile");
+      println("");
+      println("Where -maxMem specifies the maximum amount of memory (in MB) PRO is");
+      println("allowed to use when performing its calculations (no memroy is needed while");
+      println("the decoder is running),");
+      println("and the config file contains any subset of PRO's 20-some parameters,");
+      println("one per line.  Those parameters, and their default values, are:");
+      println("");
+      println("Relevant files:");
+      println("  -dir dirPrefix: working directory\n    [[default: null string (i.e. they are in the current directory)]]");
+      println("  -s sourceFile: source sentences (foreign sentences) of the PRO dataset\n    [[default: null string (i.e. file name is not needed by PRO)]]");
+      println("  -r refFile: target sentences (reference translations) of the PRO dataset\n    [[default: reference.txt]]");
+      println("  -rps refsPerSen: number of reference translations per sentence\n    [[default: 1]]");
+      println("  -txtNrm textNormMethod: how should text be normalized?\n       (0) don't normalize text,\n    or (1) \"NIST-style\", and also rejoin 're, *'s, n't, etc,\n    or (2) apply 1 and also rejoin dashes between letters,\n    or (3) apply 1 and also drop non-ASCII characters,\n    or (4) apply 1+2+3\n    [[default: 1]]");
+      println("  -p paramsFile: file containing parameter names, initial values, and ranges\n    [[default: params.txt]]");
+      println("  -docInfo documentInfoFile: file informing PRO which document each\n    sentence belongs to\n    [[default: null string (i.e. all sentences are in one 'document')]]");
+      println("  -fin finalLambda: file name for final lambda[] values\n    [[default: null string (i.e. no such file will be created)]]");
+      println("");
+      println("PRO specs:");
+      println("  -m metricName metric options: name of evaluation metric and its options\n    [[default: BLEU 4 closest]]");
+      println("  -maxIt maxPROIts: maximum number of PRO iterations\n    [[default: 20]]");
+      println("  -prevIt prevPROIts: maximum number of previous PRO iterations to\n    construct candidate sets from\n    [[default: 20]]");
+      println("  -minIt minPROIts: number of iterations before considering an early exit\n    [[default: 5]]");
+      println("  -stopIt stopMinIts: some early stopping criterion must be satisfied in\n    stopMinIts *consecutive* iterations before an early exit\n    [[default: 3]]");
+      println("  -stopSig sigValue: early PRO exit if no weight changes by more than sigValue\n    [[default: -1 (i.e. this criterion is never investigated)]]");
+      println("  -thrCnt threadCount: number of threads to run in parallel when optimizing\n    [[default: 1]]");
+      println("  -save saveInter: save intermediate cfg files (1) or decoder outputs (2)\n    or both (3) or neither (0)\n    [[default: 3]]");
+      println("  -compress compressFiles: should PRO compress the files it produces (1)\n    or not (0)\n    [[default: 0]]");
+      println("  -opi oncePerIt: modify a parameter only once per iteration (1) or not (0)\n    [[default: 0]]");
+      println("  -rand randInit: choose initial point randomly (1) or from paramsFile (0)\n    [[default: 0]]");
+      println("  -seed seed: seed used to initialize random number generator\n    [[default: time (i.e. value returned by System.currentTimeMillis()]]");
+      // println("  -ud useDisk: reliance on disk (0-2; higher value => more reliance)\n    [[default: 2]]");
+      println("");
+      println("Decoder specs:");
+      println("  -cmd commandFile: name of file containing commands to run the decoder\n    [[default: null string (i.e. decoder is a JoshuaDecoder object)]]");
+      println("  -passIt passIterationToDecoder: should iteration number be passed\n    to command file (1) or not (0)\n    [[default: 0]]");
+      println("  -decOut decoderOutFile: name of the output file produced by the decoder\n    [[default: output.nbest]]");
+      println("  -decExit validExit: value returned by decoder to indicate success\n    [[default: 0]]");
+      println("  -dcfg decConfigFile: name of decoder config file\n    [[default: dec_cfg.txt]]");
+      println("  -N N: size of N-best list (per sentence) generated in each PRO iteration\n    [[default: 100]]");
+      println("");
+      println("Output specs:");
+      println("  -v verbosity: PRO verbosity level (0-2; higher value => more verbose)\n    [[default: 1]]");
+      println("  -decV decVerbosity: should decoder output be printed (1) or ignored (0)\n    [[default: 0]]");
+      println("");
+    }
+  }
+
+  private static void println(Object obj) {
+    System.out.println(obj);
+  }
+
+}