Posted to commits@joshua.apache.org by le...@apache.org on 2016/05/26 07:03:46 UTC

[05/12] incubator-joshua git commit: Log4j - Slf4j bridge

Log4j - Slf4j bridge 

+ Removed java.util.logging statements
+ Replaced String.format message construction with SLF4J parameterized ("{}") logging
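
For reference, the conversion pattern applied throughout the diff is roughly the
following (a minimal sketch; the Example class is illustrative and not part of the
commit):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class Example {
      public static final Logger LOG = LoggerFactory.getLogger(Example.class);

      void report(int size) {
        // Before: verbosity-gated helper with eager String.format
        //   Decoder.LOG(1, String.format("Read %d entries from the vocabulary", size));
        // After: SLF4J parameterized message; the string is only rendered if the
        // INFO level is enabled, so there is no String.format call on the hot path.
        LOG.info("Read {} entries from the vocabulary", size);
      }
    }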

Project: http://git-wip-us.apache.org/repos/asf/incubator-joshua/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-joshua/commit/c21fa9e8
Tree: http://git-wip-us.apache.org/repos/asf/incubator-joshua/tree/c21fa9e8
Diff: http://git-wip-us.apache.org/repos/asf/incubator-joshua/diff/c21fa9e8

Branch: refs/heads/JOSHUA-252
Commit: c21fa9e82db5b1f784b89ea8109735a3645298f2
Parents: 659e464
Author: Thamme Gowda <tg...@gmail.com>
Authored: Sat May 21 00:02:42 2016 -0700
Committer: Thamme Gowda <tg...@gmail.com>
Committed: Sat May 21 00:02:42 2016 -0700

----------------------------------------------------------------------
 README.md                                       |   6 +
 pom.xml                                         |  13 ++
 .../org/apache/joshua/corpus/Vocabulary.java    |   7 +-
 .../org/apache/joshua/decoder/ArgsParser.java   |   9 +-
 .../java/org/apache/joshua/decoder/Decoder.java |  55 ++++----
 .../apache/joshua/decoder/DecoderThread.java    |  43 +++---
 .../joshua/decoder/JoshuaConfiguration.java     |  89 ++++++-------
 .../apache/joshua/decoder/JoshuaDecoder.java    |  33 ++---
 .../org/apache/joshua/decoder/Translation.java  |  10 +-
 .../joshua/decoder/chart_parser/Cell.java       |  15 ++-
 .../joshua/decoder/chart_parser/Chart.java      |  49 ++++---
 .../joshua/decoder/chart_parser/DotChart.java   |  27 ++--
 .../chart_parser/ManualConstraintsHandler.java  |  21 ++-
 .../apache/joshua/decoder/ff/StatefulFF.java    |   5 +-
 .../apache/joshua/decoder/ff/lm/ArpaFile.java   |  43 +++---
 .../ff/lm/DefaultNGramLanguageModel.java        |  16 +--
 .../ff/lm/berkeley_lm/LMGrammarBerkeley.java    |   7 +-
 .../BloomFilterLanguageModel.java               |   5 +-
 .../joshua/decoder/ff/lm/buildin_lm/TrieLM.java |  50 +++----
 .../joshua/decoder/ff/tm/AbstractGrammar.java   |  21 ++-
 .../joshua/decoder/ff/tm/CreateGlueGrammar.java |  23 ++--
 .../joshua/decoder/ff/tm/GrammarReader.java     |  21 +--
 .../joshua/decoder/ff/tm/MonolingualRule.java   |  10 +-
 .../decoder/ff/tm/format/SamtFormatReader.java  |  10 +-
 .../tm/hash_based/MemoryBasedBatchGrammar.java  |  13 +-
 .../decoder/ff/tm/packed/PackedGrammar.java     |  31 +++--
 .../joshua/decoder/hypergraph/HyperGraph.java   |   7 +-
 .../joshua/decoder/phrase/PhraseChart.java      |  14 +-
 .../apache/joshua/decoder/phrase/Stacks.java    |   8 +-
 .../joshua/decoder/segment_file/Sentence.java   |   8 +-
 .../joshua/decoder/segment_file/Token.java      |   7 +-
 .../java/org/apache/joshua/lattice/Lattice.java | 132 +++++++++----------
 .../java/org/apache/joshua/metrics/BLEU.java    |  15 ++-
 .../apache/joshua/metrics/GradeLevelBLEU.java   |   9 +-
 .../joshua/metrics/MinimumChangeBLEU.java       |   9 +-
 .../java/org/apache/joshua/metrics/Precis.java  |   9 +-
 .../org/apache/joshua/server/TcpServer.java     |   5 +-
 .../org/apache/joshua/tools/GrammarPacker.java  |  62 ++++-----
 .../apache/joshua/tools/GrammarPackerCli.java   |  17 +--
 .../org/apache/joshua/tools/LabelPhrases.java   |   8 +-
 src/main/java/org/apache/joshua/util/Cache.java |  28 ++--
 .../org/apache/joshua/util/CompareGrammars.java |  45 +++----
 .../util/encoding/FeatureTypeAnalyzer.java      |   7 +-
 .../org/apache/joshua/util/io/BinaryOut.java    |   3 -
 src/main/resources/log4j.properties             |   5 +
 .../org/apache/joshua/packed/Benchmark.java     |  30 +++--
 46 files changed, 553 insertions(+), 507 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/c21fa9e8/README.md
----------------------------------------------------------------------
diff --git a/README.md b/README.md
index 343f183..b9bd989 100644
--- a/README.md
+++ b/README.md
@@ -49,6 +49,12 @@ The basic method for invoking the decoder looks like this:
 
 Some example usage scenarios and scripts can be found in the [examples/](https://github.com/apache/incubator-joshua/tree/master/examples) directory.
 
+----
+### Maven Build
+
+    . scripts/download_libs.sh 
+    mvn clean compile assembly:single
+
 ## Working with "language packs"
 
 Joshua includes a number of "language packs", which are pre-built models that

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/c21fa9e8/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 3b1f4ef..b5f7df7 100644
--- a/pom.xml
+++ b/pom.xml
@@ -35,6 +35,9 @@
   <url>http://joshua.incubator.apache.org</url>
   <inceptionYear>2016</inceptionYear>
 
+  <properties>
+    <slf4j.version>1.7.21</slf4j.version>
+  </properties>
   <licenses>
     <license>
       <name>The Apache Software License, Version 2.0</name>
@@ -181,6 +184,16 @@
       <artifactId>args4j</artifactId>
       <version>2.0.29</version>
     </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+      <version>${slf4j.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <version>${slf4j.version}</version>
+    </dependency>
     
     <!-- Test Dependencies -->
     <dependency>
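
The slf4j-log4j12 binding above routes SLF4J calls to log4j 1.2, which picks up its
configuration from the new src/main/resources/log4j.properties (added by this commit
but not shown in this excerpt). A minimal configuration for that binding could look
like the sketch below; the appender name and pattern are illustrative assumptions,
not the committed file:

    # Illustrative minimal log4j.properties for the slf4j-log4j12 binding
    # (assumed example; the actual committed file is not reproduced here)
    log4j.rootLogger=INFO, stderr
    log4j.appender.stderr=org.apache.log4j.ConsoleAppender
    log4j.appender.stderr.Target=System.err
    log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
    log4j.appender.stderr.layout.ConversionPattern=%d{HH:mm:ss} %-5p %c{1} - %m%n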

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/c21fa9e8/src/main/java/org/apache/joshua/corpus/Vocabulary.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/joshua/corpus/Vocabulary.java b/src/main/java/org/apache/joshua/corpus/Vocabulary.java
index a153902..9836286 100644
--- a/src/main/java/org/apache/joshua/corpus/Vocabulary.java
+++ b/src/main/java/org/apache/joshua/corpus/Vocabulary.java
@@ -35,6 +35,8 @@ import java.util.concurrent.locks.StampedLock;
 import org.apache.joshua.decoder.Decoder;
 import org.apache.joshua.decoder.ff.lm.NGramLanguageModel;
 import org.apache.joshua.util.FormatUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Static singular vocabulary class.
@@ -45,6 +47,7 @@ import org.apache.joshua.util.FormatUtils;
 
 public class Vocabulary {
 
+  public static final Logger LOG = LoggerFactory.getLogger(Vocabulary.class);
   private final static ArrayList<NGramLanguageModel> LMs = new ArrayList<>();
 
   private static List<String> idToString;
@@ -88,7 +91,7 @@ public class Vocabulary {
     DataInputStream vocab_stream =
         new DataInputStream(new BufferedInputStream(new FileInputStream(vocab_file)));
     int size = vocab_stream.readInt();
-    Decoder.LOG(1, String.format("Read %d entries from the vocabulary", size));
+    LOG.info("Read {} entries from the vocabulary", size);
     clear();
     for (int i = 0; i < size; i++) {
       int id = vocab_stream.readInt();
@@ -109,7 +112,7 @@ public class Vocabulary {
       DataOutputStream vocab_stream =
           new DataOutputStream(new BufferedOutputStream(new FileOutputStream(vocab_file)));
       vocab_stream.writeInt(idToString.size() - 1);
-      Decoder.LOG(1, String.format("Writing vocabulary: %d tokens", idToString.size() - 1));
+      LOG.info("Writing vocabulary: {} tokens", idToString.size() - 1);
       for (int i = 1; i < idToString.size(); i++) {
         vocab_stream.writeInt(i);
         vocab_stream.writeUTF(idToString.get(i));

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/c21fa9e8/src/main/java/org/apache/joshua/decoder/ArgsParser.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/joshua/decoder/ArgsParser.java b/src/main/java/org/apache/joshua/decoder/ArgsParser.java
index fea20fc..529e9fe 100644
--- a/src/main/java/org/apache/joshua/decoder/ArgsParser.java
+++ b/src/main/java/org/apache/joshua/decoder/ArgsParser.java
@@ -24,6 +24,8 @@ import java.nio.file.Files;
 import java.nio.file.Paths;
 
 import org.apache.joshua.util.io.LineReader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * @author orluke
@@ -31,6 +33,8 @@ import org.apache.joshua.util.io.LineReader;
  */
 public class ArgsParser {
 
+  public static final Logger LOG = LoggerFactory.getLogger(ArgsParser.class);
+
   private String configFile = null;
 
   /**
@@ -83,18 +87,17 @@ public class ArgsParser {
 
           setConfigFile(args[i + 1].trim());
           try {
-            Decoder.LOG(1, "Parameters read from configuration file:");
+            LOG.info("Parameters read from configuration file: {}", getConfigFile());
             joshuaConfiguration.readConfigFile(getConfigFile());
           } catch (IOException e) {
             throw new RuntimeException(e);
           }
-
           break;
         }
       }
 
       // Now process all the command-line args
-      Decoder.LOG(1, "Parameters overridden from the command line:");
+      LOG.info("Parameters overridden from the command line:");
       joshuaConfiguration.processCommandLineOptions(args);
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/c21fa9e8/src/main/java/org/apache/joshua/decoder/Decoder.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/joshua/decoder/Decoder.java b/src/main/java/org/apache/joshua/decoder/Decoder.java
index 1aac0b0..43dc75e 100644
--- a/src/main/java/org/apache/joshua/decoder/Decoder.java
+++ b/src/main/java/org/apache/joshua/decoder/Decoder.java
@@ -28,7 +28,6 @@ import java.io.FileNotFoundException;
 import java.io.FileWriter;
 import java.lang.reflect.Constructor;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -59,6 +58,8 @@ import org.apache.joshua.util.FileUtility;
 import org.apache.joshua.util.FormatUtils;
 import org.apache.joshua.util.Regex;
 import org.apache.joshua.util.io.LineReader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class handles decoder initialization and the complication introduced by multithreading.
@@ -87,6 +88,8 @@ import org.apache.joshua.util.io.LineReader;
  */
 public class Decoder {
 
+  public static final Logger LOG = LoggerFactory.getLogger(Decoder.class);
+
   private final JoshuaConfiguration joshuaConfiguration;
 
   public JoshuaConfiguration getJoshuaConfiguration() {
@@ -270,7 +273,7 @@ public class Decoder {
             String.format("[X] ||| [X,1] %s ||| [X,1] %s ||| custom=1", tokens[0], tokens[1]));
         Decoder.this.customPhraseTable.addRule(rule);
         rule.estimateRuleCost(featureFunctions);
-        Decoder.LOG(1, String.format("Added custom rule %s", formatRule(rule)));
+        LOG.info("Added custom rule {}", formatRule(rule));
 
         String response = String.format("Added rule %s", formatRule(rule));
         out.write(response.getBytes());
@@ -649,7 +652,7 @@ public class Decoder {
             feature = demoses(feature);
 
           joshuaConfiguration.weights.add(String.format("%s %s", feature, tokens[i+1]));
-          Decoder.LOG(1, String.format("COMMAND LINE WEIGHT: %s -> %.3f", feature, value));
+          LOG.info("COMMAND LINE WEIGHT: {} -> {}", feature, value);
         }
       }
 
@@ -671,8 +674,7 @@ public class Decoder {
         weights.set(pair[0], Float.parseFloat(pair[1]));
       }
 
-      Decoder.LOG(1, String.format("Read %d weights (%d of them dense)", weights.size(),
-          DENSE_FEATURE_NAMES.size()));
+      LOG.info("Read {} weights ({} of them dense)", weights.size(), DENSE_FEATURE_NAMES.size());
 
       // Do this before loading the grammars and the LM.
       this.featureFunctions = new ArrayList<FeatureFunction>();
@@ -680,9 +682,8 @@ public class Decoder {
       // Initialize and load grammars. This must happen first, since the vocab gets defined by
       // the packed grammar (if any)
       this.initializeTranslationGrammars();
-
-      Decoder.LOG(1, String.format("Grammar loading took: %d seconds.",
-          (System.currentTimeMillis() - pre_load_time) / 1000));
+      LOG.info("Grammar loading took: {} seconds.",
+          (System.currentTimeMillis() - pre_load_time) / 1000);
 
       // Initialize the features: requires that LM model has been initialized.
       this.initializeFeatureFunctions();
@@ -701,14 +702,14 @@ public class Decoder {
 
       // Sort the TM grammars (needed to do cube pruning)
       if (joshuaConfiguration.amortized_sorting) {
-        Decoder.LOG(1, "Grammar sorting happening lazily on-demand.");
+        LOG.info("Grammar sorting happening lazily on-demand.");
       } else {
         long pre_sort_time = System.currentTimeMillis();
         for (Grammar grammar : this.grammars) {
           grammar.sortGrammar(this.featureFunctions);
         }
-        Decoder.LOG(1, String.format("Grammar sorting took %d seconds.",
-            (System.currentTimeMillis() - pre_sort_time) / 1000));
+        LOG.info("Grammar sorting took {} seconds.",
+            (System.currentTimeMillis() - pre_sort_time) / 1000);
       }
 
       // Create the threads
@@ -716,12 +717,8 @@ public class Decoder {
         this.threadPool.put(new DecoderThread(this.grammars, Decoder.weights,
             this.featureFunctions, joshuaConfiguration));
       }
-
-    } catch (IOException e) {
-      e.printStackTrace();
-    } catch (InterruptedException e) {
-      // TODO Auto-generated catch block
-      e.printStackTrace();
+    } catch (IOException | InterruptedException e) {
+      LOG.warn(e.getMessage(), e);
     }
 
     return this;
@@ -786,7 +783,7 @@ public class Decoder {
       checkSharedVocabularyChecksumsForPackedGrammars(packed_grammars);
 
     } else {
-      Decoder.LOG(1, "* WARNING: no grammars supplied!  Supplying dummy glue grammar.");
+      LOG.info("* WARNING: no grammars supplied!  Supplying dummy glue grammar.");
       MemoryBasedBatchGrammar glueGrammar = new MemoryBasedBatchGrammar("glue", joshuaConfiguration);
       glueGrammar.setSpanLimit(-1);
       glueGrammar.addGlueRules(featureFunctions);
@@ -799,7 +796,7 @@ public class Decoder {
     
     /* Create an epsilon-deleting grammar */
     if (joshuaConfiguration.lattice_decoding) {
-      Decoder.LOG(1, "Creating an epsilon-deleting grammar");
+      LOG.info("Creating an epsilon-deleting grammar");
       MemoryBasedBatchGrammar latticeGrammar = new MemoryBasedBatchGrammar("lattice", joshuaConfiguration);
       latticeGrammar.setSpanLimit(-1);
       HieroFormatReader reader = new HieroFormatReader();
@@ -807,6 +804,7 @@ public class Decoder {
       String goalNT = FormatUtils.cleanNonTerminal(joshuaConfiguration.goal_symbol);
       String defaultNT = FormatUtils.cleanNonTerminal(joshuaConfiguration.default_non_terminal);
 
+      //FIXME: too many arguments
       String ruleString = String.format("[%s] ||| [%s,1] <eps> ||| [%s,1] ||| ", goalNT, goalNT, defaultNT,
           goalNT, defaultNT);
 
@@ -829,8 +827,8 @@ public class Decoder {
       }
     }
 
-    Decoder.LOG(1, String.format("Memory used %.1f MB",
-        ((Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) / 1000000.0)));
+    LOG.info("Memory used {} MB",
+        ((Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) / 1000000.0));
   }
 
   /**
@@ -889,7 +887,7 @@ public class Decoder {
     } catch (IOException ioe) {
       throw new RuntimeException(ioe);
     }
-    Decoder.LOG(1, String.format("Read %d weights from file '%s'", weights.size(), fileName));
+    LOG.info("Read {} weights from file '{}'", weights.size(), fileName);
   }
 
   private String demoses(String feature) {
@@ -911,7 +909,6 @@ public class Decoder {
    *
    * Weights for features are listed separately.
    *
-   * @param tmOwnersSeen
    * @throws IOException
    *
    */
@@ -938,7 +935,7 @@ public class Decoder {
     }
 
     for (FeatureFunction feature : featureFunctions) {
-      Decoder.LOG(1, String.format("FEATURE: %s", feature.logString()));
+      LOG.info("FEATURE: {}", feature.logString());
 
     }
 
@@ -955,6 +952,7 @@ public class Decoder {
    */
   private Class<?> getClass(String featureName) {
     Class<?> clas = null;
+
     String[] packages = { "joshua.decoder.ff", "joshua.decoder.ff.lm", "joshua.decoder.ff.phrase" };
     for (String path : packages) {
       try {
@@ -971,13 +969,4 @@ public class Decoder {
     }
     return clas;
   }
-
-  public static boolean VERBOSE(int i) {
-    return i <= VERBOSE;
-  }
-
-  public static void LOG(int i, String msg) {
-    if (VERBOSE(i))
-      System.err.println(msg);
-  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/c21fa9e8/src/main/java/org/apache/joshua/decoder/DecoderThread.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/joshua/decoder/DecoderThread.java b/src/main/java/org/apache/joshua/decoder/DecoderThread.java
index 4390a59..f04c3be 100644
--- a/src/main/java/org/apache/joshua/decoder/DecoderThread.java
+++ b/src/main/java/org/apache/joshua/decoder/DecoderThread.java
@@ -21,7 +21,6 @@ package org.apache.joshua.decoder;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.logging.Logger;
 
 import org.apache.joshua.decoder.chart_parser.Chart;
 import org.apache.joshua.decoder.ff.FeatureFunction;
@@ -34,6 +33,8 @@ import org.apache.joshua.decoder.hypergraph.HyperGraph;
 import org.apache.joshua.decoder.phrase.Stacks;
 import org.apache.joshua.decoder.segment_file.Sentence;
 import org.apache.joshua.corpus.Vocabulary;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class handles decoding of individual Sentence objects (which can represent plain sentences
@@ -49,6 +50,8 @@ import org.apache.joshua.corpus.Vocabulary;
  */
 
 public class DecoderThread extends Thread {
+  public static final Logger LOG = LoggerFactory.getLogger(DecoderThread.class);
+
   private final JoshuaConfiguration joshuaConfiguration;
   /*
    * these variables may be the same across all threads (e.g., just copy from DecoderFactory), or
@@ -57,7 +60,6 @@ public class DecoderThread extends Thread {
   private final List<Grammar> allGrammars;
   private final List<FeatureFunction> featureFunctions;
 
-  private static final Logger logger = Logger.getLogger(DecoderThread.class.getName());
 
   // ===============================================================
   // Constructor
@@ -94,15 +96,15 @@ public class DecoderThread extends Thread {
    */
   public Translation translate(Sentence sentence) {
 
-    Decoder.LOG(1, String.format("Input %d: %s", sentence.id(), sentence.fullSource()));
+    LOG.info("Input {}: {}", sentence.id(), sentence.fullSource());
 
     if (sentence.target() != null)
-      Decoder.LOG(1, String.format("Input %d: Constraining to target sentence '%s'", 
-          sentence.id(), sentence.target()));
+      LOG.info("Input {}: Constraining to target sentence '{}'",
+          sentence.id(), sentence.target());
 
     // skip blank sentences
     if (sentence.isEmpty()) {
-      Decoder.LOG(1, String.format("Translation %d: Translation took 0 seconds", sentence.id()));
+      LOG.info("Translation {}: Translation took 0 seconds", sentence.id());
       return new Translation(sentence, null, featureFunctions, joshuaConfiguration);
     }
     
@@ -140,14 +142,14 @@ public class DecoderThread extends Thread {
       }
       
     } catch (java.lang.OutOfMemoryError e) {
-      Decoder.LOG(1, String.format("Input %d: out of memory", sentence.id()));
+      LOG.info("Input {}: out of memory", sentence.id());
       hypergraph = null;
     }
 
     float seconds = (System.currentTimeMillis() - startTime) / 1000.0f;
-    Decoder.LOG(1, String.format("Input %d: Translation took %.3f seconds", sentence.id(), seconds));
-    Decoder.LOG(1, String.format("Input %d: Memory used is %.1f MB", sentence.id(), (Runtime
-        .getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) / 1000000.0));
+    LOG.info("Input {}: Translation took {} seconds", sentence.id(), seconds);
+    LOG.info("Input {}: Memory used is {} MB", sentence.id(), (Runtime
+        .getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) / 1000000.0);
 
     /* Return the translation unless we're doing synchronous parsing. */
     if (!joshuaConfiguration.parse || hypergraph == null) {
@@ -164,8 +166,8 @@ public class DecoderThread extends Thread {
     Grammar newGrammar = getGrammarFromHyperGraph(joshuaConfiguration.goal_symbol, hypergraph);
     newGrammar.sortGrammar(this.featureFunctions);
     long sortTime = System.currentTimeMillis();
-    logger.info(String.format("Sentence %d: New grammar has %d rules.", sentence.id(),
-        newGrammar.getNumRules()));
+    LOG.info("Sentence {}: New grammar has {} rules.", sentence.id(),
+        newGrammar.getNumRules());
 
     /* Step 2. Create a new chart and parse with the instantiated grammar. */
     Grammar[] newGrammarArray = new Grammar[] { newGrammar };
@@ -173,20 +175,19 @@ public class DecoderThread extends Thread {
     Chart chart = new Chart(targetSentence, featureFunctions, newGrammarArray, "GOAL",joshuaConfiguration);
     int goalSymbol = GrammarBuilderWalkerFunction.goalSymbol(hypergraph);
     String goalSymbolString = Vocabulary.word(goalSymbol);
-    logger.info(String.format("Sentence %d: goal symbol is %s (%d).", sentence.id(),
-        goalSymbolString, goalSymbol));
+    LOG.info("Sentence {}: goal symbol is {} ({}).", sentence.id(),
+        goalSymbolString, goalSymbol);
     chart.setGoalSymbolID(goalSymbol);
 
     /* Parsing */
     HyperGraph englishParse = chart.expand();
     long secondParseTime = System.currentTimeMillis();
-    logger.info(String.format("Sentence %d: Finished second chart expansion (%d seconds).",
-        sentence.id(), (secondParseTime - sortTime) / 1000));
-    logger.info(String.format("Sentence %d total time: %d seconds.\n", sentence.id(),
-        (secondParseTime - startTime) / 1000));
-    logger.info(String.format("Memory used after sentence %d is %.1f MB", sentence.id(), (Runtime
-        .getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) / 1000000.0));
-
+    LOG.info("Sentence {}: Finished second chart expansion ({} seconds).",
+        sentence.id(), (secondParseTime - sortTime) / 1000);
+    LOG.info("Sentence %d total time: {} seconds.\n", sentence.id(),
+        (secondParseTime - startTime) / 1000);
+    LOG.info("Memory used after sentence {} is {} MB", sentence.id(), (Runtime
+        .getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) / 1000000.0);
     return new Translation(sentence, englishParse, featureFunctions, joshuaConfiguration); // or do something else
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/c21fa9e8/src/main/java/org/apache/joshua/decoder/JoshuaConfiguration.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/joshua/decoder/JoshuaConfiguration.java b/src/main/java/org/apache/joshua/decoder/JoshuaConfiguration.java
index b4624cf..964cdc9 100644
--- a/src/main/java/org/apache/joshua/decoder/JoshuaConfiguration.java
+++ b/src/main/java/org/apache/joshua/decoder/JoshuaConfiguration.java
@@ -29,13 +29,14 @@ import java.io.BufferedReader;
 import java.io.FileReader;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.logging.Logger;
 
 import org.apache.joshua.decoder.ff.StatefulFF;
 import org.apache.joshua.decoder.ff.fragmentlm.Tree;
 import org.apache.joshua.util.FormatUtils;
 import org.apache.joshua.util.Regex;
 import org.apache.joshua.util.io.LineReader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Configuration file for Joshua decoder.
@@ -48,7 +49,9 @@ import org.apache.joshua.util.io.LineReader;
  */
 public class JoshuaConfiguration {
 
-  // whether to construct a StructuredTranslation object for each request instead of 
+  public static final Logger LOG = LoggerFactory.getLogger(JoshuaConfiguration.class);
+
+  // whether to construct a StructuredTranslation object for each request instead of
   // printing to stdout. Used when the Decoder is used from Java directly.
   public Boolean use_structured_output = false;
 
@@ -72,33 +75,32 @@ public class JoshuaConfiguration {
    * also just be listed in the main config file.
    */
   public String weights_file = "";
-
   // Default symbols. The symbol here should be enclosed in square brackets.
   public String default_non_terminal = FormatUtils.markup("X");
+
   public String goal_symbol = FormatUtils.markup("GOAL");
 
   /*
    * A list of OOV symbols in the form
-   * 
+   *
    * [X1] weight [X2] weight [X3] weight ...
-   * 
+   *
    * where the [X] symbols are nonterminals and the weights are weights. For each OOV word w in the
    * input sentence, Joshua will create rules of the form
-   * 
+   *
    * X1 -> w (weight)
-   * 
+   *
    * If this is empty, an unweighted default_non_terminal is used.
    */
-
   public class OOVItem implements Comparable<OOVItem> {
     public String label;
+
     public float weight;
 
     OOVItem(String l, float w) {
       label = l;
       weight = w;
     }
-
     @Override
     public int compareTo(OOVItem other) {
       if (weight > other.weight)
@@ -108,6 +110,7 @@ public class JoshuaConfiguration {
       return 0;
     }
   }
+
   public ArrayList<OOVItem> oovList = null;
 
   /*
@@ -126,9 +129,9 @@ public class JoshuaConfiguration {
    * much, much quicker (good for debugging), but that per-sentence decoding is a bit slower.
    */
   public boolean amortized_sorting = true;
-
   // syntax-constrained decoding
   public boolean constrain_parse = false;
+
   public boolean use_pos_labels = false;
 
   // oov-specific
@@ -161,14 +164,14 @@ public class JoshuaConfiguration {
    * variables are available:
    *
    * <pre>
-   * - %i the 0-indexed sentence number 
-   * - %e the source string %s the translated sentence 
-   * - %S the translated sentence with some basic capitalization and denormalization 
-   * - %t the synchronous derivation 
-   * - %f the list of feature values (as name=value pairs) 
+   * - %i the 0-indexed sentence number
+   * - %e the source string %s the translated sentence
+   * - %S the translated sentence with some basic capitalization and denormalization
+   * - %t the synchronous derivation
+   * - %f the list of feature values (as name=value pairs)
    * - %c the model cost
-   * - %w the weight vector 
-   * - %a the alignments between source and target words (currently unimplemented) 
+   * - %w the weight vector
+   * - %a the alignments between source and target words (currently unimplemented)
    * - %d a verbose, many-line version of the derivation
    * </pre>
    */
@@ -189,7 +192,6 @@ public class JoshuaConfiguration {
   /* Enables synchronous parsing. */
   public boolean parse = false; // perform synchronous parsing
 
-  private final Logger logger = Logger.getLogger(JoshuaConfiguration.class.getName());
 
   /* A list of the feature functions. */
   public ArrayList<String> features = new ArrayList<String>();
@@ -282,9 +284,9 @@ public class JoshuaConfiguration {
    *
    */
   public void reset() {
-    logger.info("Resetting the JoshuaConfiguration to its defaults ...");
-    logger.info("\n\tResetting the StatefullFF global state index ...");
-    logger.info("\n\t...done");
+    LOG.info("Resetting the JoshuaConfiguration to its defaults ...");
+    LOG.info("\n\tResetting the StatefullFF global state index ...");
+    LOG.info("\n\t...done");
     StatefulFF.resetGlobalStateIndex();
     tms = new ArrayList<String>();
     weights_file = "";
@@ -314,7 +316,7 @@ public class JoshuaConfiguration {
 
     reordering_limit = 8;
     num_translation_options = 20;
-    logger.info("...done");
+    LOG.info("...done");
   }
 
   // ===============================================================
@@ -376,7 +378,7 @@ public class JoshuaConfiguration {
         if (line.indexOf("=") != -1) { // parameters; (not feature function)
           String[] fds = Regex.equalsWithSpaces.split(line, 2);
           if (fds.length < 2) {
-            Decoder.LOG(1, String.format("* WARNING: skipping config file line '%s'", line));
+            LOG.warn("skipping config file line '{}'", line);
             continue;
           }
 
@@ -417,7 +419,7 @@ public class JoshuaConfiguration {
             String[] tokens = fds[1].split("\\s+");
             if (! tokens[1].startsWith("-")) { // old format
               tmLine = String.format("%s -owner %s -maxspan %s -path %s", tokens[0], tokens[1], tokens[2], tokens[3]);
-              Decoder.LOG(1, String.format("WARNING: Converting deprecated TM line from '%s' -> '%s'", fds[1], tmLine));
+              LOG.warn("Converting deprecated TM line from '{}' -> '{}'", fds[1], tmLine);
             }
             tms.add(tmLine);
 
@@ -426,12 +428,11 @@ public class JoshuaConfiguration {
 
           } else if (parameter.equals(normalize_key("parse"))) {
             parse = Boolean.parseBoolean(fds[1]);
-            logger.finest(String.format("parse: %s", parse));
+            LOG.debug("parse: {}", parse);
 
           } else if (parameter.equals(normalize_key("dump-hypergraph"))) {
             hypergraphFilePattern = fds[1].trim();
-            logger
-                .finest(String.format("  hypergraph dump file format: %s", hypergraphFilePattern));
+            LOG.debug("  hypergraph dump file format: {}", hypergraphFilePattern);
 
           } else if (parameter.equals(normalize_key("oov-list"))) {
             if (new File(fds[1]).exists()) {
@@ -481,11 +482,11 @@ public class JoshuaConfiguration {
 
           } else if (parameter.equals(normalize_key("default-non-terminal"))) {
             default_non_terminal = markup(cleanNonTerminal(fds[1].trim()));
-            logger.finest(String.format("default_non_terminal: %s", default_non_terminal));
+            LOG.debug("default_non_terminal: {}", default_non_terminal);
 
           } else if (parameter.equals(normalize_key("goal-symbol"))) {
             goal_symbol = markup(cleanNonTerminal(fds[1].trim()));
-            logger.finest("goalSymbol: " + goal_symbol);
+            LOG.debug("goalSymbol: {}", goal_symbol);
 
           } else if (parameter.equals(normalize_key("weights-file"))) {
             weights_file = fds[1];
@@ -507,19 +508,19 @@ public class JoshuaConfiguration {
 
           } else if (parameter.equals(normalize_key("use_unique_nbest"))) {
             use_unique_nbest = Boolean.valueOf(fds[1]);
-            logger.finest(String.format("use_unique_nbest: %s", use_unique_nbest));
+            LOG.debug("use_unique_nbest: {}", use_unique_nbest);
 
           } else if (parameter.equals(normalize_key("output-format"))) {
             outputFormat = fds[1];
-            logger.finest(String.format("output-format: %s", outputFormat));
+            LOG.debug("output-format: {}", outputFormat);
 
           } else if (parameter.equals(normalize_key("include_align_index"))) {
             include_align_index = Boolean.valueOf(fds[1]);
-            logger.finest(String.format("include_align_index: %s", include_align_index));
+            LOG.debug("include_align_index: {}", include_align_index);
 
           } else if (parameter.equals(normalize_key("top_n"))) {
             topN = Integer.parseInt(fds[1]);
-            logger.finest(String.format("topN: %s", topN));
+            LOG.debug("topN: {}", topN);
 
           } else if (parameter.equals(normalize_key("num_parallel_decoders"))
               || parameter.equals(normalize_key("threads"))) {
@@ -528,15 +529,15 @@ public class JoshuaConfiguration {
               throw new IllegalArgumentException(
                   "Must specify a positive number for num_parallel_decoders");
             }
-            logger.finest(String.format("num_parallel_decoders: %s", num_parallel_decoders));
+            LOG.debug("num_parallel_decoders: {}", num_parallel_decoders);
 
           } else if (parameter.equals(normalize_key("mark_oovs"))) {
             mark_oovs = Boolean.valueOf(fds[1]);
-            logger.finest(String.format("mark_oovs: %s", mark_oovs));
+            LOG.debug("mark_oovs: {}", mark_oovs);
 
           } else if (parameter.equals(normalize_key("pop-limit"))) {
             pop_limit = Integer.parseInt(fds[1]);
-            logger.finest(String.format("pop-limit: %s", pop_limit));
+            LOG.info("pop-limit: {}", pop_limit);
 
           } else if (parameter.equals(normalize_key("input-type"))) {
             if (fds[1].equals("json")) {
@@ -546,7 +547,7 @@ public class JoshuaConfiguration {
             } else {
               throw new RuntimeException(String.format("* FATAL: invalid server type '%s'", fds[1]));
             }
-            logger.info(String.format("    input-type: %s", input_type));
+            LOG.info("    input-type: {}", input_type);
 
           } else if (parameter.equals(normalize_key("server-type"))) {
             if (fds[1].toLowerCase().equals("tcp"))
@@ -554,19 +555,19 @@ public class JoshuaConfiguration {
             else if (fds[1].toLowerCase().equals("http"))
               server_type = SERVER_TYPE.HTTP;
 
-            logger.info(String.format("    server-type: %s", server_type));
+            LOG.info("    server-type: {}", server_type);
 
           } else if (parameter.equals(normalize_key("server-port"))) {
             server_port = Integer.parseInt(fds[1]);
-            logger.info(String.format("    server-port: %d", server_port));
+            LOG.info("    server-port: {}", server_port);
 
           } else if (parameter.equals(normalize_key("rescore-forest"))) {
             rescoreForest = true;
-            logger.info(String.format("    rescore-forest: %s", rescoreForest));
+            LOG.info("    rescore-forest: {}", rescoreForest);
 
           } else if (parameter.equals(normalize_key("rescore-forest-weight"))) {
             rescoreForestWeight = Float.parseFloat(fds[1]);
-            logger.info(String.format("    rescore-forest-weight: %f", rescoreForestWeight));
+            LOG.info("    rescore-forest-weight: {}", rescoreForestWeight);
 
           } else if (parameter.equals(normalize_key("maxlen"))) {
             // reset the maximum length
@@ -587,7 +588,7 @@ public class JoshuaConfiguration {
           } else if (parameter
               .equals(normalize_key(SOFT_SYNTACTIC_CONSTRAINT_DECODING_PROPERTY_NAME))) {
             fuzzy_matching = Boolean.parseBoolean(fds[1]);
-            logger.finest(String.format(fuzzy_matching + ": %s", fuzzy_matching));
+            LOG.debug("fuzzy_matching : {}", fuzzy_matching);
 
           } else if (parameter.equals(normalize_key("fragment-map"))) {
             fragmentMapFile = fds[1];
@@ -662,14 +663,14 @@ public class JoshuaConfiguration {
                 || parameter.equals(normalize_key("useCubePrune"))
                 || parameter.equals(normalize_key("useBeamAndThresholdPrune"))
                 || parameter.equals(normalize_key("regexp-grammar"))) {
-              logger.warning(String.format("WARNING: ignoring deprecated parameter '%s'", fds[0]));
+              LOG.warn("WARNING: ignoring deprecated parameter '{}'", fds[0]);
 
             } else {
               throw new RuntimeException("FATAL: unknown configuration parameter '" + fds[0] + "'");
             }
           }
 
-          Decoder.LOG(1, String.format("    %s = '%s'", normalize_key(fds[0]), fds[1]));
+          LOG.info("    {} = '{}'", normalize_key(fds[0]), fds[1]);
 
         } else {
           /*

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/c21fa9e8/src/main/java/org/apache/joshua/decoder/JoshuaDecoder.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/joshua/decoder/JoshuaDecoder.java b/src/main/java/org/apache/joshua/decoder/JoshuaDecoder.java
index 8c0b10b..e29ef0a 100644
--- a/src/main/java/org/apache/joshua/decoder/JoshuaDecoder.java
+++ b/src/main/java/org/apache/joshua/decoder/JoshuaDecoder.java
@@ -26,7 +26,6 @@ import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.PrintStream;
 import java.net.InetSocketAddress;
-import java.util.logging.Logger;
 
 import com.sun.net.httpserver.HttpServer;
 
@@ -34,6 +33,8 @@ import org.apache.joshua.decoder.JoshuaConfiguration.SERVER_TYPE;
 import org.apache.joshua.decoder.io.TranslationRequestStream;
 import org.apache.joshua.server.TcpServer;
 import org.apache.joshua.server.ServerThread;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Implements decoder initialization, including interaction with <code>JoshuaConfiguration</code>
@@ -45,8 +46,8 @@ import org.apache.joshua.server.ServerThread;
  */
 public class JoshuaDecoder {
 
-  private static final Logger logger = Logger.getLogger(JoshuaDecoder.class.getName());
-  
+  public static final Logger LOG = LoggerFactory.getLogger(JoshuaDecoder.class);
+
   // ===============================================================
   // Main
   // ===============================================================
@@ -55,12 +56,6 @@ public class JoshuaDecoder {
     JoshuaConfiguration joshuaConfiguration = new JoshuaConfiguration();
     ArgsParser userArgs = new ArgsParser(args,joshuaConfiguration);
 
-    String logFile = System.getenv().get("JOSHUA") + "/logging.properties";
-    try {
-      java.util.logging.LogManager.getLogManager().readConfiguration(new FileInputStream(logFile));
-    } catch (IOException e) {
-      logger.warning("Couldn't initialize logging properties from '" + logFile + "'");
-    }
 
     long startTime = System.currentTimeMillis();
 
@@ -70,10 +65,10 @@ public class JoshuaDecoder {
     /* Step-1: initialize the decoder, test-set independent */
     Decoder decoder = new Decoder(joshuaConfiguration, userArgs.getConfigFile());
 
-    Decoder.LOG(1, String.format("Model loading took %d seconds",
-        (System.currentTimeMillis() - startTime) / 1000));
-    Decoder.LOG(1, String.format("Memory used %.1f MB", ((Runtime.getRuntime().totalMemory() - Runtime
-        .getRuntime().freeMemory()) / 1000000.0)));  
+    LOG.info("Model loading took %d seconds",
+        (System.currentTimeMillis() - startTime) / 1000);
+    LOG.info("Memory used {} MB", ((Runtime.getRuntime().totalMemory()
+        - Runtime.getRuntime().freeMemory()) / 1000000.0));
 
     /* Step-2: Decoding */
     // create a server if requested, which will create TranslationRequest objects
@@ -84,12 +79,13 @@ public class JoshuaDecoder {
 
       } else if (joshuaConfiguration.server_type == SERVER_TYPE.HTTP) {
         HttpServer server = HttpServer.create(new InetSocketAddress(port), 0);
-        Decoder.LOG(1, String.format("** HTTP Server running and listening on port %d.", port));  
+        LOG.info("** HTTP Server running and listening on port %d.", port);
         server.createContext("/", new ServerThread(null, decoder, joshuaConfiguration));
         server.setExecutor(null); // creates a default executor
         server.start();
       } else {
         System.err.println("* FATAL: unknown server type");
+        LOG.error("* FATAL: unknown server type");
         System.exit(1);
       }
       return;
@@ -112,13 +108,12 @@ public class JoshuaDecoder {
     if (joshuaConfiguration.n_best_file != null)
       out.close();
 
-    Decoder.LOG(1, "Decoding completed.");
-    Decoder.LOG(1, String.format("Memory used %.1f MB", ((Runtime.getRuntime().totalMemory() - Runtime
-        .getRuntime().freeMemory()) / 1000000.0)));
+    LOG.info("Decoding completed.");
+    LOG.info("Memory used {} MB", ((Runtime.getRuntime().totalMemory()
+        - Runtime.getRuntime().freeMemory()) / 1000000.0));
 
     /* Step-3: clean up */
     decoder.cleanUp();
-    Decoder.LOG(1, String.format("Total running time: %d seconds",
-      (System.currentTimeMillis() - startTime) / 1000));
+    LOG.info("Total running time: {} seconds",  (System.currentTimeMillis() - startTime) / 1000);
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/c21fa9e8/src/main/java/org/apache/joshua/decoder/Translation.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/joshua/decoder/Translation.java b/src/main/java/org/apache/joshua/decoder/Translation.java
index 5afae74..b13b0f6 100644
--- a/src/main/java/org/apache/joshua/decoder/Translation.java
+++ b/src/main/java/org/apache/joshua/decoder/Translation.java
@@ -35,6 +35,8 @@ import org.apache.joshua.decoder.hypergraph.HyperGraph;
 import org.apache.joshua.decoder.hypergraph.KBestExtractor;
 import org.apache.joshua.decoder.io.DeNormalize;
 import org.apache.joshua.decoder.segment_file.Sentence;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class represents translated input objects (sentences or lattices). It is aware of the source
@@ -45,6 +47,7 @@ import org.apache.joshua.decoder.segment_file.Sentence;
  */
 
 public class Translation {
+  public static final Logger LOG = LoggerFactory.getLogger(Translation.class);
   private Sentence source;
 
   /**
@@ -87,8 +90,7 @@ public class Translation {
             /* construct Viterbi output */
             final String best = getViterbiString(hypergraph);
             
-            Decoder.LOG(1, String.format("Translation %d: %.3f %s", source.id(), hypergraph.goalNode.getScore(),
-                best));
+            LOG.info("Translation {}: {} {}", source.id(), hypergraph.goalNode.getScore(), best);
             
             /*
              * Setting topN to 0 turns off k-best extraction, in which case we need to parse through
@@ -129,8 +131,8 @@ public class Translation {
           }
 
           float seconds = (float) (System.currentTimeMillis() - startTime) / 1000.0f;
-          Decoder.LOG(1, String.format("Input %d: %d-best extraction took %.3f seconds", id(),
-              joshuaConfiguration.topN, seconds));
+          LOG.info("Input {}: {}-best extraction took {} seconds", id(),
+              joshuaConfiguration.topN, seconds);
 
       } else {
         

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/c21fa9e8/src/main/java/org/apache/joshua/decoder/chart_parser/Cell.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/joshua/decoder/chart_parser/Cell.java b/src/main/java/org/apache/joshua/decoder/chart_parser/Cell.java
index 06de8c7..1ddea2c 100644
--- a/src/main/java/org/apache/joshua/decoder/chart_parser/Cell.java
+++ b/src/main/java/org/apache/joshua/decoder/chart_parser/Cell.java
@@ -28,13 +28,14 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.Map.Entry;
-import java.util.logging.Logger;
 
 import org.apache.joshua.decoder.ff.FeatureFunction;
 import org.apache.joshua.decoder.ff.state_maintenance.DPState;
 import org.apache.joshua.decoder.ff.tm.Rule;
 import org.apache.joshua.decoder.hypergraph.HGNode;
 import org.apache.joshua.decoder.hypergraph.HyperEdge;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * this class implement functions: (1) combine small itesm into larger ones using rules, and create
@@ -46,6 +47,12 @@ import org.apache.joshua.decoder.hypergraph.HyperEdge;
  */
 class Cell {
 
+  // ===============================================================
+  // Static fields
+  // ===============================================================
+  public static final Logger LOG = LoggerFactory.getLogger(Cell.class);
+
+
   // The chart this cell belongs to
   private Chart chart = null;
 
@@ -63,10 +70,6 @@ class Cell {
    */
   private List<HGNode> sortedNodes = null;
 
-  // ===============================================================
-  // Static fields
-  // ===============================================================
-  private static final Logger logger = Logger.getLogger(Cell.class.getName());
 
   // ===============================================================
   // Constructor
@@ -131,7 +134,7 @@ class Cell {
 
     int itemsInGoalBin = getSortedNodes().size();
     if (1 != itemsInGoalBin) {
-      logger.severe("the goal_bin does not have exactly one item");
+      LOG.error("the goal_bin does not have exactly one item");
       return false;
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/c21fa9e8/src/main/java/org/apache/joshua/decoder/chart_parser/Chart.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/joshua/decoder/chart_parser/Chart.java b/src/main/java/org/apache/joshua/decoder/chart_parser/Chart.java
index 0825ccb..a432fbe 100644
--- a/src/main/java/org/apache/joshua/decoder/chart_parser/Chart.java
+++ b/src/main/java/org/apache/joshua/decoder/chart_parser/Chart.java
@@ -23,13 +23,9 @@ import java.util.Arrays;
 import java.util.HashSet;
 import java.util.List;
 import java.util.PriorityQueue;
-import java.util.logging.Level;
-import java.util.logging.Logger;
 
 import org.apache.joshua.corpus.Vocabulary;
-import org.apache.joshua.decoder.Decoder;
 import org.apache.joshua.decoder.JoshuaConfiguration;
-import org.apache.joshua.decoder.chart_parser.CubePruneState;
 import org.apache.joshua.decoder.chart_parser.DotChart.DotNode;
 import org.apache.joshua.decoder.ff.FeatureFunction;
 import org.apache.joshua.decoder.ff.SourceDependentFF;
@@ -47,6 +43,8 @@ import org.apache.joshua.lattice.Arc;
 import org.apache.joshua.lattice.Lattice;
 import org.apache.joshua.lattice.Node;
 import org.apache.joshua.util.ChartSpan;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Chart class this class implements chart-parsing: (1) seeding the chart (2)
@@ -66,6 +64,7 @@ import org.apache.joshua.util.ChartSpan;
 
 public class Chart {
 
+  public static final Logger LOG = LoggerFactory.getLogger(Chart.class);
   private final JoshuaConfiguration config;
   // ===========================================================
   // Statistics
@@ -100,7 +99,6 @@ public class Chart {
 //  private ManualConstraintsHandler manualConstraintsHandler;
   private StateConstraint stateConstraint;
 
-  private static final Logger logger = Logger.getLogger(Chart.class.getName());
 
   // ===============================================================
   // Constructors
@@ -169,7 +167,7 @@ public class Chart {
       if (ff instanceof SourceDependentFF)
         ((SourceDependentFF) ff).setSource(sentence);
 
-    Decoder.LOG(2, "Finished seeding chart.");
+    LOG.debug("Finished seeding chart.");
   }
 
   /**
@@ -434,8 +432,8 @@ public class Chart {
     if (null == this.cells.get(0, sourceLength)
         || !this.goalBin.transitToGoal(this.cells.get(0, sourceLength), this.featureFunctions,
             this.sourceLength)) {
-      Decoder.LOG(1, String.format("Input %d: Parse failure (either no derivations exist or pruning is too aggressive",
-          sentence.id()));
+      LOG.info("Input {}: Parse failure (either no derivations exist or pruning is too aggressive",
+          sentence.id());
       return null;
     }
 
@@ -565,8 +563,8 @@ public class Chart {
     for (int width = 1; width <= sourceLength; width++) {
       for (int i = 0; i <= sourceLength - width; i++) {
         int j = i + width;
-        if (logger.isLoggable(Level.FINEST))
-          logger.finest(String.format("Processing span (%d, %d)", i, j));
+        if (LOG.isDebugEnabled())
+          LOG.debug("Processing span (%d, %d)", i, j);
 
         /* Skips spans for which no path exists (possible in lattices). */
         if (inputLattice.distance(i, j) == Float.POSITIVE_INFINITY) {
@@ -578,7 +576,7 @@ public class Chart {
          * rules over (i,j-1) that need the terminal at (j-1,j) and looking at
          * all split points k to expand nonterminals.
          */
-        logger.finest("Expanding cell");
+        LOG.debug("Expanding cell");
         for (int k = 0; k < this.grammars.length; k++) {
           /**
            * Each dotChart can act individually (without consulting other
@@ -592,17 +590,17 @@ public class Chart {
          * 2. The regular CKY part: add completed items onto the chart via cube
          * pruning.
          */
-        logger.finest("Adding complete items into chart");
+        LOG.debug("Adding complete items into chart");
         completeSpan(i, j);
 
         /* 3. Process unary rules. */
-        logger.finest("Adding unary items into chart");
+        LOG.debug("Adding unary items into chart");
         addUnaryNodes(this.grammars, i, j);
 
         // (4)=== in dot_cell(i,j), add dot-nodes that start from the /complete/
         // superIterms in
         // chart_cell(i,j)
-        logger.finest("Initializing new dot-items that start from complete items in this cell");
+        LOG.debug("Initializing new dot-items that start from complete items in this cell");
         for (int k = 0; k < this.grammars.length; k++) {
           if (this.grammars[k].hasRuleForSpan(i, j, inputLattice.distance(i, j))) {
             this.dotcharts[k].startDotItems(i, j);
@@ -621,18 +619,18 @@ public class Chart {
       }
     }
 
-    logStatistics(Level.INFO);
+    logStatistics();
 
     // transition_final: setup a goal item, which may have many deductions
     if (null == this.cells.get(0, sourceLength)
         || !this.goalBin.transitToGoal(this.cells.get(0, sourceLength), this.featureFunctions,
             this.sourceLength)) {
-      Decoder.LOG(1, String.format("Input %d: Parse failure (either no derivations exist or pruning is too aggressive",
-          sentence.id()));
+      LOG.info("Input {}: Parse failure (either no derivations exist or pruning is too aggressive",
+          sentence.id());
       return null;
     }
 
-    logger.fine("Finished expand");
+    LOG.debug("Finished expand");
     return new HyperGraph(this.goalBin.getSortedNodes().get(0), -1, -1, this.sentence);
   }
 
@@ -657,9 +655,9 @@ public class Chart {
   // Private methods
   // ===============================================================
 
-  private void logStatistics(Level level) {
-    Decoder.LOG(2, String.format("Input %d: Chart: added %d merged %d dot-items added: %d",
-        this.sentence.id(), this.nAdded, this.nMerged, this.nDotitemAdded));
+  private void logStatistics() {
+    LOG.info("Input {}: Chart: added {} merged {} dot-items added: {}",
+        this.sentence.id(), this.nAdded, this.nMerged, this.nDotitemAdded);
   }
 
   /**
@@ -683,8 +681,8 @@ public class Chart {
     ArrayList<HGNode> queue = new ArrayList<HGNode>(chartBin.getSortedNodes());
     HashSet<Integer> seen_lhs = new HashSet<Integer>();
 
-    if (logger.isLoggable(Level.FINEST))
-      logger.finest("Adding unary to [" + i + ", " + j + "]");
+    if (LOG.isDebugEnabled())
+      LOG.debug("Adding unary to [{}, {}]", i, j);
 
     while (queue.size() > 0) {
       HGNode node = queue.remove(0);
@@ -713,8 +711,9 @@ public class Chart {
             HGNode resNode = chartBin.addHyperEdgeInCell(states, rule, i, j, antecedents,
                 new SourcePath(), true);
 
-            if (logger.isLoggable(Level.FINEST))
-              logger.finest(rule.toString());
+            if (LOG.isDebugEnabled()){
+              LOG.debug(rule.toString());
+            }
 
             if (null != resNode && !seen_lhs.contains(resNode.lhs)) {
               queue.add(resNode);

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/c21fa9e8/src/main/java/org/apache/joshua/decoder/chart_parser/DotChart.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/joshua/decoder/chart_parser/DotChart.java b/src/main/java/org/apache/joshua/decoder/chart_parser/DotChart.java
index bcabd11..70be6cd 100644
--- a/src/main/java/org/apache/joshua/decoder/chart_parser/DotChart.java
+++ b/src/main/java/org/apache/joshua/decoder/chart_parser/DotChart.java
@@ -23,8 +23,6 @@ import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.logging.Level;
-import java.util.logging.Logger;
 
 import org.apache.joshua.corpus.Vocabulary;
 import org.apache.joshua.decoder.ff.tm.Grammar;
@@ -36,6 +34,8 @@ import org.apache.joshua.lattice.Arc;
 import org.apache.joshua.lattice.Lattice;
 import org.apache.joshua.lattice.Node;
 import org.apache.joshua.util.ChartSpan;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The DotChart handles Earley-style implicit binarization of translation rules.
@@ -59,6 +59,13 @@ import org.apache.joshua.util.ChartSpan;
 class DotChart {
 
   // ===============================================================
+  // Static fields
+  // ===============================================================
+
+  public static final Logger LOG = LoggerFactory.getLogger(DotChart.class);
+
+
+  // ===============================================================
   // Package-protected instance fields
   // ===============================================================
   /**
@@ -94,11 +101,6 @@ class DotChart {
   /* If enabled, rule terminals are treated as regular expressions. */
   private final boolean regexpMatching;
 
-  // ===============================================================
-  // Static fields
-  // ===============================================================
-
-  private static final Logger logger = Logger.getLogger(DotChart.class.getName());
 
   // ===============================================================
   // Constructors
@@ -169,8 +171,7 @@ class DotChart {
    * </ol>
    */
   void expandDotCell(int i, int j) {
-    if (logger.isLoggable(Level.FINEST))
-      logger.finest("Expanding dot cell (" + i + "," + j + ")");
+      LOG.debug("Expanding dot cell ({}, {})", i, j);
 
     /*
      * (1) If the dot is just to the left of a non-terminal variable, we look for theorems or axioms
@@ -352,15 +353,15 @@ class DotChart {
     dotcells.get(i, j).addDotNode(item);
     dotChart.nDotitemAdded++;
 
-    if (logger.isLoggable(Level.FINEST)) {
-      logger.finest(String.format("Add a dotitem in cell (%d, %d), n_dotitem=%d, %s", i, j,
-          dotChart.nDotitemAdded, srcPath));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Add a dotitem in cell ({}, {}), n_dotitem={}, {}", i, j,
+          dotChart.nDotitemAdded, srcPath);
 
       RuleCollection rules = tnode.getRuleCollection();
       if (rules != null) {
         for (Rule r : rules.getRules()) {
           // System.out.println("rule: "+r.toString());
-          logger.finest(r.toString());
+          LOG.debug(r.toString());
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/c21fa9e8/src/main/java/org/apache/joshua/decoder/chart_parser/ManualConstraintsHandler.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/joshua/decoder/chart_parser/ManualConstraintsHandler.java b/src/main/java/org/apache/joshua/decoder/chart_parser/ManualConstraintsHandler.java
index 38e9f4a..be70423 100644
--- a/src/main/java/org/apache/joshua/decoder/chart_parser/ManualConstraintsHandler.java
+++ b/src/main/java/org/apache/joshua/decoder/chart_parser/ManualConstraintsHandler.java
@@ -21,14 +21,14 @@ package org.apache.joshua.decoder.chart_parser;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
-import java.util.logging.Level;
-import java.util.logging.Logger;
 
 import org.apache.joshua.corpus.Vocabulary;
 import org.apache.joshua.decoder.ff.tm.Grammar;
 import org.apache.joshua.decoder.ff.tm.Rule;
 import org.apache.joshua.decoder.segment_file.ConstraintRule;
 import org.apache.joshua.decoder.segment_file.ConstraintSpan;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * @author Zhifei Li, <zh...@gmail.com>
@@ -36,6 +36,8 @@ import org.apache.joshua.decoder.segment_file.ConstraintSpan;
 
 public class ManualConstraintsHandler {
 
+  public static final Logger LOG = LoggerFactory.getLogger(ManualConstraintsHandler.class);
+
   // TODO: each span only has one ConstraintSpan
   // contain spans that have LHS or RHS constraints (they are always hard)
   private HashMap<String, ConstraintSpan> constraintSpansForFiltering;
@@ -43,11 +45,9 @@ public class ManualConstraintsHandler {
   // contain spans that have hard "rule" constraint; key: start_span; value:
   // end_span
   private ArrayList<Span> spansWithHardRuleConstraint;
-
   private Chart chart;
-  private Grammar grammarForConstructManualRule;
 
-  private static final Logger logger = Logger.getLogger(ManualConstraintsHandler.class.getName());
+  private Grammar grammarForConstructManualRule;
 
   public ManualConstraintsHandler(Chart chart, Grammar grammarForConstructManualRule,
       List<ConstraintSpan> constraintSpans) {
@@ -109,19 +109,16 @@ public class ManualConstraintsHandler {
 
                 // add to the chart
                 chart.addAxiom(cSpan.start(), cSpan.end(), rule, new SourcePath());
-                if (logger.isLoggable(Level.INFO))
-                  logger.info("Adding RULE constraint for span " + cSpan.start() + ", "
-                      + cSpan.end() + "; isHard=" + cSpan.isHard() + rule.getLHS());
+                LOG.info("Adding RULE constraint for span {}, {}; isHard={}",
+                    cSpan.start(), cSpan.end(),  cSpan.isHard() + "" + rule.getLHS());
                 break;
-
               default:
                 shouldAdd = true;
             }
           }
           if (shouldAdd) {
-            if (logger.isLoggable(Level.INFO))
-              logger.info("Adding LHS or RHS constraint for span " + cSpan.start() + ", "
-                  + cSpan.end());
+            LOG.info("Adding LHS or RHS constraint for span {}, {}",
+                cSpan.start(), cSpan.end());
             if (null == this.constraintSpansForFiltering) {
               this.constraintSpansForFiltering = new HashMap<String, ConstraintSpan>();
             }

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/c21fa9e8/src/main/java/org/apache/joshua/decoder/ff/StatefulFF.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/joshua/decoder/ff/StatefulFF.java b/src/main/java/org/apache/joshua/decoder/ff/StatefulFF.java
index 626eb3c..05b9358 100644
--- a/src/main/java/org/apache/joshua/decoder/ff/StatefulFF.java
+++ b/src/main/java/org/apache/joshua/decoder/ff/StatefulFF.java
@@ -27,6 +27,8 @@ import org.apache.joshua.decoder.ff.state_maintenance.DPState;
 import org.apache.joshua.decoder.ff.tm.Rule;
 import org.apache.joshua.decoder.hypergraph.HGNode;
 import org.apache.joshua.decoder.segment_file.Sentence;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Stateful features contribute dynamic programming state. Unlike earlier versions of Joshua, the
@@ -40,6 +42,7 @@ import org.apache.joshua.decoder.segment_file.Sentence;
  */
 public abstract class StatefulFF extends FeatureFunction {
 
+  public static final Logger LOG = LoggerFactory.getLogger(StatefulFF.class);
   /* Every stateful FF takes a unique index value and increments this. */
   static int GLOBAL_STATE_INDEX = 0;
 
@@ -49,7 +52,7 @@ public abstract class StatefulFF extends FeatureFunction {
   public StatefulFF(FeatureVector weights, String name, String[] args, JoshuaConfiguration config) {
     super(weights, name, args, config);
 
-    Decoder.LOG(1, "Stateful object with state index " + GLOBAL_STATE_INDEX);
+    LOG.info("Stateful object with state index {}",  GLOBAL_STATE_INDEX);
     stateIndex = GLOBAL_STATE_INDEX++;
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/c21fa9e8/src/main/java/org/apache/joshua/decoder/ff/lm/ArpaFile.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/joshua/decoder/ff/lm/ArpaFile.java b/src/main/java/org/apache/joshua/decoder/ff/lm/ArpaFile.java
index 5e66afa..0052afc 100644
--- a/src/main/java/org/apache/joshua/decoder/ff/lm/ArpaFile.java
+++ b/src/main/java/org/apache/joshua/decoder/ff/lm/ArpaFile.java
@@ -26,15 +26,15 @@ import java.io.InputStream;
 import java.util.Iterator; 
 import java.util.NoSuchElementException; 
 import java.util.Scanner; 
-import java.util.logging.Level; 
-import java.util.logging.Logger; 
 import java.util.regex.Matcher; 
 import java.util.regex.Pattern; 
 import java.util.zip.GZIPInputStream; 
 
 import org.apache.joshua.corpus.Vocabulary; 
 import org.apache.joshua.util.Regex; 
-import org.apache.joshua.util.io.LineReader; 
+import org.apache.joshua.util.io.LineReader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Utility class for reading ARPA language model files. 
@@ -43,9 +43,7 @@ import org.apache.joshua.util.io.LineReader;
  */ 
 public class ArpaFile implements Iterable<ArpaNgram> { 
 
-  /** Logger for this class. */ 
-  private static final Logger logger =  
-      Logger.getLogger(ArpaFile.class.getName()); 
+  public static final Logger LOG = LoggerFactory.getLogger(ArpaFile.class);
 
   /** Regular expression representing a blank line. */ 
   public static final Regex BLANK_LINE  = new Regex("^\\s*$"); 
@@ -111,12 +109,11 @@ public class ArpaFile implements Iterable<ArpaNgram> {
           String[] words = Regex.spaces.split(parts[1]); 
 
           for (String word : words) { 
-            if (logger.isLoggable(Level.FINE)) logger.fine("Adding to vocab: " + word); 
+            if (LOG.isDebugEnabled()) LOG.debug("Adding to vocab: " + word);
             Vocabulary.addAll(word);
           } 
-
-        } else { 
-          logger.info(line); 
+        } else {
+          LOG.info(line);
         } 
 
       } 
@@ -156,7 +153,7 @@ public class ArpaFile implements Iterable<ArpaNgram> {
 
     //  } 
 
-    logger.info("Done constructing ArpaFile"); 
+    LOG.info("Done constructing ArpaFile");
 
   } 
 
@@ -180,13 +177,13 @@ public class ArpaFile implements Iterable<ArpaNgram> {
   @SuppressWarnings("unused") 
   public int size() { 
 
-    logger.fine("Counting n-grams in ARPA file"); 
+    LOG.debug("Counting n-grams in ARPA file");
     int count=0; 
 
     for (ArpaNgram ngram : this) { 
       count++; 
     } 
-    logger.fine("Done counting n-grams in ARPA file"); 
+    LOG.debug("Done counting n-grams in ARPA file");
 
     return count; 
   } 
@@ -194,7 +191,7 @@ public class ArpaFile implements Iterable<ArpaNgram> {
   public int getOrder() throws FileNotFoundException { 
 
     Pattern pattern = Pattern.compile("^ngram (\\d+)=\\d+$"); 
-    if (logger.isLoggable(Level.FINEST)) logger.finest("Pattern is " + pattern.toString()); 
+    if (LOG.isDebugEnabled()) LOG.debug("Pattern is {}", pattern.toString());
     @SuppressWarnings("resource")
     final Scanner scanner = new Scanner(arpaFile); 
 
@@ -209,10 +206,10 @@ public class ArpaFile implements Iterable<ArpaNgram> {
       } else { 
         Matcher matcher = pattern.matcher(line); 
         if (matcher.matches()) { 
-          if (logger.isLoggable(Level.FINEST)) logger.finest("DOES   match: \'" + line + "\'"); 
+          LOG.debug("DOES  match: '{}'", line);
           order = Integer.valueOf(matcher.group(1)); 
-        } else if (logger.isLoggable(Level.FINEST)) { 
-          logger.finest("Doesn't match: \'" + line + "\'"); 
+        } else {
+          LOG.debug("Doesn't match: '{}'", line );
         } 
       } 
     } 
@@ -244,7 +241,7 @@ public class ArpaFile implements Iterable<ArpaNgram> {
       // Eat initial header lines 
       while (scanner.hasNextLine()) { 
         String line = scanner.nextLine(); 
-        logger.finest("Discarding line: " + line); 
+        LOG.debug("Discarding line: {}", line);
         if (NGRAM_HEADER.matches(line)) { 
           break; 
         } 
@@ -323,13 +320,9 @@ public class ArpaFile implements Iterable<ArpaNgram> {
         } 
 
       }; 
-    } catch (FileNotFoundException e) { 
-      logger.severe(e.toString()); 
-      return null; 
-    } catch (IOException e) { 
-      logger.severe(e.toString()); 
+    } catch (IOException e) {
+      LOG.error(e.getMessage(), e);
       return null; 
     } 
-
-  } 
+  }
 }
\ No newline at end of file

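On the exception-handling change at the end of ArpaFile above: passing the Throwable as the last argument to LOG.error() records the full stack trace, whereas the old logger.severe(e.toString()) kept only the message. A small sketch of that idiom (class and method names are hypothetical, not from this patch):

import java.io.IOException;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ErrorLoggingExample {

  public static final Logger LOG = LoggerFactory.getLogger(ErrorLoggingExample.class);

  String headerOrNull(String path) {
    try {
      return readHeader(path);
    } catch (IOException e) {
      // Message plus Throwable: SLF4J logs both the message and the stack trace.
      // With placeholders, a trailing Throwable is still treated as the exception
      // (since SLF4J 1.6), e.g. LOG.error("Could not read {}", path, e);
      LOG.error(e.getMessage(), e);
      return null;
    }
  }

  private String readHeader(String path) throws IOException {
    throw new IOException("could not open " + path); // stand-in for real file I/O
  }
}
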
http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/c21fa9e8/src/main/java/org/apache/joshua/decoder/ff/lm/DefaultNGramLanguageModel.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/joshua/decoder/ff/lm/DefaultNGramLanguageModel.java b/src/main/java/org/apache/joshua/decoder/ff/lm/DefaultNGramLanguageModel.java
index 4ff8f59..1191eac 100644
--- a/src/main/java/org/apache/joshua/decoder/ff/lm/DefaultNGramLanguageModel.java
+++ b/src/main/java/org/apache/joshua/decoder/ff/lm/DefaultNGramLanguageModel.java
@@ -19,10 +19,10 @@
 package org.apache.joshua.decoder.ff.lm;
 
 import java.util.Arrays;
-import java.util.logging.Level;
-import java.util.logging.Logger;
 
 import org.apache.joshua.corpus.Vocabulary;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class provides a default implementation for the Equivalent LM State optimization (namely,
@@ -36,8 +36,7 @@ import org.apache.joshua.corpus.Vocabulary;
  */
 public abstract class DefaultNGramLanguageModel implements NGramLanguageModel {
 
-  /** Logger for this class. */
-  private static final Logger logger = Logger.getLogger(DefaultNGramLanguageModel.class.getName());
+  public static final Logger LOG = LoggerFactory.getLogger(DefaultNGramLanguageModel.class);
 
   protected final int ngramOrder;
   
@@ -88,10 +87,11 @@ public abstract class DefaultNGramLanguageModel implements NGramLanguageModel {
       // start_index=2. othercase, need to check)
       int[] ngram = Arrays.copyOfRange(sentence, 0, j);
       double logProb = ngramLogProbability(ngram, order);
-      if (logger.isLoggable(Level.FINE)) {
+      if (LOG.isDebugEnabled()) {
         String words = Vocabulary.getWords(ngram);
-        logger.fine("\tlogp ( " + words + " )  =  " + logProb);
+        LOG.debug("\tlogp ({})  =  {}", words, logProb);
       }
+
       probability += logProb;
     }
 
@@ -99,9 +99,9 @@ public abstract class DefaultNGramLanguageModel implements NGramLanguageModel {
     for (int i = 0; i <= sentenceLength - order; i++) {
       int[] ngram = Arrays.copyOfRange(sentence, i, i + order);
       double logProb = ngramLogProbability(ngram, order);
-      if (logger.isLoggable(Level.FINE)) {
+      if (LOG.isDebugEnabled()) {
         String words = Vocabulary.getWords(ngram);
-        logger.fine("\tlogp ( " + words + " )  =  " + logProb);
+        LOG.debug("\tlogp ( {} )  = {} ", words, logProb);
       }
       probability += logProb;
     }

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/c21fa9e8/src/main/java/org/apache/joshua/decoder/ff/lm/berkeley_lm/LMGrammarBerkeley.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/joshua/decoder/ff/lm/berkeley_lm/LMGrammarBerkeley.java b/src/main/java/org/apache/joshua/decoder/ff/lm/berkeley_lm/LMGrammarBerkeley.java
index 859ca6a..d642fe9 100644
--- a/src/main/java/org/apache/joshua/decoder/ff/lm/berkeley_lm/LMGrammarBerkeley.java
+++ b/src/main/java/org/apache/joshua/decoder/ff/lm/berkeley_lm/LMGrammarBerkeley.java
@@ -37,6 +37,7 @@ import edu.berkeley.nlp.lm.WordIndexer;
 import edu.berkeley.nlp.lm.cache.ArrayEncodedCachingLmWrapper;
 import edu.berkeley.nlp.lm.io.LmReaders;
 import edu.berkeley.nlp.lm.util.StrUtils;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class wraps Berkeley LM.
@@ -45,6 +46,8 @@ import edu.berkeley.nlp.lm.util.StrUtils;
  */
 public class LMGrammarBerkeley extends DefaultNGramLanguageModel {
 
+  public static final org.slf4j.Logger LOG = LoggerFactory.getLogger(LMGrammarBerkeley.class);
+
   private ArrayEncodedNgramLanguageModel<String> lm;
 
   private static final Logger logger = Logger.getLogger(LMGrammarBerkeley.class.getName());
@@ -83,10 +86,10 @@ public class LMGrammarBerkeley extends DefaultNGramLanguageModel {
 
     try { // try binary format (even gzipped)
       lm = (ArrayEncodedNgramLanguageModel<String>) LmReaders.<String>readLmBinary(lm_file);
-      Decoder.LOG(1, "Loading Berkeley LM from binary " + lm_file);
+      LOG.info("Loading Berkeley LM from binary {}", lm_file);
     } catch (RuntimeException e) {
       ConfigOptions opts = new ConfigOptions();
-      Decoder.LOG(1, "Loading Berkeley LM from ARPA file " + lm_file);
+      LOG.info("Loading Berkeley LM from ARPA file {}", lm_file);
       final StringWordIndexer wordIndexer = new StringWordIndexer();
       ArrayEncodedNgramLanguageModel<String> berkeleyLm =
           LmReaders.readArrayEncodedLmFromArpa(lm_file, false, wordIndexer, opts, order);

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/c21fa9e8/src/main/java/org/apache/joshua/decoder/ff/lm/bloomfilter_lm/BloomFilterLanguageModel.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/joshua/decoder/ff/lm/bloomfilter_lm/BloomFilterLanguageModel.java b/src/main/java/org/apache/joshua/decoder/ff/lm/bloomfilter_lm/BloomFilterLanguageModel.java
index 21dd819..7d00f2f 100644
--- a/src/main/java/org/apache/joshua/decoder/ff/lm/bloomfilter_lm/BloomFilterLanguageModel.java
+++ b/src/main/java/org/apache/joshua/decoder/ff/lm/bloomfilter_lm/BloomFilterLanguageModel.java
@@ -29,7 +29,6 @@ import java.io.ObjectInputStream;
 import java.io.ObjectOutput;
 import java.io.ObjectOutputStream;
 import java.util.HashMap;
-import java.util.logging.Logger;
 import java.util.zip.GZIPInputStream;
 import java.util.zip.GZIPOutputStream;
 
@@ -37,6 +36,8 @@ import org.apache.joshua.corpus.Vocabulary;
 import org.apache.joshua.decoder.ff.lm.DefaultNGramLanguageModel;
 import org.apache.joshua.util.Regex;
 import org.apache.joshua.util.io.LineReader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * An n-gram language model with linearly-interpolated Witten-Bell smoothing, using a Bloom filter
@@ -62,7 +63,7 @@ public class BloomFilterLanguageModel extends DefaultNGramLanguageModel implemen
   /**
    * The logger for this class.
    */
-  public static final Logger logger = Logger.getLogger(BloomFilterLanguageModel.class.getName());
+  public static final Logger LOG = LoggerFactory.getLogger(BloomFilterLanguageModel.class);
 
   /**
    * The Bloom filter data structure itself.

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/c21fa9e8/src/main/java/org/apache/joshua/decoder/ff/lm/buildin_lm/TrieLM.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/joshua/decoder/ff/lm/buildin_lm/TrieLM.java b/src/main/java/org/apache/joshua/decoder/ff/lm/buildin_lm/TrieLM.java
index 654561c..68244ad 100644
--- a/src/main/java/org/apache/joshua/decoder/ff/lm/buildin_lm/TrieLM.java
+++ b/src/main/java/org/apache/joshua/decoder/ff/lm/buildin_lm/TrieLM.java
@@ -27,17 +27,15 @@ import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.Map;
 import java.util.Scanner;
-import java.util.logging.Level;
-import java.util.logging.Logger;
 
-import org.apache.joshua.corpus.SymbolTable;
 import org.apache.joshua.corpus.Vocabulary;
-import  org.apache.joshua.decoder.JoshuaConfiguration;
 import  org.apache.joshua.decoder.ff.lm.AbstractLM;
 import  org.apache.joshua.decoder.ff.lm.ArpaFile;
 import  org.apache.joshua.decoder.ff.lm.ArpaNgram;
 import  org.apache.joshua.util.Bits;
 import  org.apache.joshua.util.Regex;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Relatively memory-compact language model
@@ -56,9 +54,7 @@ import  org.apache.joshua.util.Regex;
  */
 public class TrieLM extends AbstractLM { //DefaultNGramLanguageModel {
 
-  /** Logger for this class. */
-  private static Logger logger =
-      Logger.getLogger(TrieLM.class.getName());
+  public static final Logger LOG = LoggerFactory.getLogger(TrieLM.class);
 
   /**
    * Node ID for the root node.
@@ -101,7 +97,7 @@ public class TrieLM extends AbstractLM { //DefaultNGramLanguageModel {
     super(arpaFile.getVocab().size(), arpaFile.getOrder());
 
     int ngramCounts = arpaFile.size();
-    if (logger.isLoggable(Level.FINE)) logger.fine("ARPA file contains " + ngramCounts + " n-grams");
+    if (LOG.isDebugEnabled()) LOG.debug("ARPA file contains {} n-grams", ngramCounts);
 
     this.children = new HashMap<Long,Integer>(ngramCounts);
     this.logProbs = new HashMap<Long,Float>(ngramCounts);
@@ -112,9 +108,10 @@ public class TrieLM extends AbstractLM { //DefaultNGramLanguageModel {
     int lineNumber = 0;
     for (ArpaNgram ngram : arpaFile) {
       lineNumber += 1;
-      if (lineNumber%100000==0) logger.info("Line: " + lineNumber);
+      if (lineNumber%100000==0) LOG.info("Line: {}", lineNumber);
 
-      if (logger.isLoggable(Level.FINEST)) logger.finest(ngram.order() + "-gram: (" + ngram.getWord() + " | " + Arrays.toString(ngram.getContext()) + ")");
+      if (LOG.isDebugEnabled()) LOG.debug("{}-gram: ({} | {})", ngram.order(), ngram.getWord(),
+          Arrays.toString(ngram.getContext()));
       int word = ngram.getWord();
 
       int[] context = ngram.getContext();
@@ -130,7 +127,7 @@ public class TrieLM extends AbstractLM { //DefaultNGramLanguageModel {
               childID = children.get(key);
             } else {
               childID = ++nodeCounter;
-              if (logger.isLoggable(Level.FINEST)) logger.finest("children.put(" + contextNodeID + ":"+context[i] + " , " + childID + ")");
+              LOG.debug("children.put({}:{} , {})", contextNodeID, context[i], childID);
               children.put(key, childID);
             }
             contextNodeID = childID;
@@ -141,7 +138,7 @@ public class TrieLM extends AbstractLM { //DefaultNGramLanguageModel {
         {
           long key = Bits.encodeAsLong(contextNodeID, word);
           float logProb = ngram.getValue();
-          if (logger.isLoggable(Level.FINEST)) logger.finest("logProbs.put(" + contextNodeID + ":"+word + " , " + logProb);
+          if (LOG.isDebugEnabled()) LOG.debug("logProbs.put({}:{}, {}", contextNodeID, word, logProb);
           this.logProbs.put(key, logProb);
         }
       }
@@ -156,7 +153,8 @@ public class TrieLM extends AbstractLM { //DefaultNGramLanguageModel {
             wordChildID = children.get(backoffNodeKey);
           } else {
             wordChildID = ++nodeCounter;
-            if (logger.isLoggable(Level.FINEST)) logger.finest("children.put(" + backoffNodeID + ":"+word + " , " + wordChildID + ")");
+            if (LOG.isDebugEnabled())
+              LOG.debug("children.put({}:{} , {})", backoffNodeID, word, wordChildID );
             children.put(backoffNodeKey, wordChildID);
           }
           backoffNodeID = wordChildID;
@@ -168,7 +166,8 @@ public class TrieLM extends AbstractLM { //DefaultNGramLanguageModel {
               childID = children.get(key);
             } else {
               childID = ++nodeCounter;
-              if (logger.isLoggable(Level.FINEST)) logger.finest("children.put(" + backoffNodeID + ":"+context[i] + " , " + childID + ")");
+              if (LOG.isDebugEnabled())
+                LOG.debug("children.put({}:{}, )", backoffNodeID, context[i], childID);
               children.put(key, childID);
             }
             backoffNodeID = childID;
@@ -178,7 +177,8 @@ public class TrieLM extends AbstractLM { //DefaultNGramLanguageModel {
         // Store the backoff for this n-gram at this node in the trie
         {
           float backoff = ngram.getBackoff();
-          if (logger.isLoggable(Level.FINEST)) logger.finest("backoffs.put(" + backoffNodeID + ":" +word+" , " + backoff + ")");
+          if (LOG.isDebugEnabled())
+            LOG.debug("backoffs.put({}:{}, {})", backoffNodeID, word, backoff);
           this.backoffs.put(backoffNodeID, backoff);
         }
       }
@@ -252,29 +252,29 @@ public class TrieLM extends AbstractLM { //DefaultNGramLanguageModel {
 
   public static void main(String[] args) throws IOException {
 
-    logger.info("Constructing ARPA file");
+    LOG.info("Constructing ARPA file");
     ArpaFile arpaFile = new ArpaFile(args[0]);
 
-    logger.info("Getting symbol table");
+    LOG.info("Getting symbol table");
     Vocabulary vocab = arpaFile.getVocab();
 
-    logger.info("Constructing TrieLM");
+    LOG.info("Constructing TrieLM");
     TrieLM lm = new TrieLM(arpaFile);
 
     int n = Integer.valueOf(args[2]);
-    logger.info("N-gram order will be " + n);
+    LOG.info("N-gram order will be {}", n);
 
     Scanner scanner = new Scanner(new File(args[1]));
 
     LinkedList<String> wordList = new LinkedList<String>();
     LinkedList<String> window = new LinkedList<String>();
 
-    logger.info("Starting to scan " + args[1]);
+    LOG.info("Starting to scan {}", args[1]);
     while (scanner.hasNext()) {
 
-      logger.info("Getting next line...");
+      LOG.info("Getting next line...");
       String line = scanner.nextLine();
-      logger.info("Line: " + line);
+      LOG.info("Line: {}", line);
 
       String[] words = Regex.spaces.split(line);
       wordList.clear();
@@ -315,15 +315,15 @@ public class TrieLM extends AbstractLM { //DefaultNGramLanguageModel {
             i++;
           }
 
-          logger.info("logProb " + window.toString() + " = " + lm.ngramLogProbability(wordIDs, n));
+          LOG.info("logProb {} = {}", window.toString(), lm.ngramLogProbability(wordIDs, n));
         }
       }
 
       double logProb = lm.sentenceLogProbability(sentence, n, 2);//.ngramLogProbability(ids, n);
       double prob = Math.exp(logProb);
 
-      logger.info("Total logProb = " + logProb);
-      logger.info("Total    prob = " + prob);
+      LOG.info("Total logProb = {}", logProb);
+      LOG.info("Total    prob = {}",  prob);
     }
 
   }

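The TrieLM hunks above (and AbstractGrammar below) keep an explicit LOG.isDebugEnabled() guard in a few places even though the messages are parameterized. That is deliberate where the arguments themselves are costly to build, e.g. Arrays.toString over a context array: argument expressions are evaluated before debug() is called, regardless of the level. A short illustrative sketch (made-up class and method names):

import java.util.Arrays;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GuardedDebugExample {

  public static final Logger LOG = LoggerFactory.getLogger(GuardedDebugExample.class);

  void traceNgram(int word, int[] context) {
    // Cheap argument: no guard needed, rendering is deferred by SLF4J.
    LOG.debug("word id = {}", word);

    // Costly argument: Arrays.toString(context) would run even when DEBUG is off,
    // so guard the call to skip that work entirely.
    if (LOG.isDebugEnabled()) {
      LOG.debug("{}-gram context: {}", context.length, Arrays.toString(context));
    }
  }
}
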
http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/c21fa9e8/src/main/java/org/apache/joshua/decoder/ff/tm/AbstractGrammar.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/joshua/decoder/ff/tm/AbstractGrammar.java b/src/main/java/org/apache/joshua/decoder/ff/tm/AbstractGrammar.java
index 188c2a9..3b8eb39 100644
--- a/src/main/java/org/apache/joshua/decoder/ff/tm/AbstractGrammar.java
+++ b/src/main/java/org/apache/joshua/decoder/ff/tm/AbstractGrammar.java
@@ -20,8 +20,6 @@ package org.apache.joshua.decoder.ff.tm;
 
 import java.util.HashSet;
 import java.util.List;
-import java.util.logging.Level;
-import java.util.logging.Logger;
 
 import org.apache.joshua.corpus.Vocabulary;
 import org.apache.joshua.decoder.JoshuaConfiguration;
@@ -33,6 +31,8 @@ import org.apache.joshua.lattice.Lattice;
 import org.apache.joshua.lattice.Node;
 
 import cern.colt.Arrays;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Partial implementation of the <code>Grammar</code> interface that provides logic for sorting a
@@ -49,8 +49,7 @@ import cern.colt.Arrays;
 public abstract class AbstractGrammar implements Grammar {
 
   /** Logger for this class. */
-  private static final Logger logger = Logger.getLogger(AbstractGrammar.class.getName());
-
+  public static final Logger LOG = LoggerFactory.getLogger(AbstractGrammar.class);
   /**
    * Indicates whether the rules in this grammar have been sorted based on the latest feature
    * function values.
@@ -137,7 +136,7 @@ public abstract class AbstractGrammar implements Grammar {
    */
   protected void setSorted(boolean sorted) {
     this.sorted = sorted;
-    logger.fine("This grammar is now sorted: " + this);
+    LOG.debug("This grammar is now sorted: {}",  this);
   }
 
   /**
@@ -154,13 +153,13 @@ public abstract class AbstractGrammar implements Grammar {
     if (node != null) {
       if (node.hasRules()) {
         RuleCollection rules = node.getRuleCollection();
-        if (logger.isLoggable(Level.FINE))
-          logger.fine("Sorting node " + Arrays.toString(rules.getSourceSide()));
+        if (LOG.isDebugEnabled())
+          LOG.debug("Sorting node {}", Arrays.toString(rules.getSourceSide()));
 
         /* This causes the rules at this trie node to be sorted */
         rules.getSortedRules(models);
 
-        if (logger.isLoggable(Level.FINEST)) {
+        if (LOG.isDebugEnabled()) {
           StringBuilder s = new StringBuilder();
           for (Rule r : rules.getSortedRules(models)) {
             s.append("\n\t" + r.getLHS() + " ||| " + Arrays.toString(r.getFrench()) + " ||| "
@@ -168,7 +167,7 @@ public abstract class AbstractGrammar implements Grammar {
                 + r.getEstimatedCost() + "  " + r.getClass().getName() + "@"
                 + Integer.toHexString(System.identityHashCode(r)));
           }
-          logger.finest(s.toString());
+          LOG.debug(s.toString());
         }
       }
 
@@ -176,8 +175,8 @@ public abstract class AbstractGrammar implements Grammar {
         for (Trie child : node.getExtensions()) {
           sort(child, models);
         }
-      } else if (logger.isLoggable(Level.FINE)) {
-        logger.fine("Node has 0 children to extend: " + node);
+      } else if (LOG.isDebugEnabled()) {
+        LOG.debug("Node has 0 children to extend: {}", node);
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/c21fa9e8/src/main/java/org/apache/joshua/decoder/ff/tm/CreateGlueGrammar.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/joshua/decoder/ff/tm/CreateGlueGrammar.java b/src/main/java/org/apache/joshua/decoder/ff/tm/CreateGlueGrammar.java
index a1ed815..bbb1e57 100644
--- a/src/main/java/org/apache/joshua/decoder/ff/tm/CreateGlueGrammar.java
+++ b/src/main/java/org/apache/joshua/decoder/ff/tm/CreateGlueGrammar.java
@@ -26,7 +26,6 @@ import java.io.File;
 import java.io.IOException;
 import java.util.HashSet;
 import java.util.Set;
-import java.util.logging.Logger;
 
 import org.apache.joshua.corpus.Vocabulary;
 import org.apache.joshua.decoder.JoshuaConfiguration;
@@ -35,14 +34,16 @@ import org.apache.joshua.util.io.LineReader;
 import org.kohsuke.args4j.CmdLineException;
 import org.kohsuke.args4j.CmdLineParser;
 import org.kohsuke.args4j.Option;
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class CreateGlueGrammar {
-  
-  
+
+
+  public static final Logger LOG = LoggerFactory.getLogger(CreateGlueGrammar.class);
+
   private final Set<String> nonTerminalSymbols = new HashSet<>();
-  private static final Logger log = Logger.getLogger(CreateGlueGrammar.class.getName());
-  
+
   @Option(name = "--grammar", aliases = {"-g"}, required = true, usage = "provide grammar to determine list of NonTerminal symbols.")
   private String grammarPath;
   
@@ -84,7 +85,7 @@ public class CreateGlueGrammar {
         int lhsStart = line.indexOf("[") + 1;
         int lhsEnd = line.indexOf("]");
         if (lhsStart < 1 || lhsEnd < 0) {
-          log.info(String.format("malformed rule: %s\n", line));
+          LOG.info("malformed rule: {}\n", line);
           continue;
         }
         final String lhs = line.substring(lhsStart, lhsEnd);
@@ -92,10 +93,8 @@ public class CreateGlueGrammar {
       }
     }
     
-    log.info(
-        String.format("%d nonTerminal symbols read: %s",
-        nonTerminalSymbols.size(),
-        nonTerminalSymbols.toString()));
+    LOG.info("{} nonTerminal symbols read: {}", nonTerminalSymbols.size(),
+        nonTerminalSymbols.toString());
 
     // write glue rules to stdout
     
@@ -119,7 +118,7 @@ public class CreateGlueGrammar {
       parser.parseArgument(args);
       glueCreator.run();
     } catch (CmdLineException e) {
-      log.info(e.toString());
+      LOG.error(e.getMessage(), e);
       parser.printUsage(System.err);
       System.exit(1);
     }

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/c21fa9e8/src/main/java/org/apache/joshua/decoder/ff/tm/GrammarReader.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/joshua/decoder/ff/tm/GrammarReader.java b/src/main/java/org/apache/joshua/decoder/ff/tm/GrammarReader.java
index e340a85..77ee48e 100644
--- a/src/main/java/org/apache/joshua/decoder/ff/tm/GrammarReader.java
+++ b/src/main/java/org/apache/joshua/decoder/ff/tm/GrammarReader.java
@@ -20,12 +20,12 @@ package org.apache.joshua.decoder.ff.tm;
 
 import java.io.IOException;
 import java.util.Iterator;
-import java.util.logging.Level;
-import java.util.logging.Logger;
 
 import org.apache.joshua.corpus.Vocabulary;
 import org.apache.joshua.decoder.Decoder;
 import org.apache.joshua.util.io.LineReader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This is a base class for simple, ASCII line-based grammars that are stored on disk.
@@ -35,6 +35,8 @@ import org.apache.joshua.util.io.LineReader;
  */
 public abstract class GrammarReader<R extends Rule> implements Iterable<R>, Iterator<R> {
 
+  public static final Logger LOG = LoggerFactory.getLogger(GrammarReader.class);
+
   protected static String fieldDelimiter;
   protected static String nonTerminalRegEx;
   protected static String nonTerminalCleanRegEx;
@@ -46,7 +48,6 @@ public abstract class GrammarReader<R extends Rule> implements Iterable<R>, Iter
   protected String lookAhead;
   protected int numRulesRead;
 
-  private static final Logger logger = Logger.getLogger(GrammarReader.class.getName());
 
   // dummy constructor for
   public GrammarReader() {
@@ -65,7 +66,7 @@ public abstract class GrammarReader<R extends Rule> implements Iterable<R>, Iter
           + (null != e.getMessage() ? e.getMessage() : "No details available. Sorry."), e);
     }
 
-    Decoder.LOG(1, String.format("Reading grammar from file %s...", fileName));
+    LOG.info("Reading grammar from file {}...", fileName);
     numRulesRead = 0;
     advanceReader();
   }
@@ -86,8 +87,7 @@ public abstract class GrammarReader<R extends Rule> implements Iterable<R>, Iter
         this.reader.close();
       } catch (IOException e) {
         // FIXME: is this the right logging level?
-        if (logger.isLoggable(Level.WARNING))
-          logger.info("Error closing grammar file stream: " + this.fileName);
+        LOG.warn("Error closing grammar file stream: {}",  this.fileName);
       }
       this.reader = null;
     }
@@ -97,13 +97,13 @@ public abstract class GrammarReader<R extends Rule> implements Iterable<R>, Iter
    * For correct behavior <code>close</code> must be called on every GrammarReader, however this
    * code attempts to avoid resource leaks.
    * 
-   * @see joshua.util.io.LineReader
+   * @see org.apache.joshua.util.io.LineReader
    */
   @Override
   protected void finalize() throws Throwable {
     if (this.reader != null) {
-      logger.severe("Grammar file stream was not closed, this indicates a coding error: "
-          + this.fileName);
+      LOG.error("Grammar file stream was not closed, this indicates a coding error: {}",
+          this.fileName);
     }
 
     this.close();
@@ -120,7 +120,8 @@ public abstract class GrammarReader<R extends Rule> implements Iterable<R>, Iter
       lookAhead = reader.readLine();
       numRulesRead++;
     } catch (IOException e) {
-      logger.severe("Error reading grammar from file: " + fileName);
+      LOG.error("Error reading grammar from file: {}", fileName);
+      LOG.error(e.getMessage(), e);
     }
     if (lookAhead == null && reader != null) {
       this.close();

http://git-wip-us.apache.org/repos/asf/incubator-joshua/blob/c21fa9e8/src/main/java/org/apache/joshua/decoder/ff/tm/MonolingualRule.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/joshua/decoder/ff/tm/MonolingualRule.java b/src/main/java/org/apache/joshua/decoder/ff/tm/MonolingualRule.java
index 812e669..56bb01d 100644
--- a/src/main/java/org/apache/joshua/decoder/ff/tm/MonolingualRule.java
+++ b/src/main/java/org/apache/joshua/decoder/ff/tm/MonolingualRule.java
@@ -21,10 +21,11 @@ package org.apache.joshua.decoder.ff.tm;
 import java.util.Arrays; 
 import java.util.List; 
 import java.util.Map; 
-import java.util.logging.Logger; 
 
 import org.apache.joshua.corpus.SymbolTable; 
-import org.apache.joshua.decoder.ff.FeatureFunction; 
+import org.apache.joshua.decoder.ff.FeatureFunction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * this class implements MonolingualRule 
@@ -34,8 +35,7 @@ import org.apache.joshua.decoder.ff.FeatureFunction;
  */ 
 public class MonolingualRule extends Rule { 
 
-  private static final Logger logger = 
-      Logger.getLogger(MonolingualRule.class.getName()); 
+  public static final Logger LOG = LoggerFactory.getLogger(MonolingualRule.class);
 
   //=============================================================== 
   // Instance Fields 
@@ -175,7 +175,7 @@ public class MonolingualRule extends Rule {
 
   public final float getEstCost() { 
     if (est_cost <= Double.NEGATIVE_INFINITY) { 
-      logger.warning("The est cost is neg infinity; must be bad rule; rule is:\n" + toString()); 
+      LOG.warn("The est cost is neg infinity; must be bad rule; rule is:\n {}", this);
     } 
     return est_cost; 
   }