Posted to commits@opennlp.apache.org by jo...@apache.org on 2016/12/23 17:47:20 UTC

[3/7] opennlp git commit: OPENNLP-899: Replace deprecated code from Tokenizer Trainer

OPENNLP-899: Replace deprecated code from Tokenizer Trainer

This closes #13


Project: http://git-wip-us.apache.org/repos/asf/opennlp/repo
Commit: http://git-wip-us.apache.org/repos/asf/opennlp/commit/927ee0fc
Tree: http://git-wip-us.apache.org/repos/asf/opennlp/tree/927ee0fc
Diff: http://git-wip-us.apache.org/repos/asf/opennlp/diff/927ee0fc

Branch: refs/heads/897
Commit: 927ee0fc7f47c7c6ccbd72a184aaa6215d77943c
Parents: bbda5de
Author: smarthi <sm...@apache.org>
Authored: Wed Dec 21 14:35:49 2016 -0500
Committer: Kottmann <jo...@apache.org>
Committed: Thu Dec 22 16:25:32 2016 +0100

----------------------------------------------------------------------
 .../tools/tokenize/TokenizerCrossValidator.java    |  4 +---
 .../opennlp/tools/tokenize/TokenizerFactory.java   |  2 +-
 .../java/opennlp/tools/tokenize/TokenizerME.java   |  8 ++++----
 .../opennlp/tools/tokenize/TokenizerModel.java     | 17 ++++++-----------
 .../opennlp/tools/eval/ArvoresDeitadasEval.java    |  2 +-
 .../opennlp/tools/tokenize/TokenizerTestUtil.java  | 11 +++++------
 .../opennlp/uima/tokenize/TokenizerTrainer.java    |  6 +++++-
 7 files changed, 23 insertions(+), 27 deletions(-)
----------------------------------------------------------------------
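
A note on the migration pattern in this diff: callers are moved off the deprecated
TokenizerME.train(String, ObjectStream, boolean, TrainingParameters) overload and onto the
factory-based TokenizerME.train(ObjectStream, TokenizerFactory, TrainingParameters). The
following is a minimal sketch of the new call pattern, modeled on the TokenizerTestUtil
changes further down; the sample sentences, cutoff setting and class name are illustrative
only, and real training needs far more data.

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    import opennlp.tools.tokenize.TokenSample;
    import opennlp.tools.tokenize.TokenizerFactory;
    import opennlp.tools.tokenize.TokenizerME;
    import opennlp.tools.tokenize.TokenizerModel;
    import opennlp.tools.util.CollectionObjectStream;
    import opennlp.tools.util.Span;
    import opennlp.tools.util.TrainingParameters;
    import opennlp.tools.util.model.ModelUtil;

    public class TrainTokenizerSketch {

      public static void main(String[] args) throws IOException {
        // Illustrative in-memory samples; the spans mark token boundaries.
        List<TokenSample> samples = new ArrayList<>();
        samples.add(new TokenSample("year", new Span[] {new Span(0, 4)}));
        samples.add(new TokenSample("year,", new Span[] {new Span(0, 4), new Span(4, 5)}));

        // The three nulls select the default factory subclass, no abbreviation
        // dictionary and the default alphanumeric pattern.
        TokenizerFactory factory = TokenizerFactory.create(null, "en", null, true, null);

        // Cutoff 0 mirrors the test utilities below so this tiny sample set trains at all.
        TrainingParameters params = ModelUtil.createDefaultTrainingParameters();
        params.put(TrainingParameters.CUTOFF_PARAM, Integer.toString(0));

        // The non-deprecated overload this commit migrates to.
        TokenizerModel model = TokenizerME.train(new CollectionObjectStream<>(samples),
            factory, params);

        // The trained model backs a TokenizerME instance.
        TokenizerME tokenizer = new TokenizerME(model);
        System.out.println(String.join("|", tokenizer.tokenize("year,")));
      }
    }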


http://git-wip-us.apache.org/repos/asf/opennlp/blob/927ee0fc/opennlp-tools/src/main/java/opennlp/tools/tokenize/TokenizerCrossValidator.java
----------------------------------------------------------------------
diff --git a/opennlp-tools/src/main/java/opennlp/tools/tokenize/TokenizerCrossValidator.java b/opennlp-tools/src/main/java/opennlp/tools/tokenize/TokenizerCrossValidator.java
index 811165c..3ca3c1d 100644
--- a/opennlp-tools/src/main/java/opennlp/tools/tokenize/TokenizerCrossValidator.java
+++ b/opennlp-tools/src/main/java/opennlp/tools/tokenize/TokenizerCrossValidator.java
@@ -94,9 +94,7 @@ public class TokenizerCrossValidator {
          partitioner.next();
 
        // Maybe throws IOException if temporary file handling fails ...
-       TokenizerModel model;
-
-      model = TokenizerME.train(trainingSampleStream, this.factory, params);
+       TokenizerModel model = TokenizerME.train(trainingSampleStream, this.factory, params);
 
        TokenizerEvaluator evaluator = new TokenizerEvaluator(new TokenizerME(model), listeners);
 

http://git-wip-us.apache.org/repos/asf/opennlp/blob/927ee0fc/opennlp-tools/src/main/java/opennlp/tools/tokenize/TokenizerFactory.java
----------------------------------------------------------------------
diff --git a/opennlp-tools/src/main/java/opennlp/tools/tokenize/TokenizerFactory.java b/opennlp-tools/src/main/java/opennlp/tools/tokenize/TokenizerFactory.java
index 4c67ce1..f9e789a 100644
--- a/opennlp-tools/src/main/java/opennlp/tools/tokenize/TokenizerFactory.java
+++ b/opennlp-tools/src/main/java/opennlp/tools/tokenize/TokenizerFactory.java
@@ -37,7 +37,7 @@ public class TokenizerFactory extends BaseToolFactory {
 
   private String languageCode;
   private Dictionary abbreviationDictionary;
-  private Boolean useAlphaNumericOptimization = null;
+  private Boolean useAlphaNumericOptimization;
   private Pattern alphaNumericPattern;
 
   private static final String ABBREVIATIONS_ENTRY_NAME = "abbreviations.dictionary";

http://git-wip-us.apache.org/repos/asf/opennlp/blob/927ee0fc/opennlp-tools/src/main/java/opennlp/tools/tokenize/TokenizerME.java
----------------------------------------------------------------------
diff --git a/opennlp-tools/src/main/java/opennlp/tools/tokenize/TokenizerME.java b/opennlp-tools/src/main/java/opennlp/tools/tokenize/TokenizerME.java
index 5412c28..4c4c638 100644
--- a/opennlp-tools/src/main/java/opennlp/tools/tokenize/TokenizerME.java
+++ b/opennlp-tools/src/main/java/opennlp/tools/tokenize/TokenizerME.java
@@ -52,7 +52,7 @@ import opennlp.tools.util.model.ModelUtil;
  * must be instantiated which can share one <code>TokenizerModel</code> instance
  * to safe memory.
  * <p>
- * To train a new model {{@link #train(String, ObjectStream, boolean, TrainingParameters)} method
+ * To train a new model {{@link #train(ObjectStream, TokenizerFactory, TrainingParameters)} method
  * can be used.
  * <p>
  * Sample usage:
@@ -250,8 +250,7 @@ public class TokenizerME extends AbstractTokenizer {
 
     MaxentModel maxentModel = trainer.train(eventStream);
 
-    return new TokenizerModel(maxentModel, manifestInfoEntries,
-        factory);
+    return new TokenizerModel(maxentModel, manifestInfoEntries, factory);
   }
 
   /**
@@ -338,7 +337,8 @@ public class TokenizerME extends AbstractTokenizer {
    */
   public static TokenizerModel train(String languageCode, ObjectStream<TokenSample> samples,
       boolean useAlphaNumericOptimization) throws IOException {
-    return train(languageCode, samples, useAlphaNumericOptimization, ModelUtil.createDefaultTrainingParameters());
+    return train(samples, TokenizerFactory.create(null, languageCode, null, useAlphaNumericOptimization, null),
+      ModelUtil.createDefaultTrainingParameters());
   }
 
   /**

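The replacement above routes the old String/boolean overload through TokenizerFactory.create.
As used in this commit, the five arguments are the factory subclass name (null picks the
default TokenizerFactory), the language code, an abbreviation dictionary, the
useAlphaNumericOptimization flag, and a custom alphanumeric pattern. Below is a short sketch
of a factory that also supplies an abbreviation dictionary; the class name and dictionary
file name are hypothetical.

    import java.io.FileInputStream;
    import java.io.IOException;

    import opennlp.tools.dictionary.Dictionary;
    import opennlp.tools.tokenize.TokenizerFactory;

    public class TokenizerFactorySketch {

      // "pt-abbr.dict" is a placeholder for an actual abbreviation dictionary file.
      static TokenizerFactory createPortugueseFactory() throws IOException {
        Dictionary abbreviations = new Dictionary(new FileInputStream("pt-abbr.dict"));
        return TokenizerFactory.create(null, "pt", abbreviations, true, null);
      }
    }
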
http://git-wip-us.apache.org/repos/asf/opennlp/blob/927ee0fc/opennlp-tools/src/main/java/opennlp/tools/tokenize/TokenizerModel.java
----------------------------------------------------------------------
diff --git a/opennlp-tools/src/main/java/opennlp/tools/tokenize/TokenizerModel.java b/opennlp-tools/src/main/java/opennlp/tools/tokenize/TokenizerModel.java
index 1af60f4..e63b946 100644
--- a/opennlp-tools/src/main/java/opennlp/tools/tokenize/TokenizerModel.java
+++ b/opennlp-tools/src/main/java/opennlp/tools/tokenize/TokenizerModel.java
@@ -124,7 +124,7 @@ public final class TokenizerModel extends BaseModel {
    * @throws IOException if reading from the stream fails in anyway
    * @throws InvalidFormatException if the stream doesn't have the expected format
    */
-  public TokenizerModel(InputStream in) throws IOException, InvalidFormatException {
+  public TokenizerModel(InputStream in) throws IOException {
     super(COMPONENT_NAME, in);
   }
 
@@ -134,9 +134,8 @@ public final class TokenizerModel extends BaseModel {
    * @param modelFile the file containing the tokenizer model
    *
    * @throws IOException if reading from the stream fails in anyway
-   * @throws InvalidFormatException if the stream doesn't have the expected format
    */
-  public TokenizerModel(File modelFile) throws IOException, InvalidFormatException {
+  public TokenizerModel(File modelFile) throws IOException {
     super(COMPONENT_NAME, modelFile);
   }
 
@@ -146,9 +145,8 @@ public final class TokenizerModel extends BaseModel {
    * @param modelURL the URL pointing to the tokenizer model
    *
    * @throws IOException if reading from the stream fails in anyway
-   * @throws InvalidFormatException if the stream doesn't have the expected format
    */
-  public TokenizerModel(URL modelURL) throws IOException, InvalidFormatException {
+  public TokenizerModel(URL modelURL) throws IOException {
     super(COMPONENT_NAME, modelURL);
   }
 
@@ -196,10 +194,7 @@ public final class TokenizerModel extends BaseModel {
   }
 
   public boolean useAlphaNumericOptimization() {
-    if (getFactory() != null) {
-      return getFactory().isUseAlphaNumericOptmization();
-    }
-    return false;
+    return getFactory() != null && getFactory().isUseAlphaNumericOptmization();
   }
 
   public static void main(String[] args) throws IOException {
@@ -224,8 +219,8 @@ public final class TokenizerModel extends BaseModel {
     AbstractModel model = new BinaryGISModelReader(new DataInputStream(
         new FileInputStream(modelName))).getModel();
 
-    TokenizerModel packageModel = new TokenizerModel(languageCode, model,
-        alphaNumericOptimization);
+    TokenizerModel packageModel = new TokenizerModel(model, null,
+      TokenizerFactory.create(null, languageCode, null, alphaNumericOptimization, null));
 
     OutputStream out = null;
     try {

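Two things worth noting in the TokenizerModel hunks above: the throws InvalidFormatException
clauses were dropped because InvalidFormatException is a subclass of IOException, so callers
that catch IOException are unaffected, and useAlphaNumericOptimization() now folds the null
check into a single boolean expression. Loading and using a packaged model is unchanged; a
minimal sketch follows, where "en-token.bin" stands in for an actual model file.

    import java.io.File;
    import java.io.IOException;
    import java.util.Arrays;

    import opennlp.tools.tokenize.TokenizerME;
    import opennlp.tools.tokenize.TokenizerModel;

    public class LoadTokenizerSketch {

      public static void main(String[] args) throws IOException {
        // "en-token.bin" is a placeholder; any serialized TokenizerModel works here.
        TokenizerModel model = new TokenizerModel(new File("en-token.bin"));
        TokenizerME tokenizer = new TokenizerME(model);
        System.out.println(Arrays.toString(tokenizer.tokenize("A sentence, to tokenize.")));
      }
    }
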
http://git-wip-us.apache.org/repos/asf/opennlp/blob/927ee0fc/opennlp-tools/src/test/java/opennlp/tools/eval/ArvoresDeitadasEval.java
----------------------------------------------------------------------
diff --git a/opennlp-tools/src/test/java/opennlp/tools/eval/ArvoresDeitadasEval.java b/opennlp-tools/src/test/java/opennlp/tools/eval/ArvoresDeitadasEval.java
index 35f0e00..33d6ffe 100644
--- a/opennlp-tools/src/test/java/opennlp/tools/eval/ArvoresDeitadasEval.java
+++ b/opennlp-tools/src/test/java/opennlp/tools/eval/ArvoresDeitadasEval.java
@@ -72,7 +72,7 @@ public class ArvoresDeitadasEval {
 
   private static final String LANG = "pt";
 
-  private static final TrainingParameters getPerceptronZeroCutoff() {
+  private static TrainingParameters getPerceptronZeroCutoff() {
     TrainingParameters params = ModelUtil.createDefaultTrainingParameters();
     params.put(TrainingParameters.ALGORITHM_PARAM,
         PerceptronTrainer.PERCEPTRON_VALUE);

http://git-wip-us.apache.org/repos/asf/opennlp/blob/927ee0fc/opennlp-tools/src/test/java/opennlp/tools/tokenize/TokenizerTestUtil.java
----------------------------------------------------------------------
diff --git a/opennlp-tools/src/test/java/opennlp/tools/tokenize/TokenizerTestUtil.java b/opennlp-tools/src/test/java/opennlp/tools/tokenize/TokenizerTestUtil.java
index f8eb85b..ffe5101 100644
--- a/opennlp-tools/src/test/java/opennlp/tools/tokenize/TokenizerTestUtil.java
+++ b/opennlp-tools/src/test/java/opennlp/tools/tokenize/TokenizerTestUtil.java
@@ -19,11 +19,10 @@
 package opennlp.tools.tokenize;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
+
 import java.io.IOException;
-import java.io.InputStreamReader;
 import java.util.ArrayList;
 import java.util.List;
-
 import opennlp.tools.formats.ResourceAsStreamFactory;
 import opennlp.tools.util.CollectionObjectStream;
 import opennlp.tools.util.InputStreamFactory;
@@ -38,7 +37,7 @@ import opennlp.tools.util.TrainingParameters;
 public class TokenizerTestUtil {
 
   static TokenizerModel createSimpleMaxentTokenModel() throws IOException {
-    List<TokenSample> samples = new ArrayList<TokenSample>();
+    List<TokenSample> samples = new ArrayList<>();
 
     samples.add(new TokenSample("year", new Span[]{new Span(0, 4)}));
     samples.add(new TokenSample("year,", new Span[]{
@@ -59,8 +58,8 @@ public class TokenizerTestUtil {
     mlParams.put(TrainingParameters.ITERATIONS_PARAM, Integer.toString(100));
     mlParams.put(TrainingParameters.CUTOFF_PARAM, Integer.toString(0));
 
-    return TokenizerME.train("en", new CollectionObjectStream<TokenSample>(samples), true,
-        mlParams);
+    return TokenizerME.train(new CollectionObjectStream<>(samples),
+      TokenizerFactory.create(null, "en", null, true, null), mlParams);
   }
 
   static TokenizerModel createMaxentTokenModel() throws IOException {
@@ -75,7 +74,7 @@ public class TokenizerTestUtil {
     mlParams.put(TrainingParameters.ITERATIONS_PARAM, Integer.toString(100));
     mlParams.put(TrainingParameters.CUTOFF_PARAM, Integer.toString(0));
 
-    return TokenizerME.train("en", samples, true, mlParams);
+    return TokenizerME.train(samples, TokenizerFactory.create(null, "en", null, true, null), mlParams);
   }
 
 }

http://git-wip-us.apache.org/repos/asf/opennlp/blob/927ee0fc/opennlp-uima/src/main/java/opennlp/uima/tokenize/TokenizerTrainer.java
----------------------------------------------------------------------
diff --git a/opennlp-uima/src/main/java/opennlp/uima/tokenize/TokenizerTrainer.java b/opennlp-uima/src/main/java/opennlp/uima/tokenize/TokenizerTrainer.java
index 66d1dfa..2b36051 100644
--- a/opennlp-uima/src/main/java/opennlp/uima/tokenize/TokenizerTrainer.java
+++ b/opennlp-uima/src/main/java/opennlp/uima/tokenize/TokenizerTrainer.java
@@ -32,6 +32,7 @@ import java.util.List;
 import opennlp.tools.ml.maxent.GIS;
 import opennlp.tools.tokenize.TokenSample;
 import opennlp.tools.tokenize.TokenSampleStream;
+import opennlp.tools.tokenize.TokenizerFactory;
 import opennlp.tools.tokenize.TokenizerME;
 import opennlp.tools.tokenize.TokenizerModel;
 import opennlp.tools.util.InputStreamFactory;
@@ -40,6 +41,7 @@ import opennlp.tools.util.ObjectStream;
 import opennlp.tools.util.ObjectStreamUtils;
 import opennlp.tools.util.PlainTextByLineStream;
 import opennlp.tools.util.Span;
+import opennlp.tools.util.model.ModelUtil;
 import opennlp.uima.util.CasConsumerUtil;
 import opennlp.uima.util.ContainingConstraint;
 import opennlp.uima.util.OpennlpUtil;
@@ -257,7 +259,9 @@ public final class TokenizerTrainer extends CasConsumer_ImplBase {
       samples = new SampleTraceStream<>(samples, samplesOut);
     }
 
-    tokenModel = TokenizerME.train(language, samples, isSkipAlphaNumerics);
+    tokenModel = TokenizerME.train(samples,
+      TokenizerFactory.create(null, language, null, isSkipAlphaNumerics, null),
+      ModelUtil.createDefaultTrainingParameters());
 
     // dereference to allow garbage collection
     tokenSamples = null;