Posted to commits@opennlp.apache.org by ma...@apache.org on 2023/01/21 12:27:52 UTC

[opennlp-sandbox] 01/01: updates sandbox component 'opennlp-similarity' to be compatible with the latest opennlp-tools release

This is an automated email from the ASF dual-hosted git repository.

mawiesne pushed a commit to branch migrate-opennlp-similarity-to-opennlp-tools-2_1_0
in repository https://gitbox.apache.org/repos/asf/opennlp-sandbox.git

commit 493f99a533234ab71ba13665b40d26dd9effc05f
Author: Martin Wiesner <ma...@hs-heilbronn.de>
AuthorDate: Fri Jan 20 21:53:30 2023 +0100

    updates sandbox component 'opennlp-similarity' to be compatible with the latest opennlp-tools release
    
    - adjusts opennlp-tools to 2.1.0
    - adjusts parent project (org.apache:apache) to version 18
    - adjusts Java language level to 11
    - updates several dependencies to more recent versions to mitigate known CVEs
    - removes the binary version of `jverbnet` from the lib folder in favor of the most recent, Maven-managed version of this dependency
    - adjusts code to changes in various dependencies
    - adjusts some array declarations to comply with Java, not C style (see the sketch at the end of this message)
    - adapts codebase to work with Solr & Lucene version 8.11.x
    - fixes existing JUnit tests
    - removes all binary dep jars from 'lib' folder - all managed via Maven now
    - kicks out "Auto-generated catch block" comments
    - removes unused imports
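
    For context on the array-style bullet: a minimal before/after sketch
    (not verbatim from this commit; 'domains' is borrowed from
    ClassifierTrainingSetIndexer, the Old/New suffixes are illustrative):

        // before: C-style declaration, brackets on the variable name
        String domainsOld[] = { "legal", "health", "computing" };
        // after: Java-style declaration, brackets on the type
        String[] domainsNew = { "legal", "health", "computing" };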
---
 opennlp-similarity/lib/edu.mit.jverbnet-1.2.0.jar  | Bin 168444 -> 0 bytes
 opennlp-similarity/lib/ejml-0.23.jar               | Bin 211938 -> 0 bytes
 opennlp-similarity/lib/javax.json.jar              | Bin 85147 -> 0 bytes
 opennlp-similarity/lib/joda-time.jar               | Bin 570478 -> 0 bytes
 opennlp-similarity/lib/jollyday.jar                | Bin 200993 -> 0 bytes
 opennlp-similarity/lib/xom.jar                     | Bin 313253 -> 0 bytes
 opennlp-similarity/pom.xml                         | 177 ++--
 .../review_builder/WebPageReviewExtractor.java     |   4 +-
 .../ClassifierTrainingSetIndexer.java              | 386 ++++----
 .../tools/doc_classifier/DocClassifier.java        |  17 +-
 ...cClassifierTrainingSetMultilingualExtender.java | 116 +--
 .../enron_email_recognizer/EmailNormalizer.java    |  30 +-
 .../EmailTrainingSetFormer.java                    |  14 +-
 .../java/opennlp/tools/fca/BasicLevelMetrics.java  | 133 ++-
 .../java/opennlp/tools/fca/ConceptLattice.java     |  35 +-
 .../tools/jsmlearning/IntersectionSetBuilder.java  |  28 +-
 .../java/opennlp/tools/parse_thicket/Pair.java     |   2 +-
 .../tools/parse_thicket/VerbNetProcessor.java      |   5 +-
 .../parse_thicket/apps/SnippetToParagraph.java     |   6 +-
 .../apps/WebPageContentSentenceExtractor.java      |   4 +-
 .../tools/parse_thicket/apps/WebPageExtractor.java |   7 +-
 .../CommunicativeActionsArcBuilder.java            | 142 ++-
 .../parse_thicket/matching/LemmaGeneralizer.java   |   2 +-
 .../matching/ParseTreeNodeGeneralizer.java         |  15 +-
 .../matching/PhraseGroupGeneralizer.java           |  39 +-
 .../LinguisticPatternStructure.java                |  11 +-
 .../pattern_structure/PhrasePatternStructure.java  |  12 +-
 .../tools/similarity/apps/ContentGenerator.java    | 183 ++--
 .../similarity/apps/ContentGeneratorSupport.java   |   8 +-
 .../apps/GeneratedSentenceProcessor.java           |  24 +-
 .../similarity/apps/RelatedSentenceFinder.java     | 230 ++---
 .../similarity/apps/SearchResultsProcessor.java    |  20 +-
 .../apps/SpeechRecognitionResultsProcessor.java    |  28 +-
 .../tools/similarity/apps/YahooAnswersMiner.java   |  26 +-
 .../apps/solr/ContentGeneratorRequestHandler.java  |  96 +-
 .../apps/solr/IterativeQueryComponent.java         |  39 +-
 .../apps/solr/IterativeSearchRequestHandler.java   | 447 +++++-----
 .../apps/solr/NLProgram2CodeRequestHandler.java    |  79 +-
 .../apps/solr/QueryExpansionRequestHandler.java    |  57 +-
 .../solr/SearchResultsReRankerRequestHandler.java  |  80 +-
 .../apps/solr/SyntGenRequestHandler.java           | 144 ++-
 .../tools/similarity/apps/utils/FileHandler.java   |  11 +-
 .../tools/similarity/apps/utils/PageFetcher.java   |  56 +-
 .../opennlp/tools/similarity/apps/utils/Pair.java  |  11 +-
 .../apps/utils/StringDistanceMeasurer.java         |  16 +-
 .../opennlp/tools/similarity/apps/utils/Utils.java |  56 +-
 .../main/java/opennlp/tools/stemmer/PStemmer.java  | 984 ++++++++++-----------
 .../textsimilarity/GeneralizationListReducer.java  |   6 +-
 .../tools/textsimilarity/ParseTreeChunk.java       |   5 +-
 .../textsimilarity/ParseTreeChunkListScorer.java   |   4 +-
 .../ParseTreeMatcherDeterministic.java             |  54 +-
 .../ParserChunker2MatcherProcessor.java            |  16 +-
 .../tools/word2vec/W2VDistanceMeasurer.java        |  72 +-
 .../MultiSentenceSearchResultsProcessorTest.java   |  16 +-
 .../apps/RelatedSentenceFinderTest.java            |  66 +-
 .../apps/StoryDiscourseNavigatorTest.java          |  32 +-
 .../pattern_structure/JSMLearnerOnLatticeTest.java | 133 ++-
 .../pattern_structure/PhraseTest.java              |   2 -
 .../apps/SearchResultsProcessorTest.java           |  15 +-
 .../SpeechRecognitionResultsProcessorTest.java     |  15 +-
 .../apps/taxo_builder/TaxonomyBuildMatchTest.java  |  36 +-
 .../GeneralizationListReducerTest.java             |  25 +-
 .../tools/textsimilarity/LemmaFormManagerTest.java |  28 +-
 .../ParseTreeChunkListScorerTest.java              |  13 +-
 .../tools/textsimilarity/ParseTreeChunkTest.java   |  69 +-
 .../tools/textsimilarity/SyntMatcherTest.java      |  60 +-
 .../ParserChunker2MatcherProcessorTest.java        |  81 +-
 .../chunker2matcher/PhraseNodeTest.java            |  19 +-
 .../src/test/resources/models/en-sent.bin          | Bin 0 -> 98533 bytes
 69 files changed, 2010 insertions(+), 2537 deletions(-)

diff --git a/opennlp-similarity/lib/edu.mit.jverbnet-1.2.0.jar b/opennlp-similarity/lib/edu.mit.jverbnet-1.2.0.jar
deleted file mode 100644
index eba3a97..0000000
Binary files a/opennlp-similarity/lib/edu.mit.jverbnet-1.2.0.jar and /dev/null differ
diff --git a/opennlp-similarity/lib/ejml-0.23.jar b/opennlp-similarity/lib/ejml-0.23.jar
deleted file mode 100644
index 60a37df..0000000
Binary files a/opennlp-similarity/lib/ejml-0.23.jar and /dev/null differ
diff --git a/opennlp-similarity/lib/javax.json.jar b/opennlp-similarity/lib/javax.json.jar
deleted file mode 100644
index 09967d8..0000000
Binary files a/opennlp-similarity/lib/javax.json.jar and /dev/null differ
diff --git a/opennlp-similarity/lib/joda-time.jar b/opennlp-similarity/lib/joda-time.jar
deleted file mode 100644
index b2aca95..0000000
Binary files a/opennlp-similarity/lib/joda-time.jar and /dev/null differ
diff --git a/opennlp-similarity/lib/jollyday.jar b/opennlp-similarity/lib/jollyday.jar
deleted file mode 100644
index a6bf8b3..0000000
Binary files a/opennlp-similarity/lib/jollyday.jar and /dev/null differ
diff --git a/opennlp-similarity/lib/xom.jar b/opennlp-similarity/lib/xom.jar
deleted file mode 100644
index 4eb88da..0000000
Binary files a/opennlp-similarity/lib/xom.jar and /dev/null differ
diff --git a/opennlp-similarity/pom.xml b/opennlp-similarity/pom.xml
index bfa34b6..5d76857 100644
--- a/opennlp-similarity/pom.xml
+++ b/opennlp-similarity/pom.xml
@@ -14,72 +14,101 @@
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
 	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
 	<modelVersion>4.0.0</modelVersion>
-
 	<parent>
 		<groupId>org.apache</groupId>
 		<artifactId>apache</artifactId>
-		<version>9</version>
+		<!-- TODO OPENNLP-1452 once this is resolved, move to 29 as well. -->
+		<version>18</version>
 		<relativePath />
 	</parent>
 
 	<groupId>org.apache.opennlp</groupId>
 	<artifactId>opennlp-similarity</artifactId>
-	<version>0.1.0</version>
+	<version>2.1.1-SNAPSHOT</version>
 	<packaging>jar</packaging>
 
-	<name>OpenNLP Tool Similarity distribution</name>
-
-	<scm>
-		<connection>scm:svn:http://svn.apache.org/repos/asf/opennlp/sandbox/opennlp-similarity/tags/opennlp-similarity-0.0.1</connection>
-		<developerConnection>scm:svn:https://svn.apache.org/repos/asf/opennlp/sandbox/opennlp-similarity/tags/opennlp-similarity-0.0.1</developerConnection>
-		<url>http://svn.apache.org/viewvc/opennlp/tags/opennlp-similarity-1.1.0</url>
-	</scm>
-	<prerequisites>
-		<maven>3.0</maven>
-	</prerequisites>
-	<distributionManagement>
-	  <snapshotRepository>
-	    <id>ossrh</id>
-	    <url>https://oss.sonatype.org/content/repositories/snapshots</url>
-	  </snapshotRepository>
-	</distributionManagement>
-
+	<name>Apache OpenNLP Tool Similarity distribution</name>
+	
+	<properties>
+		<nd4j.version>0.4-rc3.6</nd4j.version>
+		<dl4j.version>1.0.0-M2.1</dl4j.version>
+		<maven.compiler.source>11</maven.compiler.source>
+		<maven.compiler.target>11</maven.compiler.target>
+	</properties>
 
 	<repositories>
 		<repository>
-			<id>net.billylieurance</id>
-			<name>BillyLieuranceNet</name>
-			<url>http://www.billylieurance.net/maven2</url>
+			<id>central</id>
+			<name>Maven Central Repository</name>
+			<url>https://repo1.maven.org/maven2</url>
+		</repository>
+		<repository>
+			<id>billylieurance-net</id>
+			<url>https://www.billylieurance.net/maven2</url>
+			<snapshots>
+				<enabled>false</enabled>
+			</snapshots>
 		</repository>
 	</repositories>
-	
-	<properties>
-              <nd4j.version>0.4-rc3.4</nd4j.version> 
-              <dl4j.version>0.4-rc3.3</dl4j.version>
-   </properties>
+
+	<dependencyManagement>
+		<dependencies>
+			<dependency>
+				<groupId>org.apache.httpcomponents</groupId>
+				<artifactId>httpclient</artifactId>
+				<version>4.5.13</version>
+			</dependency>
+			<dependency>
+				<groupId>org.apache.httpcomponents</groupId>
+				<artifactId>httpclient-cache</artifactId>
+				<version>4.5.13</version>
+			</dependency>
+			<dependency>
+				<groupId>org.apache.httpcomponents</groupId>
+				<artifactId>httpcore</artifactId>
+				<version>4.4.14</version>
+			</dependency>
+			<dependency>
+				<groupId>org.apache.httpcomponents</groupId>
+				<artifactId>httpmime</artifactId>
+				<version>4.5.13</version>
+			</dependency>
+			<dependency>
+				<groupId>org.apache.httpcomponents</groupId>
+				<artifactId>fluent-hc</artifactId>
+				<version>4.5.13</version>
+			</dependency>
+			<!-- Required to avoid IllegalAccessError by Lombok during compilation -->
+			<dependency>
+				<groupId>org.projectlombok</groupId>
+				<artifactId>lombok</artifactId>
+				<version>1.18.22</version>
+			</dependency>
+		</dependencies>
+	</dependencyManagement>
 
 	<dependencies>
 		<dependency>
-			<groupId>org.slf4j</groupId>
-			<artifactId>slf4j-log4j12</artifactId>
-			<version>1.6.4</version>
+			<groupId>org.apache.opennlp</groupId>
+			<artifactId>opennlp-tools</artifactId>
+			<version>2.1.0</version>
 		</dependency>
+
 		<dependency>
-  			<groupId>org.apache.opennlp</groupId>
-  			<artifactId>opennlp-tools</artifactId>
-  			<version>1.6.0</version>
+			<groupId>org.slf4j</groupId>
+			<artifactId>slf4j-log4j12</artifactId>
+			<version>1.7.33</version>
 		</dependency>
-
 		<dependency>
 			<groupId>junit</groupId>
 			<artifactId>junit</artifactId>
-			<version>4.8.1</version>
+			<version>4.13.2</version>
 			<scope>test</scope>
 		</dependency>
 		<dependency>
 			<groupId>commons-lang</groupId>
 			<artifactId>commons-lang</artifactId>
-			<version>2.5</version>
+			<version>2.6</version>
 		</dependency>
 
 		<dependency>
@@ -90,28 +119,23 @@
 		<dependency>
 			<groupId>org.apache.tika</groupId>
 			<artifactId>tika-app</artifactId>
-			<version>1.6</version>
+			<version>2.6.0</version>
 		</dependency>
 		<dependency>
 			<groupId>net.sf.opencsv</groupId>
 			<artifactId>opencsv</artifactId>
 			<version>2.0</version>
 		</dependency>
-		<dependency>
-			<groupId>org.apache.lucene</groupId>
-			<artifactId>lucene-core</artifactId>
-			<version>4.10.0</version>
-		</dependency>
 
 		<dependency>
 			<groupId>org.apache.solr</groupId>
 			<artifactId>solr-core</artifactId>
-			<version>4.10.0</version>
+			<version>8.11.2</version>
 		</dependency>
 		<dependency>
 			<groupId>commons-codec</groupId>
 			<artifactId>commons-codec</artifactId>
-			<version>1.7</version>
+			<version>1.13</version>
 		</dependency>
 		<dependency>
 			<groupId>commons-logging</groupId>
@@ -128,31 +152,26 @@
 			<artifactId>commons-math3</artifactId>
 			<version>3.5</version>
 		</dependency>
-
+		
 		<dependency>
 			<groupId>org.apache.httpcomponents</groupId>
 			<artifactId>httpclient</artifactId>
-			<version>4.2.1</version>
 		</dependency>
 		<dependency>
 			<groupId>org.apache.httpcomponents</groupId>
 			<artifactId>httpclient-cache</artifactId>
-			<version>4.2.1</version>
 		</dependency>
 		<dependency>
 			<groupId>org.apache.httpcomponents</groupId>
 			<artifactId>httpcore</artifactId>
-			<version>4.2.1</version>
 		</dependency>
 		<dependency>
 			<groupId>org.apache.httpcomponents</groupId>
 			<artifactId>httpmime</artifactId>
-			<version>4.2.1</version>
 		</dependency>
 		<dependency>
 			<groupId>org.apache.httpcomponents</groupId>
 			<artifactId>fluent-hc</artifactId>
-			<version>4.2.1</version>
 		</dependency>
 
 		<dependency>
@@ -189,14 +208,20 @@
 		<dependency>
 			<groupId>net.billylieurance.azuresearch</groupId>
 			<artifactId>azure-bing-search-java</artifactId>
-			<version>0.11.0</version>
+			<version>0.12.0</version>
 		</dependency>
+
 		<dependency>
 			<groupId>edu.mit</groupId>
 			<artifactId>jverbnet</artifactId>
-			<version>1.2.0</version>
-			<systemPath>${project.basedir}/lib/edu.mit.jverbnet-1.2.0.jar</systemPath>
-			<scope>system</scope>
+			<version>1.2.0.1</version>
+			<exclusions>
+				<!-- Avoids problems with conflicting slf4j bindings at runtime -->
+				<exclusion>
+					<groupId>org.slf4j</groupId>
+					<artifactId>log4j-over-slf4j</artifactId>
+				</exclusion>
+			</exclusions>
 		</dependency>
 		
 		<dependency>
@@ -205,20 +230,20 @@
 			<version>2.7.1</version>
 		</dependency>
 		<dependency>
-            <groupId>org.deeplearning4j</groupId>
-            <artifactId>deeplearning4j-ui</artifactId>
-            <version>${dl4j.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.deeplearning4j</groupId>
-            <artifactId>deeplearning4j-nlp</artifactId>
-            <version>${dl4j.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.nd4j</groupId>
-            <artifactId>nd4j-jblas</artifactId> 
-            <version>${nd4j.version}</version>
-        </dependency>
+				<groupId>org.deeplearning4j</groupId>
+				<artifactId>deeplearning4j-ui</artifactId>
+				<version>${dl4j.version}</version>
+		</dependency>
+		<dependency>
+				<groupId>org.deeplearning4j</groupId>
+				<artifactId>deeplearning4j-nlp</artifactId>
+				<version>${dl4j.version}</version>
+		</dependency>
+		<dependency>
+				<groupId>org.nd4j</groupId>
+				<artifactId>nd4j-jblas</artifactId>
+				<version>${nd4j.version}</version>
+		</dependency>
 	</dependencies>
 
 	<build>
@@ -227,8 +252,8 @@
 				<groupId>org.apache.maven.plugins</groupId>
 				<artifactId>maven-compiler-plugin</artifactId>
 				<configuration>
-					<source>1.5</source>
-					<target>1.5</target>
+					<source>11</source>
+					<target>11</target>
 					<compilerArgument>-Xlint</compilerArgument>
 				</configuration>
 			</plugin>
@@ -248,7 +273,6 @@
 			
 			<plugin>
 				<artifactId>maven-antrun-plugin</artifactId>
-				<version>1.6</version>
 				<executions>
 					<execution>
 						<id>generate checksums for binary artifacts</id>
@@ -313,15 +337,6 @@
 				</executions>
 			</plugin>
 			-->
-			 <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-compiler-plugin</artifactId>
-                <version>3.1</version>
-                <configuration>
-                    <source>1.8</source>
-                    <target>1.8</target>
-                </configuration>
-            </plugin>
 			<plugin>
 		      <groupId>org.sonatype.plugins</groupId>
 		      <artifactId>nexus-staging-maven-plugin</artifactId>
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/apps/review_builder/WebPageReviewExtractor.java b/opennlp-similarity/src/main/java/opennlp/tools/apps/review_builder/WebPageReviewExtractor.java
index f9fb43b..15179b9 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/apps/review_builder/WebPageReviewExtractor.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/apps/review_builder/WebPageReviewExtractor.java
@@ -85,7 +85,7 @@ public class WebPageReviewExtractor extends WebPageExtractor {
 					continue;
 				item = item.replace("<span>","").replace("</span>","").replace("<b>","").replace("</b>","");
 				if (item.length()>80 && MinedSentenceProcessor.acceptableMinedSentence(item)==null){
-					System.out.println("Rejected sentence by GeneratedSentenceProcessor.acceptableMinedSentence = "+item);
+					// System.out.println("Rejected sentence by GeneratedSentenceProcessor.acceptableMinedSentence = "+item);
 					continue;
 				}
 				productFeaturesList .add(item);
@@ -175,7 +175,7 @@ public class WebPageReviewExtractor extends WebPageExtractor {
 		for (String sentenceOrMultSent : longestSents)
 		{
 			if (MinedSentenceProcessor.acceptableMinedSentence(sentenceOrMultSent)==null){
-				System.out.println("Rejected sentence by GeneratedSentenceProcessor.acceptableMinedSentence = "+sentenceOrMultSent);
+				// System.out.println("Rejected sentence by GeneratedSentenceProcessor.acceptableMinedSentence = "+sentenceOrMultSent);
 				continue;
 			}
 			// aaa. hhh hhh.  kkk . kkk ll hhh. lll kkk n.
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/doc_classifier/ClassifierTrainingSetIndexer.java b/opennlp-similarity/src/main/java/opennlp/tools/doc_classifier/ClassifierTrainingSetIndexer.java
index ad851e3..52cd3c7 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/doc_classifier/ClassifierTrainingSetIndexer.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/doc_classifier/ClassifierTrainingSetIndexer.java
@@ -20,11 +20,8 @@ import java.io.File;
 import java.io.FileReader;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 
-import opennlp.tools.jsmlearning.ProfileReaderWriter;
-
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -38,229 +35,224 @@ import org.apache.lucene.util.Version;
 import org.apache.tika.Tika;
 
 public class ClassifierTrainingSetIndexer {
-	public static String resourceDir = new File(".").getAbsolutePath().replace("/.", "") + "/src/main/resources";
-    public static String INDEX_PATH = "/classif",
-            CLASSIF_TRAINING_CORPUS_PATH = "/training_corpus";
-    protected ArrayList<File> queue = new ArrayList<File>();
-    Tika tika = new Tika();
+  
+  public static String resourceDir = new File(".").getAbsolutePath().replace("/.", "") + "/src/main/resources";
+  public static String INDEX_PATH = "/classif", CLASSIF_TRAINING_CORPUS_PATH = "/training_corpus";
+  protected ArrayList<File> queue = new ArrayList<>();
+  Tika tika = new Tika();
+
+  IndexWriter indexWriter = null;
+  protected static String[] domains =  new String[] { "legal", "health", "computing", "engineering", "business" };
+  private String absolutePathTrainingSet=null;
+
+  public ClassifierTrainingSetIndexer() {
+
+    try {
+      initIndexWriter(resourceDir);
+    } catch (Exception e) {
+      e.printStackTrace();
+    }
+  }
+
+  public ClassifierTrainingSetIndexer(String absolutePathTrainingSet) {
+    this.absolutePathTrainingSet = absolutePathTrainingSet;
+    try {
+      initIndexWriter(resourceDir);
+    } catch (Exception e) {
+      e.printStackTrace();
+    }
+  }
 
-    IndexWriter indexWriter = null;
-    protected static String[] domains =  new String[] { "legal", "health",
-   	 "computing", "engineering", "business" };
-	private String absolutePathTrainingSet=null;
+  public void indexTrainingSet() {
 
-    public ClassifierTrainingSetIndexer() {
+    try {
+      if (absolutePathTrainingSet==null)
+        indexFileOrDirectory(resourceDir
+                + CLASSIF_TRAINING_CORPUS_PATH);
+      else
+        indexFileOrDirectory(
+                this.absolutePathTrainingSet);
 
-        try {
-            initIndexWriter(resourceDir);
-        } catch (Exception e) {
-            e.printStackTrace();
-        }
-    } 
+    } catch (IOException e1) {
+      e1.printStackTrace();
+    }
+    try {
+      indexWriter.commit();
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+  }
+  /*
+  private void indexTrainingSample(String text, String flag, int id)
+          throws IOException {
 
-    public ClassifierTrainingSetIndexer(String absolutePathTrainingSet) {
-    	this.absolutePathTrainingSet = absolutePathTrainingSet;
-        try {
-            initIndexWriter(resourceDir);
-        } catch (Exception e) {
-            e.printStackTrace();
-        }
+      Document doc = new Document();
+      doc.add(new StringField("id", new Integer(id).toString(),
+              Field.Store.YES));
+      doc.add(new TextField("text", text.toLowerCase(), Field.Store.YES));
+      doc.add(new StringField("class", flag.toLowerCase(), Field.Store.YES));
+      indexWriter.addDocument(doc);
+
+  }
+  */
+
+  private void addFiles(File file) {
+
+    if (!file.exists()) {
+      System.out.println(file + " does not exist.");
     }
+    if (file.isDirectory()) {
+      for (File f : file.listFiles()) {
+        if (f.getName().startsWith("."))
+          continue;
+        addFiles(f);
+        System.out.println(f.getName());
+      }
+    } else {
+      queue.add(file);
 
-    public void indexTrainingSet() {
-        
-        try {
-        	if (absolutePathTrainingSet==null)
-            indexFileOrDirectory(resourceDir
-                    + CLASSIF_TRAINING_CORPUS_PATH);
-        	else
-        		 indexFileOrDirectory(
-                         this.absolutePathTrainingSet);
-        		
-        } catch (IOException e1) {
-            e1.printStackTrace();
-        }
-        try {
-            indexWriter.commit();
-        } catch (IOException e) {
-            e.printStackTrace();
-        }
     }
-/*
-    private void indexTrainingSample(String text, String flag, int id)
-            throws IOException {
+  }
 
-        Document doc = new Document();
-        doc.add(new StringField("id", new Integer(id).toString(),
-                Field.Store.YES));
-        doc.add(new TextField("text", text.toLowerCase(), Field.Store.YES));
-        doc.add(new StringField("class", flag.toLowerCase(), Field.Store.YES));
-        indexWriter.addDocument(doc);
+  // index last folder name, before filename itself
 
-    }
-*/
-    private void addFiles(File file) {
+  public void indexFileOrDirectory(String fileName) throws IOException {
+    addFiles(new File(fileName));
 
-        if (!file.exists()) {
-            System.out.println(file + " does not exist.");
-        }
-        if (file.isDirectory()) {
-            for (File f : file.listFiles()) {
-                if (f.getName().startsWith("."))
-                    continue;
-                addFiles(f);
-                System.out.println(f.getName());
+    List<File> files = new ArrayList<File>(queue);
+    for (File f : files) {
+      if (!f.getName().endsWith(".xml")) {
+
+        try {
+          Document doc = new Document();
+
+          String name = f.getPath();
+          String className = null;
+          for (String d : domains) {
+            if (name.indexOf(d) > -1) {
+              className = d;
+              break;
             }
-        } else {
-            queue.add(file);
+          }
 
-        }
-    }
+          try {
+            doc.add(new TextField("text", tika.parse(f)));
+          } catch (Exception e1) {
+            e1.printStackTrace();
+          }
 
-    // index last folder name, before filename itself
-
-    public void indexFileOrDirectory(String fileName) throws IOException {
-        addFiles(new File(fileName));
-
-        List<File> files = new ArrayList<File>(queue);
-        for (File f : files) {
-            if (!f.getName().endsWith(".xml")) {
-
-                try {
-                    Document doc = new Document();
-
-                    String name = f.getPath();
-                    String className = null;
-                    for (String d : domains) {
-                        if (name.indexOf(d) > -1) {
-                            className = d;
-                            break;
-                        }
-                    }
-
-                    try {
-                        doc.add(new TextField("text", tika.parse(f)));
-                    } catch (Exception e1) {
-                        e1.printStackTrace();
-                    }
-
-                    doc.add(new StringField("path", f.getPath(),
-                            Field.Store.YES));
-                    doc.add(new StringField("class", className, Field.Store.YES));
-                    try {
-
-                        indexWriter.addDocument(doc);
-
-                    } catch (Exception e) {
-                        e.printStackTrace();
-                        System.out.println("Could not add: " + f);
-                    }
-                } catch (Exception ee) {
-                    ee.printStackTrace();
-                }
-            } else { // for xml files
-                try {
-                    Document doc = new Document();
-
-                    String name = new String(f.getPath());
-                    String[] nparts = name.split("/");
-                    int len = nparts.length;
-                    name = nparts[len - 2];
-
-                    FileReader fr = new FileReader(f);
-                    doc.add(new TextField("text", fr));
-
-                    doc.add(new StringField("path", f.getPath(),
-                            Field.Store.YES));
-                    doc.add(new StringField("class", name, Field.Store.YES));
-                    try {
-
-                        indexWriter.addDocument(doc);
-
-                    } catch (Exception e) {
-                        e.printStackTrace();
-                        System.out.println("Could not add: " + f);
-                    } finally {
-                        fr.close();
-                    }
-                } catch (Exception ee) {
-                    ee.printStackTrace();
-                }
-            }
+          doc.add(new StringField("path", f.getPath(),
+                  Field.Store.YES));
+          doc.add(new StringField("class", className, Field.Store.YES));
+          try {
 
-            queue.clear();
-        }
-    }
+            indexWriter.addDocument(doc);
 
-    public static String getIndexDir() {
-        try {
-            return new File(".").getCanonicalPath() + INDEX_PATH;
-        } catch (IOException e) {
-            // TODO Auto-generated catch block
+          } catch (Exception e) {
             e.printStackTrace();
-            return null;
+            System.out.println("Could not add: " + f);
+          }
+        } catch (Exception ee) {
+          ee.printStackTrace();
         }
-    }
+      } else { // for xml files
+        try {
+          Document doc = new Document();
 
-    private void initIndexWriter(String dir) throws Exception {
+          String name = new String(f.getPath());
+          String[] nparts = name.split("/");
+          int len = nparts.length;
+          name = nparts[len - 2];
 
-        Directory indexDir = null;
+          FileReader fr = new FileReader(f);
+          doc.add(new TextField("text", fr));
 
-        try {
-            indexDir = FSDirectory.open(new File(dir + INDEX_PATH));
-        } catch (IOException e) {
-            // TODO Auto-generated catch block
+          doc.add(new StringField("path", f.getPath(),
+                  Field.Store.YES));
+          doc.add(new StringField("class", name, Field.Store.YES));
+          try {
+
+            indexWriter.addDocument(doc);
+
+          } catch (Exception e) {
             e.printStackTrace();
+            System.out.println("Could not add: " + f);
+          } finally {
+            fr.close();
+          }
+        } catch (Exception ee) {
+          ee.printStackTrace();
         }
+      }
+
+      queue.clear();
+    }
+  }
+
+  public static String getIndexDir() {
+    try {
+      return new File(".").getCanonicalPath() + INDEX_PATH;
+    } catch (IOException e) {
+      e.printStackTrace();
+      return null;
+    }
+  }
 
-        Version luceneVersion = Version.LUCENE_46;
-        IndexWriterConfig luceneConfig = new IndexWriterConfig(luceneVersion,
-                new StandardAnalyzer(luceneVersion));
-        luceneConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
+  private void initIndexWriter(String dir) throws Exception {
 
-        indexWriter = new IndexWriter(indexDir, luceneConfig);
+    Directory indexDir = null;
 
+    try {
+      indexDir = FSDirectory.open(new File(dir + INDEX_PATH).toPath());
+    } catch (IOException e) {
+      e.printStackTrace();
     }
 
-    void close() {
-        try {
-            indexWriter.commit();
-            indexWriter.close();
-        } catch (IOException e) {
-            // TODO Auto-generated catch block
-            e.printStackTrace();
-        }
+    IndexWriterConfig luceneConfig = new IndexWriterConfig(new StandardAnalyzer());
+    luceneConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
+
+    indexWriter = new IndexWriter(indexDir, luceneConfig);
+
+  }
+
+  void close() {
+    try {
+      indexWriter.commit();
+      indexWriter.close();
+    } catch (IOException e) {
+      e.printStackTrace();
     }
-    
-    public static String getCategoryFromFilePath(String path){
-    	String className = null;
-        for (String d : domains) {
-            if (path.indexOf("/"+d+"/") > -1) {
-                className = d;
-                break;
-            }
-        }
-        return className;
+  }
+
+  public static String getCategoryFromFilePath(String path){
+    String className = null;
+    for (String d : domains) {
+      if (path.indexOf("/"+d+"/") > -1) {
+        className = d;
+        break;
+      }
     }
-
-    public static void main(String[] args) {
-    	ClassifierTrainingSetIndexer indexer = null;
-    	if (args!=null && args.length==1){
-	    	String relativeDirWithTrainingCorpus = args[0];
-	    	// expect corpus relative to 'resource' directory, such as 'training_corpus'
-	    	if (!relativeDirWithTrainingCorpus.startsWith("/"))
-	    		relativeDirWithTrainingCorpus = "/"+relativeDirWithTrainingCorpus;
-	        indexer = new ClassifierTrainingSetIndexer(relativeDirWithTrainingCorpus);
-    	} else {
-    		// expect corpus in the default location, "/training_corpus" in the resource directory
-    		indexer = new ClassifierTrainingSetIndexer();
-    	}
-        try {
-            indexer.indexTrainingSet();
-        } catch (Exception e) {
-            e.printStackTrace();
-        }
-        indexer.close();
+    return className;
+  }
+
+  public static void main(String[] args) {
+    ClassifierTrainingSetIndexer indexer = null;
+    if (args!=null && args.length==1){
+      String relativeDirWithTrainingCorpus = args[0];
+      // expect corpus relative to 'resource' directory, such as 'training_corpus'
+      if (!relativeDirWithTrainingCorpus.startsWith("/"))
+        relativeDirWithTrainingCorpus = "/"+relativeDirWithTrainingCorpus;
+      indexer = new ClassifierTrainingSetIndexer(relativeDirWithTrainingCorpus);
+    } else {
+      // expect corpus in the default location, "/training_corpus" in the resource directory
+      indexer = new ClassifierTrainingSetIndexer();
+    }
+    try {
+      indexer.indexTrainingSet();
+    } catch (Exception e) {
+      e.printStackTrace();
     }
+    indexer.close();
+  }
 
 }
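
The hunk above shows the recurring Lucene 4.x -> 8.x migration pattern in this
commit: FSDirectory.open(...) now takes a java.nio.file.Path, and
IndexWriterConfig is constructed without a Version argument. A minimal,
self-contained sketch of the new-style setup (class name and index path are
illustrative, not taken from the patch):

    import java.io.IOException;
    import java.nio.file.Paths;

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class IndexWriterSetupSketch {
        public static void main(String[] args) throws IOException {
            // Lucene 8.x: Path-based directory instead of the old File-based overload
            Directory indexDir = FSDirectory.open(Paths.get("target/classif"));
            // no Version argument anymore; the analyzer alone configures the writer
            IndexWriterConfig cfg = new IndexWriterConfig(new StandardAnalyzer());
            cfg.setOpenMode(IndexWriterConfig.OpenMode.CREATE); // overwrite an existing index
            try (IndexWriter writer = new IndexWriter(indexDir, cfg)) {
                writer.commit();
            }
        }
    }
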
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/doc_classifier/DocClassifier.java b/opennlp-similarity/src/main/java/opennlp/tools/doc_classifier/DocClassifier.java
index b4abbb9..95769bd 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/doc_classifier/DocClassifier.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/doc_classifier/DocClassifier.java
@@ -44,14 +44,13 @@ import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FSDirectory;
-import org.apache.lucene.util.Version;
 import org.json.JSONObject;
 
 public class DocClassifier {
 	public static final String DOC_CLASSIFIER_KEY = "doc_class";
 	public static String resourceDir = null;
 	public static final Log logger = LogFactory.getLog(DocClassifier.class);
-	private Map<String, Float> scoredClasses = new HashMap<String, Float>();
+	private Map<String, Float> scoredClasses = new HashMap<>();
 	
 
 	public static Float MIN_TOTAL_SCORE_FOR_CATEGORY = 0.3f; //3.0f;
@@ -71,9 +70,9 @@ public class DocClassifier {
 			MAX_CATEG_RESULTS = 2;
 	private static final float BEST_TO_NEX_BEST_RATIO = 2.0f;
 	// to accumulate classif results
-	private CountItemsList<String> localCats = new CountItemsList<String>();
-	private int MAX_TOKENS_TO_FORM = 30;
-	private String CAT_COMPUTING = "computing";
+	private final CountItemsList<String> localCats = new CountItemsList<>();
+	private static final int MAX_TOKENS_TO_FORM = 30;
+	private final String CAT_COMPUTING = "computing";
 	public static final String DOC_CLASSIFIER_MAP = "doc_classifier_map";
 	private static final int MIN_SENTENCE_LENGTH_TO_CATEGORIZE = 60; // if
 	// sentence
@@ -99,7 +98,7 @@ public class DocClassifier {
 			Directory indexDirectory = null;
 
 			try {
-				indexDirectory = FSDirectory.open(new File(INDEX_PATH));
+				indexDirectory = FSDirectory.open(new File(INDEX_PATH).toPath());
 			} catch (IOException e2) {
 				logger.error("problem opening index " + e2);
 			}
@@ -119,14 +118,14 @@ public class DocClassifier {
 	/* returns the class name for a sentence */
 	private List<String> classifySentence(String queryStr) {
 
-		List<String> results = new ArrayList<String>();
+		List<String> results = new ArrayList<>();
 		// too short of a query
 		if (queryStr.length() < MIN_CHARS_IN_QUERY) {
 			return results;
 		}
 
-		Analyzer std = new StandardAnalyzer(Version.LUCENE_46);
-		QueryParser parser = new QueryParser(Version.LUCENE_46, "text", std);
+		Analyzer std = new StandardAnalyzer();
+		QueryParser parser = new QueryParser("text", std);
 		parser.setDefaultOperator(QueryParser.Operator.OR);
 		Query query = null;
 		try {
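
The query side mirrors the indexing change: QueryParser (from the
lucene-queryparser module, package org.apache.lucene.queryparser.classic) is
now constructed with just the default field and the analyzer. A minimal sketch
under those assumptions; the query string is illustrative:

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.queryparser.classic.QueryParser;
    import org.apache.lucene.search.Query;

    public class QueryParserSketch {
        public static void main(String[] args) throws Exception {
            QueryParser parser = new QueryParser("text", new StandardAnalyzer());
            parser.setDefaultOperator(QueryParser.Operator.OR); // match any term
            Query query = parser.parse("document classification similarity");
            System.out.println(query); // e.g. text:document text:classification text:similarity
        }
    }
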
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/doc_classifier/DocClassifierTrainingSetMultilingualExtender.java b/opennlp-similarity/src/main/java/opennlp/tools/doc_classifier/DocClassifierTrainingSetMultilingualExtender.java
index 73b0d43..3b4deb5 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/doc_classifier/DocClassifierTrainingSetMultilingualExtender.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/doc_classifier/DocClassifierTrainingSetMultilingualExtender.java
@@ -17,26 +17,20 @@
 package opennlp.tools.doc_classifier;
 
 import java.io.File;
-import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.net.MalformedURLException;
 import java.net.URL;
 import java.nio.channels.Channels;
 import java.nio.channels.ReadableByteChannel;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
 
-import opennlp.tools.similarity.apps.BingQueryRunner;
-import opennlp.tools.similarity.apps.utils.PageFetcher;
-
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang.StringUtils;
-import org.apache.tika.Tika;
-import org.apache.tika.exception.TikaException;
 import org.json.JSONObject;
 
 /*
@@ -45,8 +39,7 @@ import org.json.JSONObject;
  */
 public class DocClassifierTrainingSetMultilingualExtender {
 	private static final String LANG_TEMPL = "l_a_n_g";
-	private String wikiUrlsTemplate = "https://"+LANG_TEMPL+".wikipedia.org/wiki/";
-	
+
 	public static String projectHome = new File(".").getAbsolutePath().replace("contentinspection/.", "");
 	public static String resourceDir = new File(".").getAbsolutePath().replace("/.", "") + "/src/main/resources";
 	DocClassifier classifier = null;
@@ -58,18 +51,16 @@ public class DocClassifierTrainingSetMultilingualExtender {
 		{"interwiki-de\"><a href=\"", "lang=\"de\""},
 	};
 	
-	private static String[] langs = new String[]{ "fr", "es", "de"};
+	private static final String[] LANGS = new String[]{ "fr", "es", "de"};
 
-	protected ArrayList<File> queue = new ArrayList<File>();
+	protected ArrayList<File> queue = new ArrayList<>();
 
-	protected Tika tika = new Tika();
 	public DocClassifierTrainingSetMultilingualExtender(String resource) {
 
 		classifier = new DocClassifier("", new JSONObject());
 
 	}
 	private int FRAGMENT_LENGTH = 500;
-	
 
 	protected void addFiles(File file) {
 
@@ -82,23 +73,23 @@ public class DocClassifierTrainingSetMultilingualExtender {
 					try {
 						addFiles(f);
 					} catch (Exception e) {
+						e.printStackTrace();
 					}
 				}
 			} else {
 				queue.add(file);
 			}
 		} catch (Exception e) {
-
+			e.printStackTrace();
 		}
 	}
 	
 	public List<String> extractEntriesFromSpecial_Export(String filename){
-		List<String> filteredEntries = new ArrayList<String>();
+		List<String> filteredEntries = new ArrayList<>();
 		String content=null;
 		try {
-			content = FileUtils.readFileToString(new File(filename));
+			content = FileUtils.readFileToString(new File(filename), StandardCharsets.UTF_8);
 		} catch (IOException e) {
-			// TODO Auto-generated catch block
 			e.printStackTrace();
 		}
 		String[] entries = StringUtils.substringsBetween(content, "[[", "]]");
@@ -109,37 +100,32 @@ public class DocClassifierTrainingSetMultilingualExtender {
 			if (e.indexOf(':')>-1)
 				continue;
 			
-			if (e.indexOf(":")>-1)
+			if (e.contains(":"))
 				continue;
 			int endofEntry = e.indexOf('|');
 			if (endofEntry>-1) e = e.substring(0, endofEntry);
 			filteredEntries.add(e);
 		}
 		
-		filteredEntries = new ArrayList<String> (new HashSet<String>(filteredEntries));
+		filteredEntries = new ArrayList<> (new HashSet<>(filteredEntries));
 		return filteredEntries;
 	}
 
 	public void processDirectory(String fileName) throws IOException {
-		List<String[]> report = new ArrayList<String[]>();
-		report.add(new String[] { "filename", "category",
-				"confirmed?" ,
-		});
-		
 		addFiles(new File(fileName));
 	//	FileUtils.deleteDirectory(new File(destinationDir));
 	//	FileUtils.forceMkdir(new File(destinationDir));
 		
 
 		for (File f : queue) {
-			String content = null;
+			String content;
 			try {// should be wiki page
 				//if (f.getName().toString().toLowerCase().indexOf(" wiki")<0 && 
 						
 			//	if (		f.getAbsolutePath().indexOf("wiki-new")<0)
 			//		continue;
 				// should not be a page already derived by a link
-				if (f.getName().toString().toLowerCase().indexOf(".html_")>-1)
+				if (f.getName().toLowerCase().contains(".html_"))
 					continue;
 				
 				System.out.println("processing "+f.getName());
@@ -155,14 +141,14 @@ public class DocClassifierTrainingSetMultilingualExtender {
 						
 						String[] parts  = url.split("/");
 						String multilingualName = parts[parts.length-1];
-						String destFileName = f.getAbsolutePath().replace(sourceDir, destinationDir).replace(" - Wikipedia, the free encyclopedia.html", "-wiki")+"."+langs[langIndex]+"."
+						String destFileName = f.getAbsolutePath().replace(sourceDir, destinationDir).replace(" - Wikipedia, the free encyclopedia.html", "-wiki")+"."+ LANGS[langIndex]+"."
 								+"_"+multilingualName+".html";
 						if (!new File(destFileName).exists()){
 							saveDocFromTheWeb(url, destFileName);
 							System.out.println(f.getName()+ " => "+destFileName);
 						}
 					} else {
-						System.out.println("Unable to extract multilingual urls for'" +langs[langIndex] +"' from file "+ f.getCanonicalPath());
+						System.out.println("Unable to extract multilingual urls for'" + LANGS[langIndex] +"' from file "+ f.getCanonicalPath());
 					}
 					langIndex++;
 				}
@@ -171,52 +157,38 @@ public class DocClassifierTrainingSetMultilingualExtender {
 			}
 		}
 
-
 		queue.clear();
 	}
 
 	private void copyURLToFile(URL url, File file) {
-		ReadableByteChannel rbc=null;
-		try {
-			rbc = Channels.newChannel(url.openStream());
-		} catch (IOException e1) {
-			// TODO Auto-generated catch block
-			e1.printStackTrace();
-		}
-		FileOutputStream fos=null;
-		try {
-			fos = new FileOutputStream(file.getAbsolutePath());
-		} catch (FileNotFoundException e) {
-			// TODO Auto-generated catch block
-			e.printStackTrace();
-		}
-		try {
+		try (ReadableByteChannel rbc = Channels.newChannel(url.openStream());
+				 FileOutputStream fos = new FileOutputStream(file.getAbsolutePath()) ) {
 			fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
 		} catch (IOException e) {
-			// TODO Auto-generated catch block
 			e.printStackTrace();
 		}
-		
 	}
 	
 	public void crawlWikiOnTopic( String filename, String lang, String destinationDir){
 		List<String> entries = extractEntriesFromSpecial_Export(filename);
 		for(String e: entries){
-			String url  = wikiUrlsTemplate.replace(LANG_TEMPL, lang) + e; 
+			String wikiUrlsTemplate = "https://" + LANG_TEMPL + ".wikipedia.org/wiki/";
+			String url  = wikiUrlsTemplate.replace(LANG_TEMPL, lang) + e;
 			saveDocFromTheWeb(url, destinationDir+e.replace(' ', '_')+".html"); 
 		}
 	}
 	
 	public static void saveDocFromTheWeb(String docUrl, String destinationFile) {
-		try {
-			URL url = new URL(docUrl);
-			InputStream is = url.openStream();
-			if (!new File(destinationFile).exists()) {
+		if (!new File(destinationFile).exists()) {
+			try {
 				new File(destinationFile).createNewFile();
+			} catch (IOException e) {
+				throw new RuntimeException(e.getLocalizedMessage(), e);
 			}
+		}
 
-			OutputStream os = new FileOutputStream(destinationFile);
-
+		try (InputStream is = new URL(docUrl).openStream();
+				 OutputStream os = new FileOutputStream(destinationFile)) {
 
 			byte[] b = new byte[2048];
 			int length;
@@ -224,21 +196,10 @@ public class DocClassifierTrainingSetMultilingualExtender {
 			while ((length = is.read(b)) != -1) {
 				os.write(b, 0, length);
 			}
-
-			is.close();
-			os.close();
-		} catch (MalformedURLException e) {
-			// TODO Auto-generated catch block
-			e.printStackTrace();
-		} catch (FileNotFoundException e) {
-			// TODO Auto-generated catch block
-			e.printStackTrace();
 		} catch (IOException e) {
-			// TODO Auto-generated catch block
 			e.printStackTrace();
 		}
 	}
-	
 
 	public static void main(String[] args) {
 		if (args.length < 2) {
@@ -252,17 +213,17 @@ public class DocClassifierTrainingSetMultilingualExtender {
 		DocClassifierTrainingSetMultilingualExtender runner = new DocClassifierTrainingSetMultilingualExtender(null);
 		
 		if (args.length==2) {
-		runner.sourceDir = args[0]; runner.destinationDir = args[1];
-		runner.sourceDir =
-				"/Users/borisgalitsky/Documents/svm_tk_july2015/milkyway/training_corpus_multilingual_verif";
-		runner.destinationDir =
-				"/Users/borisgalitsky/Documents/new_corpus/milkyway/training_corpus_new_multilingual_refined";
-
-		try {
-			runner.processDirectory( runner.sourceDir);
-		} catch (IOException e) {
-			e.printStackTrace();
-		}
+			runner.sourceDir = args[0]; runner.destinationDir = args[1];
+			runner.sourceDir =
+					"/Users/borisgalitsky/Documents/svm_tk_july2015/milkyway/training_corpus_multilingual_verif";
+			runner.destinationDir =
+					"/Users/borisgalitsky/Documents/new_corpus/milkyway/training_corpus_new_multilingual_refined";
+
+			try {
+				runner.processDirectory( runner.sourceDir);
+			} catch (IOException e) {
+				e.printStackTrace();
+			}
 		} else {  
 			runner.crawlWikiOnTopic("/Users/borisgalitsky/Downloads/Wikipedia-20150730124756.xml",
 					//Wikipedia-20150730053619.xml",
@@ -275,10 +236,5 @@ public class DocClassifierTrainingSetMultilingualExtender {
 					"/Users/borisgalitsky/Documents/merged_svm_tk/milkyway/training_corpus_new_multilingual/business/wiki/wiki-new/");
 		}
 
-
 	}
 }
-
-/*
-/Users/borisgalitsky/Documents/workspace/deepContentInspection/src/main/resources/docs/netflix
- */
\ No newline at end of file
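
The saveDocFromTheWeb rewrite above replaces hand-rolled close() calls with
try-with-resources. On the Java 11 language level this commit targets, the
same download loop could be written even more compactly with
java.nio.file.Files; an alternative sketch, not part of the commit:

    import java.io.IOException;
    import java.io.InputStream;
    import java.net.URL;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    public class DownloadSketch {
        static void saveDocFromTheWeb(String docUrl, String destinationFile) throws IOException {
            try (InputStream is = new URL(docUrl).openStream()) {
                // streams the response to disk, creating or replacing the target file
                Files.copy(is, Path.of(destinationFile), StandardCopyOption.REPLACE_EXISTING);
            }
        }
    }
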
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/enron_email_recognizer/EmailNormalizer.java b/opennlp-similarity/src/main/java/opennlp/tools/enron_email_recognizer/EmailNormalizer.java
index 01aaa12..a501641 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/enron_email_recognizer/EmailNormalizer.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/enron_email_recognizer/EmailNormalizer.java
@@ -2,12 +2,14 @@ package opennlp.tools.enron_email_recognizer;
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 
 import org.apache.commons.io.FileUtils;
 
 public class EmailNormalizer {
-	protected ArrayList<File> queue = new ArrayList<File>();
+
+	protected ArrayList<File> queue = new ArrayList<>();
 	
 	protected void addFilesPos(File file) {
 
@@ -47,20 +49,15 @@ public class EmailNormalizer {
 		"@", "<", ">"
 	};
 
-	private String OrigFolder = "maildir_ENRON_EMAILS", NewFolder = "data";
-
-	
-	
 	public void normalizeAndWriteIntoANewFile(File f){
 		String content="";
-        try {
-	        content = FileUtils.readFileToString(f);
-        } catch (IOException e) {
-	        // TODO Auto-generated catch block
-	        e.printStackTrace();
-        }
+		try {
+			content = FileUtils.readFileToString(f, StandardCharsets.UTF_8);
+		} catch (IOException e) {
+			e.printStackTrace();
+		}
 		String[] lines = content.split("\n");
-		StringBuffer buf = new StringBuffer();
+		StringBuilder buf = new StringBuilder();
 		for(String l: lines){
 			boolean bAccept = true;
 			for(String h: headers){
@@ -74,14 +71,15 @@ public class EmailNormalizer {
 				}
 			}
 			if (bAccept)
-				buf.append(l+"\n");
+				buf.append(l).append("\n");
 		}
-		String directoryNew = f.getAbsolutePath().replace(OrigFolder, NewFolder);
+		String origFolder = "maildir_ENRON_EMAILS";
+		String newFolder = "data";
+		String directoryNew = f.getAbsolutePath().replace(origFolder, newFolder);
 		try {
 			String fullFileNameNew = directoryNew +"txt";
-	        FileUtils.writeStringToFile(new File(fullFileNameNew), buf.toString());
+	        FileUtils.writeStringToFile(new File(fullFileNameNew), buf.toString(), StandardCharsets.UTF_8);
         } catch (IOException e) {
-	        // TODO Auto-generated catch block
 	        e.printStackTrace();
         }
 	}
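
This hunk (and the one for EmailTrainingSetFormer below) adds an explicit
StandardCharsets.UTF_8 argument: the charset-less commons-io overloads fall
back to the platform default encoding and are deprecated in recent commons-io
releases. A minimal sketch of the pattern; the file names are hypothetical:

    import java.io.File;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    import org.apache.commons.io.FileUtils;

    public class CharsetIoSketch {
        public static void main(String[] args) throws IOException {
            File source = new File("mail.txt"); // hypothetical input file
            String content = FileUtils.readFileToString(source, StandardCharsets.UTF_8);
            FileUtils.writeStringToFile(new File("mail.normalized.txt"),
                    content, StandardCharsets.UTF_8);
        }
    }
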
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/enron_email_recognizer/EmailTrainingSetFormer.java b/opennlp-similarity/src/main/java/opennlp/tools/enron_email_recognizer/EmailTrainingSetFormer.java
index 9cb713f..4aabaf6 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/enron_email_recognizer/EmailTrainingSetFormer.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/enron_email_recognizer/EmailTrainingSetFormer.java
@@ -2,6 +2,7 @@ package opennlp.tools.enron_email_recognizer;
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 import java.util.List;
 
 import org.apache.commons.io.FileUtils;
@@ -15,17 +16,14 @@ public class EmailTrainingSetFormer {
 	//enron_with_categories/5/70665.cats:4,10,1
 	public static void  createPosTrainingSet(){
 		try {
-			List<String> lines = FileUtils.readLines(new File(dataDir+fileListFile));
+			List<String> lines = FileUtils.readLines(new File(dataDir+fileListFile), StandardCharsets.UTF_8);
 			for(String l: lines){
-				Integer endOfFname = l.indexOf('.'),
-						startOfFname = l.lastIndexOf('/');
+				int endOfFname = l.indexOf('.'), startOfFname = l.lastIndexOf('/');
 				String filenameOld =dataDir+ l.substring(0, endOfFname)+".txt";
-
 				String content = normalize(new File(filenameOld));
-
 				String filenameNew = destinationDir  + l.substring(startOfFname+1, endOfFname)+".txt";
 				//FileUtils.copyFile(new File(filenameOld), new File(filenameNew));
-				FileUtils.writeStringToFile(new File(filenameNew), content);
+				FileUtils.writeStringToFile(new File(filenameNew), content, StandardCharsets.UTF_8);
 			}
 		} catch (Exception e) {
 			e.printStackTrace();
@@ -37,12 +35,12 @@ public class EmailTrainingSetFormer {
 	public static String normalize(File f){
 		String content="";
 		try {
-			content = FileUtils.readFileToString(f);
+			content = FileUtils.readFileToString(f, StandardCharsets.UTF_8);
 		} catch (IOException e) {
 			e.printStackTrace();
 		}
 		String[] lines = content.split("\n");
-		StringBuffer buf = new StringBuffer();
+		StringBuilder buf = new StringBuilder();
 		for(String l: lines){
 			boolean bAccept = true;
 			for(String h: EmailNormalizer.headers){
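
Both email utilities also swap StringBuffer for StringBuilder. The two classes
share an API, but StringBuilder drops per-call synchronization, the idiomatic
choice for single-threaded string assembly; chaining append calls, as the
patch does, also avoids building temporary Strings. A tiny sketch with
made-up input:

    public class AppendSketch {
        public static void main(String[] args) {
            StringBuilder buf = new StringBuilder();
            for (String line : new String[] { "Subject: test", "body line" }) {
                buf.append(line).append("\n"); // chained appends, no temporaries
            }
            System.out.print(buf);
        }
    }
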
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/fca/BasicLevelMetrics.java b/opennlp-similarity/src/main/java/opennlp/tools/fca/BasicLevelMetrics.java
index a391557..1757537 100755
--- a/opennlp-similarity/src/main/java/opennlp/tools/fca/BasicLevelMetrics.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/fca/BasicLevelMetrics.java
@@ -18,26 +18,20 @@
 package opennlp.tools.fca;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.HashSet;
-import java.util.LinkedHashSet;
-import java.util.List;
 import java.util.Set;
 
 import org.apache.commons.collections.ListUtils;
 
-
 public class BasicLevelMetrics {
-
 	
 	ConceptLattice cl;	
-	ArrayList<ArrayList<Integer>> attributesExtent  = null; 
+	ArrayList<ArrayList<Integer>> attributesExtent;
 	ArrayList<ArrayList<Integer>> objectsIntent  = null; 
 	ArrayList<Integer> attributes = null; 
-	double[][] objectsSimilarityJ = null;
-	double [][] objectsSimilaritySMC = null; 
-					
-	
+	private final double[][] objectsSimilarityJ;
+	private final double [][] objectsSimilaritySMC;
+
 	public BasicLevelMetrics (ConceptLattice cl){
 		this.cl = cl;
 		this.attributesExtent = null;
@@ -46,9 +40,9 @@ public class BasicLevelMetrics {
 	}
 	
 	public void setUp(){
-		attributesExtent = new ArrayList<ArrayList<Integer>>();
-		objectsIntent = new ArrayList<ArrayList<Integer>>();
-		attributes = new ArrayList<Integer>();
+		attributesExtent = new ArrayList<>();
+		objectsIntent = new ArrayList<>();
+		attributes = new ArrayList<>();
 		
 		for (int i=0;i<cl.attributeCount;i++){
 			attributesExtent.add((ArrayList<Integer>) cl.getAttributeExtByID(i));
@@ -56,10 +50,10 @@ public class BasicLevelMetrics {
 		}	
 		
 		for (int i=0;i<cl.objectCount;i++){
-			objectsIntent.add((ArrayList<Integer>) cl.getObjectIntByID(i));
+			objectsIntent.add(cl.getObjectIntByID(i));
 		}
 	
-		double [] buf = new double[2];
+		double [] buf;
 		
 		for (int i = 0; i < cl.objectCount; i++){
 			for (int j = i + 1 ; j < cl.objectCount; j++){
@@ -83,8 +77,7 @@ public class BasicLevelMetrics {
 	//Utility functions for  Similarity approach (S)
 	public double simSMC (ArrayList<Integer> intent1, ArrayList<Integer>intent2){
 		int tp = (ListUtils.intersection(intent1,intent2)).size();
-		ArrayList<Integer> fnlst = new ArrayList<Integer>();
-		fnlst.addAll(this.attributes);
+		ArrayList<Integer> fnlst = new ArrayList<>(this.attributes);
 		fnlst.removeAll(ListUtils.union(intent1,intent2)); 
 		int fn = fnlst.size();
 		return (this.attributes.size()>0) ? 1.*(tp + fn)/this.attributes.size() : 0;
@@ -95,18 +88,15 @@ public class BasicLevelMetrics {
 	}
 	
 	public  double [] simJ_SMC(ArrayList<Integer> intent1, ArrayList<Integer>intent2){
-		double simJ = 0;
-		double simSMC = 0;	
-		Set<Integer> intersection = new HashSet<Integer>(); 
-		intersection.addAll(intent1);
+		double simJ;
+		double simSMC;
+		Set<Integer> intersection = new HashSet<>(intent1);
 		intersection.retainAll(intent2);
 		
-		Set<Integer> union = new HashSet<Integer>(); 
+		Set<Integer> union = new HashSet<>(); 
 		union.addAll(intent1);
 		union.addAll(intent2);
-		int fn = 0;
-		Set<Integer> unionOut = new HashSet<Integer>();
-		unionOut.addAll(this.attributes);
+		Set<Integer> unionOut = new HashSet<>(this.attributes);
 		unionOut.removeAll(union);	
 		simSMC = (this.attributes.size() > 0) ? 1.*(intersection.size() + unionOut.size())/this.attributes.size() : 0;
 		simJ = (union.size() > 0) ? 1.*intersection.size()/union.size() : 0;
@@ -151,8 +141,7 @@ public class BasicLevelMetrics {
 	}
 	
 	public double minCohJ (FormalConcept c){
-		double min = Integer.MAX_VALUE,
-				val = 0;
+		double min = Integer.MAX_VALUE, val;
 		
 		for (Integer i:c.extent){
 			for (Integer j: c.extent){
@@ -165,8 +154,7 @@ public class BasicLevelMetrics {
 	}
 	
 	public double minCohSMC (FormalConcept c){
-		double min = Integer.MAX_VALUE,
-				val = 0;
+		double min = Integer.MAX_VALUE, val;
 		for (Integer i:c.extent){
 			for (Integer j: c.extent){
 					val = objectsSimilaritySMC[i][j];
@@ -183,7 +171,7 @@ public class BasicLevelMetrics {
 		double sum = 0;
 		Set<Integer> upperNeighbors =c.parents;
 		int rightNeighborsNumber = 0;
-		float truthDegree = 0;
+		float truthDegree;
 		for (Integer i: upperNeighbors){
 			if (c.cohAvgJ > cl.conceptList.get(i).cohAvgJ){
 				rightNeighborsNumber++;
@@ -204,7 +192,7 @@ public class BasicLevelMetrics {
 		double sum = 0;
 		Set<Integer> upperNeighbors =c.parents;
 		int rightNeighborsNumber = 0;
-		float truthDegree = 0;
+		float truthDegree;
 		for (Integer i: upperNeighbors){
 			if (c.cohMinJ > cl.conceptList.get(i).cohMinJ){
 				rightNeighborsNumber++;
@@ -222,10 +210,9 @@ public class BasicLevelMetrics {
 	
 	public double upperCohMinByAvgJ(FormalConcept c,float tetta){
 		//min alpha whth average cohesion J
-		double max = Integer.MIN_VALUE,
-				val = 0; 
+		double max = Integer.MIN_VALUE, val;
 		int rightNeighborsNumber = 0;
-		float truthDegree = 0;
+		float truthDegree;
 		Set<Integer> upperNeighbors =c.parents;
 		for (Integer i: upperNeighbors){
 			if (c.cohAvgJ > cl.conceptList.get(i).cohAvgJ){
@@ -246,10 +233,9 @@ public class BasicLevelMetrics {
 	
 	public double upperCohMinByMinJ(FormalConcept c,float tetta){
 		//min alpha whth average cohesion J
-		double max = Integer.MIN_VALUE,
-				val = 0; 
+		double max = Integer.MIN_VALUE, val;
 		int rightNeighborsNumber = 0;
-		float truthDegree = 0;
+		float truthDegree;
 		Set<Integer> upperNeighbors =c.parents;
 		for (Integer i: upperNeighbors){
 			if (c.cohMinJ > cl.conceptList.get(i).cohMinJ){
@@ -274,7 +260,7 @@ public class BasicLevelMetrics {
 		double sum = 0;
 		Set<Integer> upperNeighbors =c.parents;
 		int rightNeighborsNumber = 0;
-		float truthDegree = 0;
+		float truthDegree;
 		for (Integer i: upperNeighbors){
 			if (c.cohAvgSMC > cl.conceptList.get(i).cohAvgSMC){
 				rightNeighborsNumber++;
@@ -294,7 +280,7 @@ public class BasicLevelMetrics {
 		double sum = 0;
 		Set<Integer> upperNeighbors =c.parents;
 		int rightNeighborsNumber = 0;
-		float truthDegree = 0;
+		float truthDegree;
 		for (Integer i: upperNeighbors){
 			if (c.cohMinSMC > cl.conceptList.get(i).cohMinSMC){
 				rightNeighborsNumber++;
@@ -313,10 +299,9 @@ public class BasicLevelMetrics {
 	public double upperCohMinByAvgSMC(FormalConcept c,float tetta){
 		//min alpha whth average cohesion J
 		
-		double max = Integer.MIN_VALUE,
-				val = 0; 
+		double max = Integer.MIN_VALUE, val;
 		int rightNeighborsNumber = 0;
-		float truthDegree = 0;
+		float truthDegree;
 		Set<Integer> upperNeighbors =c.parents;
 		for (Integer i: upperNeighbors){
 			if (c.cohAvgSMC > cl.conceptList.get(i).cohAvgSMC){
@@ -337,10 +322,9 @@ public class BasicLevelMetrics {
 	
 	public double upperCohMinByMinSMC(FormalConcept c,float tetta){
 		//min alpha whth average cohesion J
-		double max = Integer.MIN_VALUE,
-				val = 0; 
+		double max = Integer.MIN_VALUE, val;
 		int rightNeighborsNumber = 0;
-		float truthDegree = 0;
+		float truthDegree;
 		Set<Integer> upperNeighbors =c.parents;
 		for (Integer i: upperNeighbors){
 			if (c.cohMinSMC > cl.conceptList.get(i).cohMinSMC){
@@ -365,7 +349,7 @@ public class BasicLevelMetrics {
 		double sum = 0;
 		Set<Integer> lowerNeighbors =c.childs;
 		int rightNeighborsNumber = 0;
-		float truthDegree = 0;
+		float truthDegree;
 		for (Integer i: lowerNeighbors){
 			if (c.cohAvgJ < cl.conceptList.get(i).cohAvgJ){
 				rightNeighborsNumber++;
@@ -384,7 +368,7 @@ public class BasicLevelMetrics {
 	public double lowerCohAvgByMinJ(FormalConcept c,float tetta){
 		double sum = 0;
 		int rightNeighborsNumber = 0;
-		float truthDegree = 0;
+		float truthDegree;
 		Set<Integer> lowerNeighbors =c.childs;
 		for (Integer i: lowerNeighbors){
 			if (c.cohMinJ< cl.conceptList.get(i).cohMinJ){
@@ -403,10 +387,9 @@ public class BasicLevelMetrics {
 	}
 	
 	public double lowerCohMinByAvgJ(FormalConcept c,float tetta){
-		double min = Integer.MAX_VALUE,
-				val = 0;
+		double min = Integer.MAX_VALUE, val;
 		int rightNeighborsNumber = 0;
-		float truthDegree = 0;
+		float truthDegree;
 		Set<Integer> lowerNeighbors =c.childs;
 		for (Integer i: lowerNeighbors){
 			if (c.cohAvgJ< cl.conceptList.get(i).cohAvgJ){
@@ -426,10 +409,9 @@ public class BasicLevelMetrics {
 	}
 	
 	public double lowerCohMinByMinJ(FormalConcept c,float tetta){
-		double min = Integer.MAX_VALUE,
-				val = 0;
+		double min = Integer.MAX_VALUE, val;
 		int rightNeighborsNumber = 0;
-		float truthDegree = 0;
+		float truthDegree;
 		Set<Integer> lowerNeighbors =c.childs;
 		for (Integer i: lowerNeighbors){
 			if (c.cohMinJ< cl.conceptList.get(i).cohMinJ){
@@ -451,7 +433,7 @@ public class BasicLevelMetrics {
 	public double lowerCohAvgByAvgSMC(FormalConcept c,float tetta){
 		double sum = 0;
 		int rightNeighborsNumber = 0;
-		float truthDegree = 0;
+		float truthDegree;
 		Set<Integer> lowerNeighbors =c.childs;
 		for (Integer i: lowerNeighbors){
 			if (c.cohAvgSMC < cl.conceptList.get(i).cohAvgSMC){
@@ -471,7 +453,7 @@ public class BasicLevelMetrics {
 	public double lowerCohAvgByMinSMC(FormalConcept c,float tetta){
 		double sum = 0;
 		int rightNeighborsNumber = 0;
-		float truthDegree = 0;
+		float truthDegree;
 		Set<Integer> lowerNeighbors =c.childs;
 		for (Integer i: lowerNeighbors){
 			if (c.cohMinSMC < cl.conceptList.get(i).cohMinSMC){
@@ -489,10 +471,9 @@ public class BasicLevelMetrics {
 	}
 	
 	public double lowerCohMinByAvgSMC(FormalConcept c,float tetta){
-		double min = Integer.MAX_VALUE,
-				val = 0;
+		double min = Integer.MAX_VALUE, val;
 		int rightNeighborsNumber = 0;
-		float truthDegree = 0;
+		float truthDegree;
 		Set<Integer> lowerNeighbors =c.childs;
 		for (Integer i: lowerNeighbors){
 			if (c.cohAvgSMC<=cl.conceptList.get(i).cohAvgSMC){
@@ -513,10 +494,9 @@ public class BasicLevelMetrics {
 	
 	
 	public double lowerCohMinByMinSMC(FormalConcept c,float tetta){
-		double min = Integer.MAX_VALUE,
-				val = 0;
+		double min = Integer.MAX_VALUE, val;
 		int rightNeighborsNumber = 0;
-		float truthDegree = 0;
+		float truthDegree;
 		Set<Integer> lowerNeighbors =c.childs;
 		for (Integer i: lowerNeighbors){
 			if (c.cohMinSMC< cl.conceptList.get(i).cohMinSMC){
@@ -578,15 +558,14 @@ public class BasicLevelMetrics {
 		
 		ArrayList<Integer> attrExtent;
 		Set<Integer> intersection;
-		double sum = 0;
+		double sum;
 		for (FormalConcept c: cl.conceptList){
 			sum = 0;
 			for (Integer i: c.intent){
-				intersection = new HashSet<Integer>();
-				intersection.addAll(c.extent);
+				intersection = new HashSet<>(c.extent);
 				attrExtent = attributesExtent.get(i);				
 				intersection.retainAll(attrExtent);
-				sum+=(double)intersection.size()*1./attrExtent.size();
+				sum+= (double) intersection.size() /attrExtent.size();
 				}	
 			c.blCV = Double.isNaN(sum) ? 0 : sum;
 			
@@ -599,16 +578,15 @@ public class BasicLevelMetrics {
 		
 		ArrayList<Integer> attrExtent;
 		Set<Integer> intersection;
-		double sum = 0;
+		double sum;
 		int latticeSize = cl.conceptList.size();
 		for (FormalConcept c: cl.conceptList){
 			sum = 0;
 			for (int i = 0; i < cl.attributeCount; i++){
-				intersection = new HashSet<Integer>();
-				intersection.addAll(c.extent);
+				intersection = new HashSet<>(c.extent);
 				attrExtent = attributesExtent.get(i);				
 				intersection.retainAll(attrExtent);
-				sum+=(double)intersection.size()*1./attrExtent.size()*intersection.size()/c.extent.size();
+				sum+=(double)intersection.size()/attrExtent.size()*intersection.size()/c.extent.size();
 				}	
 			c.blCFC = Double.isNaN(sum) ? 0 : sum;
 		}
@@ -621,18 +599,17 @@ public class BasicLevelMetrics {
 		
 		ArrayList<Integer> attrExtent;
 		Set<Integer> intersection;
-		double sum = 0;
+		double sum;
 		int attrSize = cl.objectCount;
 		int cExtentSize = 0;
 		for (FormalConcept c: cl.conceptList){
 			sum = 0;
 			for (int i = 0; i < cl.attributeCount; i++){
-				intersection = new HashSet<Integer>();
-				intersection.addAll(c.extent);
+				intersection = new HashSet<>(c.extent);
 				cExtentSize = c.extent.size();
 				attrExtent = attributesExtent.get(i);				
 				intersection.retainAll(attrExtent);
-				sum+=(double)Math.pow(intersection.size()*1./cExtentSize,2)-Math.pow(1.*attrExtent.size()/attrSize,2);
+				sum += Math.pow(intersection.size()*1./cExtentSize,2)-Math.pow(1.*attrExtent.size()/attrSize,2);
 				}	
 			c.blCU =Double.isNaN(1.*cExtentSize/attrSize*sum) ? 0 : 1.*cExtentSize/attrSize*sum;
 		}					
@@ -643,8 +620,8 @@ public class BasicLevelMetrics {
 		
 		if (attributesExtent == null)
 			this.setUp();
-		ArrayList<Integer> attributes = new ArrayList<Integer>();
-		ArrayList<Integer> outOfIntent = new ArrayList<Integer>();
+		ArrayList<Integer> attributes = new ArrayList<>();
+		ArrayList<Integer> outOfIntent;
 		Set<Integer> intersection;
 		ArrayList<Integer> attrExtent;
 		double sum, term;
@@ -654,12 +631,10 @@ public class BasicLevelMetrics {
 		}
 		for (FormalConcept c: cl.conceptList){
 			sum = 0;
-			outOfIntent = new ArrayList<Integer>();
-			outOfIntent.addAll(attributes);
+			outOfIntent = new ArrayList<>(attributes);
 			outOfIntent.removeAll(c.intent);
 			for (Integer y: outOfIntent){
-				intersection = new HashSet<Integer>();
-				intersection.addAll(c.extent);	
+				intersection = new HashSet<>(c.extent);
 				attrExtent = attributesExtent.get(y);				
 				intersection.retainAll(attrExtent);
 				term = 1.*intersection.size()/c.extent.size();
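
A note on the arithmetic being changed in the hunks above: the old code forced
floating-point division with the '*1.' multiplication trick, while the new code
uses an explicit (double) cast, and the two-step HashSet copy (empty
constructor plus addAll) collapses into the copy constructor. A minimal,
self-contained sketch of the resulting idiom; the class name and sample values
are illustrative, not taken from the codebase:

    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class OverlapRatio {
        // Fraction of attrExtent covered by extent. The cast binds to size()
        // before the division, so 2/4 yields 0.5 instead of truncating to 0.
        static double ratio(Set<Integer> extent, List<Integer> attrExtent) {
            Set<Integer> intersection = new HashSet<>(extent); // copy first: retainAll mutates
            intersection.retainAll(attrExtent);
            return (double) intersection.size() / attrExtent.size();
        }

        public static void main(String[] args) {
            System.out.println(ratio(Set.of(1, 2, 3), List.of(2, 3, 4, 5))); // 0.5
        }
    }
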
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/fca/ConceptLattice.java b/opennlp-similarity/src/main/java/opennlp/tools/fca/ConceptLattice.java
index 6bd546c..ed21650 100755
--- a/opennlp-similarity/src/main/java/opennlp/tools/fca/ConceptLattice.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/fca/ConceptLattice.java
@@ -25,12 +25,10 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashSet;
 import java.util.List;
-import java.util.ListIterator;
 import java.util.Set;
 
 import org.apache.commons.collections.ListUtils;
 
-
 public class ConceptLattice {
 	int objectCount;
 	int attributeCount;
@@ -71,7 +69,7 @@ public class ConceptLattice {
 		this.attributeCount = fr.getAttributesCount();
 		this.binaryContext = fr.getBinaryContext();
 		
-		this.conceptList = new ArrayList<FormalConcept>();
+		this.conceptList = new ArrayList<>();
 		FormalConcept bottom = new FormalConcept();
 		bottom.setPosition(0);
 		conceptList.add(bottom);
@@ -118,7 +116,7 @@ public class ConceptLattice {
 	
 	public int AddIntent(List<Integer> intent,LinkedHashSet<Integer>extent, int generator) {
 		//System.out.println("add intent "+intent+extent+generator);
-		int generator_tmp = GetMaximalConcept(intent, generator);	
+		int generator_tmp = GetMaximalConcept(intent, generator);
 		generator = generator_tmp;
 		//System.out.println("	max gen "+generator);
 		if (conceptList.get(generator).getIntent().equals(intent)) {
@@ -127,11 +125,11 @@ public class ConceptLattice {
 			return generator;
 		}
 		Set<Integer> generatorParents = conceptList.get(generator).getParents();
-		Set<Integer> newParents = new HashSet<Integer>();
+		Set<Integer> newParents = new HashSet<>();
 		for (int candidate : generatorParents) {
 			if (!intent.containsAll(conceptList.get(candidate).getIntent())) {
 				List<Integer> intersection = ListUtils.intersection(intent, conceptList.get(candidate).getIntent());				
-				LinkedHashSet<Integer> new_extent = new LinkedHashSet<Integer>();
+				LinkedHashSet<Integer> new_extent = new LinkedHashSet<>();
 				new_extent.addAll(conceptList.get(candidate).extent);
 				new_extent.addAll(extent);
 				candidate = AddIntent(intersection,new_extent,candidate);
@@ -158,7 +156,7 @@ public class ConceptLattice {
 		
 		FormalConcept newConcept = new FormalConcept();
 		newConcept.setIntent(intent);
-		LinkedHashSet<Integer> new_extent = new LinkedHashSet<Integer>();
+		LinkedHashSet<Integer> new_extent = new LinkedHashSet<>();
 		new_extent.addAll(conceptList.get(generator).extent);
 		new_extent.addAll(extent);
 		newConcept.addExtents(new_extent);
@@ -222,20 +220,15 @@ public class ConceptLattice {
 		LinkedHashSet<Integer> obj;
 		ArrayList<Integer> intent;
 		// attributes list
-		ArrayList<Integer> attributes = new ArrayList<Integer>();
+		ArrayList<Integer> attributes = new ArrayList<>();
 		for (int i = 0; i <attributeCount; i++){
 			attributes.add(i);
 		}
-		// objects set
-		LinkedHashSet<Integer> objects = new LinkedHashSet<Integer>();
-		for (int i = 0; i <objectCount; i++){
-			objects.add(i);
-		}
-		
 		this.conceptList.get(0).setIntent(attributes);
+
 		for (int i = 0; i < objectCount; i++){
-			intent = new ArrayList<Integer>();
-			obj = new LinkedHashSet<Integer>();
+			intent = new ArrayList<>();
+			obj = new LinkedHashSet<>();
 			obj.add(i);
 			for (int j = 0; j < attributeCount; j++){
 				if (binaryContext[i][j] == 1){
@@ -247,14 +240,12 @@ public class ConceptLattice {
 	}
 	
 	public static void main(String []args) throws FileNotFoundException, IOException {
-
 		ConceptLattice cl = new ConceptLattice("sports.cxt", true);
 		cl.printLattice();	
 	}
 	
-	
 	public List<Integer> getAttributeExtByID(int ind){
-		ArrayList<Integer> attrExt = new ArrayList<Integer>();
+		ArrayList<Integer> attrExt = new ArrayList<>();
 		for (int i=0;i<objectCount; i++)
 			if (binaryContext[i][ind]==1)
 				attrExt.add(i); 
@@ -262,7 +253,7 @@ public class ConceptLattice {
 	}
 	
 	public ArrayList<Integer> getObjectIntByID(int ind){
-		ArrayList<Integer> objInt = new ArrayList<Integer>();
+		ArrayList<Integer> objInt = new ArrayList<>();
 		for (int i=0;i<attributeCount; i++)
 			if (binaryContext[ind][i]==1)
 				objInt.add(i); 
@@ -285,14 +276,10 @@ public class ConceptLattice {
 		return conceptList.size();
 	}
 
-	
 	public void printBinContext() {
 		for (int i = 0; i < binaryContext.length; i++ ){
 				System.out.println(Arrays.toString(binaryContext[i]));
 		}	
 	}
-
-
 	
 }
-
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/jsmlearning/IntersectionSetBuilder.java b/opennlp-similarity/src/main/java/opennlp/tools/jsmlearning/IntersectionSetBuilder.java
index e1d748e..642309c 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/jsmlearning/IntersectionSetBuilder.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/jsmlearning/IntersectionSetBuilder.java
@@ -51,7 +51,7 @@ The set of attributes in analysis is hard-coded
  */
 public class IntersectionSetBuilder{
 	private FeatureSpaceCoverageProcessor distProcessorPos, distProcessorNeg;
-	private float percentageOfAllowedSetCover = 0.001f;
+	private final float percentageOfAllowedSetCover = 0.001f;
 	//The set of attributes in analysis is hard-coded
 	String[] fieldsToAggr = new String[]{
 			"reason_code",	"risk_rating", "service_type", 	"device_match_result", 	"device_result", 	"http_referer", 	"device_id_reason_code",
@@ -60,7 +60,7 @@ public class IntersectionSetBuilder{
 
 
 	};
-	public IntersectionSetBuilder() {};
+	public IntersectionSetBuilder() {}
 	
 	/*
 	 * Takes a file generated by public String ruleFormer(String dataFile)
@@ -81,9 +81,9 @@ public class IntersectionSetBuilder{
 		negativeSet.remove(0); positiveSet.remove(0);
 		
 		List<String[]> ruleStrings = ProfileReaderWriter.readProfiles(ruleFile);
-		List<Map<String, String>> rules = new ArrayList<Map<String, String>>(), dedupedRules = new ArrayList<Map<String, String>>() ;
+		List<Map<String, String>> rules = new ArrayList<>(), dedupedRules = new ArrayList<>() ;
 		for(String[] l : ruleStrings){
-			Map<String, String> rule = new HashMap<String, String>();
+			Map<String, String> rule = new HashMap<>();
 			String lstr = l[0].substring(1, l[0].length()-1);
 			String[] ruleStr= lstr.split(",");
 			for(String attr_valueStr: ruleStr){
@@ -113,7 +113,7 @@ public class IntersectionSetBuilder{
 		
 		rules = dedupedRules;
 
-		List<String[]> output = new ArrayList<String[]>();
+		List<String[]> output = new ArrayList<>();
 		output.add(new String[]{"rule", "# covers positive", "# covers negative"});
 		for(Map<String, String> rule: rules){
 			int countCoverNeg = 0, countCoverPos=0;
@@ -128,7 +128,7 @@ public class IntersectionSetBuilder{
 				}
 
 			}
-			output.add(new String[]{rule.toString(), new Integer(countCoverPos).toString(), new Integer(countCoverNeg).toString()});	
+			output.add(new String[]{rule.toString(), Integer.toString(countCoverPos), Integer.toString(countCoverNeg)});
 
 		}
 		ProfileReaderWriter.writeReport(output, ruleFile+"Verif1.csv");
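
Context for the new Integer(...) replacements in this hunk: the boxed-type
constructors have been deprecated since Java 9 (and flagged for removal since
Java 16), which matters now that the component targets Java 11. A small
runnable sketch; the variable name is illustrative:

    public class BoxingDemo {
        public static void main(String[] args) {
            int countCoverPos = 42;
            // Old: new Integer(countCoverPos).toString() allocated a wrapper
            // object just to call toString() on it (deprecated since Java 9).
            // New: the static call produces the same string without the allocation.
            String s = Integer.toString(countCoverPos);
            System.out.println(s); // prints 42
        }
    }
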
@@ -153,7 +153,7 @@ public class IntersectionSetBuilder{
 		List<Map<String, String>> intersections = formIntersectionAmongMembersOfTrainingSetAndVerifyThatDoesNotCoverOppositeTrainingS(negativeSet, positiveSet);
 		List<Map<String, String>> superIntersections = formIntersections(intersections, negativeSet, positiveSet);
 
-		List<String[]> output = new ArrayList<String[]>();
+		List<String[]> output = new ArrayList<>();
 		for(Map<String, String> rule: superIntersections){
 			int countCover = 0;
 			for(String[] line: positiveSet){
@@ -161,7 +161,7 @@ public class IntersectionSetBuilder{
 					countCover++;
 				}
 			}
-			output.add(new String[]{rule.toString(), new Integer(countCover).toString()});	
+			output.add(new String[]{rule.toString(), Integer.toString(countCover)});
 
 		}
 		String outputFile = "learnedRulesForNegativeSetJune23-1.csv";
@@ -171,7 +171,7 @@ public class IntersectionSetBuilder{
 	}
 
 	private List<Map<String, String>> formIntersections(List<Map<String, String>> intersectionsIn, List<String[]> negativeSet, List<String[]> positiveSet) {
-		List<Map<String, String>> intersectionsNew = new ArrayList<Map<String, String>>();
+		List<Map<String, String>> intersectionsNew = new ArrayList<>();
 		for(int i=0; i<intersectionsIn.size(); i++){
 			for(int j=i+1; j<intersectionsIn.size(); j++){
 				Map<String, String> intersection = distProcessorNeg.computeIntersection(intersectionsIn.get(i), intersectionsIn.get(j));
@@ -190,7 +190,7 @@ public class IntersectionSetBuilder{
 				if (!(cover<this.percentageOfAllowedSetCover))
 					continue;
 
-				List<Map<String, String>> rulesToBeRemoved = new ArrayList<Map<String, String>>();
+				List<Map<String, String>> rulesToBeRemoved = new ArrayList<>();
 				boolean nothingCoversThisRule = true;
 				for(Map<String, String> intersChecker: intersectionsIn){ // more general rule covers more specific
 					if (distProcessorNeg.ruleCoversRule(intersChecker, intersection)){
@@ -212,7 +212,7 @@ public class IntersectionSetBuilder{
 	}
 
 	private List<Map<String, String>> formIntersectionAmongMembersOfTrainingSetAndVerifyThatDoesNotCoverOppositeTrainingS(List<String[]> negativeSet, List<String[]> positiveSet){
-		List<Map<String, String>> intersections = new ArrayList<Map<String, String>>();
+		List<Map<String, String>> intersections = new ArrayList<>();
 
 		for(int i=0; i<negativeSet.size() && i<1000; i++){
 			for(int j=i+1; j<negativeSet.size(); j++){
@@ -248,7 +248,7 @@ public class IntersectionSetBuilder{
 				if (!(cover<this.percentageOfAllowedSetCover))
 					continue;
 
-				List<Map<String, String>> rulesToBeRemoved = new ArrayList<Map<String, String>>();
+				List<Map<String, String>> rulesToBeRemoved = new ArrayList<>();
 				boolean nothingCoversThisRule = true;
 				for(Map<String, String> intersChecker: intersections){ // more general rule covers more specific
 					if (distProcessorNeg.ruleCoversRule(intersChecker, intersection)){
@@ -269,7 +269,7 @@ public class IntersectionSetBuilder{
 	}
 
 	private List<Map<String, String>> filterIntersectionsByOppositeTrainingSet(List<Map<String, String>> intersections, List<String[]> positiveSet){
-		List<Map<String, String>> filteredIntersections = new ArrayList<Map<String, String>>();
+		List<Map<String, String>> filteredIntersections = new ArrayList<>();
 		for(Map<String, String> rule: intersections){
 			int countCover = 0;
 			for(String[] line: positiveSet){
@@ -293,7 +293,7 @@ public class IntersectionSetBuilder{
 		IntersectionSetBuilder iBuilder = new IntersectionSetBuilder ();
 		
 		// builds the set of rules
-	    String resFile = iBuilder.ruleFormer("C:/workspace/relevanceEngine/src/test/resources/maps/anomaly/negativeSet1.csv");
+		String resFile = iBuilder.ruleFormer("C:/workspace/relevanceEngine/src/test/resources/maps/anomaly/negativeSet1.csv");
 		// verifies and cleans the rules
 		iBuilder.ruleVerifier("C:/workspace/relevanceEngine/src/test/resources/maps/anomaly/negativeSet1.csv", 
 				"C:/workspace/relevanceEngine/learnedRulesForNegativeSetJune23-1.csv");
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/Pair.java b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/Pair.java
index 850e1ee..c19f617 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/Pair.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/Pair.java
@@ -59,7 +59,7 @@ public class Pair<T1, T2> {
   
   public class PairComparable implements Comparator<Pair<T1, T2>> {
     // @Override
-    public int compare(Pair o1, Pair o2) {
+    public int compare(Pair<T1, T2> o1, Pair<T1, T2> o2) {
       int b = -2;
       if ( o1.second instanceof Float && o2.second instanceof Float){
         
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/VerbNetProcessor.java b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/VerbNetProcessor.java
index 166d4ff..75d1616 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/VerbNetProcessor.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/VerbNetProcessor.java
@@ -11,15 +11,12 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import edu.mit.jverbnet.data.Frame;
-import edu.mit.jverbnet.data.Frame.FrameBuilder;
 import edu.mit.jverbnet.data.FrameType;
 import edu.mit.jverbnet.data.IFrame;
 import edu.mit.jverbnet.data.IMember;
 import edu.mit.jverbnet.data.IThematicRole;
 import edu.mit.jverbnet.data.IVerbClass;
 import edu.mit.jverbnet.data.IWordnetKey;
-import edu.mit.jverbnet.data.VerbClass;
 import edu.mit.jverbnet.index.IVerbIndex;
 import edu.mit.jverbnet.index.VerbIndex;
 
@@ -251,7 +248,7 @@ public class VerbNetProcessor implements IGeneralizer<Map<String, List<String>>>
 		System.out.println(proc.buildTreeRepresentationForTreeKernelLearning("abandon"));
 		System.out.println(proc.buildTreeRepresentationForTreeKernelLearning("earn"));
 		
-		List res = proc.generalize("marry", "engage");
+		List<Map<String, List<String>>> res = proc.generalize("marry", "engage");
 		System.out.println (res);
 
 		res = proc.generalize("assume", "alert");
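
Both this hunk and the Pair.java one above replace raw types (List, Pair) with
parameterized ones, moving type errors from runtime casts to compile time. A
self-contained sketch of the comparator case; this Pair is a simplified
stand-in for the project's class, not its actual code:

    import java.util.Comparator;

    class Pair<T1, T2> {
        final T1 first; final T2 second;
        Pair(T1 first, T2 second) { this.first = first; this.second = second; }
    }

    // Raw form, compare(Pair o1, Pair o2), would force unchecked casts inside;
    // with parameterized arguments the compiler checks element types for us.
    class BySecondFloat implements Comparator<Pair<String, Float>> {
        @Override
        public int compare(Pair<String, Float> o1, Pair<String, Float> o2) {
            return Float.compare(o1.second, o2.second);
        }
    }

    public class RawTypesDemo {
        public static void main(String[] args) {
            int c = new BySecondFloat().compare(new Pair<>("a", 1.0f), new Pair<>("b", 2.0f));
            System.out.println(c < 0); // true: 1.0f sorts before 2.0f
        }
    }
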
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/apps/SnippetToParagraph.java b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/apps/SnippetToParagraph.java
index dd7eaf7..0fb50ec 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/apps/SnippetToParagraph.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/apps/SnippetToParagraph.java
@@ -103,7 +103,7 @@ public class SnippetToParagraph extends ContentGeneratorSupport /*RelatedSentenc
 					&& sents.size() > 0)
 				try {
 					String[] mainAndFollowSent = getFullOriginalSentenceFromWebpageBySnippetFragment(
-							fragment.replace("_should_find_orig_", ""), (String[])sents.toArray(new String[]{}));
+							fragment.replace("_should_find_orig_", ""), sents.toArray(new String[]{}));
 					pageSentence = mainAndFollowSent[0];
 					followSent = mainAndFollowSent[1];
 
@@ -299,7 +299,7 @@ public class SnippetToParagraph extends ContentGeneratorSupport /*RelatedSentenc
 		if (sentenceOrMultSent==null || sentenceOrMultSent.length()<20)
 			continue;
 		if (GeneratedSentenceProcessor.acceptableMinedSentence(sentenceOrMultSent)==null){
-			System.out.println("Rejected sentence by GeneratedSentenceProcessor.acceptableMinedSentence = "+sentenceOrMultSent);
+			// System.out.println("Rejected sentence by GeneratedSentenceProcessor.acceptableMinedSentence = "+sentenceOrMultSent);
 			continue;
 		}
 		// aaa. hhh hhh.  kkk . kkk ll hhh. lll kkk n.
@@ -328,7 +328,7 @@ public class SnippetToParagraph extends ContentGeneratorSupport /*RelatedSentenc
 		}
 	}
 
-	return (String[]) sentsClean.toArray(new String[0]);
+	return sentsClean.toArray(new String[0]);
 }
 	private String[] verifyEnforceStartsUpperCase(String[] sents) {
 		for(int i=0; i<sents.length; i++){
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/apps/WebPageContentSentenceExtractor.java b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/apps/WebPageContentSentenceExtractor.java
index 038fcfc..1210e64 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/apps/WebPageContentSentenceExtractor.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/apps/WebPageContentSentenceExtractor.java
@@ -107,7 +107,7 @@ public class WebPageContentSentenceExtractor extends WebPageExtractor {
 		for (String sentenceOrMultSent : longestSents)
 		{
 			if (GeneratedSentenceProcessor.acceptableMinedSentence(sentenceOrMultSent)==null){
-				System.out.println("Rejected sentence by GeneratedSentenceProcessor.acceptableMinedSentence = "+sentenceOrMultSent);
+				// System.out.println("Rejected sentence by GeneratedSentenceProcessor.acceptableMinedSentence = "+sentenceOrMultSent);
 				continue;
 			}
 			// aaa. hhh hhh.  kkk . kkk ll hhh. lll kkk n.
@@ -124,7 +124,7 @@ public class WebPageContentSentenceExtractor extends WebPageExtractor {
 			sentsClean.add(sentenceOrMultSent);
 		}
 
-		return (String[]) sentsClean.toArray(new String[0]);
+		return sentsClean.toArray(new String[0]);
 	}
 
 	
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/apps/WebPageExtractor.java b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/apps/WebPageExtractor.java
index b91f5cb..ce73977 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/apps/WebPageExtractor.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/apps/WebPageExtractor.java
@@ -29,8 +29,7 @@ import opennlp.tools.similarity.apps.utils.PageFetcher;
 import opennlp.tools.textsimilarity.TextProcessor;
 import opennlp.tools.textsimilarity.chunker2matcher.ParserChunker2MatcherProcessor;
 
-public class WebPageExtractor
-{
+public class WebPageExtractor {
 	protected PageFetcher pageFetcher = new PageFetcher();
 	
 	protected ParserChunker2MatcherProcessor nlProc;
@@ -116,13 +115,13 @@ public class WebPageExtractor
 				if (s == null || s.trim().length() < sentThresholdLength || s.length() < sentThresholdLength + 10)
 					continue;
 				if (GeneratedSentenceProcessor.acceptableMinedSentence(s)==null){
-					System.out.println("Rejected sentence by GeneratedSentenceProcessor.acceptableMinedSentence = "+s);
+					// System.out.println("Rejected sentence by GeneratedSentenceProcessor.acceptableMinedSentence = "+s);
 					continue;
 				}
 				sentsClean.add(s);
 			}
 		}
-		return (String[]) sentsClean.toArray(new String[0]);
+		return sentsClean.toArray(new String[0]);
 	}
 
 	public class TextChunk {
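
The toArray change repeated across these extractor classes works because
List.toArray(T[]) has been generic since Java 5: handed a String[], it already
returns String[], so the old (String[]) cast was redundant. A runnable sketch
with illustrative contents:

    import java.util.Arrays;
    import java.util.List;

    public class ToArrayDemo {
        public static void main(String[] args) {
            List<String> sentsClean = List.of("First sentence.", "Second sentence.");
            // toArray(new String[0]) sizes the result itself; no cast needed.
            String[] sents = sentsClean.toArray(new String[0]);
            System.out.println(Arrays.toString(sents));
        }
    }
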
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/communicative_actions/CommunicativeActionsArcBuilder.java b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/communicative_actions/CommunicativeActionsArcBuilder.java
index aea85b5..84955b3 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/communicative_actions/CommunicativeActionsArcBuilder.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/communicative_actions/CommunicativeActionsArcBuilder.java
@@ -7,59 +7,59 @@ import opennlp.tools.parse_thicket.IGeneralizer;
 import opennlp.tools.parse_thicket.Pair;
 import opennlp.tools.parse_thicket.ParseTreeNode;
 
+public class CommunicativeActionsArcBuilder implements IGeneralizer<Pair<String, Integer[]>> {
 
-public class CommunicativeActionsArcBuilder implements IGeneralizer<Pair<String, Integer[]>>{
-
-	private List<Pair<String, Integer[]>> commActionsAttr = new ArrayList<Pair<String, Integer[]>>();
+	private final List<Pair<String, Integer[]>> commActionsAttr = new ArrayList<>();
+	
 	public CommunicativeActionsArcBuilder(){
 
-		commActionsAttr.add(new Pair<String, Integer[]>("agree", new Integer[]{	1,	-1,	-1,	1,	-1}));
-		commActionsAttr.add(new Pair<String, Integer[]>("accept", new Integer[]{	1,	-1,	-1,	1,	1}));
-		commActionsAttr.add(new Pair<String, Integer[]>("explain", new Integer[]{	0,	-1,	1,	1,	-1}));
+		commActionsAttr.add(new Pair<>("agree", new Integer[]{	1,	-1,	-1,	1,	-1}));
+		commActionsAttr.add(new Pair<>("accept", new Integer[]{	1,	-1,	-1,	1,	1}));
+		commActionsAttr.add(new Pair<>("explain", new Integer[]{	0,	-1,	1,	1,	-1}));
 
-		commActionsAttr.add(new Pair<String, Integer[]>("suggest", new Integer[]{	1,	0,	1,	-1,	-1}));
-		commActionsAttr.add(new Pair<String, Integer[]>("claim", new Integer[]{	1,	0,	1,	-1,	-1}));
+		commActionsAttr.add(new Pair<>("suggest", new Integer[]{	1,	0,	1,	-1,	-1}));
+		commActionsAttr.add(new Pair<>("claim", new Integer[]{	1,	0,	1,	-1,	-1}));
 
 		// bring-attention
-		commActionsAttr.add(new Pair<String, Integer[]>("bring_attention", new Integer[]{	1,	1,	1,	1,	1}));
-		commActionsAttr.add(new Pair<String, Integer[]>("remind", new Integer[]{	-1,	0,	1,	1,	1}));
-		commActionsAttr.add(new Pair<String, Integer[]>("allow", new Integer[]{	1,	-1,	-1,	-1,	-1}));
-		commActionsAttr.add(new Pair<String, Integer[]>("try", new Integer[]{	1,	0,	-1,	-1,	-1}));
-		commActionsAttr.add(new Pair<String, Integer[]>("request", new Integer[]{	0,	1,	-1,	1,	1}));
-		commActionsAttr.add(new Pair<String, Integer[]>("understand", new Integer[]{	0,	-1,	-1,	1,	-1}));
-
-		commActionsAttr.add(new Pair<String, Integer[]>("inform", new Integer[]{	0,	0,	1,	1,	-1}));
-		commActionsAttr.add(new Pair<String, Integer[]>("notify", new Integer[]{	0,	0,	1,	1,	-1}));
-		commActionsAttr.add(new Pair<String, Integer[]>("report", new Integer[]{	0,	0,	1,	1,	-1}));
-
-
-		commActionsAttr.add(new Pair<String, Integer[]>("confirm", new Integer[]{	0,	-1,	1,	1,	1}));
-		commActionsAttr.add(new Pair<String, Integer[]>("ask", new Integer[]{	0,	1,	-1,	-1,	-1}));
-		commActionsAttr.add(new Pair<String, Integer[]>("check", new Integer[]{	-1,	1,	-1,	-1,	1}));
-
-		commActionsAttr.add(new Pair<String, Integer[]>("ignore", new Integer[]{	-1,	-1,	-1,	-1,	1}));
-		commActionsAttr.add(new Pair<String, Integer[]>("wait", new Integer[]{	-1,	-1,	-1,	-1,	1}));
-
-		commActionsAttr.add(new Pair<String, Integer[]>("convince", new Integer[]{	0,	1,	1,	1, -1}));
-		commActionsAttr.add(new Pair<String, Integer[]>("disagree", new Integer[]{	-1,	-1,	-1,	1,	-1}));
-		commActionsAttr.add(new Pair<String, Integer[]>("appeal", new Integer[]{	-1,	1,	1,	1,	1}));
-		commActionsAttr.add(new Pair<String, Integer[]>("deny", new Integer[]{	-1,	-1,	-1,	1,	1}));
-		commActionsAttr.add(new Pair<String, Integer[]>("threaten", new Integer[]{	-1,	1, -1,	1,	1}));
-
-		commActionsAttr.add(new Pair<String, Integer[]>("concern", new Integer[]{	1,	-1, -1,	1,	1}));
-		commActionsAttr.add(new Pair<String, Integer[]>("afraid", new Integer[]{	1,	-1, -1,	1,	1}));
-		commActionsAttr.add(new Pair<String, Integer[]>("worri", new Integer[]{	1,	-1, -1,	1,	1}));
-		commActionsAttr.add(new Pair<String, Integer[]>("scare", new Integer[]{	1,	-1, -1,	1,	1}));
-
-		commActionsAttr.add(new Pair<String, Integer[]>("want", new Integer[]{	1,	0,	-1,	-1,	-1}));
-		commActionsAttr.add(new Pair<String, Integer[]>("know", new Integer[]{	0,	-1,	-1,	1,	-1}));
-		commActionsAttr.add(new Pair<String, Integer[]>("believe", new Integer[]{	0,	-1,	-1,	1,	-1}));
+		commActionsAttr.add(new Pair<>("bring_attention", new Integer[]{	1,	1,	1,	1,	1}));
+		commActionsAttr.add(new Pair<>("remind", new Integer[]{	-1,	0,	1,	1,	1}));
+		commActionsAttr.add(new Pair<>("allow", new Integer[]{	1,	-1,	-1,	-1,	-1}));
+		commActionsAttr.add(new Pair<>("try", new Integer[]{	1,	0,	-1,	-1,	-1}));
+		commActionsAttr.add(new Pair<>("request", new Integer[]{	0,	1,	-1,	1,	1}));
+		commActionsAttr.add(new Pair<>("understand", new Integer[]{	0,	-1,	-1,	1,	-1}));
+
+		commActionsAttr.add(new Pair<>("inform", new Integer[]{	0,	0,	1,	1,	-1}));
+		commActionsAttr.add(new Pair<>("notify", new Integer[]{	0,	0,	1,	1,	-1}));
+		commActionsAttr.add(new Pair<>("report", new Integer[]{	0,	0,	1,	1,	-1}));
+
+
+		commActionsAttr.add(new Pair<>("confirm", new Integer[]{	0,	-1,	1,	1,	1}));
+		commActionsAttr.add(new Pair<>("ask", new Integer[]{	0,	1,	-1,	-1,	-1}));
+		commActionsAttr.add(new Pair<>("check", new Integer[]{	-1,	1,	-1,	-1,	1}));
+
+		commActionsAttr.add(new Pair<>("ignore", new Integer[]{	-1,	-1,	-1,	-1,	1}));
+		commActionsAttr.add(new Pair<>("wait", new Integer[]{	-1,	-1,	-1,	-1,	1}));
+
+		commActionsAttr.add(new Pair<>("convince", new Integer[]{	0,	1,	1,	1, -1}));
+		commActionsAttr.add(new Pair<>("disagree", new Integer[]{	-1,	-1,	-1,	1,	-1}));
+		commActionsAttr.add(new Pair<>("appeal", new Integer[]{	-1,	1,	1,	1,	1}));
+		commActionsAttr.add(new Pair<>("deny", new Integer[]{	-1,	-1,	-1,	1,	1}));
+		commActionsAttr.add(new Pair<>("threaten", new Integer[]{	-1,	1, -1,	1,	1}));
+
+		commActionsAttr.add(new Pair<>("concern", new Integer[]{	1,	-1, -1,	1,	1}));
+		commActionsAttr.add(new Pair<>("afraid", new Integer[]{	1,	-1, -1,	1,	1}));
+		commActionsAttr.add(new Pair<>("worri", new Integer[]{	1,	-1, -1,	1,	1}));
+		commActionsAttr.add(new Pair<>("scare", new Integer[]{	1,	-1, -1,	1,	1}));
+
+		commActionsAttr.add(new Pair<>("want", new Integer[]{	1,	0,	-1,	-1,	-1}));
+		commActionsAttr.add(new Pair<>("know", new Integer[]{	0,	-1,	-1,	1,	-1}));
+		commActionsAttr.add(new Pair<>("believe", new Integer[]{	0,	-1,	-1,	1,	-1}));
 	}
 
 	public Pair<String, Integer[]> findCAInSentence(List<ParseTreeNode> sentence){
 		for(ParseTreeNode node: sentence){
 			for(Pair<String, Integer[]> ca: commActionsAttr){
-				String lemma = (String)ca.getFirst();
+				String lemma = ca.getFirst();
 				// canonical form lemma is a sub-string of an actual form in parseTreeNode
 				if (node.getWord().toLowerCase().startsWith(lemma))
 					return ca;
@@ -72,7 +72,7 @@ public class CommunicativeActionsArcBuilder implements IGeneralizer<Pair<String,
 		for(int index = 1; index< sentence.size(); index++){
 			ParseTreeNode node = sentence.get(index);
 			for(Pair<String, Integer[]> ca: commActionsAttr){
-				String lemma = (String)ca.getFirst();
+				String lemma = ca.getFirst();
 				String[] lemmas = lemma.split("_");
 				if (lemmas==null || lemmas.length<2){
 					if (node.getWord().toLowerCase().startsWith(lemma))
@@ -87,11 +87,12 @@ public class CommunicativeActionsArcBuilder implements IGeneralizer<Pair<String,
 	}
 
 
+	@Override
 	public List<Pair<String, Integer[]>> generalize(Object o1, Object o2) {
-		List<Pair<String, Integer[]>> results = new ArrayList<Pair<String, Integer[]>>();
+		List<Pair<String, Integer[]>> results = new ArrayList<>();
 
 
-		String ca1 = null, ca2=null;
+		String ca1, ca2;
 
 		if (o1 instanceof String){
 			ca1 = (String)o1;
@@ -101,11 +102,10 @@ public class CommunicativeActionsArcBuilder implements IGeneralizer<Pair<String,
 			ca2 = ((Pair<String, Integer[]>)o2).getFirst();
 		}
 
-
 		// find entry for ca1
 		Pair<String, Integer[]> caP1=null, caP2=null;
 		for(Pair<String, Integer[]> ca: commActionsAttr){
-			String lemma = (String)ca.getFirst();
+			String lemma = ca.getFirst();
 			if (lemma.equals(ca1)){
 				caP1=ca;
 				break;
@@ -114,7 +114,7 @@ public class CommunicativeActionsArcBuilder implements IGeneralizer<Pair<String,
 
 		// find entry for ca2
 		for(Pair<String, Integer[]> ca: commActionsAttr){
-			String lemma = (String)ca.getFirst();
+			String lemma = ca.getFirst();
 			if (lemma.equals(ca2)){
 				caP2=ca;
 				break;
@@ -128,36 +128,34 @@ public class CommunicativeActionsArcBuilder implements IGeneralizer<Pair<String,
 			// we take Integer[] which is a first element of as resultant list
 			Integer[] res = new CommunicativeActionsAttribute().
 					generalize(caP1.getSecond(), caP2.getSecond()).get(0);
-			results.add(new Pair<String, Integer[]>("", res ));
+			results.add(new Pair<>("", res ));
 		}
 
 		return results;
 	}
 
 
-
-
-	/*Pair<String, Integer[]>[] commActionsAttrAr = new Pair<String, Integer[]>[] {
-			new Pair<String, Integer[]>("agree", new Integer[]{	1,	-1,	-1,	1,	-1}),
-			new Pair<String, Integer[]>("accept", new Integer[]{	1,	-1,	-1,	1,	1}),
-			new Pair<String, Integer[]>("explain", new Integer[]{	0,	-1,	1,	1,	-1}),
-			new Pair<String, Integer[]>("suggest", new Integer[]{	1,	0,	1,	-1,	-1}),
-			new Pair<String, Integer[]>("bring attention", new Integer[]{	1,	1,	1,	1,	1}),
-			new Pair<String, Integer[]>("remind", new Integer[]{	-1,	0,	1,	1,	1}),
-		    new Pair<String, Integer[]>("allow", new Integer[]{	1,	-1,	-1,	-1,	-1}),
-			new Pair<String, Integer[]>("try", new Integer[]{	1,	0,	-1,	-1,	-1}),
-			new Pair<String, Integer[]>("request", new Integer[]{	0,	1,	-1,	1,	1}),
-			new Pair<String, Integer[]>("understand", new Integer[]{	0,	-1,	-1,	1,	-1}),
-			new Pair<String, Integer[]>("inform", new Integer[]{	0,	0,	1,	1,	-1}),
-			new Pair<String, Integer[]>("confirm", new Integer[]{	0,	-1,	1,	1,	1}),
-			new Pair<String, Integer[]>("ask", new Integer[]{	0,	1,	-1,	-1,	-1}),
-			new Pair<String, Integer[]>("check", new Integer[]{	-1,	1,	-1,	-1,	1}),
-			new Pair<String, Integer[]>("ignore", new Integer[]{	-1,	-1,	-1,	-1,	1}),
-			new Pair<String, Integer[]>("convince", new Integer[]{	0,	1,	1,	1, -1}),
-			new Pair<String, Integer[]>("disagree", new Integer[]{	-1,	-1,	-1,	1,	-1}),
-			new Pair<String, Integer[]>("appeal", new Integer[]{	-1,	1,	1,	1,	1}),
-			new Pair<String, Integer[]>("deny", new Integer[]{	-1,	-1,	-1,	1,	1}),
-			new Pair<String, Integer[]>("threaten", new Integer[]{	-1,	1, -1,	1,	1}),	
+	/*Pair<String, Integer[]>[] commActionsAttrAr = new Pair<>[] {
+			new Pair<>("agree", new Integer[]{	1,	-1,	-1,	1,	-1}),
+			new Pair<>("accept", new Integer[]{	1,	-1,	-1,	1,	1}),
+			new Pair<>("explain", new Integer[]{	0,	-1,	1,	1,	-1}),
+			new Pair<>("suggest", new Integer[]{	1,	0,	1,	-1,	-1}),
+			new Pair<>("bring attention", new Integer[]{	1,	1,	1,	1,	1}),
+			new Pair<>("remind", new Integer[]{	-1,	0,	1,	1,	1}),
+		    new Pair<>("allow", new Integer[]{	1,	-1,	-1,	-1,	-1}),
+			new Pair<>("try", new Integer[]{	1,	0,	-1,	-1,	-1}),
+			new Pair<>("request", new Integer[]{	0,	1,	-1,	1,	1}),
+			new Pair<>("understand", new Integer[]{	0,	-1,	-1,	1,	-1}),
+			new Pair<>("inform", new Integer[]{	0,	0,	1,	1,	-1}),
+			new Pair<>("confirm", new Integer[]{	0,	-1,	1,	1,	1}),
+			new Pair<>("ask", new Integer[]{	0,	1,	-1,	-1,	-1}),
+			new Pair<>("check", new Integer[]{	-1,	1,	-1,	-1,	1}),
+			new Pair<>("ignore", new Integer[]{	-1,	-1,	-1,	-1,	1}),
+			new Pair<>("convince", new Integer[]{	0,	1,	1,	1, -1}),
+			new Pair<>("disagree", new Integer[]{	-1,	-1,	-1,	1,	-1}),
+			new Pair<>("appeal", new Integer[]{	-1,	1,	1,	1,	1}),
+			new Pair<>("deny", new Integer[]{	-1,	-1,	-1,	1,	1}),
+			new Pair<>("threaten", new Integer[]{	-1,	1, -1,	1,	1}),
 	} */
 
 }
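
Two details of the diamond-operator rewrite above are worth spelling out. The
diamond in new Pair<>(...) only asks the compiler to infer the type arguments,
so the table of communicative-action attributes is unchanged. The commented-out
new Pair<>[] {...} block, however, would not compile if revived: Java forbids
the diamond in array creation, and generic array creation in general, which is
one reason the live code keeps these entries in a List. A minimal sketch:

    import java.util.ArrayList;
    import java.util.List;

    public class DiamondDemo {
        public static void main(String[] args) {
            // Pre-Java 7 style repeats the type arguments on both sides:
            List<String> verbose = new ArrayList<String>();
            // The diamond lets the compiler infer them; same runtime type:
            List<String> inferred = new ArrayList<>();
            // List<String>[] arr = new List<>[2];       // diamond in array creation: illegal
            // List<String>[] arr = new List<String>[2]; // generic array creation: also illegal
            verbose.add("agree");
            inferred.add("agree");
            System.out.println(verbose.equals(inferred)); // true
        }
    }
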
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/matching/LemmaGeneralizer.java b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/matching/LemmaGeneralizer.java
index de966e5..52b0e2e 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/matching/LemmaGeneralizer.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/matching/LemmaGeneralizer.java
@@ -79,7 +79,7 @@ public class LemmaGeneralizer implements IGeneralizer<String> {
 			// if different words, then compute word2vec distance and write the value as a string
 			if (w2v.vec!=null){
 				double value = w2v.vec.similarity(lemma1,  lemma2);
-				results.add(w2vPrefix+new Float(value).toString());
+				results.add(w2vPrefix + value);
 			}
 			return results;
 		}
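
The LemmaGeneralizer change is slightly more than cosmetic: new
Float(value).toString() first narrowed the double similarity score to float,
while plain string concatenation keeps the full double representation and
avoids the deprecated constructor. A sketch; the prefix value is an assumption
for illustration, since only the field name w2vPrefix appears in the hunk:

    public class FloatToStringDemo {
        public static void main(String[] args) {
            double value = 0.123456789012;
            String w2vPrefix = "w2v_"; // assumed value, for illustration only
            // Old: w2vPrefix + new Float(value).toString() -> "w2v_0.12345679"
            // (narrowed to float, deprecated constructor)
            String s = w2vPrefix + value; // -> "w2v_0.123456789012"
            System.out.println(s);
        }
    }
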
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/matching/ParseTreeNodeGeneralizer.java b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/matching/ParseTreeNodeGeneralizer.java
index 8001a7b..de10af9 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/matching/ParseTreeNodeGeneralizer.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/matching/ParseTreeNodeGeneralizer.java
@@ -10,15 +10,16 @@ import opennlp.tools.parse_thicket.IGeneralizer;
 import opennlp.tools.parse_thicket.ParseTreeNode;
 import opennlp.tools.parse_thicket.VerbNetProcessor;
 
-public class ParseTreeNodeGeneralizer implements IGeneralizer<ParseTreeNode>{
-	private LemmaGeneralizer lGen = new LemmaGeneralizer ();
-	private PartOfSpeechGeneralizer posGen = new PartOfSpeechGeneralizer ();
-	private VerbNetProcessor vnProc = VerbNetProcessor.getInstance(null);
+public class ParseTreeNodeGeneralizer implements IGeneralizer<ParseTreeNode> {
+
+	private final LemmaGeneralizer lGen = new LemmaGeneralizer ();
+	private final PartOfSpeechGeneralizer posGen = new PartOfSpeechGeneralizer ();
+	private final VerbNetProcessor vnProc = VerbNetProcessor.getInstance(null);
 
 	@Override
 	public List<ParseTreeNode> generalize(Object o1, Object o2) {
 
-		List<ParseTreeNode> results = new ArrayList<ParseTreeNode>();
+		List<ParseTreeNode> results = new ArrayList<>();
 
 		ParseTreeNode newNode = null;
 		ParseTreeNode ch1 = (ParseTreeNode)o1, ch2 = (ParseTreeNode)o2;
@@ -44,11 +45,11 @@ public class ParseTreeNodeGeneralizer implements IGeneralizer<ParseTreeNode>{
 				Map<String, List<String>> verbNetGen = verbNetGenList.get(0);
 				Map<String, Object> attr = newNode.getAttributes();
 				if (attr == null)
-					attr = new HashMap<String, Object> ();
+					attr = new HashMap<> ();
 				try {
 					List<String> phrDscr = (List<String>) attr.get("phrDescr");
 					if (phrDscr!=null) // && phrDscr.size()>1)
-						phrDscr = new ArrayList<String>(new HashSet<String>(phrDscr));
+						phrDscr = new ArrayList<>(new HashSet<>(phrDscr));
 				} catch (Exception e) {
 					System.err.println("Problem de-duplicating verbnet expr" + attr);
 				}
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/matching/PhraseGroupGeneralizer.java b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/matching/PhraseGroupGeneralizer.java
index 094f093..bc2eabb 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/matching/PhraseGroupGeneralizer.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/matching/PhraseGroupGeneralizer.java
@@ -21,32 +21,23 @@ import java.util.ArrayList;
 import java.util.List;
 
 import opennlp.tools.parse_thicket.IGeneralizer;
-import opennlp.tools.parse_thicket.ParseTreeNode;
-import opennlp.tools.stemmer.PStemmer;
 import opennlp.tools.textsimilarity.GeneralizationListReducer;
-import opennlp.tools.textsimilarity.LemmaFormManager;
-import opennlp.tools.textsimilarity.POSManager;
 import opennlp.tools.textsimilarity.ParseTreeChunk;
 
-public class PhraseGroupGeneralizer implements IGeneralizer<List<ParseTreeChunk>>{
+public class PhraseGroupGeneralizer implements IGeneralizer<List<ParseTreeChunk>> {
 
-  private GeneralizationListReducer generalizationListReducer = new GeneralizationListReducer();
-
-  private LemmaFormManager lemmaFormManager = new LemmaFormManager();
-
-  private POSManager posManager = new POSManager();
-
-  private PhraseGeneralizer pGen = new PhraseGeneralizer();
-  private NERPhraseGeneralizer pGenNER = new NERPhraseGeneralizer();
+  private final GeneralizationListReducer generalizationListReducer = new GeneralizationListReducer();
+  private final PhraseGeneralizer pGen = new PhraseGeneralizer();
+  private final NERPhraseGeneralizer pGenNER = new NERPhraseGeneralizer();
 
   /**
    * Main function to generalize two expressions grouped by phrase types.
    * Returns a list of generalizations for each phrase type with filtered
    * sub-expressions.
    * 
-   * @param sent1
-   * @param sent2
-   * @return List<List<ParseTreeChunk>> list of list of POS-words pairs for each
+   * @param o1 the first sentence's phrases, grouped by phrase type (List<List<ParseTreeChunk>>)
+   * @param o2 the second sentence's phrases, grouped by phrase type
+   * @return List<List<ParseTreeChunk>> list of POS-words pairs for each
    *         resultant matched / overlapped phrase
    */
   @Override
@@ -55,11 +46,11 @@ public class PhraseGroupGeneralizer implements IGeneralizer<List<ParseTreeChunk>
   
       List<List<ParseTreeChunk>> sent1 = (List<List<ParseTreeChunk>>)o1, 
     	 sent2 = (List<List<ParseTreeChunk>>) o2 ;
-    List<List<ParseTreeChunk>> results = new ArrayList<List<ParseTreeChunk>>();
+    List<List<ParseTreeChunk>> results = new ArrayList<>();
     // first iterate through component
     for (int comp = 0; comp < 2 && // just np & vp
         comp < sent1.size() && comp < sent2.size(); comp++) {
-      List<ParseTreeChunk> resultComps = new ArrayList<ParseTreeChunk>();
+      List<ParseTreeChunk> resultComps = new ArrayList<>();
       // then iterate through each phrase in each component
       // first try lemma-based alignment
       for (ParseTreeChunk ch1 : sent1.get(comp)) {
@@ -72,9 +63,9 @@ public class PhraseGroupGeneralizer implements IGeneralizer<List<ParseTreeChunk>
 			e.printStackTrace();
 		}
           if (chunkToAdd == null){
-        	     chunkToAdd = new ArrayList<ParseTreeChunk>();
+        	     chunkToAdd = new ArrayList<>();
           }       
-          Boolean alreadyThere = false;
+          boolean alreadyThere = false;
           for (ParseTreeChunk chunk : resultComps) {
             if (chunkToAdd.contains(chunk)) {
               alreadyThere = true;
@@ -95,10 +86,10 @@ public class PhraseGroupGeneralizer implements IGeneralizer<List<ParseTreeChunk>
     	              ch1, ch2);
 
     	          if (chunkToAdd == null){
-    	        	   chunkToAdd = new ArrayList<ParseTreeChunk>();
+    	        	   chunkToAdd = new ArrayList<>();
     	          }
     	         
-    	          Boolean alreadyThere = false;
+    	          boolean alreadyThere = false;
     	          for (ParseTreeChunk chunk : resultComps) {
     	            if (chunkToAdd.contains(chunk)) {
     	              alreadyThere = true;
@@ -113,10 +104,8 @@ public class PhraseGroupGeneralizer implements IGeneralizer<List<ParseTreeChunk>
     	        }
     	      }
       }
-      
-      List<ParseTreeChunk> resultCompsRed = generalizationListReducer.applyFilteringBySubsumption(resultComps);
 
-      resultComps = resultCompsRed;
+      resultComps = generalizationListReducer.applyFilteringBySubsumption(resultComps);
       results.add(resultComps);
     }
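
The Boolean-to-boolean change above replaces a boxed local flag with a
primitive. Besides the needless allocation, a boxed Boolean has an extra
failure mode: unboxing a null reference throws a NullPointerException at the
if. A tiny sketch:

    public class FlagDemo {
        public static void main(String[] args) {
            Boolean boxed = null;
            try {
                if (boxed) { System.out.println("never reached"); } // unboxes null -> NPE
            } catch (NullPointerException e) {
                System.out.println("unboxing a null Boolean throws NPE");
            }
            boolean alreadyThere = false; // primitive flag: no such hazard
            System.out.println(alreadyThere);
        }
    }
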
 
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/pattern_structure/LinguisticPatternStructure.java b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/pattern_structure/LinguisticPatternStructure.java
index 3c88b41..50c1612 100755
--- a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/pattern_structure/LinguisticPatternStructure.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/pattern_structure/LinguisticPatternStructure.java
@@ -45,8 +45,7 @@ public class LinguisticPatternStructure extends PhrasePatternStructure {
 	}
 	
 	public int AddIntent(List<List<ParseTreeChunk>> intent, LinkedHashSet<Integer>extent,int generator) {
-		System.out.println("debug");
-		System.out.println("called for " + intent);
+		// System.out.println("debug called for " + intent);
 		//printLattice();
 		int generator_tmp = GetMaximalConcept(intent, generator);
 		generator = generator_tmp;
@@ -67,14 +66,14 @@ public class LinguisticPatternStructure extends PhrasePatternStructure {
 				new_extent.addAll(conceptList.get(candidate).extent);
 				new_extent.addAll(extent);
 				if (intent.size()!=intersection.size()){
-					System.out.println("recursive call (inclusion)");
-					System.out.println(intent + "----" + intersection);
+					// System.out.println("recursive call (inclusion)");
+					// System.out.println(intent + "----" + intersection);
 					candidate = AddIntent(intersection,new_extent, candidate);
 				}
 			}
 			
 			boolean addParents = true;
-			System.out.println("now iterating over parents");
+			// System.out.println("now iterating over parents");
 			Iterator<Integer> iterator = newParents.iterator();
 			while (iterator.hasNext()) {
 				Integer parent = iterator.next();
@@ -92,7 +91,7 @@ public class LinguisticPatternStructure extends PhrasePatternStructure {
 				newParents.add(candidate);
 			}
 		}
-		System.out.println("size of lattice: " + conceptList.size());
+		// System.out.println("size of lattice: " + conceptList.size());
 		PhraseConcept newConcept = new PhraseConcept();
 		newConcept.setIntent(intent);
 
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/pattern_structure/PhrasePatternStructure.java b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/pattern_structure/PhrasePatternStructure.java
index 25d5ac5..b01afc1 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/pattern_structure/PhrasePatternStructure.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/pattern_structure/PhrasePatternStructure.java
@@ -16,8 +16,6 @@
  */
 package opennlp.tools.parse_thicket.pattern_structure;
 
-
-
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -28,7 +26,6 @@ import opennlp.tools.parse_thicket.ParseTreeNode;
 import opennlp.tools.textsimilarity.ParseTreeChunk;
 import opennlp.tools.textsimilarity.ParseTreeMatcherDeterministic;
 
-
 public class PhrasePatternStructure {
 	int objectCount;
 	int attributeCount;
@@ -63,8 +60,7 @@ public class PhrasePatternStructure {
 		return Generator;
 	}
 	public int AddIntent(List<List<ParseTreeChunk>> intent, int generator) {
-		System.out.println("debug");
-		System.out.println("called for " + intent);
+		// System.out.println("debug called for " + intent);
 		//printLattice();
 		int generator_tmp = GetMaximalConcept(intent, generator);
 		generator = generator_tmp;
@@ -84,11 +80,11 @@ public class PhrasePatternStructure {
 				//intersection.retainAll(intent);
 				List<List<ParseTreeChunk>> intersection = md
 						.matchTwoSentencesGroupedChunksDeterministic(intent, conceptList.get(candidate).intent);
-				System.out.println("recursive call (inclusion)");
+				// System.out.println("recursive call (inclusion)");
 				candidate = AddIntent(intersection, candidate);
 			}
 			boolean addParents = true;
-			System.out.println("now iterating over parents");
+			// System.out.println("now iterating over parents");
 			Iterator<Integer> iterator = newParents.iterator();
 			while (iterator.hasNext()) {
 				Integer parent = iterator.next();
@@ -121,7 +117,7 @@ public class PhrasePatternStructure {
 				newParents.add(candidate);
 			}
 		}
-		System.out.println("size of lattice: " + conceptList.size());
+		// System.out.println("size of lattice: " + conceptList.size());
 		PhraseConcept newConcept = new PhraseConcept();
 		newConcept.setIntent(intent);
 		newConcept.setPosition(conceptList.size());
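
These hunks, like the LinguisticPatternStructure ones above, silence debug
printlns by commenting them out. An alternative this commit does not take would
be to route such traces through java.util.logging, which the codebase already
imports elsewhere (a j.u.l Logger is removed in the ContentGenerator hunk
below); FINE-level messages could then be toggled through configuration rather
than code edits. A runnable sketch of that approach:

    import java.util.logging.ConsoleHandler;
    import java.util.logging.Handler;
    import java.util.logging.Level;
    import java.util.logging.Logger;

    public class LatticeDebugDemo {
        private static final Logger LOG = Logger.getLogger(LatticeDebugDemo.class.getName());

        public static void main(String[] args) {
            // The default console handler passes INFO and above only, so attach
            // one that forwards FINE; in production this belongs in logging.properties.
            Handler h = new ConsoleHandler();
            h.setLevel(Level.FINE);
            LOG.addHandler(h);
            LOG.setLevel(Level.FINE);

            int latticeSize = 42; // illustrative value
            LOG.fine(() -> "size of lattice: " + latticeSize);
        }
    }
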
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/ContentGenerator.java b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/ContentGenerator.java
index 4bff64f..e13fe8f 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/ContentGenerator.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/ContentGenerator.java
@@ -20,7 +20,6 @@ package opennlp.tools.similarity.apps;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
-import java.util.logging.Logger;
 
 import opennlp.tools.parse_thicket.Triple;
 import opennlp.tools.similarity.apps.utils.PageFetcher;
@@ -39,11 +38,8 @@ import opennlp.tools.textsimilarity.chunker2matcher.ParserChunker2MatcherProcess
  */
 
 public class ContentGenerator /*extends RelatedSentenceFinder*/ {
-	private static Logger LOG = Logger
-			.getLogger("opennlp.tools.similarity.apps.ContentGenerator");
 	PageFetcher pFetcher = new PageFetcher();
-	ParserChunker2MatcherProcessor sm = ParserChunker2MatcherProcessor
-			.getInstance();
+	ParserChunker2MatcherProcessor sm = ParserChunker2MatcherProcessor.getInstance();
 	protected ParseTreeChunkListScorer parseTreeChunkListScorer = new ParseTreeChunkListScorer();
 	protected ParseTreeChunk parseTreeChunk = new ParseTreeChunk();
 	protected static StringDistanceMeasurer stringDistanceMeasurer = new StringDistanceMeasurer();
@@ -63,11 +59,11 @@ public class ContentGenerator /*extends RelatedSentenceFinder*/ {
 	}
 
 	public ContentGenerator() {
-		// TODO Auto-generated constructor stub
+
 	}
+	
 	public void setLang(String lang) {
 		yrunner.setLang(lang);
-
 	}
 
 
@@ -76,7 +72,7 @@ public class ContentGenerator /*extends RelatedSentenceFinder*/ {
 	 * group, or other entity name and produce a list of text fragments by web
 	 * mining for <br>
 	 * 
-	 * @param String
+	 * @param sentence
 	 *          entity name
 	 * @return List<HitBase> of text fragment structures which contain approved
 	 *         (in terms of relevance) mined sentences, as well as original search
@@ -84,7 +80,7 @@ public class ContentGenerator /*extends RelatedSentenceFinder*/ {
 	 */
 
 	public List<HitBase> generateContentAbout(String sentence) throws Exception {
-		List<HitBase> opinionSentencesToAdd = new ArrayList<HitBase>();
+		List<HitBase> opinionSentencesToAdd = new ArrayList<>();
 		System.out.println(" \n=== Entity to write about = " + sentence);
 	
 		int stepCount=0;
@@ -119,35 +115,31 @@ public class ContentGenerator /*extends RelatedSentenceFinder*/ {
 	 * general (irrelevant search results) or too specific (too few search
 	 * results)
 	 * 
-	 * @param String
-	 *          input sentence to form queries
+	 * @param sentence The input sentence to form queries
 	 * @return List<String> of search expressions
 	 */
 	public static List<String> buildSearchEngineQueryFromSentence(String sentence) {
-		ParseTreeChunk matcher = new ParseTreeChunk();
-		ParserChunker2MatcherProcessor pos = ParserChunker2MatcherProcessor
-				.getInstance();
-		List<List<ParseTreeChunk>> sent1GrpLst = null;
+		ParserChunker2MatcherProcessor pos = ParserChunker2MatcherProcessor.getInstance();
 
 		List<ParseTreeChunk> nPhrases = pos
 				.formGroupedPhrasesFromChunksForSentence(sentence).get(0);
-		List<String> queryArrayStr = new ArrayList<String>();
+		List<String> queryArrayStr = new ArrayList<>();
 		for (ParseTreeChunk ch : nPhrases) {
-			String query = "";
+			StringBuilder query = new StringBuilder();
 			int size = ch.getLemmas().size();
 
 			for (int i = 0; i < size; i++) {
 				if (ch.getPOSs().get(i).startsWith("N")
 						|| ch.getPOSs().get(i).startsWith("J")) {
-					query += ch.getLemmas().get(i) + " ";
+					query.append(ch.getLemmas().get(i)).append(" ");
 				}
 			}
-			query = query.trim();
-			int len = query.split(" ").length;
+			query = new StringBuilder(query.toString().trim());
+			int len = query.toString().split(" ").length;
 			if (len < 2 || len > 5)
 				continue;
 			if (len < 4) { // every word should start with capital
-				String[] qs = query.split(" ");
+				String[] qs = query.toString().split(" ");
 				boolean bAccept = true;
 				for (String w : qs) {
 					if (w.toLowerCase().equals(w)) // if only two words then
@@ -159,33 +151,33 @@ public class ContentGenerator /*extends RelatedSentenceFinder*/ {
 					continue;
 			}
 
-			query = query.trim().replace(" ", " +");
-			query = " +" + query;
+			query = new StringBuilder(query.toString().trim().replace(" ", " +"));
+			query.insert(0, " +");
 
-			queryArrayStr.add(query);
+			queryArrayStr.add(query.toString());
 
 		}
 		if (queryArrayStr.size() < 1) { // release constraints on NP down to 2
 			// keywords
 			for (ParseTreeChunk ch : nPhrases) {
-				String query = "";
+				StringBuilder query = new StringBuilder();
 				int size = ch.getLemmas().size();
 
 				for (int i = 0; i < size; i++) {
 					if (ch.getPOSs().get(i).startsWith("N")
 							|| ch.getPOSs().get(i).startsWith("J")) {
-						query += ch.getLemmas().get(i) + " ";
+						query.append(ch.getLemmas().get(i)).append(" ");
 					}
 				}
-				query = query.trim();
-				int len = query.split(" ").length;
+				query = new StringBuilder(query.toString().trim());
+				int len = query.toString().split(" ").length;
 				if (len < 2)
 					continue;
 
-				query = query.trim().replace(" ", " +");
-				query = " +" + query;
+				query = new StringBuilder(query.toString().trim().replace(" ", " +"));
+				query.insert(0, " +");
 
-				queryArrayStr.add(query);
+				queryArrayStr.add(query.toString());
 
 			}
 		}
@@ -198,28 +190,23 @@ public class ContentGenerator /*extends RelatedSentenceFinder*/ {
 	}
 
 	private Triple<List<String>, String, String[]> formCandidateFragmentsForPage(HitBase item, String originalSentence, List<String> sentsAll){
-		if (sentsAll == null)
-			sentsAll = new ArrayList<String>();
 		// put orig sentence in structure
-		List<String> origs = new ArrayList<String>();
+		List<String> origs = new ArrayList<>();
 		origs.add(originalSentence);
 		item.setOriginalSentences(origs);
 		String title = item.getTitle().replace("<b>", " ").replace("</b>", " ")
 				.replace("  ", " ").replace("  ", " ");
 		// generation results for this sentence
-		List<Fragment> result = new ArrayList<Fragment>();
 		// form plain text from snippet
 		String snapshot = item.getAbstractText().replace("<b>", " ")
 				.replace("</b>", " ").replace("  ", " ").replace("  ", " ");
 
-
 		// fix a template expression which can be substituted by original if
 		// relevant
 		String snapshotMarked = snapshot.replace("...",
 				" _should_find_orig_ . _should_find_orig_");
 		String[] fragments = sm.splitSentences(snapshotMarked);
-		List<String> allFragms = new ArrayList<String>();
-		allFragms.addAll(Arrays.asList(fragments));
+		List<String> allFragms = new ArrayList<>(Arrays.asList(fragments));
 
 		String[] sents = null;
 		String downloadedPage = null;
@@ -239,23 +226,18 @@ public class ContentGenerator /*extends RelatedSentenceFinder*/ {
 				}
 			}
 		} catch (Exception e) {
-			// TODO Auto-generated catch block
-			// e.printStackTrace();
-			System.err
-			.println("Problem downloading  the page and splitting into sentences");
-			return new Triple(allFragms, downloadedPage, sents);
+			System.err.println("Problem downloading the page and splitting into sentences");
+			return new Triple<>(allFragms, downloadedPage, sents);
 		}
-		return new Triple(allFragms, downloadedPage, sents);
+		return new Triple<>(allFragms, downloadedPage, sents);
 	}
 
 	private String[] formCandidateSentences(String fragment, Triple<List<String>, String, String[]> fragmentExtractionResults){
 		String[] mainAndFollowSent = null;
 
-		List<String> allFragms = (List<String>)fragmentExtractionResults.getFirst();
-		String downloadedPage = (String)fragmentExtractionResults.getSecond();
-		String[] sents = (String[])fragmentExtractionResults.getThird();
+		String downloadedPage = fragmentExtractionResults.getSecond();
+		String[] sents = fragmentExtractionResults.getThird();
 
-		String followSent = null;
 		if (fragment.length() < 50)
 			return null;
 		String pageSentence = "";
@@ -266,12 +248,10 @@ public class ContentGenerator /*extends RelatedSentenceFinder*/ {
 				// first try sorted sentences from page by length approach
 				String[] sentsSortedByLength = support.extractSentencesFromPage(downloadedPage);
 
-
 				try {
 					mainAndFollowSent = ContentGeneratorSupport.getFullOriginalSentenceFromWebpageBySnippetFragment(
 							fragment.replace("_should_find_orig_", ""), sentsSortedByLength);
 				} catch (Exception e) {
-					// TODO Auto-generated catch block
 					e.printStackTrace();
 				}
 				// if the above gives null than try to match all sentences from snippet fragment
@@ -280,10 +260,7 @@ public class ContentGenerator /*extends RelatedSentenceFinder*/ {
 							fragment.replace("_should_find_orig_", ""), sents);
 				}
 
-
 			} catch (Exception e) {
-
-				// TODO Auto-generated catch block
 				e.printStackTrace();
 			}
 		}
@@ -302,9 +279,9 @@ public class ContentGenerator /*extends RelatedSentenceFinder*/ {
 		Fragment result = null;	
 
 		String pageSentence = candidateSentences[0];
-		String followSent = "";
+		StringBuilder followSent = new StringBuilder();
 		for(int i = 1; i< candidateSentences.length; i++)
-			followSent+= candidateSentences[i];
+			followSent.append(candidateSentences[i]);
 		String title = item.getTitle();
 
 		// resultant sentence SHOULD NOT be longer than four times the size of
@@ -316,20 +293,17 @@ public class ContentGenerator /*extends RelatedSentenceFinder*/ {
 
 		try { // get score from syntactic match between sentence in
 			// original text and mined sentence
-			double measScore = 0.0, syntScore = 0.0, mentalScore = 0.0;
+			double measScore, syntScore, mentalScore = 0.0;
 
 			SentencePairMatchResult matchRes = sm.assessRelevance(pageSentence
 					+ " " + title, originalSentence);
 			List<List<ParseTreeChunk>> match = matchRes.getMatchResult();
 			if (!matchRes.isVerbExists() || matchRes.isImperativeVerb()) {
-				System.out
-				.println("Rejected Sentence : No verb OR Yes imperative verb :"
-						+ pageSentence);
+				System.out.println("Rejected Sentence : No verb OR Yes imperative verb :" + pageSentence);
 				return null;
 			}
 
-			syntScore = parseTreeChunkListScorer
-					.getParseTreeChunkListScore(match);
+			syntScore = parseTreeChunkListScorer.getParseTreeChunkListScore(match);
 			System.out.println(parseTreeChunk.listToString(match) + " "
 					+ syntScore + "\n pre-processed sent = '" + pageSentence);
 
@@ -337,10 +311,8 @@ public class ContentGenerator /*extends RelatedSentenceFinder*/ {
 				for (String currSent : sentsAll) {
 					if (currSent.startsWith(originalSentence))
 						continue;
-					match = sm.assessRelevance(currSent, pageSentence)
-							.getMatchResult();
-					double syntScoreCurr = parseTreeChunkListScorer
-							.getParseTreeChunkListScore(match);
+					match = sm.assessRelevance(currSent, pageSentence).getMatchResult();
+					double syntScoreCurr = parseTreeChunkListScorer.getParseTreeChunkListScore(match);
 					if (syntScoreCurr > syntScore) {
 						syntScore = syntScoreCurr;
 					}
@@ -351,8 +323,7 @@ public class ContentGenerator /*extends RelatedSentenceFinder*/ {
 				}
 			}
 
-			measScore = stringDistanceMeasurer.measureStringDistance(
-					originalSentence, pageSentence);
+			measScore = stringDistanceMeasurer.measureStringDistance(originalSentence, pageSentence);
 
 
 			if ((syntScore > RELEVANCE_THRESHOLD || measScore > 0.5)
@@ -363,7 +334,7 @@ public class ContentGenerator /*extends RelatedSentenceFinder*/ {
 				if (pageSentenceProc != null) {
 					pageSentenceProc = GeneratedSentenceProcessor
 							.processSentence(pageSentenceProc);
-					followSent = GeneratedSentenceProcessor.processSentence(followSent);
+					followSent = new StringBuilder(GeneratedSentenceProcessor.processSentence(followSent.toString()));
 					if (followSent.length() > 0) {
 						pageSentenceProc += " "+ followSent;
 					}
@@ -379,25 +350,22 @@ public class ContentGenerator /*extends RelatedSentenceFinder*/ {
 							+ "| with title= " + title);
 					System.out.println("For fragment = " + fragment);
 				} else
-					System.out
-					.println("Rejected sentence due to wrong area at webpage: "
-							+ pageSentence);
+					System.out.println("Rejected sentence due to wrong area at webpage: " + pageSentence);
 			} else
-				System.out.println("Rejected sentence due to low score: "
-						+ pageSentence);
+				System.out.println("Rejected sentence due to low score: " + pageSentence);
 			// }
 		} catch (Throwable t) {
 			t.printStackTrace();
 		}
+		return result;
+	}
 
-	return result;
-}
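
The try-block above accepts a mined sentence when either the syntactic match with the seed clears RELEVANCE_THRESHOLD or the plain string distance exceeds 0.5. A hedged sketch of that predicate (1.1 is the RELEVANCE_THRESHOLD declared in RelatedSentenceFinder below; ContentGenerator's own constant is not visible in this hunk and is assumed to be similar):

    static boolean acceptable(double syntScore, double measScore) {
        final double RELEVANCE_THRESHOLD = 1.1;  // assumed, as in RelatedSentenceFinder
        return syntScore > RELEVANCE_THRESHOLD || measScore > 0.5;
    }
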
 	/**
 	 * Takes single search result for an entity which is the subject of the essay
-	 * to be written and forms essey sentences from the title, abstract, and
+	 * to be written and forms essay sentences from the title, abstract, and
 	 * possibly original page
 	 * 
-	 * @param HitBase
+	 * @param item The HitBase search result
+	 * @param originalSentence The seed for the essay to be written
@@ -408,13 +376,11 @@ public class ContentGenerator /*extends RelatedSentenceFinder*/ {
 	 */
 	public HitBase buildParagraphOfGeneratedText(HitBase item,
 			String originalSentence, List<String> sentsAll) {
-		List<Fragment> results = new ArrayList<Fragment>() ;
+		List<Fragment> results = new ArrayList<>() ;
 		
 		Triple<List<String>, String, String[]> fragmentExtractionResults = formCandidateFragmentsForPage(item, originalSentence, sentsAll);
 
-		List<String> allFragms = (List<String>)fragmentExtractionResults.getFirst();
-		String downloadedPage = (String)fragmentExtractionResults.getSecond();
-		String[] sents = (String[])fragmentExtractionResults.getThird();
+		List<String> allFragms = fragmentExtractionResults.getFirst();
 
 		for (String fragment : allFragms) {
 			String[] candidateSentences = formCandidateSentences(fragment, fragmentExtractionResults);
@@ -423,45 +389,38 @@ public class ContentGenerator /*extends RelatedSentenceFinder*/ {
 			Fragment res = verifyCandidateSentencesAndFormParagraph(candidateSentences, item, fragment, originalSentence, sentsAll);
 			if (res!=null)
 				results.add(res);
-
 		}
 		
 		item.setFragments(results );
 		return item;
 	}
 
+	public static void main(String[] args) {
+		ContentGenerator f = new ContentGenerator();
 
+		List<HitBase> hits;
+		try {
+			// uncomment the sentence you would like to serve as a seed sentence for
+			// content generation for an event description
+
+			hits = f.generateContentAbout("Albert Einstein"
+					// "Britney Spears - The Femme Fatale Tour"
+					// "Rush Time Machine",
+					// "Blue Man Group" ,
+					// "Belly Dance With Zaharah",
+					// "Hollander Musicology Lecture: Danielle Fosler-Lussier, Guest Lecturer",
+					// "Jazz Master and arguably the most famous jazz musician alive, trumpeter Wynton Marsalis",
+					);
+			System.out.println(HitBase.toString(hits));
+			System.out.println(HitBase.toResultantString(hits));
+			// WordFileGenerator.createWordDoc("Essey about Albert Einstein",
+			// hits.get(0).getTitle(), hits);
 
+		} catch (Exception e) {
+			e.printStackTrace();
+		}
 
-public static void main(String[] args) {
-	ContentGenerator f = new ContentGenerator();
-
-	List<HitBase> hits = null;
-	try {
-		// uncomment the sentence you would like to serve as a seed sentence for
-		// content generation for an event description
-
-		// uncomment the sentence you would like to serve as a seed sentence for
-		// content generation for an event description
-		hits = f.generateContentAbout("Albert Einstein"
-				// "Britney Spears - The Femme Fatale Tour"
-				// "Rush Time Machine",
-				// "Blue Man Group" ,
-				// "Belly Dance With Zaharah",
-				// "Hollander Musicology Lecture: Danielle Fosler-Lussier, Guest Lecturer",
-				// "Jazz Master and arguably the most famous jazz musician alive, trumpeter Wynton Marsalis",
-				);
-		System.out.println(HitBase.toString(hits));
-		System.out.println(HitBase.toResultantString(hits));
-		// WordFileGenerator.createWordDoc("Essey about Albert Einstein",
-		// hits.get(0).getTitle(), hits);
-
-	} catch (Exception e) {
-		e.printStackTrace();
 	}
-
-}
-
-
-
 }
\ No newline at end of file
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/ContentGeneratorSupport.java b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/ContentGeneratorSupport.java
index a017105..765c64d 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/ContentGeneratorSupport.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/ContentGeneratorSupport.java
@@ -140,7 +140,7 @@ public class ContentGeneratorSupport {
 				continue;
 			sentsClean.add(s);
 		}
-		return (String[]) sentsClean.toArray(new String[0]);
+		return sentsClean.toArray(new String[0]);
 	}
 
 	public static String cleanSpacesInCleanedHTMLpage(String pageContent){ //was 4 spaces 
@@ -386,7 +386,7 @@ public class ContentGeneratorSupport {
 			if (sentenceOrMultSent==null || sentenceOrMultSent.length()<20)
 				continue;
 			if (GeneratedSentenceProcessor.acceptableMinedSentence(sentenceOrMultSent)==null){
-				System.out.println("Rejected sentence by GeneratedSentenceProcessor.acceptableMinedSentence = "+sentenceOrMultSent);
+				// System.out.println("Rejected sentence by GeneratedSentenceProcessor.acceptableMinedSentence = "+sentenceOrMultSent);
 				continue;
 			}
 			// aaa. hhh hhh.  kkk . kkk ll hhh. lll kkk n.
@@ -414,7 +414,7 @@ public class ContentGeneratorSupport {
 				sentsClean.add(s);
 			}
 		}
-		return (String[]) sentsClean.toArray(new String[0]);
+		return sentsClean.toArray(new String[0]);
 	}	
 
 	protected String[] cleanSplitListOfSentsFirstSplit(String[] longestSents){
@@ -457,7 +457,7 @@ public class ContentGeneratorSupport {
 				sentsClean.add(sentence);
 			}
 		}
-		return (String[]) sentsClean.toArray(new String[0]);
+		return sentsClean.toArray(new String[0]);
 	}
 
 	public static String getPortionOfTitleWithoutDelimiters(String title){
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/GeneratedSentenceProcessor.java b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/GeneratedSentenceProcessor.java
index 17421fd..1864d3a 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/GeneratedSentenceProcessor.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/GeneratedSentenceProcessor.java
@@ -20,10 +20,10 @@ package opennlp.tools.similarity.apps;
 import java.util.Arrays;
 import java.util.List;
 
-import opennlp.tools.similarity.apps.utils.Utils;
-
 import org.apache.commons.lang.StringUtils;
 
+import opennlp.tools.similarity.apps.utils.Utils;
+
 public class GeneratedSentenceProcessor {
 
 	public static String[] occurs = new String[]{ "click here", "wikipedia", "retrieved", "isbn",
@@ -114,8 +114,6 @@ public class GeneratedSentenceProcessor {
 		if (isProhibitiveWordsOccurOrStartWith(sentTry))
 			return null;
 
-		
-
 		// count symbols indicating wrong parts of page to mine for text
 		// if short and contains too many symbols indicating wrong area: reject
 		String sentWrongSym = sentTry.replace(">", "&&&").replace("�", "&&&")
@@ -152,7 +150,7 @@ public class GeneratedSentenceProcessor {
 
 	public static String processSentence(String pageSentence) {
 		if (acceptableMinedSentence(pageSentence)==null){
-			System.out.println("Rejected sentence by GenerSentProc.processSentence.acceptableMinedSentence()");
+			// System.out.println("Rejected sentence by GeneratedSentenceProcessor.processSentence.acceptableMinedSentence()");
 			return "";
 		}
 		if (pageSentence == null)
@@ -211,9 +209,6 @@ public class GeneratedSentenceProcessor {
 				return true;
 			}
 		}
-
-
-
 		//  || sentTry.endsWith("the")
 		//  || sentTry.endsWith("the.") || sentTry.startsWith("below") 
 		return false;
@@ -224,7 +219,6 @@ public class GeneratedSentenceProcessor {
 		String sentence = "Accepted sentence: Educational. Video. About Us menu. Home. Nobel Prizes and Laureates. Nobel Prizes and Laureates. Physics Prize. Chemistry Prize. Medicine Prize. Literature Prize. Peace Prize. Prize in Economic Sciences. Quick Facts. Nomination. Nomination. Physics Prize. Chemistry Prize. Medicine Prize. Literature Prize. Peace Prize. Prize in Economic Sciences. Nomination Archive. Ceremonies. Ceremonies. Ceremony Archive. Nobel Banquet Menus. Nobel Banquet Dress C [...]
 		
 		String res = GeneratedSentenceProcessor.acceptableMinedSentence(sentence);
-
 		String para = "About Albert Einstein     15 External links  16 Credits         Youth and schooling  Albert Einstein was born into a Jewish family";
 		para = "inventions of albert einstein                            what was albert einsteins invention                            invention of einstein                            what were albert einsteins inventions ";
 
@@ -236,12 +230,13 @@ public class GeneratedSentenceProcessor {
 		System.exit(0);
 		RelatedSentenceFinder f = new RelatedSentenceFinder();
 		try {
-			List<HitBase> hits = f
-					.findRelatedOpinionsForSentence(
+			List<HitBase> hits = f.findRelatedOpinionsForSentence(
 							"Give me a break, there is no reason why you can't retire in ten years if you had been a rational investor and not a crazy trader",
-							Arrays
-							.asList(new String[] { "Give me a break there is no reason why you can't retire in ten years if you had been a rational investor and not a crazy trader. For example you went to cash in 2008 and stay in cash until now you made nothing. Whereas people who rode out the storm are doing fine so let's quit focusing on the loser who think they are so smart and went to 100% cash and are wondering what happen. Its a market that always moves unlike your mattress.", }));
-			StringBuffer buf = new StringBuffer();
+							Arrays.asList("Give me a break there is no reason why you can't retire in ten years if you had been a rational investor and not a crazy trader. " +
+											"For example you went to cash in 2008 and stay in cash until now you made nothing. " +
+											"Whereas people who rode out the storm are doing fine so let's quit focusing on the loser who think they are so smart and went to 100% cash and are wondering what happen. " +
+											"Its a market that always moves unlike your mattress."));
+			StringBuilder buf = new StringBuilder();
 
 			for (HitBase h : hits) {
 				List<Fragment> frags = h.getFragments();
@@ -252,7 +247,6 @@ public class GeneratedSentenceProcessor {
 			}
 
 		} catch (Exception e) {
-			// TODO Auto-generated catch block
 			e.printStackTrace();
 		}
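
The filtering in acceptableMinedSentence starts from the `occurs` list at the top of this file: a mined sentence containing one of those boilerplate markers is rejected outright. A self-contained sketch in the spirit of isProhibitiveWordsOccurOrStartWith (judging by its name, the real method also rejects sentences that merely start with a marker):

    // Subset of the `occurs` markers from this file; the real list is longer.
    String[] occurs = { "click here", "wikipedia", "retrieved", "isbn" };
    String sentTry = "Retrieved from Wikipedia; click here for more".toLowerCase();
    boolean reject = Arrays.stream(occurs).anyMatch(sentTry::contains);  // true
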
 
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/RelatedSentenceFinder.java b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/RelatedSentenceFinder.java
index 91f6fda..29d0eb6 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/RelatedSentenceFinder.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/RelatedSentenceFinder.java
@@ -19,7 +19,6 @@ package opennlp.tools.similarity.apps;
 
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashSet;
 import java.util.List;
@@ -27,9 +26,6 @@ import java.util.Set;
 import java.util.logging.Logger;
 
 import opennlp.tools.parse_thicket.Triple;
-import opennlp.tools.parse_thicket.apps.SnippetToParagraph;
-import opennlp.tools.parse_thicket.apps.SnippetToParagraph.TextChunk;
-import opennlp.tools.parse_thicket.apps.SnippetToParagraph.TextChunkComparable;
 import opennlp.tools.similarity.apps.utils.PageFetcher;
 import opennlp.tools.similarity.apps.utils.StringDistanceMeasurer;
 import opennlp.tools.similarity.apps.utils.Utils;
@@ -52,11 +48,9 @@ import org.apache.commons.lang.StringUtils;
  */
 
 public class RelatedSentenceFinder {
-	private static Logger LOG = Logger
-			.getLogger("opennlp.tools.similarity.apps.RelatedSentenceFinder");
+	private static final Logger LOG = Logger.getLogger("opennlp.tools.similarity.apps.RelatedSentenceFinder");
 	PageFetcher pFetcher = new PageFetcher();
-	ParserChunker2MatcherProcessor sm = ParserChunker2MatcherProcessor
-			.getInstance();
+	ParserChunker2MatcherProcessor sm = ParserChunker2MatcherProcessor.getInstance();
 	protected ParseTreeChunkListScorer parseTreeChunkListScorer = new ParseTreeChunkListScorer();
 	protected ParseTreeChunk parseTreeChunk = new ParseTreeChunk();
 	protected static StringDistanceMeasurer stringDistanceMeasurer = new StringDistanceMeasurer();
@@ -64,10 +58,10 @@ public class RelatedSentenceFinder {
 	protected int MAX_STEPS = 1;
 	protected int MAX_SEARCH_RESULTS = 1;
 	protected float RELEVANCE_THRESHOLD = 1.1f;
-	protected Set<String> visitedURLs = new HashSet();
+	protected Set<String> visitedURLs = new HashSet<>();
 
 	// used to indicate that a sentence is an opinion, so more appropriate
-	static List<String> MENTAL_VERBS = new ArrayList<String>(
+	static List<String> MENTAL_VERBS = new ArrayList<>(
 			Arrays.asList(new String[] { "want", "know", "believe", "appeal", "ask",
 					"accept", "agree", "allow", "appeal", "ask", "assume", "believe",
 					"check", "confirm", "convince", "deny", "disagree", "explain",
@@ -86,22 +80,20 @@ public class RelatedSentenceFinder {
 	int generateContentAboutIter = 0;
 
 	public RelatedSentenceFinder() {
-		// TODO Auto-generated constructor stub
+
 	}
+
 	public void setLang(String lang) {
 		yrunner.setLang(lang);
 
 	}
-	public List<HitBase> findRelatedOpinionsForSentenceFastAndDummy(String word,
-			List<String> sents) throws Exception {
+	public List<HitBase> findRelatedOpinionsForSentenceFastAndDummy(String word, List<String> sents) {
 
-		List<HitBase> searchResult = yrunner.runSearch(word, 100);
-		return searchResult;
+		return yrunner.runSearch(word, 100);
 	}
 
-	public List<HitBase> findRelatedOpinionsForSentence(String sentence,
-			List<String> sents) throws Exception {
-		List<HitBase> opinionSentencesToAdd = new ArrayList<HitBase>();
+	public List<HitBase> findRelatedOpinionsForSentence(String sentence, List<String> sents) {
+		List<HitBase> opinionSentencesToAdd = new ArrayList<>();
 		System.out.println(" \n\n=== Sentence  = " + sentence);
 		List<String> nounPhraseQueries = buildSearchEngineQueryFromSentence(sentence);
 
@@ -124,8 +116,7 @@ public class RelatedSentenceFinder {
 			}
 		}
 
-		opinionSentencesToAdd = removeDuplicatesFromResultantHits(opinionSentencesToAdd);
-		return opinionSentencesToAdd;
+		return removeDuplicatesFromResultantHits(opinionSentencesToAdd);
 	}
 
 	/**
@@ -133,7 +124,7 @@ public class RelatedSentenceFinder {
 	 * group, or other entity name and produces a list of text fragments by web
 	 * mining for <br>
 	 * 
-	 * @param String
+	 * @param sentence
 	 *          entity name
 	 * @return List<HitBase> of text fragment structures which contain approved
 	 *         (in terms of relevance) mined sentences, as well as original search
@@ -141,9 +132,8 @@ public class RelatedSentenceFinder {
 	 */
 
 	public List<HitBase> generateContentAbout(String sentence) throws Exception {
-		List<HitBase> opinionSentencesToAdd = new ArrayList<HitBase>();
+		List<HitBase> opinionSentencesToAdd = new ArrayList<>();
 		System.out.println(" \n=== Entity to write about = " + sentence);
-		List<String> nounPhraseQueries = new ArrayList<String>();
 
 		String[] extraKeywords = new StoryDiscourseNavigator().obtainAdditionalKeywordsForAnEntity(sentence);
 		System.out.println("Found  extraKeywords "+ Arrays.asList(extraKeywords));
@@ -184,12 +174,10 @@ public class RelatedSentenceFinder {
 				opinionSentencesToAdd =  generateContentAbout(discoveredSimilarTopic);
 			}
 		} catch (Exception e) {
-			// TODO Auto-generated catch block
 			e.printStackTrace();
 		}
 
-		opinionSentencesToAdd = removeDuplicatesFromResultantHits(opinionSentencesToAdd);
-		return opinionSentencesToAdd;
+		return removeDuplicatesFromResultantHits(opinionSentencesToAdd);
 	}
 
 	/**
@@ -199,35 +187,31 @@ public class RelatedSentenceFinder {
 	 * general (irrelevant search results) or too specific (too few search
 	 * results)
 	 * 
-	 * @param String
+	 * @param sentence
 	 *          input sentence to form queries
 	 * @return List<String> of search expressions
 	 */
 	public static List<String> buildSearchEngineQueryFromSentence(String sentence) {
-		ParseTreeChunk matcher = new ParseTreeChunk();
-		ParserChunker2MatcherProcessor pos = ParserChunker2MatcherProcessor
-				.getInstance();
-		List<List<ParseTreeChunk>> sent1GrpLst = null;
-
-		List<ParseTreeChunk> nPhrases = pos
-				.formGroupedPhrasesFromChunksForSentence(sentence).get(0);
-		List<String> queryArrayStr = new ArrayList<String>();
+		ParserChunker2MatcherProcessor pos = ParserChunker2MatcherProcessor.getInstance();
+
+		List<ParseTreeChunk> nPhrases = pos.formGroupedPhrasesFromChunksForSentence(sentence).get(0);
+		List<String> queryArrayStr = new ArrayList<>();
 		for (ParseTreeChunk ch : nPhrases) {
-			String query = "";
+			StringBuilder query = new StringBuilder();
 			int size = ch.getLemmas().size();
 
 			for (int i = 0; i < size; i++) {
 				if (ch.getPOSs().get(i).startsWith("N")
 						|| ch.getPOSs().get(i).startsWith("J")) {
-					query += ch.getLemmas().get(i) + " ";
+					query.append(ch.getLemmas().get(i)).append(" ");
 				}
 			}
-			query = query.trim();
-			int len = query.split(" ").length;
+			query = new StringBuilder(query.toString().trim());
+			int len = query.toString().split(" ").length;
 			if (len < 2 || len > 5)
 				continue;
 			if (len < 4) { // every word should start with capital
-				String[] qs = query.split(" ");
+				String[] qs = query.toString().split(" ");
 				boolean bAccept = true;
 				for (String w : qs) {
 				if (w.toLowerCase().equals(w)) // if only two words then
@@ -239,33 +223,33 @@ public class RelatedSentenceFinder {
 					continue;
 			}
 
-			query = query.trim().replace(" ", " +");
-			query = " +" + query;
+			query = new StringBuilder(query.toString().trim().replace(" ", " +"));
+			query.insert(0, " +");
 
-			queryArrayStr.add(query);
+			queryArrayStr.add(query.toString());
 
 		}
 		if (queryArrayStr.size() < 1) { // release constraints on NP down to 2
 			// keywords
 			for (ParseTreeChunk ch : nPhrases) {
-				String query = "";
+				StringBuilder query = new StringBuilder();
 				int size = ch.getLemmas().size();
 
 				for (int i = 0; i < size; i++) {
 					if (ch.getPOSs().get(i).startsWith("N")
 							|| ch.getPOSs().get(i).startsWith("J")) {
-						query += ch.getLemmas().get(i) + " ";
+						query.append(ch.getLemmas().get(i)).append(" ");
 					}
 				}
-				query = query.trim();
-				int len = query.split(" ").length;
+				query = new StringBuilder(query.toString().trim());
+				int len = query.toString().split(" ").length;
 				if (len < 2)
 					continue;
 
-				query = query.trim().replace(" ", " +");
-				query = " +" + query;
+				query = new StringBuilder(query.toString().trim().replace(" ", " +"));
+				query.insert(0, " +");
 
-				queryArrayStr.add(query);
+				queryArrayStr.add(query.toString());
 
 			}
 		}
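
In both loops above, each kept noun phrase becomes a conjunctive web query: the lemmas are joined, every inner space becomes " +", and a leading "+" makes the first keyword mandatory as well. For example:

    String query = "famous jazz musician";
    query = query.trim().replace(" ", " +");
    query = " +" + query;   // -> " +famous +jazz +musician"
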
@@ -281,17 +265,16 @@ public class RelatedSentenceFinder {
 	 * remove dupes from queries to easy cleaning dupes and repetitive search
 	 * afterwards
 	 * 
-	 * @param List
-	 *          <String> of sentences (search queries, or search results
-	 *          abstracts, or titles
+	 * @param hits List<String> of sentences (search queries, or search results
+	 *             abstracts, or titles)
 	 * @return List<String> of sentences where dupes are removed
 	 */
 	public static List<String> removeDuplicatesFromQueries(List<String> hits) {
 		StringDistanceMeasurer meas = new StringDistanceMeasurer();
 		double dupeThresh = 0.8; // if more similar, then considered dupes was
 		// 0.7
-		List<Integer> idsToRemove = new ArrayList<Integer>();
-		List<String> hitsDedup = new ArrayList<String>();
+		List<Integer> idsToRemove = new ArrayList<>();
+		List<String> hitsDedup = new ArrayList<>();
 		try {
 			for (int i = 0; i < hits.size(); i++)
 				for (int j = i + 1; j < hits.size(); j++) {
@@ -326,8 +309,7 @@ public class RelatedSentenceFinder {
 	/**
 	 * remove dupes from search results
 	 * 
-	 * @param List
-	 *          <HitBase> of search results objects
+	 * @param hits List<HitBase> of search result objects
+	 * @return List<HitBase> of search result objects where dupes are removed
 	 */
 	public static List<HitBase> removeDuplicatesFromResultantHits(
@@ -335,20 +317,18 @@ public class RelatedSentenceFinder {
 		StringDistanceMeasurer meas = new StringDistanceMeasurer();
 		double dupeThresh = 0.7; // if more similar, then considered dupes (was 0.8)
-		List<Integer> idsToRemove = new ArrayList<Integer>();
-		List<HitBase> hitsDedup = new ArrayList<HitBase>();
 		try {
 			for (int i = 0; i < hits.size(); i++)
 				for (int j = i + 1; j < hits.size(); j++) {
 					HitBase hit2 = hits.get(j);
 					List<Fragment> fragmList1 = hits.get(i).getFragments();
 					List<Fragment> fragmList2 = hits.get(j).getFragments();
-					List<Fragment> fragmList2Results = new ArrayList<Fragment>(fragmList2);
+					List<Fragment> fragmList2Results = new ArrayList<>(fragmList2);
 					for (Fragment f1 : fragmList1)
 						for (Fragment f2 : fragmList2) {
 							String sf1 = f1.getResultText();
 							String sf2 = f2.getResultText();
-							if (StringUtils.isEmpty(sf1) || StringUtils.isEmpty(sf1))
+							if (StringUtils.isEmpty(sf1) || StringUtils.isEmpty(sf2))
 								continue;
 							if (meas.measureStringDistance(sf1, sf2) > dupeThresh) {
 								fragmList2Results.remove(f2);
@@ -368,13 +348,11 @@ public class RelatedSentenceFinder {
 
 	/**
 	 * Takes single search result for an entity which is the subject of the essay
-	 * to be written and forms essey sentences from the title, abstract, and
+	 * to be written and forms essay sentences from the title, abstract, and
 	 * possibly original page
 	 * 
-	 * @param HitBase
-	 *          item : search result
-	 * @param originalSentence
-	 *          : seed for the essay to be written
+	 * @param item The HitBase search result
+	 * @param originalSentence The seed for the essay to be written
+	 * @param sentsAll The List<String> of other sentences in the seed if it is
+	 *                 multi-sentence
@@ -384,15 +362,15 @@ public class RelatedSentenceFinder {
 	public HitBase augmentWithMinedSentencesAndVerifyRelevance(HitBase item,
 			String originalSentence, List<String> sentsAll) {
 		if (sentsAll == null)
-			sentsAll = new ArrayList<String>();
+			sentsAll = new ArrayList<>();
 		// put orig sentence in structure
-		List<String> origs = new ArrayList<String>();
+		List<String> origs = new ArrayList<>();
 		origs.add(originalSentence);
 		item.setOriginalSentences(origs);
 		String title = item.getTitle().replace("<b>", " ").replace("</b>", " ")
 				.replace("  ", " ").replace("  ", " ");
 		// generation results for this sentence
-		List<Fragment> result = new ArrayList<Fragment>();
+		List<Fragment> result = new ArrayList<>();
 		// form plain text from snippet
 		String snapshot = item.getAbstractText().replace("<b>", " ")
 				.replace("</b>", " ").replace("  ", " ").replace("  ", " ");
@@ -403,8 +381,7 @@ public class RelatedSentenceFinder {
 		String snapshotMarked = snapshot.replace("...",
 				" _should_find_orig_ . _should_find_orig_");
 		String[] fragments = sm.splitSentences(snapshotMarked);
-		List<String> allFragms = new ArrayList<String>();
-		allFragms.addAll(Arrays.asList(fragments));
+		List<String> allFragms = new ArrayList<>(Arrays.asList(fragments));
 
 		String[] sents = null;
 		String downloadedPage = null;
@@ -427,21 +404,17 @@ public class RelatedSentenceFinder {
 				}
 			}
 		} catch (Exception e) {
-			// TODO Auto-generated catch block
-			// e.printStackTrace();
-			System.err
-			.println("Problem downloading  the page and splitting into sentences");
+			System.err.println("Problem downloading  the page and splitting into sentences");
 			return item;
 		}
 
 		for (String fragment : allFragms) {
-			String followSent = "";
+			StringBuilder followSent = new StringBuilder();
 			if (fragment.length() < 50)
 				continue;
 			String pageSentence = "";
 			// try to find original sentence from webpage
-			if (fragment.indexOf("_should_find_orig_") > -1 && sents != null
-					&& sents.length > 0){
+			if (fragment.contains("_should_find_orig_") && sents != null && sents.length > 0){
 				try { 
 					// first try sorted sentences from page by length approach
 					String[] sentsSortedByLength = extractSentencesFromPage(downloadedPage);
@@ -451,7 +424,6 @@ public class RelatedSentenceFinder {
 						mainAndFollowSent = getFullOriginalSentenceFromWebpageBySnippetFragment(
 								fragment.replace("_should_find_orig_", ""), sentsSortedByLength);
 					} catch (Exception e) {
-						// TODO Auto-generated catch block
 						e.printStackTrace();
 					}
 					// if the above gives null then try to match all sentences from snippet fragment
@@ -464,12 +436,10 @@ public class RelatedSentenceFinder {
 						pageSentence = mainAndFollowSent[0];
 						for(int i = 1; i< mainAndFollowSent.length; i++)
 							if (mainAndFollowSent[i]!=null)
-								followSent+= mainAndFollowSent[i];
+								followSent.append(mainAndFollowSent[i]);
 					}
 
 				} catch (Exception e) {
-
-					// TODO Auto-generated catch block
 					e.printStackTrace();
 				}
 			}
@@ -488,7 +458,7 @@ public class RelatedSentenceFinder {
 
 				try { // get score from syntactic match between sentence in
 					// original text and mined sentence
-					double measScore = 0.0, syntScore = 0.0, mentalScore = 0.0;
+					double measScore, syntScore, mentalScore = 0.0;
 
 					SentencePairMatchResult matchRes = sm.assessRelevance(pageSentence
 							+ " " + title, originalSentence);
@@ -535,7 +505,7 @@ public class RelatedSentenceFinder {
 						if (pageSentenceProc != null) {
 							pageSentenceProc = GeneratedSentenceProcessor
 									.processSentence(pageSentenceProc);
-							followSent = GeneratedSentenceProcessor.processSentence(followSent);
+							followSent = new StringBuilder(GeneratedSentenceProcessor.processSentence(followSent.toString()));
 							if (followSent.length() > 0) {
 								pageSentenceProc += " "+ followSent;
 							}
@@ -570,20 +540,21 @@ public class RelatedSentenceFinder {
 	
 
 	// given a fragment from snippet, finds an original sentence at a webpage by
-	// optimizing alignmemt score
+	// optimizing alignment score
 	public static String[] getFullOriginalSentenceFromWebpageBySnippetFragment(
 			String fragment, String[] sents) {
 		if (fragment.trim().length() < 15)
 			return null;
 
 		StringDistanceMeasurer meas = new StringDistanceMeasurer();
-		Double dist = 0.0;
-		String result = null, followSent = "";
+		double dist = 0.0;
+		String result = null;
+		StringBuilder followSent = new StringBuilder();
 		for (int i = 0; i < sents.length; i++) {
 			String s = sents[i];
 			if (s == null || s.length() < 30)
 				continue;
-			Double distCurr = meas.measureStringDistance(s, fragment);
+			double distCurr = meas.measureStringDistance(s, fragment);
 			if (distCurr > dist && distCurr > 0.4) {
 				result = s;
 				dist = distCurr;
@@ -591,23 +562,22 @@ public class RelatedSentenceFinder {
 					if (i < sents.length - 1 && sents[i + 1].length() > 60) { 
 						String f1 = GeneratedSentenceProcessor.acceptableMinedSentence(sents[i+1]);
 						if (f1!=null){
-							followSent = f1;
+							followSent = new StringBuilder(f1);
 						}
 					}
 
 					if (i < sents.length - 2 && sents[i + 2].length() > 60) {
 						String f2 = GeneratedSentenceProcessor.acceptableMinedSentence(sents[i+2]);
 						if (f2!=null){
-							followSent += " "+f2;
+							followSent.append(" ").append(f2);
 						}
 					}
 				} catch (Exception e) {
-					// TODO Auto-generated catch block
 					e.printStackTrace();
 				}
 			}
 		}
-		return new String[] { result, followSent };
+		return new String[] { result, followSent.toString()};
 	}
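
So getFullOriginalSentenceFromWebpageBySnippetFragment keeps the page sentence with the best string-distance score against the snippet fragment, subject to a 0.4 floor, and appends up to two follow-up sentences longer than 60 characters. The core selection reduces to:

    double dist = 0.0;
    String result = null;
    for (String s : sents) {
        if (s == null || s.length() < 30) continue;   // ignore short junk lines
        double distCurr = meas.measureStringDistance(s, fragment);
        if (distCurr > dist && distCurr > 0.4) { result = s; dist = distCurr; }
    }
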
 
 	// given a fragment from snippet, finds an original sentence at a webpage by
@@ -618,13 +588,13 @@ public class RelatedSentenceFinder {
 			return null;
 		int bestSentIndex = -1;
 		StringDistanceMeasurer meas = new StringDistanceMeasurer();
-		Double distBest = 10.0; // + sup
+		double distBest = 10.0; // + sup
 		String result = null, followSent = null;
 		for (int i = 0; i < sents.length; i++) {
 			String s = sents[i];
 			if (s == null || s.length() < 30)
 				continue;
-			Double distCurr = meas.measureStringDistance(s, fragment);
+			double distCurr = meas.measureStringDistance(s, fragment);
 			if (distCurr > distBest) {
 				distBest = distCurr;
 				bestSentIndex = i;
@@ -648,14 +618,11 @@ public class RelatedSentenceFinder {
 	{
 
 		int maxSentsFromPage= 100;
-		List<String[]> results = new ArrayList<String[]>();
-
-		//String pageOrigHTML = pFetcher.fetchOrigHTML(url);
 
 		downloadedPage= downloadedPage.replace("     ", "&");
 		downloadedPage = downloadedPage.replaceAll("(?:&)+", "#");
 		String[] sents = downloadedPage.split("#");
-		List<TextChunk> sentsList = new ArrayList<TextChunk>();
+		List<TextChunk> sentsList = new ArrayList<>();
 		for(String s: sents){
 			s = ContentGeneratorSupport.cleanSpacesInCleanedHTMLpage(s);
 		/*	s = s.trim().replace("  ", ". ").replace("..", ".").replace(". . .", " ")
@@ -663,8 +630,8 @@ public class RelatedSentenceFinder {
 					replace (". .",".").trim(); */
 			sentsList.add(new TextChunk(s, s.length()));
 		}
+		sentsList.sort(new TextChunkComparable());
 
-		Collections.sort(sentsList, new TextChunkComparable());
 		String[] longestSents = new String[maxSentsFromPage];
 		int j=0;
 		int initIndex = sentsList.size()-1 -maxSentsFromPage;
@@ -683,7 +650,7 @@ public class RelatedSentenceFinder {
 		return sents;
 	}
 
-	public class TextChunk {
+	public static class TextChunk {
 		public TextChunk(String s, int length) {
 			this.text = s;
 			this.len = length;
@@ -692,23 +659,17 @@ public class RelatedSentenceFinder {
 		public int len;
 	}
 
-	public class TextChunkComparable implements Comparator<TextChunk>
-	{
-		public int compare(TextChunk ch1, TextChunk ch2)
-		{
-			if (ch1.len>ch2.len)
-				return 1;
-			else if (ch1.len<ch2.len)
-				return  -1;
-			else return 0;
-
+	public static class TextChunkComparable implements Comparator<TextChunk> {
+		@Override
+		public int compare(TextChunk ch1, TextChunk ch2) {
+			return Integer.compare(ch1.len, ch2.len);
 		}
 	}
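
Integer.compare yields the same ordering as the if/else ladder it replaces, and unlike the common `ch1.len - ch2.len` shortcut it cannot overflow:

    int a = Integer.MAX_VALUE, b = -1;
    System.out.println(a - b);                  // -2147483648: overflow flips the sign
    System.out.println(Integer.compare(a, b));  // 1, as expected
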
 
 	protected String[] cleanSplitListOfSents(String[] longestSents){
 		float minFragmentLength = 40, minFragmentLengthSpace=4;
 
-		List<String> sentsClean = new ArrayList<String>();
+		List<String> sentsClean = new ArrayList<>();
 		for (String sentenceOrMultSent : longestSents)
 		{
 			if (sentenceOrMultSent==null || sentenceOrMultSent.length()<20)
@@ -742,32 +703,27 @@ public class RelatedSentenceFinder {
 				sentsClean.add(s);
 			}
 		}
-		return (String[]) sentsClean.toArray(new String[0]);
+		return sentsClean.toArray(new String[0]);
 	}
 
 	public Triple<List<String>, String, String[]> formCandidateFragmentsForPage(HitBase item, String originalSentence, List<String> sentsAll){
-		if (sentsAll == null)
-			sentsAll = new ArrayList<String>();
 		// put orig sentence in structure
-		List<String> origs = new ArrayList<String>();
+		List<String> origs = new ArrayList<>();
 		origs.add(originalSentence);
 		item.setOriginalSentences(origs);
 		String title = item.getTitle().replace("<b>", " ").replace("</b>", " ")
 				.replace("  ", " ").replace("  ", " ");
 		// generation results for this sentence
-		List<Fragment> result = new ArrayList<Fragment>();
 		// form plain text from snippet
 		String snapshot = item.getAbstractText().replace("<b>", " ")
 				.replace("</b>", " ").replace("  ", " ").replace("  ", " ");
 
-
 		// fix a template expression which can be substituted by original if
 		// relevant
 		String snapshotMarked = snapshot.replace("...",
 				" _should_find_orig_ . _should_find_orig_");
 		String[] fragments = sm.splitSentences(snapshotMarked);
-		List<String> allFragms = new ArrayList<String>();
-		allFragms.addAll(Arrays.asList(fragments));
+		List<String> allFragms = new ArrayList<>(Arrays.asList(fragments));
 
 		String[] sents = null;
 		String downloadedPage = null;
@@ -791,28 +747,23 @@ public class RelatedSentenceFinder {
 				}
 			}
 		} catch (Exception e) {
-			// TODO Auto-generated catch block
-			// e.printStackTrace();
-			System.err
-			.println("Problem downloading  the page and splitting into sentences");
-			return new Triple(allFragms, downloadedPage, sents);
+			System.err.println("Problem downloading  the page and splitting into sentences");
+			return new Triple<>(allFragms, downloadedPage, sents);
 		}
-		return new Triple(allFragms, downloadedPage, sents);
+		return new Triple<>(allFragms, downloadedPage, sents);
 	}
 
 	String[] formCandidateSentences(String fragment, Triple<List<String>, String, String[]> fragmentExtractionResults){
 		String[] mainAndFollowSent = null;
 
-		List<String> allFragms = (List<String>)fragmentExtractionResults.getFirst();
-		String downloadedPage = (String)fragmentExtractionResults.getSecond();
-		String[] sents = (String[])fragmentExtractionResults.getThird();
+		String downloadedPage = fragmentExtractionResults.getSecond();
+		String[] sents = fragmentExtractionResults.getThird();
 
-		String followSent = null;
 		if (fragment.length() < 50)
 			return null;
 		String pageSentence = "";
 		// try to find original sentence from webpage
-		if (fragment.indexOf("_should_find_orig_") > -1 && sents != null
+		if (fragment.contains("_should_find_orig_") && sents != null
 				&& sents.length > 0){
 			try { 
 				// first try sorted sentences from page by length approach
@@ -823,7 +774,6 @@ public class RelatedSentenceFinder {
 					mainAndFollowSent = getFullOriginalSentenceFromWebpageBySnippetFragment(
 							fragment.replace("_should_find_orig_", ""), sentsSortedByLength);
 				} catch (Exception e) {
-					// TODO Auto-generated catch block
 					e.printStackTrace();
 				}
 				// if the above gives null then try to match all sentences from snippet fragment
@@ -834,8 +784,6 @@ public class RelatedSentenceFinder {
 
 
 			} catch (Exception e) {
-
-				// TODO Auto-generated catch block
 				e.printStackTrace();
 			}
 		}
@@ -854,9 +802,9 @@ public class RelatedSentenceFinder {
 		Fragment result = null;	
 
 		String pageSentence = candidateSentences[0];
-		String followSent = "";
+		StringBuilder followSent = new StringBuilder();
 		for(int i = 1; i< candidateSentences.length; i++)
-			followSent+= candidateSentences[i];
+			followSent.append(candidateSentences[i]);
 		String title = item.getTitle();
 
 		// resultant sentence SHOULD NOT be longer than four times the size of
@@ -871,7 +819,7 @@ public class RelatedSentenceFinder {
 
 		try { // get score from syntactic match between sentence in
 			// original text and mined sentence
-			double measScore = 0.0, syntScore = 0.0, mentalScore = 0.0;
+			double measScore, syntScore, mentalScore = 0.0;
 
 			SentencePairMatchResult matchRes = sm.assessRelevance(pageSentence
 					+ " " + title, originalSentence);
@@ -928,7 +876,7 @@ public class RelatedSentenceFinder {
 				if (pageSentenceProc != null) {
 					pageSentenceProc = GeneratedSentenceProcessor
 							.processSentence(pageSentenceProc);
-					followSent = GeneratedSentenceProcessor.processSentence(followSent);
+					followSent = new StringBuilder(GeneratedSentenceProcessor.processSentence(followSent.toString()));
 					if (followSent.length() > 0) {
 						pageSentenceProc += " "+ followSent;
 					}
@@ -960,13 +908,11 @@ public class RelatedSentenceFinder {
 
 public HitBase buildParagraphOfGeneratedText(HitBase item,
 		String originalSentence, List<String> sentsAll) {
-	List<Fragment> results = new ArrayList<Fragment>() ;
+	List<Fragment> results = new ArrayList<>() ;
 	
 	Triple<List<String>, String, String[]> fragmentExtractionResults = formCandidateFragmentsForPage(item, originalSentence, sentsAll);
 
-	List<String> allFragms = (List<String>)fragmentExtractionResults.getFirst();
-	String downloadedPage = (String)fragmentExtractionResults.getSecond();
-	String[] sents = (String[])fragmentExtractionResults.getThird();
+	List<String> allFragms = fragmentExtractionResults.getFirst();
 
 	for (String fragment : allFragms) {
 		String[] candidateSentences = formCandidateSentences(fragment, fragmentExtractionResults);
@@ -988,7 +934,7 @@ public HitBase buildParagraphOfGeneratedText(HitBase item,
 public static void main(String[] args) {
 	RelatedSentenceFinder f = new RelatedSentenceFinder();
 
-	List<HitBase> hits = null;
+	List<HitBase> hits;
 	try {
 		// uncomment the sentence you would like to serve as a seed sentence for
 		// content generation for an event description
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/SearchResultsProcessor.java b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/SearchResultsProcessor.java
index 5c40ce0..215030d 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/SearchResultsProcessor.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/SearchResultsProcessor.java
@@ -27,9 +27,9 @@ import opennlp.tools.textsimilarity.SentencePairMatchResult;
 import opennlp.tools.textsimilarity.chunker2matcher.ParserChunker2MatcherProcessor;
 
 public class SearchResultsProcessor extends BingQueryRunner {
-  private static Logger LOG = Logger
-      .getLogger("opennlp.tools.similarity.apps.SearchResultsProcessor");
-  private ParseTreeChunkListScorer parseTreeChunkListScorer = new ParseTreeChunkListScorer();
+  private static final Logger LOG =
+          Logger.getLogger("opennlp.tools.similarity.apps.SearchResultsProcessor");
+  private final ParseTreeChunkListScorer parseTreeChunkListScorer = new ParseTreeChunkListScorer();
   ParserChunker2MatcherProcessor sm;
   WebSearchEngineResultsScraper scraper = new WebSearchEngineResultsScraper();
 
@@ -53,26 +53,30 @@ public class SearchResultsProcessor extends BingQueryRunner {
           .replace("<br>", "").replace("</br>", "").replace("...", ". ")
           .replace("|", " ").replace(">", " ");
       snapshot += " . " + hit.getTitle();
-      Double score = 0.0;
+      double score = 0.0;
       try {
         SentencePairMatchResult matchRes = sm.assessRelevance(snapshot,
             searchQuery);
         List<List<ParseTreeChunk>> match = matchRes.getMatchResult();
         score = parseTreeChunkListScorer.getParseTreeChunkListScore(match);
-        LOG.finest(score + " | " + snapshot);
+        /*
+          LOG.finest(score + " | " + snapshot);
+         */
       } catch (Exception e) {
-        LOG.severe("Problem processing snapshot " + snapshot);
+        LOG.warning("Problem processing snapshot " + snapshot);
         e.printStackTrace();
       }
       hit.setGenerWithQueryScore(score);
       newHitList.add(hit);
     }
-    Collections.sort(newHitList, new HitBaseComparable());
+    newHitList.sort(new HitBaseComparable());
    
-    LOG.info("\n\n ============= NEW ORDER ================= ");
+    // LOG.info("\n\n ============= NEW ORDER ================= ");
+    /*
     for (HitBase hit : newHitList) {
       LOG.info(hit.toString());
     }
+    */
 
     return newHitList;
   }
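
The rewritten loop scores each hit's snapshot against the query and then lets HitBaseComparable order the list directly, replacing the old Collections.sort call. A hedged usage sketch (the no-arg HitBase constructor and the comparator's descending order are assumptions):

    HitBase h1 = new HitBase(); h1.setGenerWithQueryScore(0.3);   // setter used in this diff
    HitBase h2 = new HitBase(); h2.setGenerWithQueryScore(1.7);
    List<HitBase> newHitList = new ArrayList<>(Arrays.asList(h1, h2));
    newHitList.sort(new HitBaseComparable());   // presumably puts h2 first
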
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/SpeechRecognitionResultsProcessor.java b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/SpeechRecognitionResultsProcessor.java
index 2607eb6..c5daba4 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/SpeechRecognitionResultsProcessor.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/SpeechRecognitionResultsProcessor.java
@@ -18,7 +18,6 @@ package opennlp.tools.similarity.apps;
 
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.List;
 import java.util.logging.Logger;
 
@@ -28,11 +27,11 @@ import opennlp.tools.textsimilarity.SentencePairMatchResult;
 import opennlp.tools.textsimilarity.chunker2matcher.ParserChunker2MatcherProcessor;
 
 public class SpeechRecognitionResultsProcessor /*extends BingWebQueryRunner*/ {
-  private static Logger LOG = Logger
-      .getLogger("opennlp.tools.similarity.apps.SpeechRecognitionResultsProcessor");
-  private ParseTreeChunkListScorer parseTreeChunkListScorer = new ParseTreeChunkListScorer();
-  ParserChunker2MatcherProcessor sm;
-  WebSearchEngineResultsScraper scraper = new WebSearchEngineResultsScraper();
+  private static final Logger LOG =
+          Logger.getLogger("opennlp.tools.similarity.apps.SpeechRecognitionResultsProcessor");
+  private final ParseTreeChunkListScorer parseTreeChunkListScorer = new ParseTreeChunkListScorer();
+  private ParserChunker2MatcherProcessor sm;
+  private final WebSearchEngineResultsScraper scraper = new WebSearchEngineResultsScraper();
 
   /**
    * Gets an expression and tries to find it on the web. If search results are
@@ -60,15 +59,17 @@ public class SpeechRecognitionResultsProcessor /*extends BingWebQueryRunner*/ {
           .replace("<br>", "").replace("</br>", "").replace("...", ". ")
           .replace("|", " ").replace(">", " ");
       snapshot += " . " + hit.getTitle();
-      Double score = 0.0;
+      double score = 0.0;
       try {
         SentencePairMatchResult matchRes = sm.assessRelevance(snapshot,
             searchQuery);
         List<List<ParseTreeChunk>> match = matchRes.getMatchResult();
         score = parseTreeChunkListScorer.getParseTreeChunkListScore(match);
+        /*
         if (score > 1.5) {
           LOG.info(score + " | " + match);
         }
+        */
       } catch (Exception e) {
         LOG.severe("Problem processing snapshot " + snapshot);
         e.printStackTrace();
@@ -97,24 +98,19 @@ public class SpeechRecognitionResultsProcessor /*extends BingWebQueryRunner*/ {
    */
   public List<SentenceMeaningfullnessScore> runSearchAndScoreMeaningfulness(
       List<String> sents) {
-    List<SentenceMeaningfullnessScore> res = new ArrayList<SentenceMeaningfullnessScore>();
+    List<SentenceMeaningfullnessScore> res = new ArrayList<>();
     double bestSentScore = -1;
-    String bestSent = null;
     for (String sentence : sents) {
       try {
         List<HitBase> resultList = scraper.runSearch(sentence);
-        double scoreForSentence = calculateTotalMatchScoreForHits(resultList,
-            sentence);
-        System.out.println("Total meaningfulness score = " + scoreForSentence
-            + " for sentence = " + sentence);
+        double scoreForSentence = calculateTotalMatchScoreForHits(resultList, sentence);
+        // System.out.println("Total meaningfulness score = " + scoreForSentence + " for sentence = " + sentence);
         if (scoreForSentence > bestSentScore) {
           bestSentScore = scoreForSentence;
-          bestSent = sentence;
         }
         res.add(new SentenceMeaningfullnessScore(sentence, scoreForSentence));
       } catch (Exception e) {
-        // e.printStackTrace();
-        LOG.info("No search results for query '" + sentence);
+        LOG.warning("No search results for query '" + sentence);
         e.printStackTrace();
         return null;
       }
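
runSearchAndScoreMeaningfulness ranks competing speech-recognition hypotheses by how well web search results match them; the best-scoring hypothesis is the most plausible transcription. A hedged usage sketch (instantiation details assumed):

    SpeechRecognitionResultsProcessor proc = new SpeechRecognitionResultsProcessor();
    List<SentenceMeaningfullnessScore> scores = proc.runSearchAndScoreMeaningfulness(
            Arrays.asList("remember to buy milk at the store",
                          "remember to bye milk at this tore"));
    // the well-formed hypothesis should come back with the higher score
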
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/YahooAnswersMiner.java b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/YahooAnswersMiner.java
index 50db87c..16144d1 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/YahooAnswersMiner.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/YahooAnswersMiner.java
@@ -19,33 +19,26 @@ package opennlp.tools.similarity.apps;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
-import java.util.logging.Logger;
 
 import opennlp.tools.jsmlearning.ProfileReaderWriter;
 import opennlp.tools.parse_thicket.Triple;
 
-import net.billylieurance.azuresearch.AzureSearchImageQuery;
-import net.billylieurance.azuresearch.AzureSearchImageResult;
 import net.billylieurance.azuresearch.AzureSearchResultSet;
-import net.billylieurance.azuresearch.AzureSearchWebQuery;
 import net.billylieurance.azuresearch.AzureSearchWebResult;
 
 public class YahooAnswersMiner extends BingQueryRunner{
 
-	private static final Logger LOG = Logger
-			.getLogger("opennlp.tools.similarity.apps.YahooAnswersMiner");
 	private int page = 0;
 	private static final int hitsPerPage = 50;
 
 	public List<HitBase> runSearch(String query) {
 		aq.setAppid(BING_KEY);
-		aq.setQuery("site:answers.yahoo.com "+
-				query);		
+		aq.setQuery("site:answers.yahoo.com "+ query);
 		aq.setPerPage(hitsPerPage);
 		aq.setPage(page);
 
 		aq.doQuery();
-		List<HitBase> results = new ArrayList<HitBase> ();
+		List<HitBase> results = new ArrayList<>();
 		AzureSearchResultSet<AzureSearchWebResult> ars = aq.getQueryResult();
 
 		for (AzureSearchWebResult anr : ars){
@@ -60,10 +53,9 @@ public class YahooAnswersMiner extends BingQueryRunner{
 		return results;
 	}
 
-
 	public List<HitBase> runSearch(String query, int totalPages) {
 		int count=0;
-		List<HitBase> results = new ArrayList<HitBase>();
+		List<HitBase> results = new ArrayList<>();
 		while(totalPages>page*hitsPerPage){
 			List<HitBase> res = runSearch(query);
 			results.addAll(res);
@@ -75,7 +67,6 @@ public class YahooAnswersMiner extends BingQueryRunner{
 		return results;
 	}
 
-
 	public static void main(String[] args) {
 		YahooAnswersMiner self = new YahooAnswersMiner();
 		RelatedSentenceFinder extractor = new RelatedSentenceFinder();
@@ -84,27 +75,20 @@ public class YahooAnswersMiner extends BingQueryRunner{
 		List<HitBase> resp = self
 				.runSearch(topic, 150);
 		System.out.print(resp.get(0));
-		List<String[]> data = new ArrayList<String[]>();
-
+		List<String[]> data = new ArrayList<>();
 
 		for(HitBase item: resp){	      
 			Triple<List<String>, String, String[]> fragmentExtractionResults = 
 					extractor.formCandidateFragmentsForPage(item, topic, null);
 
-			List<String> allFragms = (List<String>)fragmentExtractionResults.getFirst();
-			String downloadedPage = (String)fragmentExtractionResults.getSecond();
-			String[] sents = (String[])fragmentExtractionResults.getThird();
-
+			List<String> allFragms = fragmentExtractionResults.getFirst();
 			for (String fragment : allFragms) {
 				String[] candidateSentences = extractor.formCandidateSentences(fragment, fragmentExtractionResults);
 				System.out.println(Arrays.toString(candidateSentences));
 				data.add(candidateSentences);
 			}
-			
 		}
-
 		ProfileReaderWriter.writeReport(data, "multi_sentence_queries.csv");
-
 	}
 
 }
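
Note the paging arithmetic in runSearch(query, totalPages): with hitsPerPage fixed at 50, the loop condition `totalPages > page * hitsPerPage` treats the second argument as a budget of hits rather than a number of pages. Worked through:

    int hitsPerPage = 50, totalPages = 150, page = 0;
    while (totalPages > page * hitsPerPage) {
        // iterates for page = 0, 1, 2 (up to ~150 hits), then stops
        page++;
    }
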
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/ContentGeneratorRequestHandler.java b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/ContentGeneratorRequestHandler.java
index 41afe36..a769115 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/ContentGeneratorRequestHandler.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/ContentGeneratorRequestHandler.java
@@ -22,64 +22,26 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.List;
-import java.util.Map;
-import java.util.Set;
 import java.util.logging.Logger;
 
 import javax.mail.internet.AddressException;
 import javax.mail.internet.InternetAddress;
 
-import opennlp.tools.similarity.apps.HitBase;
-import opennlp.tools.similarity.apps.RelatedSentenceFinder;
-import opennlp.tools.similarity.apps.RelatedSentenceFinderML;
-import opennlp.tools.similarity.apps.utils.Pair;
-import opennlp.tools.textsimilarity.ParseTreeChunk;
-import opennlp.tools.textsimilarity.ParseTreeChunkListScorer;
-import opennlp.tools.textsimilarity.SentencePairMatchResult;
-import opennlp.tools.textsimilarity.chunker2matcher.ParserChunker2MatcherProcessor;
-
-import org.apache.commons.lang.ArrayUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.queryparser.classic.ParseException;
-import org.apache.lucene.search.BooleanClause.Occur;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.CachingWrapperFilter;
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.QueryWrapperFilter;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.handler.component.SearchHandler;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.SolrQueryResponse;
 
-
+import opennlp.tools.similarity.apps.HitBase;
+import opennlp.tools.similarity.apps.RelatedSentenceFinder;
+import opennlp.tools.similarity.apps.RelatedSentenceFinderML;
+import opennlp.tools.textsimilarity.chunker2matcher.ParserChunker2MatcherProcessor;
 
 public class ContentGeneratorRequestHandler extends SearchHandler {
-	private static Logger LOG = Logger
-			.getLogger("com.become.search.requestHandlers.SearchResultsReRankerRequestHandler");
-	private ParserChunker2MatcherProcessor sm = null;
-	WordDocBuilderEndNotes docBuilder = new WordDocBuilderEndNotes ();
-
+	private static final Logger LOG =
+					Logger.getLogger("com.become.search.requestHandlers.SearchResultsReRankerRequestHandler");
+	private final WordDocBuilderEndNotes docBuilder = new WordDocBuilderEndNotes ();
 
 	public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp){
 
@@ -108,24 +70,23 @@ public class ContentGeneratorRequestHandler extends SearchHandler {
 			resultText = cgRunner(runInternal);
 		} catch (Exception e1) {
 			
-/*
-		Runtime r = Runtime.getRuntime();
-		Process mStartProcess = null;
-		String workDir = req.getParams().get("workDir"); 
-		if (workDir == null)
-			System.err.println("workDir = null");
+		/*
+				Runtime r = Runtime.getRuntime();
+				Process mStartProcess = null;
+				String workDir = req.getParams().get("workDir");
+				if (workDir == null)
+					System.err.println("workDir = null");
 
-		try {
-			mStartProcess = r.exec(runCommand, null, new File(workDir));
-		} catch (IOException e) {
-			// TODO Auto-generated catch block
-			e.printStackTrace();
-		}
+				try {
+					mStartProcess = r.exec(runCommand, null, new File(workDir));
+				} catch (IOException e) {
+					e.printStackTrace();
+				}
 
-		StreamLogger outputGobbler = new StreamLogger(mStartProcess.getInputStream());
-		outputGobbler.start();
-		}
-*/
+				StreamLogger outputGobbler = new StreamLogger(mStartProcess.getInputStream());
+				outputGobbler.start();
+				}
+		*/
 		}
 		
 		NamedList<Object> values = rsp.getValues();
@@ -136,10 +97,9 @@ public class ContentGeneratorRequestHandler extends SearchHandler {
 
 	}
 
+	static class StreamLogger extends Thread{
 
-	class StreamLogger extends Thread{
-
-		private InputStream mInputStream;
+		private final InputStream mInputStream;
 
 		public StreamLogger(InputStream is) {
 			this.mInputStream = is;
@@ -165,17 +125,14 @@ public class ContentGeneratorRequestHandler extends SearchHandler {
 			System.out.print(count+">>" + a + " | ");
 			count++;
 		}
-		
-
 		try {
 			String resourceDir = args[2];
+			ParserChunker2MatcherProcessor sm = null;
 			if (resourceDir!=null)
 				sm = ParserChunker2MatcherProcessor.getInstance(resourceDir);
 			else
 				sm = ParserChunker2MatcherProcessor.getInstance();
-
 		} catch (Exception e) {
-			// TODO Auto-generated catch block
 			e.printStackTrace();
 		}
 
@@ -224,7 +181,6 @@ public class ContentGeneratorRequestHandler extends SearchHandler {
 				s.sendMail("smtp.rambler.ru", "bg7550@rambler.ru", "pill0693", new InternetAddress("bg7550@rambler.ru"), new InternetAddress[]{new InternetAddress(args[1])}, new InternetAddress[]{}, new InternetAddress[]{}, 
 						"Generated content for you on '"+args[0].replace('+', ' ')+"'", generatedContent, attachmentFileName);
 			} catch (AddressException e) {
-				// TODO Auto-generated catch block
 				e.printStackTrace();
 			} catch (Exception e) {
 
@@ -233,7 +189,6 @@ public class ContentGeneratorRequestHandler extends SearchHandler {
 					s.sendMail("smtp.rambler.ru", "bg7550@rambler.ru", "pill0693", new InternetAddress("bg7550@rambler.ru"), new InternetAddress[]{new InternetAddress(args[1])}, new InternetAddress[]{}, new InternetAddress[]{}, 
 							"Generated content for you on '"+args[0].replace('+', ' ')+"'", generatedContent, attachmentFileName);
 				} catch (Exception e1) {
-					// TODO Auto-generated catch block
 					e1.printStackTrace();
 				}
 			}
@@ -284,5 +239,4 @@ public class ContentGeneratorRequestHandler extends SearchHandler {
 
 /*
 http://173.255.254.250:8983/solr/contentgen/?q=human+body+anatomy&email=bgalitsky@hotmail.com&resourceDir=/home/solr/solr-4.4.0/example/src/test/resources&workDir=/home/solr/solr-4.4.0/example/solr-webapp/webapp/WEB-INF/lib&stepsNum=20&searchResultsNum=10&relevanceThreshold=1.5
-
  */
\ No newline at end of file
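The commented-out Runtime.exec block retained above is dead code kept for reference. If it were ever revived, a minimal modern sketch of the same idea (start an external command in a working directory and drain its output so the child cannot block on a full pipe) could use ProcessBuilder; the command and directory below are illustrative stand-ins for runCommand and workDir:

    import java.io.BufferedReader;
    import java.io.File;
    import java.io.IOException;
    import java.io.InputStreamReader;

    public class ProcessRunnerSketch {
      public static void main(String[] args) throws IOException, InterruptedException {
        // "echo"/"." are illustrative stand-ins for runCommand and workDir
        ProcessBuilder pb = new ProcessBuilder("echo", "hello")
            .directory(new File("."))
            .redirectErrorStream(true); // fold stderr into stdout so one reader suffices
        Process p = pb.start();
        try (BufferedReader r = new BufferedReader(new InputStreamReader(p.getInputStream()))) {
          String line;
          while ((line = r.readLine()) != null) {
            System.out.println(line); // plays the role of the StreamLogger thread above
          }
        }
        p.waitFor();
      }
    }

redirectErrorStream(true) also removes the need for a second gobbler thread on stderr.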
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/IterativeQueryComponent.java b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/IterativeQueryComponent.java
index 6693bbf..78cbfb3 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/IterativeQueryComponent.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/IterativeQueryComponent.java
@@ -21,24 +21,19 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.commons.lang.StringUtils;
-import org.apache.lucene.queryparser.classic.ParseException;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.Query;
-import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.handler.component.QueryComponent;
 import org.apache.solr.handler.component.ResponseBuilder;
-import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.ResultContext;
-import org.apache.solr.response.SolrQueryResponse;
 import org.apache.solr.search.DocList;
 import org.apache.solr.search.QParser;
 import org.apache.solr.search.QParserPlugin;
 import org.apache.solr.search.QueryParsing;
 
-
 public class IterativeQueryComponent extends QueryComponent{
 	public static final String COMPONENT_NAME = "iterative_query";
 	public static final String[] fieldSequence = new String[]{"cat", "name", "content", "author"}; 
@@ -49,7 +44,7 @@ public class IterativeQueryComponent extends QueryComponent{
 	@Override
 	public void process(ResponseBuilder rb) throws IOException {
 
-		NamedList nameValuePairs = rb.rsp.getValues();
+		NamedList<Object> nameValuePairs = rb.rsp.getValues();
 		nameValuePairs.remove("response");
 		rb.rsp.setAllValues(nameValuePairs);
 		rb = substituteField(rb, fieldSequence[0] );
@@ -59,7 +54,7 @@ public class IterativeQueryComponent extends QueryComponent{
 			nameValuePairs = rb.rsp.getValues();
 			ResultContext c = (ResultContext) nameValuePairs.get("response");
 			if (c!=null){			
-				DocList dList = c.docs;
+				DocList dList = c.getDocList();
 				if (dList.size()<1){
 					nameValuePairs.remove("response");
 					rb.rsp.setAllValues(nameValuePairs);
@@ -110,10 +105,10 @@ public class IterativeQueryComponent extends QueryComponent{
 		String currField = StringUtils.substringBetween(" "+query, " ", ":");
 		if ( currField !=null && newFieldName!=null)
 			query = query.replace(currField, newFieldName);
-		NamedList values = params.toNamedList();
+		NamedList<Object> values = params.toNamedList();
 		values.remove("q");
 		values.add("q", query);
-		params = SolrParams.toSolrParams(values);
+		params = values.toSolrParams();
 		rb.req.setParams(params);
 		rb.setQueryString(query);
 
@@ -133,40 +128,37 @@ public class IterativeQueryComponent extends QueryComponent{
 		try {
 			parser = QParser.getParser(rb.getQueryString(), defType, rb.req);
 		} catch (Exception e) {
-			// TODO Auto-generated catch block
 			e.printStackTrace();
 		}
 		Query q = null;
 		try {
 			q = parser.getQuery();
 		} catch (Exception e) {
-			// TODO Auto-generated catch block
 			e.printStackTrace();
 		}
 		if (q == null) {
 			// normalize a null query to a query that matches nothing
-			q = new BooleanQuery();        
+			q = new BooleanQuery.Builder().build();
 		}
 		rb.setQuery( q );
 		try {
-			rb.setSortSpec( parser.getSort(true) );
+			rb.setSortSpec(parser.getSortSpec(true));
 		} catch (Exception e) {
-			// TODO Auto-generated catch block
 			e.printStackTrace();
 		}
 		rb.setQparser(parser);
-	/*	try {
-			rb.setScoreDoc(parser.getPaging());
-		} catch (Exception e) {
-			// TODO Auto-generated catch block
-			e.printStackTrace();
+		/*
+		try {
+				rb.setScoreDoc(parser.getPaging());
+			} catch (Exception e) {
+				e.printStackTrace();
 		}
-*/
+		*/
 		String[] fqs = rb.req.getParams().getParams(CommonParams.FQ);
 		if (fqs!=null && fqs.length!=0) {
 			List<Query> filters = rb.getFilters();
 			if (filters==null) {
-				filters = new ArrayList<Query>(fqs.length);
+				filters = new ArrayList<>(fqs.length);
 			}
 			for (String fq : fqs) {
 				if (fq != null && fq.trim().length()!=0) {
@@ -174,13 +166,11 @@ public class IterativeQueryComponent extends QueryComponent{
 					try {
 						fqp = QParser.getParser(fq, null, rb.req);
 					} catch (Exception e) {
-						// TODO Auto-generated catch block
 						e.printStackTrace();
 					}
 					try {
 						filters.add(fqp.getQuery());
 					} catch (Exception e) {
-						// TODO Auto-generated catch block
 						e.printStackTrace();
 					}
 				}
@@ -192,9 +182,6 @@ public class IterativeQueryComponent extends QueryComponent{
 				rb.setFilters( filters );
 			}
 		}
-
-
 		return rb;
 	}
-
 }
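The hunk above swaps "new BooleanQuery()" for "new BooleanQuery.Builder().build()" because BooleanQuery became immutable in later Lucene releases; clauses are now added on a builder. A minimal sketch of the pattern in isolation (field names and terms are illustrative):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.BooleanClause.Occur;
    import org.apache.lucene.search.BooleanQuery;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermQuery;

    public class BooleanQuerySketch {
      public static void main(String[] args) {
        Query q = new BooleanQuery.Builder()
            .add(new TermQuery(new Term("cat", "electronics")), Occur.MUST)
            .add(new TermQuery(new Term("name", "ipod")), Occur.SHOULD)
            .build();
        System.out.println(q);

        // an empty builder yields a query matching nothing, which is how
        // process(...) above normalizes a null query
        Query matchNothing = new BooleanQuery.Builder().build();
        System.out.println(matchNothing);
      }
    }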
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/IterativeSearchRequestHandler.java b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/IterativeSearchRequestHandler.java
index be125b7..98d4540 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/IterativeSearchRequestHandler.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/IterativeSearchRequestHandler.java
@@ -18,22 +18,14 @@ package opennlp.tools.similarity.apps.solr;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import opennlp.tools.similarity.apps.HitBaseComparable;
-import opennlp.tools.similarity.apps.utils.Pair;
-import opennlp.tools.textsimilarity.ParseTreeChunkListScorer;
-import opennlp.tools.textsimilarity.SentencePairMatchResult;
-import opennlp.tools.textsimilarity.chunker2matcher.ParserChunker2MatcherProcessor;
-
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.lucene.document.Document;
@@ -42,27 +34,15 @@ import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.queryparser.classic.ParseException;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.CachingWrapperFilter;
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TotalHits;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.handler.component.ResponseBuilder;
-import org.apache.solr.handler.component.SearchComponent;
 import org.apache.solr.handler.component.SearchHandler;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.solr.handler.component.ShardRequest;
-import org.apache.solr.handler.component.ShardResponse;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.ResultContext;
 import org.apache.solr.response.SolrQueryResponse;
@@ -72,19 +52,22 @@ import org.apache.solr.search.DocList;
 import org.apache.solr.search.DocSlice;
 import org.apache.solr.search.QParser;
 import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.util.RTimer;
-import org.apache.solr.util.SolrPluginUtils;
+
+import opennlp.tools.similarity.apps.utils.Pair;
+import opennlp.tools.textsimilarity.ParseTreeChunkListScorer;
+import opennlp.tools.textsimilarity.SentencePairMatchResult;
+import opennlp.tools.textsimilarity.chunker2matcher.ParserChunker2MatcherProcessor;
+
 
 public class IterativeSearchRequestHandler extends SearchHandler {
 
-	private ParseTreeChunkListScorer parseTreeChunkListScorer = new ParseTreeChunkListScorer();
+	private final ParseTreeChunkListScorer parseTreeChunkListScorer = new ParseTreeChunkListScorer();
 
 	public SolrQueryResponse runSearchIteration(SolrQueryRequest req, SolrQueryResponse rsp, String fieldToTry){
 		try {
 			req = substituteField(req, fieldToTry);
 			super.handleRequestBody(req, rsp);
 		} catch (Exception e) {
-			// TODO Auto-generated catch block
 			e.printStackTrace();
 		}
 		return rsp;
@@ -96,29 +79,26 @@ public class IterativeSearchRequestHandler extends SearchHandler {
 		String currField = StringUtils.substringBetween(" "+query, " ", ":");
 		if ( currField !=null && newFieldName!=null)
 			query = query.replace(currField, newFieldName);
-		NamedList values = params.toNamedList();
+		NamedList<Object> values = params.toNamedList();
 		values.remove("q");
 		values.add("q", query);
-		params = SolrParams.toSolrParams(values);
+		params = values.toSolrParams();
 		req.setParams(params);
 		return req;
-
 	}
 
 	public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp){
          
 		SolrQueryResponse rsp1 = new SolrQueryResponse(), rsp2=new SolrQueryResponse(), rsp3=new SolrQueryResponse();
-		NamedList list = rsp.getValues();
 		rsp1.setAllValues(rsp.getValues().clone());
 		rsp2.setAllValues(rsp.getValues().clone());
 		rsp3.setAllValues(rsp.getValues().clone());
-		
-		
+
 		rsp1 = runSearchIteration(req, rsp1, "cat");
-		NamedList values = rsp1.getValues();
+		NamedList<Object> values = rsp1.getValues();
 		ResultContext c = (ResultContext) values.get("response");
 		if (c!=null){			
-			DocList dList = c.docs;
+			DocList dList = c.getDocList();
 			if (dList.size()<1){
 				rsp2 = runSearchIteration(req, rsp2, "name");
 			}
@@ -131,7 +111,7 @@ public class IterativeSearchRequestHandler extends SearchHandler {
 		values = rsp2.getValues();
 		c = (ResultContext) values.get("response");
 		if (c!=null){
-			DocList dList = c.docs;
+			DocList dList = c.getDocList();
 			if (dList.size()<1){
 				rsp3 = runSearchIteration(req, rsp3, "content");
 			}
@@ -140,243 +120,220 @@ public class IterativeSearchRequestHandler extends SearchHandler {
 				return;
 			}
 		}
-		
 		rsp.setAllValues(rsp3.getValues());
-
 	}
 
-	
-
-
-
-public DocList filterResultsBySyntMatchReduceDocSet(DocList docList,
-		SolrQueryRequest req,  SolrParams params) {		
-	//if (!docList.hasScores()) 
-	//	return docList;
+	public DocList filterResultsBySyntMatchReduceDocSet(DocList docList,
+			SolrQueryRequest req,  SolrParams params) {
+		//if (!docList.hasScores())
+		//	return docList;
+
+		int len = docList.size();
+		if (len < 1) // do nothing
+			return docList;
+		ParserChunker2MatcherProcessor pos = ParserChunker2MatcherProcessor.getInstance();
+
+		DocIterator iter = docList.iterator();
+		float[] syntMatchScoreArr = new float[len];
+		String requestExpression = req.getParamString();
+		String[] exprParts = requestExpression.split("&");
+		for(String part: exprParts){
+			if (part.startsWith("q="))
+				requestExpression = part;
+		}
+		String fieldNameQuery = StringUtils.substringBetween(requestExpression, "=", ":");
+		// extract phrase query (in double-quotes)
+		String[] queryParts = requestExpression.split("\"");
+		if  (queryParts.length>=2 && queryParts[1].length()>5)
+			requestExpression = queryParts[1].replace('+', ' ');
+		else if (requestExpression.contains(":")) {// still field-based expression
+			requestExpression = requestExpression.replaceAll(fieldNameQuery+":", "").replace('+',' ').replaceAll("  ", " ").replace("q=", "");
+		}
 
-	int len = docList.size();
-	if (len < 1) // do nothing
-		return docList;
-	ParserChunker2MatcherProcessor pos = ParserChunker2MatcherProcessor .getInstance();
+		if (fieldNameQuery ==null)
+			return docList;
+		if (requestExpression==null || requestExpression.length()<5  || requestExpression.split(" ").length<3)
+			return docList;
+		int[] docIDsHits = new int[len];
 
-	DocIterator iter = docList.iterator();
-	float[] syntMatchScoreArr = new float[len];
-	String requestExpression = req.getParamString();
-	String[] exprParts = requestExpression.split("&");
-	for(String part: exprParts){
-		if (part.startsWith("q="))
-			requestExpression = part;			
-	}
-	String fieldNameQuery = StringUtils.substringBetween(requestExpression, "=", ":");
-	// extract phrase query (in double-quotes)
-	String[] queryParts = requestExpression.split("\"");
-	if  (queryParts.length>=2 && queryParts[1].length()>5)
-		requestExpression = queryParts[1].replace('+', ' ');	
-	else if (requestExpression.indexOf(":") > -1 ) {// still field-based expression
-		requestExpression = requestExpression.replaceAll(fieldNameQuery+":", "").replace('+',' ').replaceAll("  ", " ").replace("q=", "");
-	}
+		IndexReader indexReader = req.getSearcher().getIndexReader();
+		List<Integer> bestMatchesDocIds = new ArrayList<>(); List<Float> bestMatchesScore = new ArrayList<>();
+		List<Pair<Integer, Float>> docIdsScores = new ArrayList<>();
+		try {
+			for (int i=0; i<docList.size(); ++i) {
+				int docId = iter.nextDoc();
+				docIDsHits[i] = docId;
+				Document doc = indexReader.document(docId);
+
+				// get text for event
+				String answerText = doc.get(fieldNameQuery);
+				if (answerText==null)
+					continue;
+				SentencePairMatchResult matchResult = pos.assessRelevance(requestExpression, answerText);
+				float syntMatchScore = (float) parseTreeChunkListScorer.getParseTreeChunkListScore(matchResult.getMatchResult());
+				bestMatchesDocIds.add(docId);
+				bestMatchesScore.add(syntMatchScore);
+				syntMatchScoreArr[i] = syntMatchScore; //*iter.score();
+				System.out.println(" Matched query = '"+requestExpression + "' with answer = '"+answerText +"' | doc_id = '"+docId);
+				System.out.println(" Match result = '"+matchResult.getMatchResult() + "' with score = '"+syntMatchScore +"';" );
+				docIdsScores.add(new Pair<>(docId, syntMatchScore));
+			}
 
-	if (fieldNameQuery ==null)
-		return docList;
-	if (requestExpression==null || requestExpression.length()<5  || requestExpression.split(" ").length<3)
-		return docList;
-	int[] docIDsHits = new int[len]; 
-
-	IndexReader indexReader = req.getSearcher().getIndexReader();
-	List<Integer> bestMatchesDocIds = new ArrayList<Integer>(); List<Float> bestMatchesScore = new ArrayList<Float>();
-	List<Pair<Integer, Float>> docIdsScores = new ArrayList<Pair<Integer, Float>> ();
-	try {
-		for (int i=0; i<docList.size(); ++i) {
-			int docId = iter.nextDoc();
-			docIDsHits[i] = docId;
-			Document doc = indexReader.document(docId);
-
-			// get text for event
-			String answerText = doc.get(fieldNameQuery);
-			if (answerText==null)
-				continue;
-			SentencePairMatchResult matchResult = pos.assessRelevance( requestExpression , answerText);
-			float syntMatchScore =  new Double(parseTreeChunkListScorer.getParseTreeChunkListScore(matchResult.getMatchResult())).floatValue();
-			bestMatchesDocIds.add(docId);
-			bestMatchesScore.add(syntMatchScore);
-			syntMatchScoreArr[i] = (float)syntMatchScore; //*iter.score();
-			System.out.println(" Matched query = '"+requestExpression + "' with answer = '"+answerText +"' | doc_id = '"+docId);
-			System.out.println(" Match result = '"+matchResult.getMatchResult() + "' with score = '"+syntMatchScore +"';" );
-			docIdsScores.add(new Pair(docId, syntMatchScore));
+		} catch (CorruptIndexException e1) {
+			e1.printStackTrace();
+			//log.severe("Corrupt index"+e1);
+		} catch (IOException e1) {
+			e1.printStackTrace();
+			//log.severe("File read IO / index"+e1);
 		}
 
-	} catch (CorruptIndexException e1) {
-		// TODO Auto-generated catch block
-		e1.printStackTrace();
-		//log.severe("Corrupt index"+e1);
-	} catch (IOException e1) {
-		// TODO Auto-generated catch block
-		e1.printStackTrace();
-		//log.severe("File read IO / index"+e1);
-	}
-
 
-	Collections.sort(docIdsScores, new PairComparable());
-	for(int i = 0; i<docIdsScores.size(); i++){
-		bestMatchesDocIds.set(i, docIdsScores.get(i).getFirst());
-		bestMatchesScore.set(i, docIdsScores.get(i).getSecond());
+		docIdsScores.sort(new PairComparable<>());
+		for(int i = 0; i<docIdsScores.size(); i++){
+			bestMatchesDocIds.set(i, docIdsScores.get(i).getFirst());
+			bestMatchesScore.set(i, docIdsScores.get(i).getSecond());
+		}
+		System.out.println(bestMatchesScore);
+		float maxScore = docList.maxScore(); // do not change
+		int limit = docIdsScores.size();
+		int start = 0;
+		return new DocSlice(start, limit,
+				ArrayUtils.toPrimitive(bestMatchesDocIds.toArray(new Integer[0])),
+				ArrayUtils.toPrimitive(bestMatchesScore.toArray(new Float[0])),
+				bestMatchesDocIds.size(), maxScore, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO);
 	}
-	System.out.println(bestMatchesScore);
-	float maxScore = docList.maxScore(); // do not change
-	int limit = docIdsScores.size();
-	int start = 0; 
-	DocSlice ds = null;
-
-	ds = new DocSlice(start, limit, 
-			ArrayUtils.toPrimitive(bestMatchesDocIds.toArray(new Integer[0])), 
-			ArrayUtils.toPrimitive(bestMatchesScore.toArray(new Float[0])), 
-			bestMatchesDocIds.size(), maxScore);
 
 
+	public void handleRequestBody1(SolrQueryRequest req, SolrQueryResponse rsp)
+	throws Exception {
 
-	return ds;
-}
-
-
-public void handleRequestBody1(SolrQueryRequest req, SolrQueryResponse rsp)
-throws Exception {
-
-	// extract params from request
-	SolrParams params = req.getParams();
-	String q = params.get(CommonParams.Q);
-	String[] fqs = params.getParams(CommonParams.FQ);
-	int start = 0;
-	try { start = Integer.parseInt(params.get(CommonParams.START)); } 
-	catch (Exception e) { /* default */ }
-	int rows = 0;
-	try { rows = Integer.parseInt(params.get(CommonParams.ROWS)); } 
-	catch (Exception e) { /* default */ }
-	//SolrPluginUtils.setReturnFields(req, rsp);
-
-	// build initial data structures
-
-	SolrDocumentList results = new SolrDocumentList();
-	SolrIndexSearcher searcher = req.getSearcher();
-	Map<String,SchemaField> fields = req.getSchema().getFields();
-	int ndocs = start + rows;
-	Filter filter = buildFilter(fqs, req);
-	Set<Integer> alreadyFound = new HashSet<Integer>();
-
-	// invoke the various sub-handlers in turn and return results
-	doSearch1(results, searcher, q, filter, ndocs, req, 
-			fields, alreadyFound);
-
-	// ... more sub-handler calls here ...
-
-	// build and write response
-	float maxScore = 0.0F;
-	int numFound = 0;
-	List<SolrDocument> slice = new ArrayList<SolrDocument>();
-	for (Iterator<SolrDocument> it = results.iterator(); it.hasNext(); ) {
-		SolrDocument sdoc = it.next();
-		Float score = (Float) sdoc.getFieldValue("score");
-		if (maxScore < score) {
-			maxScore = score;
-		}
-		if (numFound >= start && numFound < start + rows) {
-			slice.add(sdoc);
+		// extract params from request
+		SolrParams params = req.getParams();
+		String q = params.get(CommonParams.Q);
+		String[] fqs = params.getParams(CommonParams.FQ);
+		int start = 0;
+		try { start = Integer.parseInt(params.get(CommonParams.START)); }
+		catch (Exception e) { /* default */ }
+		int rows = 0;
+		try { rows = Integer.parseInt(params.get(CommonParams.ROWS)); }
+		catch (Exception e) { /* default */ }
+		//SolrPluginUtils.setReturnFields(req, rsp);
+
+		// build initial data structures
+
+		SolrDocumentList results = new SolrDocumentList();
+		SolrIndexSearcher searcher = req.getSearcher();
+		Map<String,SchemaField> fields = req.getSchema().getFields();
+		int ndocs = start + rows;
+		Query filter = buildFilter(fqs, req);
+		Set<Integer> alreadyFound = new HashSet<>();
+
+		// invoke the various sub-handlers in turn and return results
+		doSearch1(results, searcher, q, filter, ndocs, req,
+				fields, alreadyFound);
+
+		// ... more sub-handler calls here ...
+
+		// build and write response
+		float maxScore = 0.0F;
+		int numFound = 0;
+		List<SolrDocument> slice = new ArrayList<>();
+		for (Iterator<SolrDocument> it = results.iterator(); it.hasNext(); ) {
+			SolrDocument sdoc = it.next();
+			Float score = (Float) sdoc.getFieldValue("score");
+			if (maxScore < score) {
+				maxScore = score;
+			}
+			if (numFound >= start && numFound < start + rows) {
+				slice.add(sdoc);
+			}
+			numFound++;
 		}
-		numFound++;
+		results.clear();
+		results.addAll(slice);
+		results.setNumFound(numFound);
+		results.setMaxScore(maxScore);
+		results.setStart(start);
+		rsp.add("response", results);
 	}
-	results.clear();
-	results.addAll(slice);
-	results.setNumFound(numFound);
-	results.setMaxScore(maxScore);
-	results.setStart(start);
-	rsp.add("response", results);
-
-}
-
 
-private Filter buildFilter(String[] fqs, SolrQueryRequest req) 
-throws IOException, ParseException {
-	if (fqs != null && fqs.length > 0) {
-		BooleanQuery fquery = new BooleanQuery();
-		for (int i = 0; i < fqs.length; i++) {
-			QParser parser = null;
-			try {
-				parser = QParser.getParser(fqs[i], null, req);
-			} catch (Exception e) {
-				// TODO Auto-generated catch block
-				e.printStackTrace();
-			}
-			try {
-				fquery.add(parser.getQuery(), Occur.MUST);
-			} catch (Exception e) {
-				// TODO Auto-generated catch block
-				e.printStackTrace();
+	private Query buildFilter(String[] fqs, SolrQueryRequest req)
+	throws IOException, ParseException {
+		if (fqs != null && fqs.length > 0) {
+			BooleanQuery.Builder fquery =  new BooleanQuery.Builder();
+			for (String fq : fqs) {
+				QParser parser;
+				try {
+					parser = QParser.getParser(fq, null, req);
+					fquery.add(parser.getQuery(), Occur.MUST);
+				} catch (Exception e) {
+					e.printStackTrace();
+				}
 			}
+			return fquery.build();
 		}
-		return new CachingWrapperFilter(new QueryWrapperFilter(fquery));
+		return null;
 	}
-	return null;
-}
 
-private void doSearch1(SolrDocumentList results,
-		SolrIndexSearcher searcher, String q, Filter filter, 
-		int ndocs, SolrQueryRequest req,
-		Map<String,SchemaField> fields, Set<Integer> alreadyFound) 
-throws IOException {
-
-	// build custom query and extra fields
-	Query query = null; //buildCustomQuery1(q);
-	Map<String,Object> extraFields = new HashMap<String,Object>();
-	extraFields.put("search_type", "search1");
-	boolean includeScore = 
-		req.getParams().get(CommonParams.FL).contains("score");
-
-	int  maxDocsPerSearcherType = 0;
-	float maprelScoreCutoff = 2.0f;
-	append(results, searcher.search(
-			query, filter, maxDocsPerSearcherType).scoreDocs,
-			alreadyFound, fields, extraFields, maprelScoreCutoff , 
-			searcher.getIndexReader(), includeScore);
-}
+	private void doSearch1(SolrDocumentList results,
+			SolrIndexSearcher searcher, String q, Query filter,
+			int ndocs, SolrQueryRequest req,
+			Map<String,SchemaField> fields, Set<Integer> alreadyFound)
+	throws IOException {
+
+		// build custom query and extra fields
+		Map<String,Object> extraFields = new HashMap<>();
+		extraFields.put("search_type", "search1");
+		boolean includeScore =
+			req.getParams().get(CommonParams.FL).contains("score");
+
+		int  maxDocsPerSearcherType = 0;
+		float maprelScoreCutoff = 2.0f;
+		append(results, searcher.search(
+				filter, maxDocsPerSearcherType).scoreDocs,
+				alreadyFound, fields, extraFields, maprelScoreCutoff ,
+				searcher.getIndexReader(), includeScore);
+	}
 
-// ... more doSearchXXX() calls here ...
+	// ... more doSearchXXX() calls here ...
 
-private void append(SolrDocumentList results, ScoreDoc[] more, 
-		Set<Integer> alreadyFound, Map<String,SchemaField> fields,
-		Map<String,Object> extraFields, float scoreCutoff, 
-		IndexReader reader, boolean includeScore) throws IOException {
-	for (ScoreDoc hit : more) {
-		if (alreadyFound.contains(hit.doc)) {
-			continue;
-		}
-		Document doc = reader.document(hit.doc);
-		SolrDocument sdoc = new SolrDocument();
-		for (String fieldname : fields.keySet()) {
-			SchemaField sf = fields.get(fieldname);
-			if (sf.stored()) {
-				sdoc.addField(fieldname, doc.get(fieldname));
+	private void append(SolrDocumentList results, ScoreDoc[] more,
+			Set<Integer> alreadyFound, Map<String,SchemaField> fields,
+			Map<String,Object> extraFields, float scoreCutoff,
+			IndexReader reader, boolean includeScore) throws IOException {
+		for (ScoreDoc hit : more) {
+			if (alreadyFound.contains(hit.doc)) {
+				continue;
 			}
+			Document doc = reader.document(hit.doc);
+			SolrDocument sdoc = new SolrDocument();
+			for (String fieldname : fields.keySet()) {
+				SchemaField sf = fields.get(fieldname);
+				if (sf.stored()) {
+					sdoc.addField(fieldname, doc.get(fieldname));
+				}
+			}
+			for (String extraField : extraFields.keySet()) {
+				sdoc.addField(extraField, extraFields.get(extraField));
+			}
+			if (includeScore) {
+				sdoc.addField("score", hit.score);
+			}
+			results.add(sdoc);
+			alreadyFound.add(hit.doc);
 		}
-		for (String extraField : extraFields.keySet()) {
-			sdoc.addField(extraField, extraFields.get(extraField));
-		}
-		if (includeScore) {
-			sdoc.addField("score", hit.score);
-		}
-		results.add(sdoc);
-		alreadyFound.add(hit.doc);
 	}
-}
-public class PairComparable implements Comparator<Pair> {
-	// @Override
-	public int compare(Pair o1, Pair o2) {
-		int b = -2;
-		if ( o1.getSecond() instanceof Float && o2.getSecond() instanceof Float){
-
-			b =  (((Float)o1.getSecond()> (Float)o2.getSecond()) ? -1
-					: (((Float)o1.getSecond() == (Float)o2.getSecond()) ? 0 : 1));
+	public static class PairComparable<T1, T2> implements Comparator<Pair<T1, T2>> {
+
+		@Override
+		public int compare(Pair<T1, T2> o1, Pair<T1, T2> o2) {
+			int b = -2;
+			if ( o1.getSecond() instanceof Float && o2.getSecond() instanceof Float){
+				// compare boxed Floats with compareTo; '==' only tests object identity
+				b = ((Float) o2.getSecond()).compareTo((Float) o1.getSecond());
+			}
+			return b;
+		}
-		return b;
 	}
-}
 
 }
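filterResultsBySyntMatchReduceDocSet now ends by building a DocSlice with the extra TotalHits.Relation argument that the Solr 8.11.x constructor requires. A minimal standalone sketch of that packing step (the doc ids and scores are illustrative):

    import org.apache.lucene.search.TotalHits;
    import org.apache.solr.search.DocSlice;

    public class DocSliceSketch {
      public static void main(String[] args) {
        int[] docIds = {42, 7, 13};          // re-ranked Lucene doc ids, best first
        float[] scores = {3.0f, 2.1f, 1.1f}; // matching scores, sorted descending
        // offset 0, length 3, 3 matches; the relation marks the total-hit
        // count as a lower bound rather than an exact figure
        DocSlice slice = new DocSlice(0, docIds.length, docIds, scores,
            docIds.length, 3.0f, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO);
        System.out.println(slice.size() + " docs, " + slice.matches() + " matches");
      }
    }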
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/NLProgram2CodeRequestHandler.java b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/NLProgram2CodeRequestHandler.java
index 413dd5d..b866006 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/NLProgram2CodeRequestHandler.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/NLProgram2CodeRequestHandler.java
@@ -16,67 +16,27 @@
  */
 package opennlp.tools.similarity.apps.solr;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
 import java.util.logging.Logger;
 
 import opennlp.tools.nl2code.NL2Obj;
 import opennlp.tools.nl2code.NL2ObjCreateAssign;
 import opennlp.tools.nl2code.ObjectPhraseListForSentence;
-import opennlp.tools.similarity.apps.HitBase;
-import opennlp.tools.similarity.apps.utils.Pair;
-import opennlp.tools.textsimilarity.ParseTreeChunk;
 import opennlp.tools.textsimilarity.ParseTreeChunkListScorer;
-import opennlp.tools.textsimilarity.SentencePairMatchResult;
-import opennlp.tools.textsimilarity.chunker2matcher.ParserChunker2MatcherProcessor;
 
-import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.lang.StringUtils;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.queryparser.classic.ParseException;
-import org.apache.lucene.search.BooleanClause.Occur;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.CachingWrapperFilter;
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.QueryWrapperFilter;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.handler.component.SearchHandler;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.SolrQueryResponse;
 
-
-
 public class NLProgram2CodeRequestHandler extends SearchHandler {
-	private static Logger LOG = Logger
-			.getLogger("opennlp.tools.similarity.apps.solr.NLProgram2CodeRequestHandler");
+	private static final Logger LOG =
+					Logger.getLogger("opennlp.tools.similarity.apps.solr.NLProgram2CodeRequestHandler");
 	private final static int MAX_SEARCH_RESULTS = 100;
-	private ParseTreeChunkListScorer parseTreeChunkListScorer = new ParseTreeChunkListScorer();
-	private ParserChunker2MatcherProcessor sm = null;
-	private int MAX_QUERY_LENGTH_NOT_TO_RERANK=3;
-	private static String resourceDir = //"/home/solr/solr-4.4.0/example/src/test/resources";
+	private final ParseTreeChunkListScorer parseTreeChunkListScorer = new ParseTreeChunkListScorer();
+	private final int MAX_QUERY_LENGTH_NOT_TO_RERANK = 3;
+	private static final String resourceDir = //"/home/solr/solr-4.4.0/example/src/test/resources";
 	"C:/workspace/TestSolr/src/test/resources";
-
 	//"/data1/solr/example/src/test/resources";
 	
 	NL2Obj compiler = new NL2ObjCreateAssign(resourceDir);
@@ -93,24 +53,21 @@ public class NLProgram2CodeRequestHandler extends SearchHandler {
 					text[count] = val;
 					count++;
 				}
+			}
 
+		StringBuilder buf = new StringBuilder();
+		for(String sent:text){
+			ObjectPhraseListForSentence opls=null;
+			try {
+				opls = compiler.convertSentenceToControlObjectPhrase(sent);
+			} catch (Exception e) {
+				e.printStackTrace();
 			}
-		
+			System.out.println(sent+"\n"+opls+"\n");
+			buf.append(sent).append("\n |=> ").append(opls).append("\n");
+		}
 
-			StringBuffer buf = new StringBuffer();
-		    for(String sent:text){
-		      ObjectPhraseListForSentence opls=null;
-		      try {
-		        opls = compiler.convertSentenceToControlObjectPhrase(sent);
-		      } catch (Exception e) {
-		        e.printStackTrace();
-		      }
-		      System.out.println(sent+"\n"+opls+"\n");
-		      buf.append(sent+"\n |=> "+opls+"\n");
-		    }
-		
-		
-		LOG.info("re-ranking results: "+buf.toString());
+		LOG.info("re-ranking results: " + buf);
 		NamedList<Object> values = rsp.getValues();
 		values.remove("response");
 		values.add("response", buf.toString().trim());
@@ -118,8 +75,6 @@ public class NLProgram2CodeRequestHandler extends SearchHandler {
 		
 	}
 
-	
-
 }
 
 /*
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/QueryExpansionRequestHandler.java b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/QueryExpansionRequestHandler.java
index e98e6be..df2232f 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/QueryExpansionRequestHandler.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/QueryExpansionRequestHandler.java
@@ -16,64 +16,11 @@
  */
 package opennlp.tools.similarity.apps.solr;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import opennlp.tools.similarity.apps.HitBaseComparable;
-import opennlp.tools.similarity.apps.utils.Pair;
-import opennlp.tools.textsimilarity.ParseTreeChunkListScorer;
-import opennlp.tools.textsimilarity.SentencePairMatchResult;
-import opennlp.tools.textsimilarity.chunker2matcher.ParserChunker2MatcherProcessor;
-
-import org.apache.commons.lang.ArrayUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.queryparser.classic.ParseException;
-import org.apache.lucene.search.BooleanClause.Occur;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.CachingWrapperFilter;
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.QueryWrapperFilter;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.handler.component.ResponseBuilder;
-import org.apache.solr.handler.component.SearchComponent;
 import org.apache.solr.handler.component.SearchHandler;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.solr.handler.component.ShardRequest;
-import org.apache.solr.handler.component.ShardResponse;
 import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.ResultContext;
 import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.schema.SchemaField;
-import org.apache.solr.search.DocIterator;
-import org.apache.solr.search.DocList;
-import org.apache.solr.search.DocSlice;
-import org.apache.solr.search.QParser;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.util.RTimer;
-import org.apache.solr.util.SolrPluginUtils;
 
 public class QueryExpansionRequestHandler extends SearchHandler {
 
@@ -95,10 +42,10 @@ public class QueryExpansionRequestHandler extends SearchHandler {
 		System.out.println("query before ="+query);
 		query = query.replace(' ', '_');
 		System.out.println("query after ="+query);
-		NamedList values = params.toNamedList();
+		NamedList<Object> values = params.toNamedList();
 		values.remove("q");
 		values.add("q", query);
-		params = SolrParams.toSolrParams(values);
+		params = values.toSolrParams();
 		req.setParams(params);
 		return req;
 	}
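substituteField relies on the NamedList round-trip that replaces the removed SolrParams.toSolrParams(NamedList) helper: copy the params into a NamedList, swap the q entry, and convert back with NamedList.toSolrParams(). A minimal sketch (parameter values are illustrative):

    import org.apache.solr.common.params.SolrParams;
    import org.apache.solr.common.util.NamedList;

    public class ParamRewriteSketch {
      public static void main(String[] args) {
        NamedList<Object> values = new NamedList<>();
        values.add("q", "ipod charger");
        values.add("rows", "10");

        // rewrite the query string, e.g. joining words as this handler does
        values.remove("q");
        values.add("q", "ipod_charger");

        SolrParams params = values.toSolrParams(); // replaces SolrParams.toSolrParams(values)
        System.out.println(params.get("q")); // -> ipod_charger
      }
    }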
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/SearchResultsReRankerRequestHandler.java b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/SearchResultsReRankerRequestHandler.java
index b259528..d3206b1 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/SearchResultsReRankerRequestHandler.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/SearchResultsReRankerRequestHandler.java
@@ -16,62 +16,33 @@
  */
 package opennlp.tools.similarity.apps.solr;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
-import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.List;
-import java.util.Map;
-import java.util.Set;
 import java.util.logging.Logger;
 
 import opennlp.tools.similarity.apps.HitBase;
-import opennlp.tools.similarity.apps.utils.Pair;
 import opennlp.tools.textsimilarity.ParseTreeChunk;
 import opennlp.tools.textsimilarity.ParseTreeChunkListScorer;
 import opennlp.tools.textsimilarity.SentencePairMatchResult;
 import opennlp.tools.textsimilarity.chunker2matcher.ParserChunker2MatcherProcessor;
 
-import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.lang.StringUtils;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.queryparser.classic.ParseException;
-import org.apache.lucene.search.BooleanClause.Occur;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.CachingWrapperFilter;
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.QueryWrapperFilter;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.handler.component.SearchHandler;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.SolrQueryResponse;
 
-
-
 public class SearchResultsReRankerRequestHandler extends SearchHandler {
-	private static Logger LOG = Logger
-			.getLogger("com.become.search.requestHandlers.SearchResultsReRankerRequestHandler");
+	private static final Logger LOG =
+					Logger.getLogger("com.become.search.requestHandlers.SearchResultsReRankerRequestHandler");
 	private final static int MAX_SEARCH_RESULTS = 100;
-	private ParseTreeChunkListScorer parseTreeChunkListScorer = new ParseTreeChunkListScorer();
+	private final ParseTreeChunkListScorer parseTreeChunkListScorer = new ParseTreeChunkListScorer();
 	private ParserChunker2MatcherProcessor sm = null;
-	private int MAX_QUERY_LENGTH_NOT_TO_RERANK=3;
-	private static String resourceDir = "/home/solr/solr-4.4.0/example/src/test/resources";
+	private static final String resourceDir = "/home/solr/solr-4.4.0/example/src/test/resources";
 	//"C:/workspace/TestSolr/src/test/resources";
 
 	//"/data1/solr/example/src/test/resources";
@@ -90,7 +61,7 @@ public class SearchResultsReRankerRequestHandler extends SearchHandler {
 
 		SolrParams ps = req.getOriginalParams();
 		Iterator<String> iter =  ps.getParameterNamesIterator();
-		List<String> keys = new ArrayList<String>();
+		List<String> keys = new ArrayList<>();
 		while(iter.hasNext()){
 			keys.add(iter.next());
 		}
@@ -101,9 +72,9 @@ public class SearchResultsReRankerRequestHandler extends SearchHandler {
 
 
 
-		for ( Integer i=0; i< MAX_SEARCH_RESULTS; i++){
-			String title = req.getParams().get("t"+i.toString());
-			String descr = req.getParams().get("d"+i.toString());
+		for (int i = 0; i < MAX_SEARCH_RESULTS; i++){
+			String title = req.getParams().get("t"+i);
+			String descr = req.getParams().get("d"+i);
 
 			if(title==null || descr==null)
 				continue;
@@ -129,7 +100,7 @@ public class SearchResultsReRankerRequestHandler extends SearchHandler {
 					HitBase hit = new HitBase();
 					hit.setTitle("");
 					hit.setAbstractText(val);
-					hit.setSource(new Integer(count).toString());
+					hit.setSource(Integer.toString(count));
 					searchResults.add(hit);
 					count++;
 				}
@@ -148,23 +119,21 @@ public class SearchResultsReRankerRequestHandler extends SearchHandler {
 			reRankedResults = calculateMatchScoreResortHits(searchResults, query);
 		/*
 		 * <scores>
-<score index="2">3.0005</score>
-<score index="1">2.101</score>
-<score index="3">2.1003333333333334</score>
-<score index="4">2.00025</score>
-<score index="5">1.1002</score>
-</scores>
-		 * 
-		 * 
+					<score index="2">3.0005</score>
+					<score index="1">2.101</score>
+					<score index="3">2.1003333333333334</score>
+					<score index="4">2.00025</score>
+					<score index="5">1.1002</score>
+			 </scores>
 		 */
-		StringBuffer buf = new StringBuffer(); 
+		StringBuilder buf = new StringBuilder();
 		buf.append("<scores>");
 		for(HitBase hit: reRankedResults){
 			buf.append("<score index=\""+hit.getSource()+"\">"+hit.getGenerWithQueryScore()+"</score>");				
 		}
 		buf.append("</scores>");
 
-		NamedList<Object> scoreNum = new NamedList<Object>();
+		NamedList<Object> scoreNum = new NamedList<>();
 		for(HitBase hit: reRankedResults){
 			scoreNum.add(hit.getSource(), hit.getGenerWithQueryScore());				
 		}
@@ -176,7 +145,7 @@ public class SearchResultsReRankerRequestHandler extends SearchHandler {
 		}
 		bufNums.append("/order>");
 		
-		LOG.info("re-ranking results: "+buf.toString());
+		LOG.info("re-ranking results: "+ buf);
 		NamedList<Object> values = rsp.getValues();
 		values.remove("response");
 		values.add("response", scoreNum); 
@@ -194,6 +163,7 @@ public class SearchResultsReRankerRequestHandler extends SearchHandler {
 
 	private boolean tooFewKeywords(String query) {
 		String[] parts = query.split(" ");
+		final int MAX_QUERY_LENGTH_NOT_TO_RERANK = 3;
 		if (parts!=null && parts.length< MAX_QUERY_LENGTH_NOT_TO_RERANK)
 			return true;
 
@@ -207,14 +177,13 @@ public class SearchResultsReRankerRequestHandler extends SearchHandler {
 		} catch (Exception e){
 			LOG.severe(e.getMessage());
 		}
-		List<HitBase> newHitList = new ArrayList<HitBase>();
-
+		List<HitBase> newHitList = new ArrayList<>();
 
 		int count=1;
 		for (HitBase hit : hits) {
 			String snapshot = hit.getAbstractText();
 			snapshot += " . " + hit.getTitle();
-			Double score = 0.0;
+			double score = 0.0;
 			try {
 				SentencePairMatchResult matchRes = sm.assessRelevance(snapshot,
 						searchQuery);
@@ -229,15 +198,16 @@ public class SearchResultsReRankerRequestHandler extends SearchHandler {
 			newHitList.add(hit);
 			count++;
 		}
-		Collections.sort(newHitList, new HitBaseComparable());
+		newHitList.sort(new HitBaseComparable());
 		LOG.info(newHitList.toString());
 
 		return newHitList;
 	}
 
 
-	public class HitBaseComparable implements Comparator<HitBase> {
-		// @Override
+	public static class HitBaseComparable implements Comparator<HitBase> {
+
+		@Override
 		public int compare(HitBase o1, HitBase o2) {
 			return (o1.getGenerWithQueryScore() > o2.getGenerWithQueryScore() ? -1
 					: (o1 == o2 ? 0 : 1));
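One caveat the migration leaves untouched: HitBaseComparable returns 1 for distinct hits with equal scores, because "o1 == o2" tests reference identity, so compare(a, b) and compare(b, a) can both be positive, violating the Comparator contract. A contract-safe descending comparator, sketched against a hypothetical stand-in for HitBase, could be:

    import java.util.Comparator;

    // hypothetical minimal stand-in for opennlp.tools.similarity.apps.HitBase
    class Hit {
      private final double generWithQueryScore;
      Hit(double score) { this.generWithQueryScore = score; }
      double getGenerWithQueryScore() { return generWithQueryScore; }
    }

    public class HitComparatorSketch {
      public static void main(String[] args) {
        // descending by score; equal scores compare as 0, keeping the contract symmetric
        Comparator<Hit> byScoreDesc =
            Comparator.comparingDouble(Hit::getGenerWithQueryScore).reversed();
        System.out.println(byScoreDesc.compare(new Hit(2.0), new Hit(3.0))); // > 0
      }
    }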
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/SyntGenRequestHandler.java b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/SyntGenRequestHandler.java
index d2f4b1b..484ecb2 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/SyntGenRequestHandler.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/SyntGenRequestHandler.java
@@ -18,17 +18,14 @@ package opennlp.tools.similarity.apps.solr;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import opennlp.tools.similarity.apps.HitBaseComparable;
 import opennlp.tools.similarity.apps.utils.Pair;
 import opennlp.tools.textsimilarity.ParseTreeChunkListScorer;
 import opennlp.tools.textsimilarity.SentencePairMatchResult;
@@ -42,27 +39,15 @@ import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.queryparser.classic.ParseException;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.CachingWrapperFilter;
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TotalHits;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
-import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.handler.component.ResponseBuilder;
-import org.apache.solr.handler.component.SearchComponent;
 import org.apache.solr.handler.component.SearchHandler;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.solr.handler.component.ShardRequest;
-import org.apache.solr.handler.component.ShardResponse;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.ResultContext;
 import org.apache.solr.response.SolrQueryResponse;
@@ -72,61 +57,54 @@ import org.apache.solr.search.DocList;
 import org.apache.solr.search.DocSlice;
 import org.apache.solr.search.QParser;
 import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.util.RTimer;
-import org.apache.solr.util.SolrPluginUtils;
 
 public class SyntGenRequestHandler extends SearchHandler {
 
-	private ParseTreeChunkListScorer parseTreeChunkListScorer = new ParseTreeChunkListScorer();
+	private final ParseTreeChunkListScorer parseTreeChunkListScorer = new ParseTreeChunkListScorer();
 
 	public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp){
 		try {
 			super.handleRequestBody(req, rsp);
 		} catch (Exception e) {
-			// TODO Auto-generated catch block
 			e.printStackTrace();
 		}
-		
+
 		SolrParams reqValues = req.getOriginalParams();
 		Iterator<String> iter = reqValues.getParameterNamesIterator();
 		while(iter.hasNext()){
 			System.out.println(iter.next());
 		}
-		
-		String param = req.getParamString();
+
 		//modify rsp
-		NamedList values = rsp.getValues();
+		NamedList<Object> values = rsp.getValues();
 		ResultContext c = (ResultContext) values.get("response");
 		if (c==null)
 			return;
-		
+
 		String val1 = (String)values.get("t1");
 		String k1 = values.getName(0);
 		k1 = values.getName(1);
 		k1 = values.getName(2);
 		k1 = values.getName(3);
 		k1 = values.getName(4);
-		
-		DocList dList = c.docs;
-		DocList dListResult=null;
+
+		DocList dList = c.getDocList();
+		DocList dListResult;
 		try {
-			dListResult = filterResultsBySyntMatchReduceDocSet(dList,
-					req,  req.getParams());
+			dListResult = filterResultsBySyntMatchReduceDocSet(dList, req,  req.getParams());
 		} catch (Exception e) {
 			dListResult = dList;
-			// TODO Auto-generated catch block
 			e.printStackTrace();
 		}
-		c.docs = dListResult;
+		// c.docs = dListResult;
 		values.remove("response");
-		
+
 		rsp.setAllValues(values);
 	}
 
-
 	public DocList filterResultsBySyntMatchReduceDocSet(DocList docList,
-			SolrQueryRequest req,  SolrParams params) {		
-		//if (!docList.hasScores()) 
+			SolrQueryRequest req,  SolrParams params) {
+		//if (!docList.hasScores())
 		//	return docList;
 
 		int len = docList.size();
@@ -140,26 +118,26 @@ public class SyntGenRequestHandler extends SearchHandler {
 		String[] exprParts = requestExpression.split("&");
 		for(String part: exprParts){
 			if (part.startsWith("q="))
-				requestExpression = part;			
+				requestExpression = part;
 		}
 		String fieldNameQuery = StringUtils.substringBetween(requestExpression, "=", ":");
 		// extract phrase query (in double-quotes)
 		String[] queryParts = requestExpression.split("\"");
 		if  (queryParts.length>=2 && queryParts[1].length()>5)
-			requestExpression = queryParts[1].replace('+', ' ');	
-		else if (requestExpression.indexOf(":") > -1 ) {// still field-based expression
+			requestExpression = queryParts[1].replace('+', ' ');
+		else if (requestExpression.contains(":")) {// still field-based expression
 			requestExpression = requestExpression.replaceAll(fieldNameQuery+":", "").replace('+',' ').replaceAll("  ", " ").replace("q=", "");
 		}
-		
+
 		if (fieldNameQuery ==null)
 			return docList;
 		if (requestExpression==null || requestExpression.length()<5  || requestExpression.split(" ").length<3)
 			return docList;
-		int[] docIDsHits = new int[len]; 
+		int[] docIDsHits = new int[len];
 
 		IndexReader indexReader = req.getSearcher().getIndexReader();
-		List<Integer> bestMatchesDocIds = new ArrayList<Integer>(); List<Float> bestMatchesScore = new ArrayList<Float>();
-		List<Pair<Integer, Float>> docIdsScores = new ArrayList<Pair<Integer, Float>> ();
+		List<Integer> bestMatchesDocIds = new ArrayList<>(); List<Float> bestMatchesScore = new ArrayList<>();
+		List<Pair<Integer, Float>> docIdsScores = new ArrayList<> ();
 		try {
 			for (int i=0; i<docList.size(); ++i) {
 				int docId = iter.nextDoc();
@@ -171,27 +149,24 @@ public class SyntGenRequestHandler extends SearchHandler {
 				if (answerText==null)
 					continue;
 				SentencePairMatchResult matchResult = pos.assessRelevance( requestExpression , answerText);
-				float syntMatchScore =  new Double(parseTreeChunkListScorer.getParseTreeChunkListScore(matchResult.getMatchResult())).floatValue();
+				float syntMatchScore = (float) parseTreeChunkListScorer.getParseTreeChunkListScore(matchResult.getMatchResult());
 				bestMatchesDocIds.add(docId);
 				bestMatchesScore.add(syntMatchScore);
-				syntMatchScoreArr[i] = (float)syntMatchScore; //*iter.score();
+				syntMatchScoreArr[i] = syntMatchScore; //*iter.score();
 				System.out.println(" Matched query = '"+requestExpression + "' with answer = '"+answerText +"' | doc_id = '"+docId);
 				System.out.println(" Match result = '"+matchResult.getMatchResult() + "' with score = '"+syntMatchScore +"';" );
-				docIdsScores.add(new Pair(docId, syntMatchScore));
+				docIdsScores.add(new Pair<>(docId, syntMatchScore));
 			}
 
 		} catch (CorruptIndexException e1) {
-			// TODO Auto-generated catch block
 			e1.printStackTrace();
 			//log.severe("Corrupt index"+e1);
 		} catch (IOException e1) {
-			// TODO Auto-generated catch block
 			e1.printStackTrace();
 			//log.severe("File read IO / index"+e1);
 		}
-		
-		
-		Collections.sort(docIdsScores, new PairComparable());
+
+		docIdsScores.sort(new PairComparable<>());
 		for(int i = 0; i<docIdsScores.size(); i++){
 			bestMatchesDocIds.set(i, docIdsScores.get(i).getFirst());
 			bestMatchesScore.set(i, docIdsScores.get(i).getSecond());
@@ -199,17 +174,11 @@ public class SyntGenRequestHandler extends SearchHandler {
 		System.out.println(bestMatchesScore);
 		float maxScore = docList.maxScore(); // do not change
 		int limit = docIdsScores.size();
-		int start = 0; 
-		DocSlice ds = null;
-
-		ds = new DocSlice(start, limit, 
-				ArrayUtils.toPrimitive(bestMatchesDocIds.toArray(new Integer[0])), 
-				ArrayUtils.toPrimitive(bestMatchesScore.toArray(new Float[0])), 
-				bestMatchesDocIds.size(), maxScore);
-
-
-
-		return ds;
+		int start = 0;
+		return new DocSlice(start, limit,
+				ArrayUtils.toPrimitive(bestMatchesDocIds.toArray(new Integer[0])),
+				ArrayUtils.toPrimitive(bestMatchesScore.toArray(new Float[0])),
+				bestMatchesDocIds.size(), maxScore, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO);
 	}
 
 
@@ -221,10 +190,10 @@ public class SyntGenRequestHandler extends SearchHandler {
 		String q = params.get(CommonParams.Q);
 		String[] fqs = params.getParams(CommonParams.FQ);
 		int start = 0;
-		try { start = Integer.parseInt(params.get(CommonParams.START)); } 
+		try { start = Integer.parseInt(params.get(CommonParams.START)); }
 		catch (Exception e) { /* default */ }
 		int rows = 0;
-		try { rows = Integer.parseInt(params.get(CommonParams.ROWS)); } 
+		try { rows = Integer.parseInt(params.get(CommonParams.ROWS)); }
 		catch (Exception e) { /* default */ }
 		//SolrPluginUtils.setReturnFields(req, rsp);
 
@@ -234,11 +203,11 @@ public class SyntGenRequestHandler extends SearchHandler {
 		SolrIndexSearcher searcher = req.getSearcher();
 		Map<String,SchemaField> fields = req.getSchema().getFields();
 		int ndocs = start + rows;
-		Filter filter = buildFilter(fqs, req);
-		Set<Integer> alreadyFound = new HashSet<Integer>();
+		Query filter = buildFilter(fqs, req);
+		Set<Integer> alreadyFound = new HashSet<>();
 
 		// invoke the various sub-handlers in turn and return results
-		doSearch1(results, searcher, q, filter, ndocs, req, 
+		doSearch1(results, searcher, q, filter, ndocs, req,
 				fields, alreadyFound);
 
 		// ... more sub-handler calls here ...
@@ -246,9 +215,8 @@ public class SyntGenRequestHandler extends SearchHandler {
 		// build and write response
 		float maxScore = 0.0F;
 		int numFound = 0;
-		List<SolrDocument> slice = new ArrayList<SolrDocument>();
-		for (Iterator<SolrDocument> it = results.iterator(); it.hasNext(); ) {
-			SolrDocument sdoc = it.next();
+		List<SolrDocument> slice = new ArrayList<>();
+		for (SolrDocument sdoc : results) {
 			Float score = (Float) sdoc.getFieldValue("score");
 			if (maxScore < score) {
 				maxScore = score;
@@ -268,39 +236,32 @@ public class SyntGenRequestHandler extends SearchHandler {
 	}
 
 
-	private Filter buildFilter(String[] fqs, SolrQueryRequest req) 
+	private Query buildFilter(String[] fqs, SolrQueryRequest req)
 	throws IOException, ParseException {
 		if (fqs != null && fqs.length > 0) {
-			BooleanQuery fquery = new BooleanQuery();
-			for (int i = 0; i < fqs.length; i++) {
-				QParser parser = null;
-				try {
-					parser = QParser.getParser(fqs[i], null, req);
-				} catch (Exception e) {
-					// TODO Auto-generated catch block
-					e.printStackTrace();
-				}
+			BooleanQuery.Builder fquery =  new BooleanQuery.Builder();
+			for (String fq : fqs) {
+				QParser parser;
 				try {
+					parser = QParser.getParser(fq, null, req);
 					fquery.add(parser.getQuery(), Occur.MUST);
 				} catch (Exception e) {
-					// TODO Auto-generated catch block
 					e.printStackTrace();
 				}
 			}
-			return new CachingWrapperFilter(new QueryWrapperFilter(fquery));
+			return fquery.build();
 		}
 		return null;
 	}
 
 	private void doSearch1(SolrDocumentList results,
-			SolrIndexSearcher searcher, String q, Filter filter, 
+			SolrIndexSearcher searcher, String q, Query filter,
 			int ndocs, SolrQueryRequest req,
 			Map<String,SchemaField> fields, Set<Integer> alreadyFound) 
 	throws IOException {
 
 		// build custom query and extra fields
-		Query query = null; //buildCustomQuery1(q);
-		Map<String,Object> extraFields = new HashMap<String,Object>();
+		Map<String,Object> extraFields = new HashMap<>();
 		extraFields.put("search_type", "search1");
 		boolean includeScore = 
 			req.getParams().get(CommonParams.FL).contains("score");
@@ -308,7 +269,7 @@ public class SyntGenRequestHandler extends SearchHandler {
 		int  maxDocsPerSearcherType = 0;
 		float maprelScoreCutoff = 2.0f;
 		append(results, searcher.search(
-				query, filter, maxDocsPerSearcherType).scoreDocs,
+				filter, maxDocsPerSearcherType).scoreDocs,
 				alreadyFound, fields, extraFields, maprelScoreCutoff , 
 				searcher.getIndexReader(), includeScore);
 	}
@@ -341,14 +302,13 @@ public class SyntGenRequestHandler extends SearchHandler {
 			alreadyFound.add(hit.doc);
 		}
 	}
-	public class PairComparable implements Comparator<Pair> {
-		// @Override
-		public int compare(Pair o1, Pair o2) {
+	public static class PairComparable<T1, T2> implements Comparator<Pair<T1, T2>> {
+
+		@Override
+		public int compare(Pair<T1, T2> o1, Pair<T1, T2> o2) {
 			int b = -2;
 			if ( o1.getSecond() instanceof Float && o2.getSecond() instanceof Float){
-
-				b =  (((Float)o1.getSecond()> (Float)o2.getSecond()) ? -1
-						: (((Float)o1.getSecond() == (Float)o2.getSecond()) ? 0 : 1));
+				b =  (((Float) o2.getSecond()).compareTo((Float) o1.getSecond()));
 			}
 			return b;
 		}
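The generified PairComparable above orders pairs by their Float second element in descending order via compareTo. The same ordering can be written directly with Comparator.comparing; the Pair below is a hypothetical minimal stand-in for opennlp.tools.similarity.apps.utils.Pair:

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    // hypothetical minimal stand-in for opennlp.tools.similarity.apps.utils.Pair
    class Pair<T1, T2> {
      private final T1 first;
      private final T2 second;
      Pair(T1 first, T2 second) { this.first = first; this.second = second; }
      T1 getFirst() { return first; }
      T2 getSecond() { return second; }
    }

    public class PairSortSketch {
      public static void main(String[] args) {
        List<Pair<Integer, Float>> docIdsScores = new ArrayList<>();
        docIdsScores.add(new Pair<>(7, 1.1f));
        docIdsScores.add(new Pair<>(42, 3.0f));
        // descending by score, the order the re-rankers above expect
        Comparator<Pair<Integer, Float>> byScore = Comparator.comparing(Pair::getSecond);
        docIdsScores.sort(byScore.reversed());
        System.out.println(docIdsScores.get(0).getFirst()); // -> 42
      }
    }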
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/utils/FileHandler.java b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/utils/FileHandler.java
index d15cc17..6d5add1 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/utils/FileHandler.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/utils/FileHandler.java
@@ -66,24 +66,23 @@ public class FileHandler {
    * @param append
    * @throws Exception
    */
-  public void writeToTextFile(ArrayList<String> list, String filePath,
-      boolean append) throws Exception {
-    FileWriter outFile = null;
+  public void writeToTextFile(ArrayList<String> list, String filePath, boolean append) throws Exception {
+    FileWriter outFile;
     Iterator<String> it = list.iterator();
     if (!append) {
       outFile = new FileWriter(filePath);
       PrintWriter out = new PrintWriter(outFile);
       while (it.hasNext()) {
-        out.println((String) it.next());
+        out.println(it.next());
       }
       outFile.close();
     } else {
       int tmp = 0;
       while (it.hasNext()) {
         if (tmp == 0) {
-          appendtofile("\n" + (String) it.next(), filePath);
+          appendtofile("\n" + it.next(), filePath);
         } else {
-          appendtofile((String) it.next(), filePath);
+          appendtofile(it.next(), filePath);
         }
         tmp++;
       }
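
The writer in writeToTextFile is now declared without the dummy null initialisation; a try-with-resources variant would go one step further and guarantee the PrintWriter is flushed and closed on all paths. A hedged sketch of that alternative for the non-append branch (not part of this commit):

    import java.io.FileWriter;
    import java.io.IOException;
    import java.io.PrintWriter;
    import java.util.List;

    public class FileHandlerSketch {
      // Writes each entry on its own line; the writer is closed (and flushed)
      // automatically, even if println throws.
      public void writeToTextFile(List<String> list, String filePath) throws IOException {
        try (PrintWriter out = new PrintWriter(new FileWriter(filePath))) {
          for (String line : list) {
            out.println(line);
          }
        }
      }
    }
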
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/utils/PageFetcher.java b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/utils/PageFetcher.java
index 7f17f84..f3b4814 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/utils/PageFetcher.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/utils/PageFetcher.java
@@ -18,10 +18,8 @@
 package opennlp.tools.similarity.apps.utils;
 
 import java.io.BufferedReader;
-import java.io.File;
 import java.io.IOException;
 import java.io.InputStreamReader;
-import java.net.MalformedURLException;
 import java.net.URL;
 import java.net.URLConnection;
 import java.util.logging.Logger;
@@ -34,11 +32,9 @@ import org.apache.tika.parser.ParseContext;
 import org.apache.tika.parser.Parser;
 import org.apache.tika.sax.BodyContentHandler;
 
-
 public class PageFetcher {
-  private static final Logger log = Logger
-      .getLogger("opennlp.tools.similarity.apps.utils.PageFetcher");
-  Tika tika = new Tika();
+  private static final Logger log = Logger.getLogger("opennlp.tools.similarity.apps.utils.PageFetcher");
+  private final Tika tika = new Tika();
 
   private static int DEFAULT_TIMEOUT = 1500;
   private void setTimeout(int to){
@@ -52,47 +48,37 @@ public class PageFetcher {
   public String fetchPageAutoDetectParser(final String url ){
 	  String fetchURL = addHttp(url);
 	  String pageContent = null;
-	    URLConnection connection;
-	    try {
-	      log.info("fetch url  auto detect parser " + url);
-	      connection = new URL(fetchURL).openConnection();
-	      connection.setReadTimeout(DEFAULT_TIMEOUT);
-	      
-	    //parse method parameters
-	      Parser parser = new AutoDetectParser();
-	      BodyContentHandler handler = new BodyContentHandler();
-	      Metadata metadata = new Metadata();
-	      ParseContext context = new ParseContext();
-	      
-	      //parsing the file
-	      parser.parse(connection.getInputStream(), handler, metadata, context);
-	      
-	      pageContent = handler.toString();
-	    } catch (Exception e) {
-	      log.info(e.getMessage() + "\n" + e);
-	    }
-	    return  pageContent;
+    URLConnection connection;
+    try {
+      connection = new URL(fetchURL).openConnection();
+      connection.setReadTimeout(DEFAULT_TIMEOUT);
+      // parse method parameters
+      Parser parser = new AutoDetectParser();
+      BodyContentHandler handler = new BodyContentHandler();
+      Metadata metadata = new Metadata();
+      ParseContext context = new ParseContext();
+
+      // parsing the file
+      parser.parse(connection.getInputStream(), handler, metadata, context);
+
+      pageContent = handler.toString();
+    } catch (Exception e) {
+      log.severe(e.getMessage() + "\n" + e);
+    }
+    return pageContent;
   }
   
 
   public String fetchPage(final String url, final int timeout) {
     String fetchURL = addHttp(url);
-
-    log.info("fetch url " + fetchURL);
-
     String pageContent = null;
     URLConnection connection;
     try {
       connection = new URL(fetchURL).openConnection();
       connection.setReadTimeout(DEFAULT_TIMEOUT);
-      
       pageContent = tika.parseToString(connection.getInputStream())
           .replace('\n', ' ').replace('\t', ' ');
-    } catch (MalformedURLException e) {
-      log.severe(e.getMessage() + "\n" + e);
-    } catch (IOException e) {
-      log.severe(e.getMessage() + "\n" + e);
-    } catch (TikaException e) {
+    } catch (IOException | TikaException e) {
       log.severe(e.getMessage() + "\n" + e);
     }
     return pageContent;
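
Both fetch methods stream the URL content through Apache Tika; the auto-detect variant picks a parser from the detected media type (note that fetchPage still applies DEFAULT_TIMEOUT rather than its timeout parameter). A minimal standalone sketch of the auto-detect path, with an illustrative URL and the same 1500 ms read timeout:

    import java.net.URL;
    import java.net.URLConnection;

    import org.apache.tika.metadata.Metadata;
    import org.apache.tika.parser.AutoDetectParser;
    import org.apache.tika.parser.ParseContext;
    import org.apache.tika.sax.BodyContentHandler;

    public class FetchSketch {
      public static void main(String[] args) throws Exception {
        URLConnection connection = new URL("https://example.org").openConnection();
        connection.setReadTimeout(1500);
        // BodyContentHandler collects the plain-text body extracted by Tika
        BodyContentHandler handler = new BodyContentHandler();
        new AutoDetectParser().parse(connection.getInputStream(), handler,
            new Metadata(), new ParseContext());
        System.out.println(handler.toString());
      }
    }
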
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/utils/Pair.java b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/utils/Pair.java
index 0bae33e..f5f2d90 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/utils/Pair.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/utils/Pair.java
@@ -60,13 +60,12 @@ public class Pair<T1, T2> {
   }
   
   public class PairComparable implements Comparator<Pair<T1, T2>> {
-    // @Override
-    public int compare(Pair o1, Pair o2) {
+
+    @Override
+    public int compare(Pair<T1, T2> o1, Pair<T1, T2> o2) {
       int b = -2;
-      if ( o1.second instanceof Float && o2.second instanceof Float){
-        
-        b =  (((Float)o1.second > (Float)o2.second) ? -1
-          : (((Float)o1.second == (Float)o2.second) ? 0 : 1));
+      if (o1.second instanceof Float && o2.second instanceof Float) {
+        b = ((Float) o2.second).compareTo((Float) o1.second);
       }
       return b;
     }
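
The compareTo rewrite here fixes a real defect, not just style: the old branch compared two boxed Floats with ==, which tests reference identity, while the > branch relied on auto-unboxing. A small sketch of the difference and of the descending order produced by o2.compareTo(o1):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class FloatCompareSketch {
      public static void main(String[] args) {
        Float a = 1000f, b = 1000f;
        System.out.println(a == b);         // typically false: compares references, not values
        System.out.println(a.compareTo(b)); // 0: compares values

        List<Float> scores = new ArrayList<>(Arrays.asList(0.3f, 0.9f, 0.5f));
        scores.sort((o1, o2) -> o2.compareTo(o1)); // descending, as in PairComparable
        System.out.println(scores);         // [0.9, 0.5, 0.3]
      }
    }
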
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/utils/StringDistanceMeasurer.java b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/utils/StringDistanceMeasurer.java
index 377b02a..b61137f 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/utils/StringDistanceMeasurer.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/utils/StringDistanceMeasurer.java
@@ -93,7 +93,7 @@ public class StringDistanceMeasurer {
   // main entry point. Gets two strings and applies string match
   // and also linguistic match if score > a threshold
   public double measureStringDistance(String str1, String str2) {
-    double result = (double) -1.0;
+    double result = -1.0;
     try {
       str1 = StringCleaner.processSnapshotForMatching(str1);
       str2 = StringCleaner.processSnapshotForMatching(str2);
@@ -143,7 +143,7 @@ public class StringDistanceMeasurer {
       result = Math.pow((double) (lOverlap * lOverlap) / (double) l1
           / (double) l2, 0.4);
       if (result > 1)
-        result = (double) 1.0;
+        result = 1.0;
 
       // double ld = LevensteinDistanceFinder. levensteinDistance(str1, str2, 1,
       // 10, 1, 10);
@@ -151,10 +151,10 @@ public class StringDistanceMeasurer {
 
     } catch (Exception e) {
       e.printStackTrace();
-      return (double) -1.0;
+      return -1.0;
     }
 
-    Double linguisticScore = (double) -1.0;
+    Double linguisticScore = -1.0;
     // to be developed - employs linguistic processor
     /*
      * if (result>MIN_SCORE_FOR_LING) { List<List<ParseTreeChunk>> matchResult =
@@ -169,7 +169,7 @@ public class StringDistanceMeasurer {
   }
 
   public double measureStringDistanceNoStemming(String str1, String str2) {
-    double result = (double) -1.0;
+    double result = -1.0;
     try {
       str1 = StringCleaner.processSnapshotForMatching(str1);
       str2 = StringCleaner.processSnapshotForMatching(str2);
@@ -219,7 +219,7 @@ public class StringDistanceMeasurer {
       result = Math.pow((double) (lOverlap * lOverlap) / (double) l1
           / (double) l2, 0.4);
       if (result > 1)
-        result = (double) 1.0;
+        result = 1.0;
 
       // double ld = LevensteinDistanceFinder. levensteinDistance(str1, str2, 1,
       // 10, 1, 10);
@@ -227,10 +227,10 @@ public class StringDistanceMeasurer {
 
     } catch (Exception e) {
       e.printStackTrace();
-      return (double) -1.0;
+      return -1.0;
     }
 
-    Double linguisticScore = (double) -1.0;
+    Double linguisticScore = -1.0;
     // to be developed - employs linguistic processor
     /*
      * if (result>MIN_SCORE_FOR_LING) { List<List<ParseTreeChunk>> matchResult =
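
Both distance methods end in the same score: result = ((lOverlap^2) / (l1 * l2))^0.4, capped at 1.0, where lOverlap is the number of shared tokens and l1, l2 are the token counts of the two strings. A quick numeric check of that formula (the counts are made up):

    public class ScoreSketch {
      public static void main(String[] args) {
        int lOverlap = 3, l1 = 4, l2 = 6; // 3 shared tokens out of 4 and 6
        double result = Math.pow((double) (lOverlap * lOverlap) / l1 / l2, 0.4);
        System.out.println(result); // ~0.68, below the 1.0 cap
      }
    }
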
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/utils/Utils.java b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/utils/Utils.java
index 18fc5f7..1f8c235 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/utils/Utils.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/utils/Utils.java
@@ -22,7 +22,6 @@ import java.awt.geom.AffineTransform;
 import java.awt.image.BufferedImage;
 import java.io.File;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -41,7 +40,7 @@ public class Utils {
   private static final Logger LOG = Logger
       .getLogger("opennlp.tools.similarity.apps.utils.Utils");
 
-  protected static final ArrayList<String[]> characterMappings = new ArrayList<String[]>();
+  protected static final ArrayList<String[]> characterMappings = new ArrayList<>();
 
   static {
     characterMappings
@@ -193,7 +192,7 @@ public class Utils {
   }
 
   public static String stripNonAsciiChars(String s) {
-    StringBuffer b = new StringBuffer();
+    StringBuilder b = new StringBuilder();
     if (s != null) {
       for (int i = 0; i < s.length(); i++) {
         if (((int) s.charAt(i)) <= 256) {
@@ -229,10 +228,11 @@ public class Utils {
       this.value = i;
     }
 
-    public static class SortByValue implements Comparator {
-      public int compare(Object obj1, Object obj2) {
-        float i1 = ((KeyValue) obj1).value;
-        float i2 = ((KeyValue) obj2).value;
+    public static class SortByValue implements Comparator<KeyValue> {
+      @Override
+      public int compare(KeyValue obj1, KeyValue obj2) {
+        float i1 = obj1.value;
+        float i2 = obj2.value;
 
         if (i1 < i2)
           return 1;
@@ -280,7 +280,7 @@ public class Utils {
   }
 
   public static int computeEditDistance(String s, String t) {
-    int d[][]; // matrix
+    int[][] d; // matrix
     int n; // length of s
     int m; // length of t
     int i; // iterates through s
@@ -334,13 +334,13 @@ public class Utils {
       res.add(new KeyValue(o, h.get(o)));
     }
 
-    Collections.sort(res, new KeyValue.SortByValue());
+    res.sort(new KeyValue.SortByValue());
 
     return res;
   }
 
   public static String convertKeyValueToString(ArrayList<KeyValue> l) {
-    StringBuffer retVal = new StringBuffer();
+    StringBuilder retVal = new StringBuilder();
     for (KeyValue kv : l) {
       retVal.append(kv.key);
       retVal.append("-");
@@ -352,7 +352,7 @@ public class Utils {
   }
 
   public static String convertStringArrayToString(ArrayList<String> l) {
-    StringBuffer b = new StringBuffer();
+    StringBuilder b = new StringBuilder();
     for (String s : l) {
       b.append(s);
       b.append(", ");
@@ -362,7 +362,7 @@ public class Utils {
   }
 
   public static String convertStringArrayToPlainString(ArrayList<String> l) {
-    StringBuffer b = new StringBuffer();
+    StringBuilder b = new StringBuilder();
     for (String s : l) {
       b.append(s);
       b.append(" ");
@@ -420,7 +420,7 @@ public class Utils {
   }
 
   public static String convertHashMapToString(HashMap<String, Integer> m) {
-    StringBuffer s = new StringBuffer();
+    StringBuilder s = new StringBuilder();
     for (String x : m.keySet()) {
       s.append(x);
       s.append("-");
@@ -450,7 +450,7 @@ public class Utils {
   }
 
   public static String CleanCharacter(String txt, int uValue) {
-    StringBuffer retVal = new StringBuffer();
+    StringBuilder retVal = new StringBuilder();
     for (int i = 0; i < txt.length(); i++) {
       int uChar = (txt.charAt(i));
       if (uChar != uValue) {
@@ -486,16 +486,14 @@ public class Utils {
     Pattern p = java.util.regex.Pattern.compile("\\<SCRIPT.*?</SCRIPT>",
         Pattern.DOTALL | Pattern.CASE_INSENSITIVE);
     Matcher matcher = p.matcher(text);
-    String tmp = matcher.replaceAll("");
-    return tmp;
+    return matcher.replaceAll("");
   }
 
   public static String stripNoScriptTags(String text) {
     Pattern p = java.util.regex.Pattern.compile("\\<NOSCRIPT.*?</NOSCRIPT>",
         Pattern.DOTALL | Pattern.CASE_INSENSITIVE);
     Matcher matcher = p.matcher(text);
-    String tmp = matcher.replaceAll("");
-    return tmp;
+    return matcher.replaceAll("");
   }
 
   public static String stripHTMLMultiLine(String text,
@@ -531,15 +529,13 @@ public class Utils {
   public static String stripHTMLMultiLine(String text) {
     Pattern p = java.util.regex.Pattern.compile("\\<.*?>", Pattern.DOTALL);
     Matcher matcher = p.matcher(text);
-    String tmp = matcher.replaceAll("");
-    return tmp;
+    return matcher.replaceAll("");
   }
 
   public static String stripHTMLCommentsMultiLine(String text) {
     Pattern p = java.util.regex.Pattern.compile("\\<!--.*?-->", Pattern.DOTALL);
     Matcher matcher = p.matcher(text);
-    String tmp = matcher.replaceAll("");
-    return tmp;
+    return matcher.replaceAll("");
   }
 
   public static boolean isFlagSet(Integer flags, Integer flagToCheck) {
@@ -560,11 +556,10 @@ public class Utils {
 
   public static Integer setFlag(Integer flags, Integer flagToCheck) {
     if (flags == null) {
-      flags = new Integer(0);
+      flags = 0;
     }
     if (!isFlagSet(flags, flagToCheck)) {
       flags = flags + flagToCheck;
-      ;
     }
     return flags;
   }
@@ -572,7 +567,7 @@ public class Utils {
   public static Integer resetFlag(Integer flags, Integer flagToCheck) {
     if (flags == null) {
       // nothing to reset
-      flags = new Integer(0);
+      flags = 0;
       return flags;
     }
 
@@ -587,7 +582,7 @@ public class Utils {
     if (text.length() <= length) {
       retVal = text;
     } else {
-      StringBuffer b = new StringBuffer();
+      StringBuilder b = new StringBuilder();
       for (int i = 0; i < text.length(); i++) {
         if (b.length() >= length && Character.isWhitespace(text.charAt(i))) { // iterate
           // until
@@ -615,7 +610,7 @@ public class Utils {
   }
 
   public static String makeStringUrlSafe(String text) {
-    StringBuffer b = new StringBuffer();
+    StringBuilder b = new StringBuilder();
     for (int i = 0; i < text.length(); i++) {
       if (StringUtils.isAlphanumericSpace(String.valueOf(text.charAt(i)))) {
         b.append(text.charAt(i));
@@ -636,10 +631,10 @@ public class Utils {
     return eventId;
   }
 
-  public static String buildCommaSeparatedIds(List ids) {
+  public static String buildCommaSeparatedIds(List<?> ids) {
 
     if (ids != null && ids.size() > 0) {
-      StringBuffer sbuf = new StringBuffer();
+      StringBuilder sbuf = new StringBuilder();
 
       for (int count = 0; count < ids.size(); count++) {
         if (count > 0) {
@@ -680,8 +675,7 @@ public class Utils {
     Pattern p = java.util.regex.Pattern.compile("\\<STYLE.*?</STYLE>",
         Pattern.DOTALL | Pattern.CASE_INSENSITIVE);
     Matcher matcher = p.matcher(text);
-    String tmp = matcher.replaceAll("");
-    return tmp;
+    return matcher.replaceAll("");
   }
 
   public static boolean isLatinWord(String word) {
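
computeEditDistance above fills the classic Levenshtein matrix (the hunk only touches the int[][] declaration). For reference, a compact self-contained version with unit costs, which is what the surrounding code appears to implement:

    public class EditDistanceSketch {
      static int editDistance(String s, String t) {
        int n = s.length(), m = t.length();
        int[][] d = new int[n + 1][m + 1]; // d[i][j] = distance of prefixes s[0..i), t[0..j)
        for (int i = 0; i <= n; i++) d[i][0] = i;
        for (int j = 0; j <= m; j++) d[0][j] = j;
        for (int i = 1; i <= n; i++) {
          for (int j = 1; j <= m; j++) {
            int cost = s.charAt(i - 1) == t.charAt(j - 1) ? 0 : 1;
            d[i][j] = Math.min(Math.min(d[i - 1][j] + 1,   // deletion
                                        d[i][j - 1] + 1),  // insertion
                               d[i - 1][j - 1] + cost);    // substitution
          }
        }
        return d[n][m];
      }

      public static void main(String[] args) {
        System.out.println(editDistance("kitten", "sitting")); // 3
      }
    }
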
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/stemmer/PStemmer.java b/opennlp-similarity/src/main/java/opennlp/tools/stemmer/PStemmer.java
index ef1e4b5..85a0eef 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/stemmer/PStemmer.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/stemmer/PStemmer.java
@@ -17,505 +17,501 @@
 
 package opennlp.tools.stemmer;
 
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
 
-	import java.io.IOException;
-	import java.io.InputStream;
-	import java.io.FileInputStream;
+import org.apache.lucene.util.ArrayUtil;
+
+/**
+ * Stemmer, implementing the Porter Stemming Algorithm
+ *
+ * The Stemmer class transforms a word into its root form.  The input
+ * word can be provided a character at a time (by calling add()), or at once
+ * by calling one of the various stem(something) methods.
+ */
+public class PStemmer
+{
+	private char[] b;
+	private int i,    /* offset into b */
+		j, k, k0;
+	private boolean dirty = false;
+	private static final int INITIAL_SIZE = 50;
+
+	public PStemmer() {
+		b = new char[INITIAL_SIZE];
+		i = 0;
+	}
+
+	/**
+	 * reset() resets the stemmer so it can stem another word.  If you invoke
+	 * the stemmer by calling add(char) and then stem(), you must call reset()
+	 * before starting another word.
+	 */
+	public void reset() { i = 0; dirty = false; }
+
+	/**
+	 * Add a character to the word being stemmed.  When you are finished
+	 * adding characters, you can call stem(void) to process the word.
+	 */
+	public void add(char ch) {
+		if (b.length <= i) {
+			b = ArrayUtil.grow(b, i+1);
+		}
+		b[i++] = ch;
+	}
+
+	/**
+	 * After a word has been stemmed, it can be retrieved by toString(),
+	 * or a reference to the internal buffer can be retrieved by getResultBuffer
+	 * and getResultLength (which is generally more efficient.)
+	 */
+	@Override
+	public String toString() { return new String(b,0,i); }
+
+	/**
+	 * Returns the length of the word resulting from the stemming process.
+	 */
+	public int getResultLength() { return i; }
+
+	/**
+	 * Returns a reference to a character buffer containing the results of
+	 * the stemming process.  You also need to consult getResultLength()
+	 * to determine the length of the result.
+	 */
+	public char[] getResultBuffer() { return b; }
+
+	/* cons(i) is true <=> b[i] is a consonant. */
+
+	private final boolean cons(int i) {
+		switch (b[i]) {
+		case 'a': case 'e': case 'i': case 'o': case 'u':
+			return false;
+		case 'y':
+			return (i==k0) ? true : !cons(i-1);
+		default:
+			return true;
+		}
+	}
+
+	/* m() measures the number of consonant sequences between k0 and j. if c is
+		 a consonant sequence and v a vowel sequence, and <..> indicates arbitrary
+		 presence,
+
+					<c><v>       gives 0
+					<c>vc<v>     gives 1
+					<c>vcvc<v>   gives 2
+					<c>vcvcvc<v> gives 3
+					....
+	*/
+
+	private final int m() {
+		int n = 0;
+		int i = k0;
+		while(true) {
+			if (i > j)
+				return n;
+			if (! cons(i))
+				break;
+			i++;
+		}
+		i++;
+		while(true) {
+			while(true) {
+				if (i > j)
+					return n;
+				if (cons(i))
+					break;
+				i++;
+			}
+			i++;
+			n++;
+			while(true) {
+				if (i > j)
+					return n;
+				if (! cons(i))
+					break;
+				i++;
+			}
+			i++;
+		}
+	}
+
+	/* vowelinstem() is true <=> k0,...j contains a vowel */
+
+	private final boolean vowelinstem() {
+		int i;
+		for (i = k0; i <= j; i++)
+			if (! cons(i))
+				return true;
+		return false;
+	}
+
+	/* doublec(j) is true <=> j,(j-1) contain a double consonant. */
+
+	private final boolean doublec(int j) {
+		if (j < k0+1)
+			return false;
+		if (b[j] != b[j-1])
+			return false;
+		return cons(j);
+	}
+
+	/* cvc(i) is true <=> i-2,i-1,i has the form consonant - vowel - consonant
+		 and also if the second c is not w,x or y. this is used when trying to
+		 restore an e at the end of a short word. e.g.
+
+					cav(e), lov(e), hop(e), crim(e), but
+					snow, box, tray.
+
+	*/
+
+	private final boolean cvc(int i) {
+		if (i < k0+2 || !cons(i) || cons(i-1) || !cons(i-2))
+			return false;
+		else {
+			int ch = b[i];
+			if (ch == 'w' || ch == 'x' || ch == 'y') return false;
+		}
+		return true;
+	}
+
+	private final boolean ends(String s) {
+		int l = s.length();
+		int o = k-l+1;
+		if (o < k0)
+			return false;
+		for (int i = 0; i < l; i++)
+			if (b[o+i] != s.charAt(i))
+				return false;
+		j = k-l;
+		return true;
+	}
+
+	/* setto(s) sets (j+1),...k to the characters in the string s, readjusting
+		 k. */
+
+	void setto(String s) {
+		int l = s.length();
+		int o = j+1;
+		for (int i = 0; i < l; i++)
+			b[o+i] = s.charAt(i);
+		k = j+l;
+		dirty = true;
+	}
+
+	/* r(s) is used further down. */
+
+	void r(String s) { if (m() > 0) setto(s); }
+
+	/* step1() gets rid of plurals and -ed or -ing. e.g.
+
+					 caresses  ->  caress
+					 ponies    ->  poni
+					 ties      ->  ti
+					 caress    ->  caress
+					 cats      ->  cat
+
+					 feed      ->  feed
+					 agreed    ->  agree
+					 disabled  ->  disable
+
+					 matting   ->  mat
+					 mating    ->  mate
+					 meeting   ->  meet
+					 milling   ->  mill
+					 messing   ->  mess
+
+					 meetings  ->  meet
+
+	*/
+
+	private final void step1() {
+		if (b[k] == 's') {
+			if (ends("sses")) k -= 2;
+			else if (ends("ies")) setto("i");
+			else if (b[k-1] != 's') k--;
+		}
+		if (ends("eed")) {
+			if (m() > 0)
+				k--;
+		}
+		else if ((ends("ed") || ends("ing")) && vowelinstem()) {
+			k = j;
+			if (ends("at")) setto("ate");
+			else if (ends("bl")) setto("ble");
+			else if (ends("iz")) setto("ize");
+			else if (doublec(k)) {
+				int ch = b[k--];
+				if (ch == 'l' || ch == 's' || ch == 'z')
+					k++;
+			}
+			else if (m() == 1 && cvc(k))
+				setto("e");
+		}
+	}
+
+	/* step2() turns terminal y to i when there is another vowel in the stem. */
+
+	private final void step2() {
+		if (ends("y") && vowelinstem()) {
+			b[k] = 'i';
+			dirty = true;
+		}
+	}
+
+	/* step3() maps double suffices to single ones. so -ization ( = -ize plus
+		 -ation) maps to -ize etc. note that the string before the suffix must give
+		 m() > 0. */
+
+	private final void step3() {
+		if (k == k0) return; /* For Bug 1 */
+		switch (b[k-1]) {
+		case 'a':
+			if (ends("ational")) { r("ate"); break; }
+			if (ends("tional")) { r("tion"); break; }
+			break;
+		case 'c':
+			if (ends("enci")) { r("ence"); break; }
+			if (ends("anci")) { r("ance"); break; }
+			break;
+		case 'e':
+			if (ends("izer")) { r("ize"); break; }
+			break;
+		case 'l':
+			if (ends("bli")) { r("ble"); break; }
+			if (ends("alli")) { r("al"); break; }
+			if (ends("entli")) { r("ent"); break; }
+			if (ends("eli")) { r("e"); break; }
+			if (ends("ousli")) { r("ous"); break; }
+			break;
+		case 'o':
+			if (ends("ization")) { r("ize"); break; }
+			if (ends("ation")) { r("ate"); break; }
+			if (ends("ator")) { r("ate"); break; }
+			break;
+		case 's':
+			if (ends("alism")) { r("al"); break; }
+			if (ends("iveness")) { r("ive"); break; }
+			if (ends("fulness")) { r("ful"); break; }
+			if (ends("ousness")) { r("ous"); break; }
+			break;
+		case 't':
+			if (ends("aliti")) { r("al"); break; }
+			if (ends("iviti")) { r("ive"); break; }
+			if (ends("biliti")) { r("ble"); break; }
+			break;
+		case 'g':
+			if (ends("logi")) { r("log"); break; }
+		}
+	}
+
+	/* step4() deals with -ic-, -full, -ness etc. similar strategy to step3. */
+
+	private final void step4() {
+		switch (b[k]) {
+		case 'e':
+			if (ends("icate")) { r("ic"); break; }
+			if (ends("ative")) { r(""); break; }
+			if (ends("alize")) { r("al"); break; }
+			break;
+		case 'i':
+			if (ends("iciti")) { r("ic"); break; }
+			break;
+		case 'l':
+			if (ends("ical")) { r("ic"); break; }
+			if (ends("ful")) { r(""); break; }
+			break;
+		case 's':
+			if (ends("ness")) { r(""); break; }
+			break;
+		}
+	}
+
+	/* step5() takes off -ant, -ence etc., in context <c>vcvc<v>. */
+
+	private final void step5() {
+		if (k == k0) return; /* for Bug 1 */
+		switch (b[k-1]) {
+		case 'a':
+			if (ends("al")) break;
+			return;
+		case 'c':
+			if (ends("ance")) break;
+			if (ends("ence")) break;
+			return;
+		case 'e':
+			if (ends("er")) break; return;
+		case 'i':
+			if (ends("ic")) break; return;
+		case 'l':
+			if (ends("able")) break;
+			if (ends("ible")) break; return;
+		case 'n':
+			if (ends("ant")) break;
+			if (ends("ement")) break;
+			if (ends("ment")) break;
+			/* element etc. not stripped before the m */
+			if (ends("ent")) break;
+			return;
+		case 'o':
+			if (ends("ion") && j >= 0 && (b[j] == 's' || b[j] == 't')) break;
+			/* j >= 0 fixes Bug 2 */
+			if (ends("ou")) break;
+			return;
+			/* takes care of -ous */
+		case 's':
+			if (ends("ism")) break;
+			return;
+		case 't':
+			if (ends("ate")) break;
+			if (ends("iti")) break;
+			return;
+		case 'u':
+			if (ends("ous")) break;
+			return;
+		case 'v':
+			if (ends("ive")) break;
+			return;
+		case 'z':
+			if (ends("ize")) break;
+			return;
+		default:
+			return;
+		}
+		if (m() > 1)
+			k = j;
+	}
+
+	/* step6() removes a final -e if m() > 1. */
+
+	private final void step6() {
+		j = k;
+		if (b[k] == 'e') {
+			int a = m();
+			if (a > 1 || a == 1 && !cvc(k-1))
+				k--;
+		}
+		if (b[k] == 'l' && doublec(k) && m() > 1)
+			k--;
+	}
 
-	import static org.apache.lucene.util.RamUsageEstimator.NUM_BYTES_CHAR;
-	import org.apache.lucene.util.ArrayUtil;
 
 	/**
-	 *
-	 * Stemmer, implementing the Porter Stemming Algorithm
-	 *
-	 * The Stemmer class transforms a word into its root form.  The input
-	 * word can be provided a character at time (by calling add()), or at once
-	 * by calling one of the various stem(something) methods.
+	 * Stem a word provided as a String.  Returns the result as a String.
 	 */
+	public String stem(String s) {
+		if (stem(s.toCharArray(), s.length()))
+			return toString();
+		else
+			return s;
+	}
+
+	/** Stem a word contained in a char[].  Returns true if the stemming process
+	 * resulted in a word different from the input.  You can retrieve the
+	 * result with getResultLength()/getResultBuffer() or toString().
+	 */
+	public boolean stem(char[] word) {
+		return stem(word, word.length);
+	}
 
-	public class PStemmer
-	{
-	  private char[] b;
-	  private int i,    /* offset into b */
-	    j, k, k0;
-	  private boolean dirty = false;
-	  private static final int INITIAL_SIZE = 50;
-
-	  public PStemmer() {
-	    b = new char[INITIAL_SIZE];
-	    i = 0;
-	  }
-
-	  /**
-	   * reset() resets the stemmer so it can stem another word.  If you invoke
-	   * the stemmer by calling add(char) and then stem(), you must call reset()
-	   * before starting another word.
-	   */
-	  public void reset() { i = 0; dirty = false; }
-
-	  /**
-	   * Add a character to the word being stemmed.  When you are finished
-	   * adding characters, you can call stem(void) to process the word.
-	   */
-	  public void add(char ch) {
-	    if (b.length <= i) {
-	      b = ArrayUtil.grow(b, i+1);
-	    }
-	    b[i++] = ch;
-	  }
-
-	  /**
-	   * After a word has been stemmed, it can be retrieved by toString(),
-	   * or a reference to the internal buffer can be retrieved by getResultBuffer
-	   * and getResultLength (which is generally more efficient.)
-	   */
-	  @Override
-	  public String toString() { return new String(b,0,i); }
-
-	  /**
-	   * Returns the length of the word resulting from the stemming process.
-	   */
-	  public int getResultLength() { return i; }
-
-	  /**
-	   * Returns a reference to a character buffer containing the results of
-	   * the stemming process.  You also need to consult getResultLength()
-	   * to determine the length of the result.
-	   */
-	  public char[] getResultBuffer() { return b; }
-
-	  /* cons(i) is true <=> b[i] is a consonant. */
-
-	  private final boolean cons(int i) {
-	    switch (b[i]) {
-	    case 'a': case 'e': case 'i': case 'o': case 'u':
-	      return false;
-	    case 'y':
-	      return (i==k0) ? true : !cons(i-1);
-	    default:
-	      return true;
-	    }
-	  }
-
-	  /* m() measures the number of consonant sequences between k0 and j. if c is
-	     a consonant sequence and v a vowel sequence, and <..> indicates arbitrary
-	     presence,
-
-	          <c><v>       gives 0
-	          <c>vc<v>     gives 1
-	          <c>vcvc<v>   gives 2
-	          <c>vcvcvc<v> gives 3
-	          ....
-	  */
-
-	  private final int m() {
-	    int n = 0;
-	    int i = k0;
-	    while(true) {
-	      if (i > j)
-	        return n;
-	      if (! cons(i))
-	        break;
-	      i++;
-	    }
-	    i++;
-	    while(true) {
-	      while(true) {
-	        if (i > j)
-	          return n;
-	        if (cons(i))
-	          break;
-	        i++;
-	      }
-	      i++;
-	      n++;
-	      while(true) {
-	        if (i > j)
-	          return n;
-	        if (! cons(i))
-	          break;
-	        i++;
-	      }
-	      i++;
-	    }
-	  }
-
-	  /* vowelinstem() is true <=> k0,...j contains a vowel */
-
-	  private final boolean vowelinstem() {
-	    int i;
-	    for (i = k0; i <= j; i++)
-	      if (! cons(i))
-	        return true;
-	    return false;
-	  }
-
-	  /* doublec(j) is true <=> j,(j-1) contain a double consonant. */
-
-	  private final boolean doublec(int j) {
-	    if (j < k0+1)
-	      return false;
-	    if (b[j] != b[j-1])
-	      return false;
-	    return cons(j);
-	  }
-
-	  /* cvc(i) is true <=> i-2,i-1,i has the form consonant - vowel - consonant
-	     and also if the second c is not w,x or y. this is used when trying to
-	     restore an e at the end of a short word. e.g.
-
-	          cav(e), lov(e), hop(e), crim(e), but
-	          snow, box, tray.
-
-	  */
-
-	  private final boolean cvc(int i) {
-	    if (i < k0+2 || !cons(i) || cons(i-1) || !cons(i-2))
-	      return false;
-	    else {
-	      int ch = b[i];
-	      if (ch == 'w' || ch == 'x' || ch == 'y') return false;
-	    }
-	    return true;
-	  }
-
-	  private final boolean ends(String s) {
-	    int l = s.length();
-	    int o = k-l+1;
-	    if (o < k0)
-	      return false;
-	    for (int i = 0; i < l; i++)
-	      if (b[o+i] != s.charAt(i))
-	        return false;
-	    j = k-l;
-	    return true;
-	  }
-
-	  /* setto(s) sets (j+1),...k to the characters in the string s, readjusting
-	     k. */
-
-	  void setto(String s) {
-	    int l = s.length();
-	    int o = j+1;
-	    for (int i = 0; i < l; i++)
-	      b[o+i] = s.charAt(i);
-	    k = j+l;
-	    dirty = true;
-	  }
-
-	  /* r(s) is used further down. */
-
-	  void r(String s) { if (m() > 0) setto(s); }
-
-	  /* step1() gets rid of plurals and -ed or -ing. e.g.
-
-	           caresses  ->  caress
-	           ponies    ->  poni
-	           ties      ->  ti
-	           caress    ->  caress
-	           cats      ->  cat
-
-	           feed      ->  feed
-	           agreed    ->  agree
-	           disabled  ->  disable
-
-	           matting   ->  mat
-	           mating    ->  mate
-	           meeting   ->  meet
-	           milling   ->  mill
-	           messing   ->  mess
-
-	           meetings  ->  meet
-
-	  */
-
-	  private final void step1() {
-	    if (b[k] == 's') {
-	      if (ends("sses")) k -= 2;
-	      else if (ends("ies")) setto("i");
-	      else if (b[k-1] != 's') k--;
-	    }
-	    if (ends("eed")) {
-	      if (m() > 0)
-	        k--;
-	    }
-	    else if ((ends("ed") || ends("ing")) && vowelinstem()) {
-	      k = j;
-	      if (ends("at")) setto("ate");
-	      else if (ends("bl")) setto("ble");
-	      else if (ends("iz")) setto("ize");
-	      else if (doublec(k)) {
-	        int ch = b[k--];
-	        if (ch == 'l' || ch == 's' || ch == 'z')
-	          k++;
-	      }
-	      else if (m() == 1 && cvc(k))
-	        setto("e");
-	    }
-	  }
-
-	  /* step2() turns terminal y to i when there is another vowel in the stem. */
-
-	  private final void step2() {
-	    if (ends("y") && vowelinstem()) {
-	      b[k] = 'i';
-	      dirty = true;
-	    }
-	  }
-
-	  /* step3() maps double suffices to single ones. so -ization ( = -ize plus
-	     -ation) maps to -ize etc. note that the string before the suffix must give
-	     m() > 0. */
-
-	  private final void step3() {
-	    if (k == k0) return; /* For Bug 1 */
-	    switch (b[k-1]) {
-	    case 'a':
-	      if (ends("ational")) { r("ate"); break; }
-	      if (ends("tional")) { r("tion"); break; }
-	      break;
-	    case 'c':
-	      if (ends("enci")) { r("ence"); break; }
-	      if (ends("anci")) { r("ance"); break; }
-	      break;
-	    case 'e':
-	      if (ends("izer")) { r("ize"); break; }
-	      break;
-	    case 'l':
-	      if (ends("bli")) { r("ble"); break; }
-	      if (ends("alli")) { r("al"); break; }
-	      if (ends("entli")) { r("ent"); break; }
-	      if (ends("eli")) { r("e"); break; }
-	      if (ends("ousli")) { r("ous"); break; }
-	      break;
-	    case 'o':
-	      if (ends("ization")) { r("ize"); break; }
-	      if (ends("ation")) { r("ate"); break; }
-	      if (ends("ator")) { r("ate"); break; }
-	      break;
-	    case 's':
-	      if (ends("alism")) { r("al"); break; }
-	      if (ends("iveness")) { r("ive"); break; }
-	      if (ends("fulness")) { r("ful"); break; }
-	      if (ends("ousness")) { r("ous"); break; }
-	      break;
-	    case 't':
-	      if (ends("aliti")) { r("al"); break; }
-	      if (ends("iviti")) { r("ive"); break; }
-	      if (ends("biliti")) { r("ble"); break; }
-	      break;
-	    case 'g':
-	      if (ends("logi")) { r("log"); break; }
-	    }
-	  }
-
-	  /* step4() deals with -ic-, -full, -ness etc. similar strategy to step3. */
-
-	  private final void step4() {
-	    switch (b[k]) {
-	    case 'e':
-	      if (ends("icate")) { r("ic"); break; }
-	      if (ends("ative")) { r(""); break; }
-	      if (ends("alize")) { r("al"); break; }
-	      break;
-	    case 'i':
-	      if (ends("iciti")) { r("ic"); break; }
-	      break;
-	    case 'l':
-	      if (ends("ical")) { r("ic"); break; }
-	      if (ends("ful")) { r(""); break; }
-	      break;
-	    case 's':
-	      if (ends("ness")) { r(""); break; }
-	      break;
-	    }
-	  }
-
-	  /* step5() takes off -ant, -ence etc., in context <c>vcvc<v>. */
-
-	  private final void step5() {
-	    if (k == k0) return; /* for Bug 1 */
-	    switch (b[k-1]) {
-	    case 'a':
-	      if (ends("al")) break;
-	      return;
-	    case 'c':
-	      if (ends("ance")) break;
-	      if (ends("ence")) break;
-	      return;
-	    case 'e':
-	      if (ends("er")) break; return;
-	    case 'i':
-	      if (ends("ic")) break; return;
-	    case 'l':
-	      if (ends("able")) break;
-	      if (ends("ible")) break; return;
-	    case 'n':
-	      if (ends("ant")) break;
-	      if (ends("ement")) break;
-	      if (ends("ment")) break;
-	      /* element etc. not stripped before the m */
-	      if (ends("ent")) break;
-	      return;
-	    case 'o':
-	      if (ends("ion") && j >= 0 && (b[j] == 's' || b[j] == 't')) break;
-	      /* j >= 0 fixes Bug 2 */
-	      if (ends("ou")) break;
-	      return;
-	      /* takes care of -ous */
-	    case 's':
-	      if (ends("ism")) break;
-	      return;
-	    case 't':
-	      if (ends("ate")) break;
-	      if (ends("iti")) break;
-	      return;
-	    case 'u':
-	      if (ends("ous")) break;
-	      return;
-	    case 'v':
-	      if (ends("ive")) break;
-	      return;
-	    case 'z':
-	      if (ends("ize")) break;
-	      return;
-	    default:
-	      return;
-	    }
-	    if (m() > 1)
-	      k = j;
-	  }
-
-	  /* step6() removes a final -e if m() > 1. */
-
-	  private final void step6() {
-	    j = k;
-	    if (b[k] == 'e') {
-	      int a = m();
-	      if (a > 1 || a == 1 && !cvc(k-1))
-	        k--;
-	    }
-	    if (b[k] == 'l' && doublec(k) && m() > 1)
-	      k--;
-	  }
-
-
-	  /**
-	   * Stem a word provided as a String.  Returns the result as a String.
-	   */
-	  public String stem(String s) {
-	    if (stem(s.toCharArray(), s.length()))
-	      return toString();
-	    else
-	      return s;
-	  }
-
-	  /** Stem a word contained in a char[].  Returns true if the stemming process
-	   * resulted in a word different from the input.  You can retrieve the
-	   * result with getResultLength()/getResultBuffer() or toString().
-	   */
-	  public boolean stem(char[] word) {
-	    return stem(word, word.length);
-	  }
-
-	  /** Stem a word contained in a portion of a char[] array.  Returns
-	   * true if the stemming process resulted in a word different from
-	   * the input.  You can retrieve the result with
-	   * getResultLength()/getResultBuffer() or toString().
-	   */
-	  public boolean stem(char[] wordBuffer, int offset, int wordLen) {
-	    reset();
-	    if (b.length < wordLen) {
-	      b = new char[ArrayUtil.oversize(wordLen, NUM_BYTES_CHAR)];
-	    }
-	    System.arraycopy(wordBuffer, offset, b, 0, wordLen);
-	    i = wordLen;
-	    return stem(0);
-	  }
-
-	  /** Stem a word contained in a leading portion of a char[] array.
-	   * Returns true if the stemming process resulted in a word different
-	   * from the input.  You can retrieve the result with
-	   * getResultLength()/getResultBuffer() or toString().
-	   */
-	  public boolean stem(char[] word, int wordLen) {
-	    return stem(word, 0, wordLen);
-	  }
-
-	  /** Stem the word placed into the Stemmer buffer through calls to add().
-	   * Returns true if the stemming process resulted in a word different
-	   * from the input.  You can retrieve the result with
-	   * getResultLength()/getResultBuffer() or toString().
-	   */
-	  public boolean stem() {
-	    return stem(0);
-	  }
-
-	  public boolean stem(int i0) {
-	    k = i - 1;
-	    k0 = i0;
-	    if (k > k0+1) {
-	      step1(); step2(); step3(); step4(); step5(); step6();
-	    }
-	    // Also, a word is considered dirty if we lopped off letters
-	    // Thanks to Ifigenia Vairelles for pointing this out.
-	    if (i != k+1)
-	      dirty = true;
-	    i = k+1;
-	    return dirty;
-	  }
-
-	  /** Test program for demonstrating the Stemmer.  It reads a file and
-	   * stems each word, writing the result to standard out.
-	   * Usage: Stemmer file-name
-	   */
-	  public static void main(String[] args) {
-	    PStemmer s = new PStemmer();
-
-	    for (int i = 0; i < args.length; i++) {
-	      try {
-	        InputStream in = new FileInputStream(args[i]);
-	        byte[] buffer = new byte[1024];
-	        int bufferLen, offset, ch;
-
-	        bufferLen = in.read(buffer);
-	        offset = 0;
-	        s.reset();
-
-	        while(true) {
-	          if (offset < bufferLen)
-	            ch = buffer[offset++];
-	          else {
-	            bufferLen = in.read(buffer);
-	            offset = 0;
-	            if (bufferLen < 0)
-	              ch = -1;
-	            else
-	              ch = buffer[offset++];
-	          }
-
-	          if (Character.isLetter((char) ch)) {
-	            s.add(Character.toLowerCase((char) ch));
-	          }
-	          else {
-	             s.stem();
-	             System.out.print(s.toString());
-	             s.reset();
-	             if (ch < 0)
-	               break;
-	             else {
-	               System.out.print((char) ch);
-	             }
-	           }
-	        }
-
-	        in.close();
-	      }
-	      catch (IOException e) {
-	        System.out.println("error reading " + args[i]);
-	      }
-	    }
-	  }
+	/** Stem a word contained in a portion of a char[] array.  Returns
+	 * true if the stemming process resulted in a word different from
+	 * the input.  You can retrieve the result with
+	 * getResultLength()/getResultBuffer() or toString().
+	 */
+	public boolean stem(char[] wordBuffer, int offset, int wordLen) {
+		reset();
+		if (b.length < wordLen) {
+			b = new char[ArrayUtil.oversize(wordLen, Character.BYTES)];
+		}
+		System.arraycopy(wordBuffer, offset, b, 0, wordLen);
+		i = wordLen;
+		return stem(0);
+	}
+
+	/** Stem a word contained in a leading portion of a char[] array.
+	 * Returns true if the stemming process resulted in a word different
+	 * from the input.  You can retrieve the result with
+	 * getResultLength()/getResultBuffer() or toString().
+	 */
+	public boolean stem(char[] word, int wordLen) {
+		return stem(word, 0, wordLen);
+	}
+
+	/** Stem the word placed into the Stemmer buffer through calls to add().
+	 * Returns true if the stemming process resulted in a word different
+	 * from the input.  You can retrieve the result with
+	 * getResultLength()/getResultBuffer() or toString().
+	 */
+	public boolean stem() {
+		return stem(0);
+	}
+
+	public boolean stem(int i0) {
+		k = i - 1;
+		k0 = i0;
+		if (k > k0+1) {
+			step1(); step2(); step3(); step4(); step5(); step6();
+		}
+		// Also, a word is considered dirty if we lopped off letters
+		// Thanks to Ifigenia Vairelles for pointing this out.
+		if (i != k+1)
+			dirty = true;
+		i = k+1;
+		return dirty;
+	}
+
+	/** Test program for demonstrating the Stemmer.  It reads a file and
+	 * stems each word, writing the result to standard out.
+	 * Usage: Stemmer file-name
+	 */
+	public static void main(String[] args) {
+		PStemmer s = new PStemmer();
+
+		for (int i = 0; i < args.length; i++) {
+			try {
+				InputStream in = new FileInputStream(args[i]);
+				byte[] buffer = new byte[1024];
+				int bufferLen, offset, ch;
+
+				bufferLen = in.read(buffer);
+				offset = 0;
+				s.reset();
+
+				while(true) {
+					if (offset < bufferLen)
+						ch = buffer[offset++];
+					else {
+						bufferLen = in.read(buffer);
+						offset = 0;
+						if (bufferLen < 0)
+							ch = -1;
+						else
+							ch = buffer[offset++];
+					}
+
+					if (Character.isLetter((char) ch)) {
+						s.add(Character.toLowerCase((char) ch));
+					}
+					else {
+						 s.stem();
+						 System.out.print(s.toString());
+						 s.reset();
+						 if (ch < 0)
+							 break;
+						 else {
+							 System.out.print((char) ch);
+						 }
+					 }
+				}
+
+				in.close();
+			}
+			catch (IOException e) {
+				System.out.println("error reading " + args[i]);
+			}
+		}
 	}
+}
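
The worked examples in the step1() comment double as a smoke test for the re-indented class. A minimal driver using only the public API above (expected outputs taken from that comment):

    public class PStemmerDemo {
      public static void main(String[] args) {
        PStemmer s = new PStemmer();
        // stem(String) resets internal state first, so one instance can be reused
        for (String w : new String[] { "caresses", "ponies", "meetings" }) {
          System.out.println(w + " -> " + s.stem(w)); // caress, poni, meet
        }
      }
    }
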
 
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/GeneralizationListReducer.java b/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/GeneralizationListReducer.java
index 69dfb6c..fc6c82a 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/GeneralizationListReducer.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/GeneralizationListReducer.java
@@ -46,7 +46,7 @@ public class GeneralizationListReducer {
     }
 
     if (resultReduced.size() < 1) {
-      System.err.println("Wrong subsumption reduction");
+      // System.err.println("Wrong subsumption reduction");
     }
 
     if (resultReduced.size() > 1) {
@@ -86,7 +86,7 @@ public class GeneralizationListReducer {
     }
     resultReduced = resultDupl;
     if (resultReduced.size() < 1) {
-      System.err.println("Wrong subsumption reduction");
+      // System.err.println("Wrong subsumption reduction");
     }
 
     if (resultReduced.size() > 1) {
@@ -134,7 +134,7 @@ public class GeneralizationListReducer {
     }
 
     if (resultReduced.size() < 1) {
-      System.err.println("Wrong subsumption reduction");
+      // System.err.println("Wrong subsumption reduction");
       resultReduced = result;
     }
 
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/ParseTreeChunk.java b/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/ParseTreeChunk.java
index 27f457c..ad22ac1 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/ParseTreeChunk.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/ParseTreeChunk.java
@@ -28,7 +28,9 @@ import org.apache.commons.lang3.StringUtils;
 
 import opennlp.tools.parse_thicket.ParseTreeNode;
 
-public class ParseTreeChunk implements Serializable{
+public class ParseTreeChunk implements Serializable {
+
+	private static final long serialVersionUID = -9007722991829174647L;
 	private String mainPOS;
 
 	private List<String> lemmas;
@@ -518,7 +520,6 @@ public class ParseTreeChunk implements Serializable{
 			}
 			results.add(resultsPhraseType);
 		}
-		System.out.println(results);
 		return results;
 
 		// 2.1 | Vietnam <b>embassy</b> <b>in</b> <b>Israel</b>: information on how
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/ParseTreeChunkListScorer.java b/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/ParseTreeChunkListScorer.java
index e9a0368..db7325d 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/ParseTreeChunkListScorer.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/ParseTreeChunkListScorer.java
@@ -28,7 +28,7 @@ public class ParseTreeChunkListScorer {
     double currScore = 0.0;
     for (List<ParseTreeChunk> chunksGivenPhraseType : matchResult)
       for (ParseTreeChunk chunk : chunksGivenPhraseType) {
-        Double score = getScore(chunk);
+        double score = getScore(chunk);
         // System.out.println(chunk+ " => score >>> "+score);
         if (score > currScore) {
           currScore = score;
@@ -44,7 +44,7 @@ public class ParseTreeChunkListScorer {
     for (List<ParseTreeChunk> chunksGivenPhraseType : matchResult) {
       double currScorePT = 0.0;
       for (ParseTreeChunk chunk : chunksGivenPhraseType) {
-        Double score = getScore(chunk);
+        double score = getScore(chunk);
         // System.out.println(chunk+ " => score >>> "+score);
         if (score > currScorePT) {
           currScorePT = score;
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/ParseTreeMatcherDeterministic.java b/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/ParseTreeMatcherDeterministic.java
index 2949552..0dd96b9 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/ParseTreeMatcherDeterministic.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/ParseTreeMatcherDeterministic.java
@@ -19,15 +19,16 @@ package opennlp.tools.textsimilarity;
 
 import java.util.ArrayList;
 import java.util.List;
+
 import opennlp.tools.stemmer.PStemmer;
 
 public class ParseTreeMatcherDeterministic {
 
-  private GeneralizationListReducer generalizationListReducer = new GeneralizationListReducer();
+  private final GeneralizationListReducer generalizationListReducer = new GeneralizationListReducer();
 
-  private LemmaFormManager lemmaFormManager = new LemmaFormManager();
+  private final LemmaFormManager lemmaFormManager = new LemmaFormManager();
 
-  private POSManager posManager = new POSManager();
+  private final POSManager posManager = new POSManager();
 
   /**
    * key matching function which takes two phrases, aligns them and finds a set
@@ -45,13 +46,13 @@ public class ParseTreeMatcherDeterministic {
     List<String> lem1 = chunk1.getLemmas();
     List<String> lem2 = chunk2.getLemmas();
 
-    List<String> lem1stem = new ArrayList<String>();
-    List<String> lem2stem = new ArrayList<String>();
+    List<String> lem1stem = new ArrayList<>();
+    List<String> lem2stem = new ArrayList<>();
 
     PStemmer ps = new PStemmer();
     for (String word : lem1) {
       try {
-        lem1stem.add(ps.stem(word.toLowerCase()).toString());
+        lem1stem.add(ps.stem(word.toLowerCase()));
       } catch (Exception e) {
         // e.printStackTrace();
 
@@ -61,19 +62,19 @@ public class ParseTreeMatcherDeterministic {
     }
     try {
       for (String word : lem2) {
-        lem2stem.add(ps.stem(word.toLowerCase()).toString());
+        lem2stem.add(ps.stem(word.toLowerCase()));
       }
     } catch (Exception e) {
       System.err.println("problem processing word " + lem2.toString());
     }
 
-    List<String> overlap = new ArrayList(lem1stem);
+    List<String> overlap = new ArrayList<>(lem1stem);
     overlap.retainAll(lem2stem);
 
-    if (overlap == null || overlap.size() < 1)
+    if (overlap.size() < 1)
       return null;
 
-    List<Integer> occur1 = new ArrayList<Integer>(), occur2 = new ArrayList<Integer>();
+    List<Integer> occur1 = new ArrayList<>(), occur2 = new ArrayList<>();
     for (String word : overlap) {
       Integer i1 = lem1stem.indexOf(word);
       Integer i2 = lem2stem.indexOf(word);
@@ -81,13 +82,13 @@ public class ParseTreeMatcherDeterministic {
       occur2.add(i2);
     }
 
-    // now we search for plausible sublists of overlaps
+    // now we search for plausible sub-lists of overlaps
     // if at some position correspondence is inverse (one of two position
     // decreases instead of increases)
     // then we terminate current alignment accum and start a new one
-    List<List<int[]>> overlapsPlaus = new ArrayList<List<int[]>>();
+    List<List<int[]>> overlapsPlaus = new ArrayList<>();
     // starts from 1, not 0
-    List<int[]> accum = new ArrayList<int[]>();
+    List<int[]> accum = new ArrayList<>();
     accum.add(new int[] { occur1.get(0), occur2.get(0) });
     for (int i = 1; i < occur1.size(); i++) {
 
@@ -96,7 +97,7 @@ public class ParseTreeMatcherDeterministic {
         accum.add(new int[] { occur1.get(i), occur2.get(i) });
       else {
         overlapsPlaus.add(accum);
-        accum = new ArrayList<int[]>();
+        accum = new ArrayList<>();
         accum.add(new int[] { occur1.get(i), occur2.get(i) });
       }
     }
@@ -104,20 +105,20 @@ public class ParseTreeMatcherDeterministic {
       overlapsPlaus.add(accum);
     }
 
-    List<ParseTreeChunk> results = new ArrayList<ParseTreeChunk>();
+    List<ParseTreeChunk> results = new ArrayList<>();
     for (List<int[]> occur : overlapsPlaus) {
-      List<Integer> occr1 = new ArrayList<Integer>(), occr2 = new ArrayList<Integer>();
+      List<Integer> occr1 = new ArrayList<>(), occr2 = new ArrayList<>();
       for (int[] column : occur) {
         occr1.add(column[0]);
         occr2.add(column[1]);
       }
 
       int ov1 = 0, ov2 = 0; // iterators over common words;
-      List<String> commonPOS = new ArrayList<String>(), commonLemmas = new ArrayList<String>();
+      List<String> commonPOS = new ArrayList<>(), commonLemmas = new ArrayList<>();
       // we start two words before first word
       int k1 = occr1.get(ov1) - 2, k2 = occr2.get(ov2) - 2;
       // if (k1<0) k1=0; if (k2<0) k2=0;
-      Boolean bReachedCommonWord = false;
+      boolean bReachedCommonWord = false;
       while (k1 < 0 || k2 < 0) {
         k1++;
         k2++;
@@ -179,7 +180,7 @@ public class ParseTreeMatcherDeterministic {
                                                                     // behind
                                                                     // current
                                                                     // position,
-                                                                    // synchroneously
+                                                                    // synchronously
                                                                     // move
                                                                     // towards
                                                                     // right
@@ -217,11 +218,11 @@ public class ParseTreeMatcherDeterministic {
    */
   public List<List<ParseTreeChunk>> matchTwoSentencesGroupedChunksDeterministic(
       List<List<ParseTreeChunk>> sent1, List<List<ParseTreeChunk>> sent2) {
-    List<List<ParseTreeChunk>> results = new ArrayList<List<ParseTreeChunk>>();
+    List<List<ParseTreeChunk>> results = new ArrayList<>();
     // first iterate through component
     for (int comp = 0; comp < 2 && // just np & vp
         comp < sent1.size() && comp < sent2.size(); comp++) {
-      List<ParseTreeChunk> resultComps = new ArrayList<ParseTreeChunk>();
+      List<ParseTreeChunk> resultComps = new ArrayList<>();
       // then iterate through each phrase in each component
       for (ParseTreeChunk ch1 : sent1.get(comp)) {
         for (ParseTreeChunk ch2 : sent2.get(comp)) { // simpler version
@@ -229,7 +230,7 @@ public class ParseTreeMatcherDeterministic {
               ch1, ch2);
 
           if (chunkToAdd == null)
-            chunkToAdd = new ArrayList<ParseTreeChunk>();
+            chunkToAdd = new ArrayList<>();
           // System.out.println("ch1 = "+
           // ch1.toString()+" | ch2="+ch2.toString()
           // +"\n result = "+chunkToAdd.toString() + "\n");
@@ -248,7 +249,7 @@ public class ParseTreeMatcherDeterministic {
           // if (!LemmaFormManager.mustOccurVerifier(ch1, ch2, chunkToAdd))
           // continue; // if the words which have to stay do not stay, proceed
           // to other elements
-          Boolean alreadyThere = false;
+          boolean alreadyThere = false;
           for (ParseTreeChunk chunk : resultComps) {
             if (chunkToAdd.contains(chunk)) {
               alreadyThere = true;
@@ -258,16 +259,13 @@ public class ParseTreeMatcherDeterministic {
             // }
           }
 
-          if (!alreadyThere && chunkToAdd != null && chunkToAdd.size() > 0) {
+          if (!alreadyThere && chunkToAdd.size() > 0) {
             resultComps.addAll(chunkToAdd);
           }
 
         }
       }
-      List<ParseTreeChunk> resultCompsRed = generalizationListReducer
-          .applyFilteringBySubsumption(resultComps);
-
-      resultComps = resultCompsRed;
+      resultComps = generalizationListReducer.applyFilteringBySubsumption(resultComps);
       results.add(resultComps);
     }
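
The alignment code above (the occur1/occur2 loop) keeps only monotone alignments: as soon as one of the two position sequences moves backwards, the current accumulator is closed and a new one begins. A standalone sketch of just that splitting step, with illustrative indices:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class AlignmentSplitSketch {
      // Splits aligned index pairs into runs that strictly increase in both coordinates.
      static List<List<int[]>> split(int[] occur1, int[] occur2) {
        List<List<int[]>> runs = new ArrayList<>();
        List<int[]> accum = new ArrayList<>();
        accum.add(new int[] { occur1[0], occur2[0] });
        for (int i = 1; i < occur1.length; i++) {
          if (occur1[i] > occur1[i - 1] && occur2[i] > occur2[i - 1]) {
            accum.add(new int[] { occur1[i], occur2[i] });
          } else { // inverse correspondence: close this run, start a new one
            runs.add(accum);
            accum = new ArrayList<>();
            accum.add(new int[] { occur1[i], occur2[i] });
          }
        }
        runs.add(accum);
        return runs;
      }

      public static void main(String[] args) {
        // word positions in phrase 1 vs phrase 2; the third pair steps back in phrase 2
        for (List<int[]> run : split(new int[] { 0, 2, 3 }, new int[] { 1, 4, 2 })) {
          run.forEach(p -> System.out.print(Arrays.toString(p) + " "));
          System.out.println();
        }
        // prints: [0, 1] [2, 4]
        //         [3, 2]
      }
    }
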
 
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/chunker2matcher/ParserChunker2MatcherProcessor.java b/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/chunker2matcher/ParserChunker2MatcherProcessor.java
index 2726553..789642f 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/chunker2matcher/ParserChunker2MatcherProcessor.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/chunker2matcher/ParserChunker2MatcherProcessor.java
@@ -67,9 +67,9 @@ public class ParserChunker2MatcherProcessor {
   private Parser parser;
   private ChunkerME chunker;
   private final int NUMBER_OF_SECTIONS_IN_SENTENCE_CHUNKS = 5;
-  private static Logger LOG = Logger
-      .getLogger("opennlp.tools.textsimilarity.chunker2matcher.ParserChunker2MatcherProcessor");
-  private Map<String, String[][]> sentence_parseObject = new HashMap<String, String[][]>();
+  private static final Logger LOG =
+          Logger.getLogger("opennlp.tools.textsimilarity.chunker2matcher.ParserChunker2MatcherProcessor");
+  private Map<String, String[][]> sentence_parseObject;
 
   public SentenceDetector getSentenceDetector() {
     return sentenceDetector;
@@ -102,11 +102,11 @@ public class ParserChunker2MatcherProcessor {
           .readObject();
     } catch (Exception e) {
       // this file might not exist initially
-      LOG.fine("parsing  cache file does not exist (but should be created)");
-      sentence_parseObject = new HashMap<String, String[][]>();
+      LOG.warning("parsing  cache file does not exist (but should be created)");
+      sentence_parseObject = new HashMap<>();
     }
     if (sentence_parseObject == null)
-      sentence_parseObject = new HashMap<String, String[][]>();
+      sentence_parseObject = new HashMap<>();
 
     try {
     	if (MODEL_DIR==null || MODEL_DIR.equals("/models")) {
@@ -122,8 +122,8 @@ public class ParserChunker2MatcherProcessor {
       initializeParser();
       initializeChunker();
     } catch (Exception e) { // a typical error when 'model' is not installed
+      LOG.warning("The model can't be read and we rely on cache");
       System.err.println("Please install OpenNLP model files in 'src/test/resources' (folder 'model'");
-      LOG.fine("The model can't be read and we rely on cache");
     }
   }
 
@@ -219,7 +219,7 @@ public class ParserChunker2MatcherProcessor {
     if (sentence == null || sentence.trim().length() < MIN_SENTENCE_LENGTH)
       return null;
 
-    Parse[] parseArray = null;
+    Parse[] parseArray;
     try {
       parseArray = ParserTool.parseLine(sentence, parser, 1);
     } catch (Throwable t) {
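
The cache bootstrap in this class tolerates a missing or unreadable file by falling back to an empty map. A condensed sketch of that load-or-create pattern (file name and value type assumed from the hunk above):

    import java.io.FileInputStream;
    import java.io.ObjectInputStream;
    import java.util.HashMap;
    import java.util.Map;

    public class ParseCacheSketch {
      @SuppressWarnings("unchecked")
      static Map<String, String[][]> loadOrCreate(String path) {
        try (ObjectInputStream in = new ObjectInputStream(new FileInputStream(path))) {
          return (Map<String, String[][]>) in.readObject();
        } catch (Exception e) {
          return new HashMap<>(); // first run: no cache file yet
        }
      }

      public static void main(String[] args) {
        System.out.println(loadOrCreate("parse_cache.ser").size()); // 0 on first run
      }
    }
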
diff --git a/opennlp-similarity/src/main/java/opennlp/tools/word2vec/W2VDistanceMeasurer.java b/opennlp-similarity/src/main/java/opennlp/tools/word2vec/W2VDistanceMeasurer.java
index f673929..ab64a2d 100644
--- a/opennlp-similarity/src/main/java/opennlp/tools/word2vec/W2VDistanceMeasurer.java
+++ b/opennlp-similarity/src/main/java/opennlp/tools/word2vec/W2VDistanceMeasurer.java
@@ -1,28 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package opennlp.tools.word2vec;
 
-import opennlp.tools.textsimilarity.chunker2matcher.ParserChunker2MatcherProcessor;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.util.ArrayList;
+import java.util.Collection;
 
 import org.deeplearning4j.models.embeddings.WeightLookupTable;
 import org.deeplearning4j.models.embeddings.inmemory.InMemoryLookupTable;
 import org.deeplearning4j.models.embeddings.loader.WordVectorSerializer;
+import org.deeplearning4j.models.word2vec.VocabWord;
 import org.deeplearning4j.models.word2vec.Word2Vec;
+import org.deeplearning4j.models.word2vec.wordstore.VocabCache;
 import org.deeplearning4j.models.word2vec.wordstore.inmemory.InMemoryLookupCache;
+import org.deeplearning4j.text.sentenceiterator.FileSentenceIterator;
 import org.deeplearning4j.text.sentenceiterator.SentenceIterator;
-import org.deeplearning4j.text.sentenceiterator.UimaSentenceIterator;
 import org.deeplearning4j.text.tokenization.tokenizer.preprocessor.CommonPreprocessor;
 import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory;
 import org.deeplearning4j.text.tokenization.tokenizerfactory.TokenizerFactory;
+import org.nd4j.common.primitives.Pair;
 import org.springframework.core.io.ClassPathResource;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-
 public class W2VDistanceMeasurer {
 	static W2VDistanceMeasurer instance;
 	public Word2Vec vec = null;
-	private String resourceDir = null;
 
 	public synchronized static W2VDistanceMeasurer getInstance() {
 		if (instance == null)
@@ -31,6 +49,7 @@ public class W2VDistanceMeasurer {
 	}
 
 	public W2VDistanceMeasurer(){
+		String resourceDir = null;
 		if (resourceDir ==null)
 			try {
 				resourceDir = new File( "." ).getCanonicalPath()+"/src/test/resources";
@@ -43,11 +62,11 @@ public class W2VDistanceMeasurer {
 		String pathToW2V = resourceDir + "/w2v/GoogleNews-vectors-negative300.bin.gz";
 		File gModel = new File(pathToW2V);
 		try {
-			vec = WordVectorSerializer.loadGoogleModel(gModel, true);
+			Pair<InMemoryLookupTable, VocabCache> pair = WordVectorSerializer.loadTxt(Files.newInputStream(gModel.toPath()));
+			vec = WordVectorSerializer.fromPair(pair);
 		} catch (IOException e) {
 			System.out.println("Word2vec model is not loaded");
 			vec = null;
-			return;
 		} 
 		
 	} 
@@ -63,29 +82,22 @@ public class W2VDistanceMeasurer {
 
 	public static void runCycle() {
 
-		String filePath=null;
+		SentenceIterator iter=null;
 		try {
-			filePath = new ClassPathResource("raw_sentences.txt").getFile().getAbsolutePath();
+			String filePath = new ClassPathResource("raw_sentences.txt").getFile().getAbsolutePath();
+			// Strip white space before and after for each line
+			System.out.println("Load & Vectorize Sentences....");
+			iter = new FileSentenceIterator(new File(filePath));
 		} catch (IOException e1) {
-			// TODO Auto-generated catch block
 			e1.printStackTrace();
 		}
 
-		System.out.println("Load & Vectorize Sentences....");
-		// Strip white space before and after for each line
-		SentenceIterator iter=null;
-		try {
-			iter = UimaSentenceIterator.createWithPath(filePath);
-		} catch (Exception e1) {
-			// TODO Auto-generated catch block
-			e1.printStackTrace();
-		}
 		// Split on white spaces in the line to get words
 		TokenizerFactory t = new DefaultTokenizerFactory();
 		t.setTokenPreProcessor(new CommonPreprocessor());
 
 		InMemoryLookupCache cache = new InMemoryLookupCache();
-		WeightLookupTable table = new InMemoryLookupTable.Builder()
+		WeightLookupTable<VocabWord> table = new InMemoryLookupTable.Builder<VocabWord>()
 		.vectorLength(100)
 		.useAdaGrad(false)
 		.cache(cache)
@@ -100,21 +112,11 @@ public class W2VDistanceMeasurer {
 		.windowSize(5).iterate(iter).tokenizerFactory(t).build();
 
 		System.out.println("Fitting Word2Vec model....");
-		try {
-			vec.fit();
-		} catch (IOException e) {
-			// TODO Auto-generated catch block
-			e.printStackTrace();
-		}
+		vec.fit();
 
 		System.out.println("Writing word vectors to text file....");
 		// Write word
-		try {
-			WordVectorSerializer.writeWordVectors(vec, "pathToWriteto.txt");
-		} catch (IOException e) {
-			// TODO Auto-generated catch block
-			e.printStackTrace();
-		}
+		WordVectorSerializer.writeWord2VecModel(vec, "pathToWriteTo.txt");
 
 		System.out.println("Closest Words:");
 		Collection<String> lst = vec.wordsNearest("day", 10);
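
(Note: taken together, the changes to runCycle() migrate it from the removed UimaSentenceIterator and the older checked-exception fit()/writeWordVectors() API to the current deeplearning4j flow. Below is a condensed, runnable sketch of that flow, restricted to the calls visible in the diff; the corpus file name and output path are placeholders, not repo paths.)

    import java.io.File;
    import java.util.Collection;

    import org.deeplearning4j.models.embeddings.loader.WordVectorSerializer;
    import org.deeplearning4j.models.word2vec.Word2Vec;
    import org.deeplearning4j.text.sentenceiterator.FileSentenceIterator;
    import org.deeplearning4j.text.sentenceiterator.SentenceIterator;
    import org.deeplearning4j.text.tokenization.tokenizer.preprocessor.CommonPreprocessor;
    import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory;
    import org.deeplearning4j.text.tokenization.tokenizerfactory.TokenizerFactory;

    class W2VTrainSketch {
      public static void main(String[] args) throws Exception {
        // One sentence per line; "raw_sentences.txt" stands in for any plain-text corpus.
        SentenceIterator iter = new FileSentenceIterator(new File("raw_sentences.txt"));
        TokenizerFactory t = new DefaultTokenizerFactory();
        t.setTokenPreProcessor(new CommonPreprocessor());

        Word2Vec vec = new Word2Vec.Builder()
            .windowSize(5)
            .iterate(iter)
            .tokenizerFactory(t)
            .build();
        vec.fit();   // no longer throws a checked IOException in current dl4j

        // Persist the model, then query nearest neighbours of a token.
        WordVectorSerializer.writeWord2VecModel(vec, "pathToWriteTo.txt");
        Collection<String> nearest = vec.wordsNearest("day", 10);
        System.out.println(nearest);
      }
    }
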
diff --git a/opennlp-similarity/src/test/java/opennlp/tools/parse_thicket/apps/MultiSentenceSearchResultsProcessorTest.java b/opennlp-similarity/src/test/java/opennlp/tools/parse_thicket/apps/MultiSentenceSearchResultsProcessorTest.java
index 0d2d58e..16c6057 100644
--- a/opennlp-similarity/src/test/java/opennlp/tools/parse_thicket/apps/MultiSentenceSearchResultsProcessorTest.java
+++ b/opennlp-similarity/src/test/java/opennlp/tools/parse_thicket/apps/MultiSentenceSearchResultsProcessorTest.java
@@ -20,11 +20,18 @@ import java.util.List;
 
 import opennlp.tools.similarity.apps.HitBase;
 
-import junit.framework.TestCase;
+import org.junit.Ignore;
+import org.junit.Test;
 
-public class MultiSentenceSearchResultsProcessorTest extends TestCase {
-	MultiSentenceSearchResultsProcessor proc = new MultiSentenceSearchResultsProcessor();
+import static org.junit.Assert.assertTrue;
 
+public class MultiSentenceSearchResultsProcessorTest {
+
+	private final MultiSentenceSearchResultsProcessor proc = new MultiSentenceSearchResultsProcessor();
+
+	@Test
+	@Ignore
+	// This test case fails with: "UnknownHostException: api.datamarket.azure.com: nodename nor servname provided, or not known"
 	public void testSearchOrder() {
 		List<HitBase> res; HitBase first = null;
 		String query ;
@@ -35,7 +42,7 @@ public class MultiSentenceSearchResultsProcessorTest extends TestCase {
 		System.out.println(res);
 		first = res.get(0);
 		assertTrue(first.getGenerWithQueryScore() > 2.0f);
-*/
+	  */
 		
 		
 		query = "Furious about reports that the IRS was used to target conservative groups, President Obama said that acting IRS Director Steve T. Miller was asked to resign. "+
@@ -46,7 +53,6 @@ public class MultiSentenceSearchResultsProcessorTest extends TestCase {
 		first = res.get(0);
 		assertTrue(first.getGenerWithQueryScore() > 000f);
 
-
 		query = " I see no meaningful distinction between complacency or complicity in the military's latest failure to uphold their own " +
 				"standards of conduct. Nor do I see a distinction between the service member who orchestrated this offense and the chain of " +
 				"command that was either oblivious to or tolerant of criminal behavior";
diff --git a/opennlp-similarity/src/test/java/opennlp/tools/parse_thicket/apps/RelatedSentenceFinderTest.java b/opennlp-similarity/src/test/java/opennlp/tools/parse_thicket/apps/RelatedSentenceFinderTest.java
index f5c6222..d09f6ee 100644
--- a/opennlp-similarity/src/test/java/opennlp/tools/parse_thicket/apps/RelatedSentenceFinderTest.java
+++ b/opennlp-similarity/src/test/java/opennlp/tools/parse_thicket/apps/RelatedSentenceFinderTest.java
@@ -18,60 +18,60 @@ package opennlp.tools.parse_thicket.apps;
 
 
 import java.util.ArrayList;
-import java.util.List;
 
 import opennlp.tools.similarity.apps.ContentGenerator;
 import opennlp.tools.similarity.apps.HitBase;
-import opennlp.tools.similarity.apps.RelatedSentenceFinder;
-import junit.framework.TestCase;
+import org.junit.Test;
 
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 
-public class RelatedSentenceFinderTest extends TestCase {
-	//RelatedSentenceFinder finder = new RelatedSentenceFinder();
-	ContentGenerator finder = new ContentGenerator();
+public class RelatedSentenceFinderTest {
 	
+	private final ContentGenerator finder = new ContentGenerator();
+
+	@Test
 	public void testAugmentWithMinedSentencesAndVerifyRelevanceTest(){
 		HitBase input = new HitBase();
 		input.setAbstractText("He is pictured here in the Swiss Patent Office where he did ...");
-		input.setUrl("http://apod.nasa.gov/apod/ap951219.html");
+		input.setUrl("https://apod.nasa.gov/apod/ap951219.html");
 		input.setTitle("Albert Einstein");
-		HitBase result = finder.buildParagraphOfGeneratedText(input, "Swiss Patent Office", new ArrayList<String>());
-		System.out.println(result.toString());
-		assertTrue(result.getOriginalSentences()!=null);
+		HitBase result = finder.buildParagraphOfGeneratedText(input, "Swiss Patent Office", new ArrayList<>());
+		assertNotNull(result.getOriginalSentences());
 		assertTrue(result.getOriginalSentences().size()>0);
-		//assertTrue(result.getFragments().size()>0);
-		//assertTrue(result.getFragments().get(0).getFragment().indexOf("Swiss Patent Office")>-1);
+		// TODO Investigate why the two checks below fail
+		// assertTrue(result.getFragments().size()>0);
+		// assertTrue(result.getFragments().get(0).getFragment().contains("Swiss Patent Office"));
 	}
 	
 	/*
-	public void testBuildParagraphOfGeneratedTextTest(){
-		HitBase input = new HitBase();
-		input.setAbstractText("Albert Einstein was a German-born theoretical physicist who developed the general theory of relativity, one of the two pillars of modern physics (alongside ...");
-		input.setUrl("http://en.wikipedia.org/wiki/Albert_Einstein");
-		input.setTitle("Albert Einstein - Wikipedia, the free encyclopedia");
-		HitBase result = finder.buildParagraphOfGeneratedText(input,
-				"Albert Einstein", new ArrayList<String>());
-		System.out.println(result.toString());
-		assertTrue(result.getOriginalSentences()!=null);
-		assertTrue(result.getOriginalSentences().size()>0);
-		assertTrue(result.getFragments().size()>0);
-		assertTrue(result.getFragments().get(0).getFragment().indexOf("Albert Einstein")>-1);
-	} 
-*/
-	
+		public void testBuildParagraphOfGeneratedTextTest(){
+			HitBase input = new HitBase();
+			input.setAbstractText("Albert Einstein was a German-born theoretical physicist who developed the general theory of relativity, one of the two pillars of modern physics (alongside ...");
+			input.setUrl("http://en.wikipedia.org/wiki/Albert_Einstein");
+			input.setTitle("Albert Einstein - Wikipedia, the free encyclopedia");
+			HitBase result = finder.buildParagraphOfGeneratedText(input,
+					"Albert Einstein", new ArrayList<String>());
+			System.out.println(result.toString());
+			assertTrue(result.getOriginalSentences()!=null);
+			assertTrue(result.getOriginalSentences().size()>0);
+			assertTrue(result.getFragments().size()>0);
+			assertTrue(result.getFragments().get(0).getFragment().indexOf("Albert Einstein")>-1);
+		}
+	*/
+
+	@Test
 	public void testBuildParagraphOfGeneratedTextTestYearInTheEnd(){
 	    
 		HitBase input = new HitBase();
 		input.setAbstractText("Albert Einstein was born ... Germany, on March 14, 1879");
-		input.setUrl("http://www.nobelprize.org/nobel_prizes/physics/laureates/1921/einstein-bio.html");
+		input.setUrl("https://www.nobelprize.org/prizes/physics/1921/einstein/biographical");
 		input.setTitle("Albert Einstein - Biographical");
-		HitBase result = finder.buildParagraphOfGeneratedText(input,
-				"Albert Einstein", new ArrayList<String>());
-		System.out.println(result.toString());
-		assertTrue(result.getOriginalSentences()!=null);
+		HitBase result = finder.buildParagraphOfGeneratedText(input, "Albert Einstein", new ArrayList<>());
+		assertNotNull(result.getOriginalSentences());
 		assertTrue(result.getOriginalSentences().size()>0);
 		assertTrue(result.getFragments().size()>0);
-		assertTrue(result.getFragments().get(0).getFragment().indexOf("Albert Einstein")>-1);
+		assertTrue(result.getFragments().get(0).getFragment().contains("Albert Einstein"));
 	} 
 	
 	/*
diff --git a/opennlp-similarity/src/test/java/opennlp/tools/parse_thicket/apps/StoryDiscourseNavigatorTest.java b/opennlp-similarity/src/test/java/opennlp/tools/parse_thicket/apps/StoryDiscourseNavigatorTest.java
index f94d1a7..11fec6e 100644
--- a/opennlp-similarity/src/test/java/opennlp/tools/parse_thicket/apps/StoryDiscourseNavigatorTest.java
+++ b/opennlp-similarity/src/test/java/opennlp/tools/parse_thicket/apps/StoryDiscourseNavigatorTest.java
@@ -16,32 +16,24 @@
  */
 package opennlp.tools.parse_thicket.apps;
 
-
-import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.List;
 
-import opennlp.tools.similarity.apps.HitBase;
-import opennlp.tools.similarity.apps.RelatedSentenceFinder;
+import org.junit.Ignore;
+import org.junit.Test;
+
 import opennlp.tools.similarity.apps.StoryDiscourseNavigator;
-import junit.framework.TestCase;
 
+import static org.junit.Assert.assertTrue;
 
-public class StoryDiscourseNavigatorTest extends TestCase {
-	RelatedSentenceFinder finder = new RelatedSentenceFinder();
+public class StoryDiscourseNavigatorTest {
 
-	
-	public void testGeneratedExtednsionKeywords(){
+	@Test
+	@Ignore
+	// This test fails with "UnknownHostException: api.datamarket.azure.com: nodename nor servname provided, or not known"
+	public void testGeneratedExtensionKeywords(){
 		String[] res = new StoryDiscourseNavigator().obtainAdditionalKeywordsForAnEntity("Albert Einstein");
-		System.out.println(Arrays.asList(res));
 		assertTrue(res.length>0);
-		assertTrue(Arrays.asList(res).toString().indexOf("physics")>-1);
-		assertTrue(Arrays.asList(res).toString().indexOf("relativity")>-1);
-		
-		
-		
-	} 
-
+		assertTrue(Arrays.asList(res).toString().contains("physics"));
+		assertTrue(Arrays.asList(res).toString().contains("relativity"));
+	}
 }
-
-//[Albert Einstein (/�lbrt anstan/; German. albt antan ( listen); 14 March 1879 18 April 1955) was a German-born theoretical physicist who developed the general theory of relativity, one of the two pillars of modern physics (alongside quantum mechanics). 2 3 While best known for his massenergy equivalence formula E = mc2 (which has been dubbed "the world's most famous equation"), 4 he received the 1921 Nobel Prize in Physics "for his services to theoretical physics, and especially for hi [...]
diff --git a/opennlp-similarity/src/test/java/opennlp/tools/parse_thicket/pattern_structure/JSMLearnerOnLatticeTest.java b/opennlp-similarity/src/test/java/opennlp/tools/parse_thicket/pattern_structure/JSMLearnerOnLatticeTest.java
index fd989ba..56bb9f3 100644
--- a/opennlp-similarity/src/test/java/opennlp/tools/parse_thicket/pattern_structure/JSMLearnerOnLatticeTest.java
+++ b/opennlp-similarity/src/test/java/opennlp/tools/parse_thicket/pattern_structure/JSMLearnerOnLatticeTest.java
@@ -18,31 +18,24 @@
 package opennlp.tools.parse_thicket.pattern_structure;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.HashSet;
 import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.commons.collections.ListUtils;
-
-import junit.framework.TestCase;
-import opennlp.tools.fca.ConceptLattice;
-import opennlp.tools.fca.FcaWriter;
-import opennlp.tools.fca.FormalConcept;
-import opennlp.tools.similarity.apps.BingWebQueryRunner;
-import opennlp.tools.similarity.apps.HitBase;
 import opennlp.tools.similarity.apps.utils.Pair;
 import opennlp.tools.textsimilarity.ParseTreeChunk;
 import opennlp.tools.textsimilarity.ParseTreeChunkListScorer;
 import opennlp.tools.textsimilarity.ParseTreeMatcherDeterministic;
 import opennlp.tools.textsimilarity.chunker2matcher.ParserChunker2MatcherProcessor;
+import org.junit.Test;
 
-public class JSMLearnerOnLatticeTest extends TestCase{
-	ParserChunker2MatcherProcessor chunk_maker = ParserChunker2MatcherProcessor.getInstance();
-	LinguisticPatternStructure psPos = new LinguisticPatternStructure(0,0), psNeg = new LinguisticPatternStructure(0,0);
-	ParseTreeMatcherDeterministic md = new ParseTreeMatcherDeterministic(); 
+public class JSMLearnerOnLatticeTest {
+	private final ParserChunker2MatcherProcessor chunk_maker = ParserChunker2MatcherProcessor.getInstance();
+	private final LinguisticPatternStructure psPos = new LinguisticPatternStructure(0,0), psNeg = new LinguisticPatternStructure(0,0);
+	private final ParseTreeMatcherDeterministic md = new ParseTreeMatcherDeterministic();
 
+	@Test
 	public void testJSMLearner() {
 
 		String text1p = "I rent an office space. This office is for my business. I can deduct office rental expense from my business profit to calculate net income. ";
@@ -66,37 +59,37 @@ public class JSMLearnerOnLatticeTest extends TestCase{
 		List<List<ParseTreeChunk>> chunks4n = chunk_maker.formGroupedPhrasesFromChunksForPara(text4n);
 
 
-		LinkedHashSet<Integer> obj = null;
-		obj = new LinkedHashSet<Integer>();
+		LinkedHashSet<Integer> obj;
+		obj = new LinkedHashSet<>();
 		obj.add(0);
 		psPos.AddIntent(chunks1p, obj, 0);
-		obj = new LinkedHashSet<Integer>();
+		obj = new LinkedHashSet<>();
 		obj.add(1);
 		psPos.AddIntent(chunks2p, obj, 0);
-		obj = new LinkedHashSet<Integer>();
+		obj = new LinkedHashSet<>();
 		obj.add(2);
 		psPos.AddIntent(chunks3p, obj, 0);
-		obj = new LinkedHashSet<Integer>();
+		obj = new LinkedHashSet<>();
 		obj.add(3);
 		psPos.AddIntent(chunks4p, obj, 0);
-		obj = new LinkedHashSet<Integer>();
+		obj = new LinkedHashSet<>();
 		obj.add(0);
 		psNeg.AddIntent(chunks1n, obj, 0);
-		obj = new LinkedHashSet<Integer>();
+		obj = new LinkedHashSet<>();
 		obj.add(1);
 		psNeg.AddIntent(chunks2n, obj, 0);
-		obj = new LinkedHashSet<Integer>();
+		obj = new LinkedHashSet<>();
 		obj.add(2);
 		psNeg.AddIntent(chunks3n, obj, 0);
-		obj = new LinkedHashSet<Integer>();
+		obj = new LinkedHashSet<>();
 		obj.add(3);
 		psNeg.AddIntent(chunks4n, obj, 0);
 
 		String unknown = "I do not want to rent anything to anyone. I just want to rent a space for myself. I neither calculate deduction of individual or business tax. I subtract my tax from my income";
 		List<List<ParseTreeChunk>> chunksUnknown = chunk_maker.formGroupedPhrasesFromChunksForPara(unknown);
-		List<List<List<ParseTreeChunk>>> posIntersections = new ArrayList<List<List<ParseTreeChunk>>>(), 
-				negIntersections = new ArrayList<List<List<ParseTreeChunk>>>();
-		List<List<ParseTreeChunk>> intersection = null;
+		List<List<List<ParseTreeChunk>>> posIntersections = new ArrayList<>(), 
+				negIntersections = new ArrayList<>();
+		List<List<ParseTreeChunk>> intersection;
 		for(int iConcept = 0; iConcept<psPos.conceptList.size(); iConcept++){
 			if (psPos.conceptList.get(iConcept).intent!=null && psPos.conceptList.get(iConcept).intent.size()>0){
 				intersection = md
@@ -119,23 +112,23 @@ public class JSMLearnerOnLatticeTest extends TestCase{
 		posIntersections = pair.getFirst();
 		negIntersections = pair.getSecond();
 
-		List<List<List<ParseTreeChunk>>> posIntersectionsUnderNeg = new ArrayList<List<List<ParseTreeChunk>>>(), 
-				negIntersectionsUnderPos = new ArrayList<List<List<ParseTreeChunk>>>();
+		List<List<List<ParseTreeChunk>>> posIntersectionsUnderNeg = new ArrayList<>(), 
+				negIntersectionsUnderPos = new ArrayList<>();
 
 		for(int iConcept = 0; iConcept<psNeg.conceptList.size(); iConcept++){
-			for(int iConceptJ = 0; iConceptJ<negIntersections.size(); iConceptJ++){
+			for (List<List<ParseTreeChunk>> negIntersection : negIntersections) {
 				intersection = md
-						.matchTwoSentencesGroupedChunksDeterministic(psNeg.conceptList.get(iConcept).intent, negIntersections.get(iConceptJ));
-				if (reduceList(intersection).size()>0)
+								.matchTwoSentencesGroupedChunksDeterministic(psNeg.conceptList.get(iConcept).intent, negIntersection);
+				if (reduceList(intersection).size() > 0)
 					posIntersectionsUnderNeg.add(reduceList(intersection));
 			}
 		}
 
 		for(int iConcept = 0; iConcept<psPos.conceptList.size(); iConcept++){
-			for(int iConceptJ = 0; iConceptJ<posIntersections.size(); iConceptJ++){
+			for (List<List<ParseTreeChunk>> posIntersection : posIntersections) {
 				intersection = md
-						.matchTwoSentencesGroupedChunksDeterministic(psPos.conceptList.get(iConcept).intent, posIntersections.get(iConceptJ));
-				if (reduceList(intersection).size()>0)
+								.matchTwoSentencesGroupedChunksDeterministic(psPos.conceptList.get(iConcept).intent, posIntersection);
+				if (reduceList(intersection).size() > 0)
 					negIntersectionsUnderPos.add(reduceList(intersection));
 			}
 		}
@@ -146,19 +139,19 @@ public class JSMLearnerOnLatticeTest extends TestCase{
 		posIntersectionsUnderNegLst = subtract(posIntersectionsUnderNegLst, negIntersectionsUnderPosLst);
 		negIntersectionsUnderPosLst= subtract(negIntersectionsUnderPosLst, posIntersectionsUnderNegLst);
 
-		System.out.println("Pos - neg inters = "+posIntersectionsUnderNegLst);
-		System.out.println("Neg - pos inters = "+negIntersectionsUnderPosLst);
+		// System.out.println("Pos - neg inters = "+posIntersectionsUnderNegLst);
+		// System.out.println("Neg - pos inters = "+negIntersectionsUnderPosLst);
 
 	}
 
-	public List<List<ParseTreeChunk>> reduceList(List<List<ParseTreeChunk>> list){
+	private List<List<ParseTreeChunk>> reduceList(List<List<ParseTreeChunk>> list){
 		float minScore = 1.3f;
-		List<List<ParseTreeChunk>> newList = new ArrayList<List<ParseTreeChunk>>();
+		List<List<ParseTreeChunk>> newList = new ArrayList<>();
 
 
 		ParseTreeChunkListScorer scorer = new ParseTreeChunkListScorer();
 		for(  List<ParseTreeChunk> group: list){
-			List<ParseTreeChunk> newGroup = new ArrayList<ParseTreeChunk>();
+			List<ParseTreeChunk> newGroup = new ArrayList<>();
 			for(ParseTreeChunk ch: group){
 				if (scorer.getScore(ch) > minScore)
 					newGroup.add(ch);
@@ -171,28 +164,25 @@ public class JSMLearnerOnLatticeTest extends TestCase{
 
 	}
 
-	public List<List<ParseTreeChunk>> flattenParseTreeChunkListList(List<List<List<ParseTreeChunk>>> listOfLists){
-		List<List<ParseTreeChunk>> newList = new ArrayList<List<ParseTreeChunk>>();
+	private List<List<ParseTreeChunk>> flattenParseTreeChunkListList(List<List<List<ParseTreeChunk>>> listOfLists){
+		List<List<ParseTreeChunk>> newList = new ArrayList<>();
 
 		for(  List<List<ParseTreeChunk>> member: listOfLists){
-			Set<ParseTreeChunk> newSet= new HashSet<ParseTreeChunk>();
+			Set<ParseTreeChunk> newSet= new HashSet<>();
 			for(  List<ParseTreeChunk> group: member){
 				if (group.size()>0)
 					newSet.addAll(group);
 			}
-			newList.add(new ArrayList<ParseTreeChunk>(newSet));
+			newList.add(new ArrayList<>(newSet));
 		}
 
 		return newList;  
 	}
 
-	public List<ParseTreeChunk> flattenParseTreeChunkLst(List<List<List<ParseTreeChunk>>> listOfLists){
-		List<ParseTreeChunk> newList = new ArrayList<ParseTreeChunk>();
-		Set<ParseTreeChunk> newSetAll = new HashSet<ParseTreeChunk>();
-
-
+	private List<ParseTreeChunk> flattenParseTreeChunkLst(List<List<List<ParseTreeChunk>>> listOfLists){
+		Set<ParseTreeChunk> newSetAll = new HashSet<>();
 		for(  List<List<ParseTreeChunk>> member: listOfLists){
-			Set<ParseTreeChunk> newSet= new HashSet<ParseTreeChunk>();
+			Set<ParseTreeChunk> newSet= new HashSet<>();
 			for(  List<ParseTreeChunk> group: member){
 				if (group.size()>0)
 					newSet.addAll(group);
@@ -200,18 +190,18 @@ public class JSMLearnerOnLatticeTest extends TestCase{
 			newSetAll.addAll(newSet);
 		}
 
-		return removeDuplicates(new ArrayList<ParseTreeChunk>(newSetAll));  
+		return removeDuplicates(new ArrayList<>(newSetAll));  
 	}
 
-	public List<ParseTreeChunk> removeDuplicates(List<ParseTreeChunk> dupes){
-		List<Integer> toDelete = new ArrayList<Integer>();
+	private List<ParseTreeChunk> removeDuplicates(List<ParseTreeChunk> dupes){
+		List<Integer> toDelete = new ArrayList<>();
 		for(int i=0; i<dupes.size(); i++)
 			for(int j=i+1; j<dupes.size(); j++){
 				if (dupes.get(i).equals(dupes.get(j))){
 					toDelete.add(j);
 				}
 			}
-		List<ParseTreeChunk> cleaned = new ArrayList<ParseTreeChunk>();
+		List<ParseTreeChunk> cleaned = new ArrayList<>();
 		for(int i=0; i<dupes.size(); i++){
 			if (!toDelete.contains(i))
 				cleaned.add(dupes.get(i));
@@ -219,30 +209,31 @@ public class JSMLearnerOnLatticeTest extends TestCase{
 		return cleaned;
 	}
 
-	public List<ParseTreeChunk> subtract(List<ParseTreeChunk> main, List<ParseTreeChunk> toSubtract){
-		List<Integer> toDelete = new ArrayList<Integer>();
+	private List<ParseTreeChunk> subtract(List<ParseTreeChunk> main, List<ParseTreeChunk> toSubtract){
+		List<Integer> toDelete = new ArrayList<>();
 		for(int i=0; i<main.size(); i++)
-			for(int j=0; j<toSubtract.size(); j++){
-				if (main.get(i).equals(toSubtract.get(j))){
+			for (ParseTreeChunk parseTreeChunk : toSubtract) {
+				if (main.get(i).equals(parseTreeChunk)) {
 					toDelete.add(i);
 				}
 			}
-		List<ParseTreeChunk> cleaned = new ArrayList<ParseTreeChunk>();
+		List<ParseTreeChunk> cleaned = new ArrayList<>();
 		for(int i=0; i<main.size(); i++){
 			if (!toDelete.contains(i))
 				cleaned.add(main.get(i));
 		}
 		return cleaned;
 	}
-	public List<ParseTreeChunk> intesectParseTreeChunkLists(List<ParseTreeChunk> a, List<ParseTreeChunk> b){
-		List<Integer> inters = new ArrayList<Integer>();
+
+	private List<ParseTreeChunk> intersectParseTreeChunkLists(List<ParseTreeChunk> a, List<ParseTreeChunk> b){
+		List<Integer> inters = new ArrayList<>();
 		for(int i=0; i<a.size(); i++)
-			for(int j=0; j<b.size(); j++){
-				if (a.get(i).equals(b.get(j))){
+			for (ParseTreeChunk parseTreeChunk : b) {
+				if (a.get(i).equals(parseTreeChunk)) {
 					inters.add(i);
 				}
 			}
-		List<ParseTreeChunk> cleaned = new ArrayList<ParseTreeChunk>();
+		List<ParseTreeChunk> cleaned = new ArrayList<>();
 		for(int i=0; i<a.size(); i++){
 			if (inters.contains(i))
 				cleaned.add(a.get(i));
@@ -250,17 +241,17 @@ public class JSMLearnerOnLatticeTest extends TestCase{
 		return cleaned;
 	}
 
-	public Pair<List<List<List<ParseTreeChunk>>>, List<List<List<ParseTreeChunk>>>>
+	private Pair<List<List<List<ParseTreeChunk>>>, List<List<List<ParseTreeChunk>>>>
 		removeInconsistenciesFromPosNegIntersections(List<List<List<ParseTreeChunk>>> pos, 
 			List<List<List<ParseTreeChunk>>> neg ){
 
 		List<ParseTreeChunk> posIntersectionsFl = flattenParseTreeChunkLst(pos);
 		List<ParseTreeChunk> negIntersectionsFl = flattenParseTreeChunkLst(neg);
 
-		List<ParseTreeChunk> intersParseTreeChunkLists = intesectParseTreeChunkLists(posIntersectionsFl, negIntersectionsFl);
+		List<ParseTreeChunk> intersParseTreeChunkLists = intersectParseTreeChunkLists(posIntersectionsFl, negIntersectionsFl);
 
-		List<List<List<ParseTreeChunk>>> cleanedFromInconsPos = new ArrayList<List<List<ParseTreeChunk>>>(), 
-				cleanedFromInconsNeg = new ArrayList<List<List<ParseTreeChunk>>>();
+		List<List<List<ParseTreeChunk>>> cleanedFromInconsPos = new ArrayList<>(),
+				cleanedFromInconsNeg = new ArrayList<>();
 		/*
 		System.out.println("pos = "+ pos);
 		System.out.println("neg = "+ neg);
@@ -270,9 +261,9 @@ public class JSMLearnerOnLatticeTest extends TestCase{
 		*/
 
 		for(  List<List<ParseTreeChunk>> member: pos){
-			List<List<ParseTreeChunk>> memberList = new ArrayList<List<ParseTreeChunk>>();
+			List<List<ParseTreeChunk>> memberList = new ArrayList<>();
 			for( List<ParseTreeChunk> group: member){
-				List<ParseTreeChunk> newGroup = new ArrayList<ParseTreeChunk>();
+				List<ParseTreeChunk> newGroup = new ArrayList<>();
 				for(ParseTreeChunk ch: group){
 					boolean bSkip = false;	 
 					for(ParseTreeChunk check: intersParseTreeChunkLists){
@@ -290,9 +281,9 @@ public class JSMLearnerOnLatticeTest extends TestCase{
 		}
 		
 		for(  List<List<ParseTreeChunk>> member: neg){
-			List<List<ParseTreeChunk>> memberList = new ArrayList<List<ParseTreeChunk>>();
+			List<List<ParseTreeChunk>> memberList = new ArrayList<>();
 			for( List<ParseTreeChunk> group: member){
-				List<ParseTreeChunk> newGroup = new ArrayList<ParseTreeChunk>();
+				List<ParseTreeChunk> newGroup = new ArrayList<>();
 				for(ParseTreeChunk ch: group){
 					boolean bSkip = false;	 
 					for(ParseTreeChunk check: intersParseTreeChunkLists){
@@ -309,7 +300,7 @@ public class JSMLearnerOnLatticeTest extends TestCase{
 				cleanedFromInconsNeg.add(memberList);
 		}
 
-		return  new Pair(cleanedFromInconsPos, cleanedFromInconsNeg);
+		return new Pair<>(cleanedFromInconsPos, cleanedFromInconsNeg);
 
 	}
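
(Note: the repeated new ArrayList<List<ParseTreeChunk>>() to new ArrayList<>() and raw new Pair(...) to new Pair<>(...) edits in this test are the Java 7+ diamond cleanup: the compiler infers the type arguments from the declaration or return type, and the typed Pair removes the unchecked warning. A tiny sketch, with a hypothetical minimal Pair standing in for opennlp.tools.similarity.apps.utils.Pair:)

    import java.util.ArrayList;
    import java.util.List;

    class DiamondSketch {

      // Hypothetical minimal Pair, standing in for the project's utils.Pair.
      static class Pair<A, B> {
        final A first; final B second;
        Pair(A first, B second) { this.first = first; this.second = second; }
      }

      static Pair<List<String>, List<String>> split() {
        // Before (pre-Java 7 / raw types):
        //   List<String> pos = new ArrayList<String>();
        //   return new Pair(pos, neg);          // raw type, unchecked warning
        // After (diamond, fully typed):
        List<String> pos = new ArrayList<>();
        List<String> neg = new ArrayList<>();
        return new Pair<>(pos, neg);             // inferred <List<String>, List<String>>
      }
    }
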
 
diff --git a/opennlp-similarity/src/test/java/opennlp/tools/parse_thicket/pattern_structure/PhraseTest.java b/opennlp-similarity/src/test/java/opennlp/tools/parse_thicket/pattern_structure/PhraseTest.java
index 58246e1..5295bc9 100755
--- a/opennlp-similarity/src/test/java/opennlp/tools/parse_thicket/pattern_structure/PhraseTest.java
+++ b/opennlp-similarity/src/test/java/opennlp/tools/parse_thicket/pattern_structure/PhraseTest.java
@@ -18,13 +18,11 @@
 package opennlp.tools.parse_thicket.pattern_structure;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.LinkedHashSet;
 import java.util.List;
 
 import opennlp.tools.fca.ConceptLattice;
 import opennlp.tools.fca.FcaWriter;
-import opennlp.tools.fca.FormalConcept;
 import opennlp.tools.similarity.apps.BingWebQueryRunner;
 import opennlp.tools.similarity.apps.HitBase;
 import opennlp.tools.textsimilarity.ParseTreeChunk;
diff --git a/opennlp-similarity/src/test/java/opennlp/tools/similarity/apps/SearchResultsProcessorTest.java b/opennlp-similarity/src/test/java/opennlp/tools/similarity/apps/SearchResultsProcessorTest.java
index 4e3bf71..8d250c2 100644
--- a/opennlp-similarity/src/test/java/opennlp/tools/similarity/apps/SearchResultsProcessorTest.java
+++ b/opennlp-similarity/src/test/java/opennlp/tools/similarity/apps/SearchResultsProcessorTest.java
@@ -18,16 +18,18 @@ package opennlp.tools.similarity.apps;
 
 import java.util.List;
 
-import junit.framework.TestCase;
+import org.junit.Test;
 
-public class SearchResultsProcessorTest extends TestCase {
-  SearchResultsProcessor proc = new SearchResultsProcessor();
+import static org.junit.Assert.assertTrue;
 
+public class SearchResultsProcessorTest {
+  private final SearchResultsProcessor proc = new SearchResultsProcessor();
+
+  @Test
   public void testSearchOrder() {
     List<HitBase> res = proc.runSearch("How can I pay tax on my income abroad");
 
     // we verify that top answers have high similarity score
-    System.out.println(res);
     HitBase first = res.get(0);
     assertTrue(first.getGenerWithQueryScore() > 2.79);
     // assertTrue(first.getTitle().indexOf("Foreign")>-1 &&
@@ -40,11 +42,10 @@ public class SearchResultsProcessorTest extends TestCase {
 
   }
 
+  @Test
   public void testSearchOrder2() {
-    List<HitBase> res = proc
-        .runSearch("Can I estimate what my income tax would be by using my last pay");
+    List<HitBase> res = proc.runSearch("Can I estimate what my income tax would be by using my last pay");
 
-    System.out.println(res);
     HitBase first = res.get(0);
     assertTrue(first.getGenerWithQueryScore() > 1.9);
 
diff --git a/opennlp-similarity/src/test/java/opennlp/tools/similarity/apps/SpeechRecognitionResultsProcessorTest.java b/opennlp-similarity/src/test/java/opennlp/tools/similarity/apps/SpeechRecognitionResultsProcessorTest.java
index dc59a90..2c8a516 100644
--- a/opennlp-similarity/src/test/java/opennlp/tools/similarity/apps/SpeechRecognitionResultsProcessorTest.java
+++ b/opennlp-similarity/src/test/java/opennlp/tools/similarity/apps/SpeechRecognitionResultsProcessorTest.java
@@ -20,23 +20,26 @@ package opennlp.tools.similarity.apps;
 
 import java.util.Arrays;
 import java.util.List;
-import junit.framework.TestCase;
-import opennlp.tools.similarity.apps.SpeechRecognitionResultsProcessor;
+
+import org.junit.Test;
+
 import opennlp.tools.similarity.apps.SpeechRecognitionResultsProcessor.SentenceMeaningfullnessScore;
 
-public class SpeechRecognitionResultsProcessorTest extends TestCase {
+import static org.junit.Assert.assertTrue;
+
+public class SpeechRecognitionResultsProcessorTest {
 
+  @Test
   public void testRestaurantEntityInSpeechRecognitionResults() {
     SpeechRecognitionResultsProcessor proc = new SpeechRecognitionResultsProcessor();
-    List<SentenceMeaningfullnessScore> res = proc
-        .runSearchAndScoreMeaningfulness(Arrays.asList(new String[] {
+    List<SentenceMeaningfullnessScore> res = proc.runSearchAndScoreMeaningfulness(Arrays.asList(
             "remember to buy milk tomorrow for details",
             "remember to buy milk tomorrow from trader joes",
             "remember to buy milk tomorrow from 3 to jones",
             "remember to buy milk tomorrow for for details",
             "remember to buy milk tomorrow from third to joes",
             "remember to buy milk tomorrow from third to jones",
-            "remember to buy milk tomorrow from for d jones" }));
+            "remember to buy milk tomorrow from for d jones"));
 
     assertTrue(res.get(1).getScore() > res.get(0).getScore()
         && res.get(1).getScore() > res.get(2).getScore()
diff --git a/opennlp-similarity/src/test/java/opennlp/tools/similarity/apps/taxo_builder/TaxonomyBuildMatchTest.java b/opennlp-similarity/src/test/java/opennlp/tools/similarity/apps/taxo_builder/TaxonomyBuildMatchTest.java
index fd58841..9ea4320 100644
--- a/opennlp-similarity/src/test/java/opennlp/tools/similarity/apps/taxo_builder/TaxonomyBuildMatchTest.java
+++ b/opennlp-similarity/src/test/java/opennlp/tools/similarity/apps/taxo_builder/TaxonomyBuildMatchTest.java
@@ -16,36 +16,38 @@
  */
 package opennlp.tools.similarity.apps.taxo_builder;
 
-import java.util.List;
+import org.junit.Test;
 
-import junit.framework.TestCase;
+import static org.junit.Assert.assertTrue;
 
-public class TaxonomyBuildMatchTest extends TestCase {
+public class TaxonomyBuildMatchTest {
 
+  @Test
   public void testTaxonomySeedImport() {
     AriAdapter ad = new AriAdapter();
     ad.getChainsFromARIfile("src/test/resources/taxonomies/irs_dom.ari");
-    System.out.println(ad.lemma_AssocWords);
     assertTrue(ad.lemma_AssocWords.size() > 0);
   }
-/*
-  public void testTaxonomyBuild() {
-    TaxonomyExtenderViaMebMining self = new TaxonomyExtenderViaMebMining();
-    self.extendTaxonomy("src/test/resources/taxonomies/irs_dom.ari", "tax",
-        "en");
-    self.close();
-    assertTrue(self.getAssocWords_ExtendedAssocWords().size() > 0);
-  }
-*/
+
+  /*
+    public void testTaxonomyBuild() {
+      TaxonomyExtenderViaMebMining self = new TaxonomyExtenderViaMebMining();
+      self.extendTaxonomy("src/test/resources/taxonomies/irs_dom.ari", "tax",
+          "en");
+      self.close();
+      assertTrue(self.getAssocWords_ExtendedAssocWords().size() > 0);
+    }
+  */
+
+  @Test
   public void testTaxonomyMatch() {
     TaxoQuerySnapshotMatcher matcher = new TaxoQuerySnapshotMatcher(
         "src/test/resources/taxonomies/irs_domTaxo.dat");
-    int score = matcher
-        .getTaxoScore(
+    int score = matcher.getTaxoScore(
             "Can Form 1040 EZ be used to claim the earned income credit.",
-            "Can Form 1040EZ be used to claim the earned income credit? . Must I be entitled to claim a child as a dependent to claim the earned income credit based on the child being ");
+            "Can Form 1040EZ be used to claim the earned income credit? . " +
+                    "Must I be entitled to claim a child as a dependent to claim the earned income credit based on the child being ");
 
-    System.out.println("The score is: " + score);
     assertTrue(score > 3);
     matcher.close();
   }
diff --git a/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/GeneralizationListReducerTest.java b/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/GeneralizationListReducerTest.java
index e2a1de3..7cc4ff1 100644
--- a/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/GeneralizationListReducerTest.java
+++ b/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/GeneralizationListReducerTest.java
@@ -20,15 +20,23 @@ package opennlp.tools.textsimilarity;
 import java.util.ArrayList;
 import java.util.List;
 
-import junit.framework.TestCase;
+import org.junit.Before;
+import org.junit.Test;
 
-public class GeneralizationListReducerTest extends TestCase {
-  private GeneralizationListReducer generalizationListReducer = new GeneralizationListReducer();
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 
-  public void notNull() {
+public class GeneralizationListReducerTest {
+  private final GeneralizationListReducer generalizationListReducer = new GeneralizationListReducer();
+
+  @Before
+  public void setup() {
     assertNotNull(generalizationListReducer);
   }
 
+  @Test
   public void test() {
     ParseTreeChunk ch1 = new ParseTreeChunk("VP", new String[] { "run",
         "around", "tigers", "zoo" }, new String[] { "VB", "IN", "NP", "NP" });
@@ -47,7 +55,7 @@ public class GeneralizationListReducerTest extends TestCase {
 
     // [DT-the NN-* VBG-flying NN-car ], [], [], [DT-the NN-* ]]
 
-    List<ParseTreeChunk> inp = new ArrayList<ParseTreeChunk>();
+    List<ParseTreeChunk> inp = new ArrayList<>();
     inp.add(ch1);
     inp.add(ch2);
     inp.add(ch5);
@@ -68,12 +76,9 @@ public class GeneralizationListReducerTest extends TestCase {
     assertFalse(ch5.isASubChunk(ch3));
     assertFalse(ch3.isASubChunk(ch5));
 
-    List<ParseTreeChunk> res = generalizationListReducer
-        .applyFilteringBySubsumption(inp);
-    assertEquals(
-        res.toString(),
+    List<ParseTreeChunk> res = generalizationListReducer.applyFilteringBySubsumption(inp);
+    assertEquals(res.toString(),
         "[VP [VB-run IN-around NP-tigers NP-zoo ], NP [DT-the NP-tigers ], NP [DT-the NN-* VBG-flying NN-car ]]");
-    System.out.println(res);
 
   }
 }
diff --git a/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/LemmaFormManagerTest.java b/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/LemmaFormManagerTest.java
index dda18b2..9fb2c1e 100644
--- a/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/LemmaFormManagerTest.java
+++ b/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/LemmaFormManagerTest.java
@@ -17,33 +17,33 @@
 
 package opennlp.tools.textsimilarity;
 
-import static junit.framework.Assert.assertNotNull;
-import junit.framework.TestCase;
-
+import org.junit.Before;
 import org.junit.Test;
-import org.junit.runner.RunWith;
 
-public class LemmaFormManagerTest extends TestCase {
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+public class LemmaFormManagerTest {
 
-  private LemmaFormManager lemmaFormManager = new LemmaFormManager();
+  private final LemmaFormManager lemmaFormManager = new LemmaFormManager();
 
-  public void testNotNull() {
+  @Before
+  public void setup() {
     assertNotNull(lemmaFormManager);
   }
 
+  @Test
   public void testMatches() {
     assertEquals(lemmaFormManager.matchLemmas(null, "loud", "loudness", "NN"),
         "loud");
-    assertEquals(lemmaFormManager.matchLemmas(null, "24", "12", "CD"), null);
+    assertNull(lemmaFormManager.matchLemmas(null, "24", "12", "CD"));
     assertEquals(lemmaFormManager.matchLemmas(null, "loud", "loudly", "NN"),
         "loud");
-    assertEquals(
-        lemmaFormManager.matchLemmas(null, "!upgrade", "upgrade", "NN"),
+    assertEquals(lemmaFormManager.matchLemmas(null, "!upgrade", "upgrade", "NN"),
         "!upgrade");
-    assertEquals(
-        lemmaFormManager.matchLemmas(null, "!upgrade", "upgrades", "NN"), null);
-    assertEquals(lemmaFormManager.matchLemmas(null, "!upgrade", "get", "NN"),
-        null);
+    assertNull(lemmaFormManager.matchLemmas(null, "!upgrade", "upgrades", "NN"));
+    assertNull(lemmaFormManager.matchLemmas(null, "!upgrade", "get", "NN"));
   }
 
 }
diff --git a/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/ParseTreeChunkListScorerTest.java b/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/ParseTreeChunkListScorerTest.java
index fd7961e..8b1cb37 100644
--- a/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/ParseTreeChunkListScorerTest.java
+++ b/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/ParseTreeChunkListScorerTest.java
@@ -19,16 +19,15 @@ package opennlp.tools.textsimilarity;
 
 import java.util.List;
 
-import opennlp.tools.parser.Parse;
-
 import org.junit.Test;
-import org.junit.runner.RunWith;
-import junit.framework.TestCase;
 
-public class ParseTreeChunkListScorerTest extends TestCase {
-  private ParseTreeChunkListScorer parseTreeChunkListScorer = new ParseTreeChunkListScorer();
-  private ParseTreeChunk parseTreeChunk = new ParseTreeChunk();
+import static org.junit.Assert.assertTrue;
+
+public class ParseTreeChunkListScorerTest {
+  private final ParseTreeChunkListScorer parseTreeChunkListScorer = new ParseTreeChunkListScorer();
+  private final ParseTreeChunk parseTreeChunk = new ParseTreeChunk();
 
+  @Test
   public void test() {
     List<List<ParseTreeChunk>> chs = parseTreeChunk
         .obtainParseTreeChunkListByParsingList("[[ [NN-* IN-in NP-israel ],  [NP-* IN-in NP-israel ],  [NP-* IN-* TO-* NN-* ],  [NN-visa IN-* NN-* IN-in ]],"
diff --git a/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/ParseTreeChunkTest.java b/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/ParseTreeChunkTest.java
index bc39669..9294e20 100644
--- a/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/ParseTreeChunkTest.java
+++ b/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/ParseTreeChunkTest.java
@@ -17,29 +17,25 @@
 
 package opennlp.tools.textsimilarity;
 
-import static org.junit.Assert.*;
+import org.junit.Test;
 
 import java.util.List;
 
-import junit.framework.TestCase;
-
-import org.junit.Test;
-import org.junit.runner.RunWith;
+import static org.junit.Assert.*;
 
-public class ParseTreeChunkTest extends TestCase {
-  private ParseTreeMatcherDeterministic parseTreeMatcher = new ParseTreeMatcherDeterministic();
-  private ParseTreeChunk parseTreeChunk = new ParseTreeChunk();
-  private ParseTreeChunkListScorer parseTreeChunkListScorer = new ParseTreeChunkListScorer();
+public class ParseTreeChunkTest {
+  private final ParseTreeMatcherDeterministic parseTreeMatcher = new ParseTreeMatcherDeterministic();
+  private final ParseTreeChunk parseTreeChunk = new ParseTreeChunk();
+  private final ParseTreeChunkListScorer parseTreeChunkListScorer = new ParseTreeChunkListScorer();
 
+  @Test
   public void test() {
     ParseTreeChunk ch1, ch2;
     List<List<ParseTreeChunk>> chRes;
 
-    ch1 = parseTreeChunk
-        .obtainParseTreeChunkListByParsingList(
+    ch1 = parseTreeChunk.obtainParseTreeChunkListByParsingList(
             "[[ [NN-* IN-in NP-israel ],  [NP-* IN-in NP-israel ],  [NP-* IN-* TO-* NN-* ],  [NN-visa IN-* NN-* IN-in ]], [ [VB-get NN-visa IN-* NN-* IN-in .-* ],  [VBD-* IN-* NN-* NN-* .-* ],  [VB-* NP-* ]]]")
         .get(0).get(0);
-    ;
 
     // NP [JJ-great JJ-unsecured NN-loan NNS-deals ]
     // NP [JJ-great NN-pizza NNS-deals ]
@@ -47,18 +43,16 @@ public class ParseTreeChunkTest extends TestCase {
         "deals" }, new String[] { "JJ", "JJ", "NN", "NNS" });
     ch2 = new ParseTreeChunk("NP", new String[] { "great", "pizza", "deals" },
         new String[] { "JJ", "NN", "NNS" });
-    assertEquals(
-        parseTreeMatcher.generalizeTwoGroupedPhrasesDeterministic(ch1, ch2)
-            .toString(), "[ [JJ-great NNS-deals ]]");
+    assertEquals(parseTreeMatcher.generalizeTwoGroupedPhrasesDeterministic(ch1, ch2).toString(),
+            "[ [JJ-great NNS-deals ]]");
 
     ch1 = new ParseTreeChunk("NP", new String[] { "great", "unsecured", "loan",
         "of", "jambo" }, new String[] { "JJ", "JJ", "NN", "IN", "NN" });
 
     ch2 = new ParseTreeChunk("NP", new String[] { "great", "jambo", "loan" },
         new String[] { "JJ", "NN", "NN" });
-    assertEquals(
-        parseTreeMatcher.generalizeTwoGroupedPhrasesDeterministic(ch1, ch2)
-            .toString(), "[ [JJ-great NN-loan ],  [NN-jambo ]]");
+    assertEquals(parseTreeMatcher.generalizeTwoGroupedPhrasesDeterministic(ch1, ch2).toString(),
+            "[ [JJ-great NN-loan ],  [NN-jambo ]]");
 
     ch1 = new ParseTreeChunk("NP", new String[] { "I", "love", "to", "run",
         "around", "zoo", "with", "tigers" }, new String[] { "NP", "VBP", "TO",
@@ -67,9 +61,8 @@ public class ParseTreeChunkTest extends TestCase {
     ch2 = new ParseTreeChunk("NP", new String[] { "I", "like", "it", "because",
         "it", "is", "loud" }, new String[] { "NP", "IN", "NP", "IN", "NP",
         "VBZ", "ADJP" });
-    assertEquals(
-        parseTreeMatcher.generalizeTwoGroupedPhrasesDeterministic(ch1, ch2)
-            .toString(), "[ [NP-i ]]");
+    assertEquals(parseTreeMatcher.generalizeTwoGroupedPhrasesDeterministic(ch1, ch2).toString(),
+            "[ [NP-i ]]");
 
     ch1 = new ParseTreeChunk("NP", new String[] { "love", "to", "run",
         "around", "zoo", "with", "tigers" }, new String[] { "VBP", "TO", "VB",
@@ -77,9 +70,7 @@ public class ParseTreeChunkTest extends TestCase {
 
     ch2 = new ParseTreeChunk("VP", new String[] { "run", "to", "the", "tiger",
         "zoo" }, new String[] { "VBP", "TO", "DT", "NN", "NN" });
-    assertEquals(
-        parseTreeMatcher.generalizeTwoGroupedPhrasesDeterministic(ch1, ch2)
-            .toString(),
+    assertEquals(parseTreeMatcher.generalizeTwoGroupedPhrasesDeterministic(ch1, ch2).toString(),
         "[ [VBP-* TO-to ],  [VB-run IN-* NP-zoo ],  [NP-tigers ]]");
 
     ch1 = new ParseTreeChunk("VP", new String[] { "love", "to", "run",
@@ -87,30 +78,24 @@ public class ParseTreeChunkTest extends TestCase {
         "NP", "NP" });
     ch2 = new ParseTreeChunk("VP", new String[] { "run", "to", "the", "tiger",
         "zoo" }, new String[] { "VBP", "TO", "DT", "NN", "NN" });
-    assertEquals(
-        parseTreeMatcher.generalizeTwoGroupedPhrasesDeterministic(ch1, ch2)
-            .toString(), "[ [VBP-* TO-to ],  [VB-run IN-* NP-tigers NP-zoo ]]");
+    assertEquals(parseTreeMatcher.generalizeTwoGroupedPhrasesDeterministic(ch1, ch2).toString(),
+            "[ [VBP-* TO-to ],  [VB-run IN-* NP-tigers NP-zoo ]]");
     ch1 = new ParseTreeChunk("VP", new String[] { "run", "around", "tigers",
         "zoo" }, new String[] { "VB", "IN", "NP", "NP" });
 
     ch2 = new ParseTreeChunk("NP", new String[] { "run", "to", "the", "tiger",
         "zoo" }, new String[] { "VBP", "TO", "DT", "NN", "NN" });
 
-    assertEquals(
-        parseTreeMatcher.generalizeTwoGroupedPhrasesDeterministic(ch1, ch2)
-            .toString(), "[ [VB-run IN-* NP-tigers NP-zoo ]]");
-
-    List<List<ParseTreeChunk>> lch1 = parseTreeChunk
-        .obtainParseTreeChunkListByParsingList("[[[DT-all NNS-children WHNP-who VBP-are CD-four NNS-years JJ-old IN-on CC-or IN-before NP-September ]]]");
-    List<List<ParseTreeChunk>> lch2 = parseTreeChunk
-        .obtainParseTreeChunkListByParsingList("[[[NP-Children CD-four NNS-years JJ-old ]]]");
-
-    chRes = parseTreeMatcher.matchTwoSentencesGroupedChunksDeterministic(lch1,
-        lch2);
-    System.out.println("generalization result = " + chRes + " score  ="
-        + parseTreeChunkListScorer.getParseTreeChunkListScore(chRes));
-    assertEquals(chRes.toString(),
-        "[[ [NNS-children CD-four NNS-years JJ-old ]]]");
+    assertEquals(parseTreeMatcher.generalizeTwoGroupedPhrasesDeterministic(ch1, ch2).toString(),
+            "[ [VB-run IN-* NP-tigers NP-zoo ]]");
+
+    List<List<ParseTreeChunk>> lch1 = parseTreeChunk.obtainParseTreeChunkListByParsingList(
+            "[[[DT-all NNS-children WHNP-who VBP-are CD-four NNS-years JJ-old IN-on CC-or IN-before NP-September ]]]");
+    List<List<ParseTreeChunk>> lch2 = parseTreeChunk.obtainParseTreeChunkListByParsingList(
+            "[[[NP-Children CD-four NNS-years JJ-old ]]]");
+
+    chRes = parseTreeMatcher.matchTwoSentencesGroupedChunksDeterministic(lch1, lch2);
+    assertEquals(chRes.toString(), "[[ [NNS-children CD-four NNS-years JJ-old ]]]");
     assertTrue(parseTreeChunkListScorer.getParseTreeChunkListScore(chRes) > 3);
 
   }
diff --git a/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/SyntMatcherTest.java b/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/SyntMatcherTest.java
index 8d64950..ae503d9 100644
--- a/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/SyntMatcherTest.java
+++ b/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/SyntMatcherTest.java
@@ -18,33 +18,39 @@
 
 package opennlp.tools.textsimilarity;
 
-import static org.junit.Assert.*;
-import static org.junit.Assert.assertNotNull;
-
 import java.util.List;
 
-import junit.framework.TestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
 
 import opennlp.tools.textsimilarity.chunker2matcher.ParserChunker2MatcherProcessor;
 
-import org.junit.Test;
-import org.junit.runner.RunWith;
+import static org.junit.Assert.*;
+import static org.junit.Assert.assertNotNull;
 
-public class SyntMatcherTest extends TestCase {
+public class SyntMatcherTest {
 
+  private final ParseTreeChunk parseTreeChunk = new ParseTreeChunk();
+  
   private ParserChunker2MatcherProcessor parserChunker2Matcher;
 
-  private ParseTreeChunk parseTreeChunk = new ParseTreeChunk();
-
-  public void notNullTest() {
+  @Before
+  public void setup() {
     parserChunker2Matcher = ParserChunker2MatcherProcessor.getInstance();
     assertNotNull(parserChunker2Matcher);
   }
 
+  @After
+  public void cleanUp() {
+    if (parserChunker2Matcher != null) {
+      parserChunker2Matcher.close();
+    }
+  }
+
+  @Test
   public void testMatch() {
-    parserChunker2Matcher = ParserChunker2MatcherProcessor.getInstance();
-    List<List<ParseTreeChunk>> matchResult = parserChunker2Matcher
-        .assessRelevance(
+    List<List<ParseTreeChunk>> matchResult = parserChunker2Matcher.assessRelevance(
             // "Can I get auto focus lens for digital camera",
             // "How can I get short focus zoom lens for digital camera"
             "Pulitzer Prize-Winning Reporter is an Illegal Immigrant",
@@ -53,74 +59,60 @@ public class SyntMatcherTest extends TestCase {
                 + "for his coverage of the Virginia Tech shootings in the Washington Post")
         .getMatchResult();
 
-    System.out.println(matchResult);
     assertEquals(
         "[[ [NNP-pulitzer NNP-prize NNP-winning NNP-reporter ],  [NN-immigrant ]], []]",
         matchResult.toString());
-    System.out.println(parseTreeChunk.listToString(matchResult));
     assertEquals(
         " np [ [NNP-pulitzer NNP-prize NNP-winning NNP-reporter ],  [NN-immigrant ]]",
         parseTreeChunk.listToString(matchResult));
 
-    matchResult = parserChunker2Matcher
-        .assessRelevance(
+    matchResult = parserChunker2Matcher.assessRelevance(
             "Sounds too good to be true but it actually is, the world's first flying car is finally here. ",
             "While it may seem like something straight out of a sci-fi "
                 + "movie, the  flying  car  might soon become a reality. ")
         .getMatchResult();
 
     // TODO: possibly problem in new POS tagger from Parser
-    System.out.println(matchResult);
     // was "[[ [DT-the NN-* VBG-flying NN-car ]], []]"
     assertEquals(
         "[[ [PRP-it ],  [DT-the NN-* NNS-* ]], [ [DT-the NN-* NNS-* ]]]",
         matchResult.toString());
-    System.out.println(parseTreeChunk.listToString(matchResult));
     assertEquals(
         " np [ [PRP-it ],  [DT-the NN-* NNS-* ]] vp [ [DT-the NN-* NNS-* ]]",
         parseTreeChunk.listToString(matchResult));
-
-    parserChunker2Matcher.close();
-
   }
 
+  @Test
   public void testMatchDigitalCamera() {
-    parserChunker2Matcher = ParserChunker2MatcherProcessor.getInstance();
-    List<List<ParseTreeChunk>> matchResult = parserChunker2Matcher
-        .assessRelevance(
+    List<List<ParseTreeChunk>> matchResult = parserChunker2Matcher.assessRelevance(
             "I am curious how to use the digital zoom of this camera for filming insects",
             "How can I get short focus zoom lens for digital camera")
         .getMatchResult();
 
-    System.out.println(matchResult);
     assertEquals(
         "[[ [PRP-i ],  [NN-zoom NN-camera ],  [JJ-digital NN-* ],  [NN-* IN-for ]], [ [JJ-digital NN-* ],  [NN-zoom NN-camera ],  [NN-* IN-for ]]]",
         matchResult.toString());
-    System.out.println(parseTreeChunk.listToString(matchResult));
     assertEquals(
         " np [ [PRP-i ],  [NN-zoom NN-camera ],  [JJ-digital NN-* ],  [NN-* IN-for ]] vp [ [JJ-digital NN-* ],  [NN-zoom NN-camera ],  [NN-* IN-for ]]",
         parseTreeChunk.listToString(matchResult));
-    parserChunker2Matcher.close();
   }
 
+  @Test
   public void testHighSimilarity() {
-    parserChunker2Matcher = ParserChunker2MatcherProcessor.getInstance();
-    List<List<ParseTreeChunk>> matchResult = parserChunker2Matcher
-        .assessRelevance("Can I get auto focus lens for digital camera",
+    List<List<ParseTreeChunk>> matchResult = parserChunker2Matcher.assessRelevance(
+            "Can I get auto focus lens for digital camera",
             "How can I get short focus zoom lens for digital camera")
         .getMatchResult();
 
-    System.out.println(matchResult);
     assertEquals(
         "[[ [PRP-i ],  [NN-focus NNS-* NNS-lens IN-for JJ-digital NN-camera ]], [ [VB-get NN-focus NNS-* NNS-lens IN-for JJ-digital NN-camera ]]]",
         matchResult.toString());
-    System.out.println(parseTreeChunk.listToString(matchResult));
     assertEquals(
         " np [ [PRP-i ],  [NN-focus NNS-* NNS-lens IN-for JJ-digital NN-camera ]] vp [ [VB-get NN-focus NNS-* NNS-lens IN-for JJ-digital NN-camera ]]",
         parseTreeChunk.listToString(matchResult));
-    parserChunker2Matcher.close();
   }
 
+  @Test
   public void testZClose() {
     ParserChunker2MatcherProcessor.getInstance().close();
   }
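
(Note: SyntMatcherTest above, and ParserChunker2MatcherProcessorTest below, move resource handling out of the test bodies into @Before/@After, so the processor is acquired once per test method and closed even when an assertion fails mid-test. A minimal sketch of that lifecycle, using a hypothetical closable stand-in for the processor:)

    import java.io.StringWriter;

    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    import static org.junit.Assert.assertNotNull;

    public class LifecycleSketchTest {

      // Hypothetical closable resource, standing in for ParserChunker2MatcherProcessor.
      private StringWriter resource;

      @Before
      public void setup() {
        resource = new StringWriter();   // acquired fresh for every test method
        assertNotNull(resource);
      }

      @After
      public void cleanUp() throws Exception {
        if (resource != null) {
          resource.close();              // released even if the test body failed
        }
      }

      @Test
      public void testUsesResource() {
        resource.write("ok");
      }
    }
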
diff --git a/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/chunker2matcher/ParserChunker2MatcherProcessorTest.java b/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/chunker2matcher/ParserChunker2MatcherProcessorTest.java
index 5ea49fc..ef5e7aa 100644
--- a/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/chunker2matcher/ParserChunker2MatcherProcessorTest.java
+++ b/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/chunker2matcher/ParserChunker2MatcherProcessorTest.java
@@ -19,66 +19,77 @@ package opennlp.tools.textsimilarity.chunker2matcher;
 
 import java.util.List;
 
-import junit.framework.TestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
 
 import opennlp.tools.textsimilarity.ParseTreeChunk;
 import opennlp.tools.textsimilarity.ParseTreeChunkListScorer;
 import opennlp.tools.textsimilarity.TextSimilarityBagOfWords;
 
-public class ParserChunker2MatcherProcessorTest extends TestCase {
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+public class ParserChunker2MatcherProcessorTest {
+  private final TextSimilarityBagOfWords parserBOW = new TextSimilarityBagOfWords();
+  private final ParseTreeChunkListScorer parseTreeChunkListScorer = new ParseTreeChunkListScorer();
+  
   private ParserChunker2MatcherProcessor parser;
-  private TextSimilarityBagOfWords parserBOW = new TextSimilarityBagOfWords();
-  private ParseTreeChunkListScorer parseTreeChunkListScorer = new ParseTreeChunkListScorer();
 
-  public void testGroupedPhrasesFormer() {
+  @Before
+  public void setup() {
     parser = ParserChunker2MatcherProcessor.getInstance();
+    assertNotNull(parser);
+  }
+
+  @After
+  public void cleanUp() {
+    if (parser != null) {
+      parser.close();
+    }
+  }
+
+  @Test
+  public void testGroupedPhrasesFormer() {
     String text = "Where do I apply? Go to your town office or city hall. If your town doesn't have an office, ask the town clerk or a Selectman. Tell them that you need a 1040 tax form . I Can 't Pay the Taxes on my House: What Can I Do?. Pine Tree Legal";
 
-    List<List<ParseTreeChunk>> res = parser
-        .formGroupedPhrasesFromChunksForPara(text);
-    System.out.println(res);
+    List<List<ParseTreeChunk>> res = parser.formGroupedPhrasesFromChunksForPara(text);
     assertEquals(
         "[[NP [PRP$-your NN-town NN-office CC-or NN-city NN-hall ], NP [PRP$-your NN-town NN-doesn NN-t ], NP [DT-an NN-office ], NP [DT-the NN-town NN-clerk CC-or DT-a NNP-Selectman ], NP [DT-a NNP-Selectman ], NP [PRP-them IN-that PRP-you ], NP [PRP-you ], NP [DT-a CD-1040 NN-tax NN-form ], NP [PRP-I ], NP [DT-the NNS-Taxes IN-on PRP$-my NNP-House WP-What MD-Can PRP-I ], NP [PRP$-my NNP-House WP-What MD-Can PRP-I ], NP [WP-What MD-Can PRP-I ], NP [PRP-I ], NP [NNP-Pine NNP-Tree NNP-Leg [...]
         // "[[NP [PRP$-your NN-town NN-office CC-or NN-city NN-hall ], NP [PRP$-your NN-town NN-doesn NN-t ], NP [DT-an NN-office ], NP [DT-the NN-town NN-clerk CC-or DT-a NNP-Selectman ], NP [DT-a NNP-Selectman ], NP [PRP-them IN-that PRP-you ], NP [PRP-you ], NP [DT-a CD-1040 NN-tax NN-form ], NP [PRP-I ], NP [DT-the NNS-Taxes IN-on PRP$-my NNP-House WP-What MD-Can PRP-I ], NP [PRP$-my NNP-House WP-What MD-Can PRP-I ], NP [WP-What MD-Can PRP-I ], NP [PRP-I ], NP [NNP-Pine NNP-Tree NNP- [...]
         res.toString());
 
-    res = parser
-        .formGroupedPhrasesFromChunksForSentence("How can I get short focus zoom lens for digital camera");
+    res = parser.formGroupedPhrasesFromChunksForSentence("How can I get short focus zoom lens for digital camera");
     assertEquals(
         "[[NP [PRP-I ], NP [JJ-short NN-focus NN-zoom NN-lens IN-for JJ-digital NN-camera ], NP [JJ-digital NN-camera ]], [VP [VB-get JJ-short NN-focus NN-zoom NN-lens IN-for JJ-digital NN-camera ]], [PP [IN-for JJ-digital NN-camera ]], [], [SENTENCE [WRB-How MD-can PRP-I VB-get JJ-short NN-focus NN-zoom NN-lens IN-for JJ-digital NN-camera ]]]",
         res.toString());
 
-    res = parser
-        .formGroupedPhrasesFromChunksForSentence("Its classy design and the Mercedes name make it a very cool vehicle to drive. ");
+    res = parser.formGroupedPhrasesFromChunksForSentence("Its classy design and the Mercedes name make it a very cool vehicle to drive. ");
     assertEquals(
         "[[NP [PRP$-Its JJ-classy NN-design CC-and DT-the NNP-Mercedes NN-name ], NP [DT-the NNP-Mercedes NN-name ], NP [PRP-it DT-a RB-very JJ-cool NN-vehicle TO-to NN-drive ], NP [DT-a RB-very JJ-cool NN-vehicle TO-to NN-drive ], NP [NN-drive ]], [VP [VBP-make PRP-it DT-a RB-very JJ-cool NN-vehicle TO-to NN-drive ]], [PP [TO-to NN-drive ]], [], [SENTENCE [PRP$-Its JJ-classy NN-design CC-and DT-the NNP-Mercedes NN-name VBP-make PRP-it DT-a RB-very JJ-cool NN-vehicle TO-to NN-drive ]]]",
         res.toString());
-    res = parser
-        .formGroupedPhrasesFromChunksForSentence("Sounds too good to be true but it actually is, the world's first flying car is finally here. ");
+    res = parser.formGroupedPhrasesFromChunksForSentence("Sounds too good to be true but it actually is, the world's first flying car is finally here. ");
     assertEquals(
         "[[NP [PRP-it RB-actually ], NP [DT-the NN-world NNS-s JJ-first NN-flying NN-car ]], [VP [VBZ-Sounds RB-too JJ-good ], VP [TO-to VB-be JJ-true CC-but PRP-it RB-actually ], VP [VBZ-is DT-the NN-world NNS-s JJ-first NN-flying NN-car ], VP [VBZ-is RB-finally RB-here ]], [], [ADJP [RB-too JJ-good ], ADJP [JJ-true CC-but PRP-it RB-actually ]], [SENTENCE [VBZ-Sounds RB-too JJ-good TO-to VB-be JJ-true CC-but PRP-it RB-actually VBZ-is DT-the NN-world NNS-s JJ-first NN-flying NN-car VBZ-i [...]
         res.toString());
-    res = parser
-        .formGroupedPhrasesFromChunksForSentence("UN Ambassador Ron Prosor repeated the Israeli position that the only way the Palestinians will get UN membership and statehood is through direct negotiations with the Israelis on a comprehensive peace agreement");
+    res = parser.formGroupedPhrasesFromChunksForSentence("UN Ambassador Ron Prosor repeated the Israeli position that the only way the Palestinians will get UN membership and statehood is through direct negotiations with the Israelis on a comprehensive peace agreement");
     assertEquals(
-        "[[NP [NNP-UN NNP-Ambassador NNP-Ron NNP-Prosor ], NP [DT-the JJ-Israeli NN-position IN-that DT-the JJ-only NN-way DT-the NNPS-Palestinians ], NP [DT-the JJ-only NN-way DT-the NNPS-Palestinians ], NP [DT-the NNPS-Palestinians ], NP [NN-membership CC-and NN-statehood VBZ-is IN-through JJ-direct NNS-negotiations IN-with DT-the NNP-Israelis IN-on DT-a JJ-comprehensive NN-peace NN-agreement ], NP [JJ-direct NNS-negotiations IN-with DT-the NNP-Israelis IN-on DT-a JJ-comprehensive NN-p [...]
+            "[[NP [NNP-UN NNP-Ambassador NNP-Ron NNP-Prosor ], NP [DT-the JJ-Israeli NN-position IN-that DT-the JJ-only NN-way DT-the NNPS-Palestinians ], NP [DT-the JJ-only NN-way DT-the NNPS-Palestinians ], NP [DT-the NNPS-Palestinians ], NP [NN-membership CC-and NN-statehood VBZ-is IN-through JJ-direct NNS-negotiations IN-with DT-the NNP-Israelis IN-on DT-a JJ-comprehensive NN-peace NN-agreement ], NP [JJ-direct NNS-negotiations IN-with DT-the NNP-Israelis IN-on DT-a JJ-comprehensive  [...]
         res.toString());
-    parser.close();
   }
 
+  @Test
   public void testPrintParseTree() {
-    parser = ParserChunker2MatcherProcessor.getInstance();
     try {
-      parser
-          .printParseTree("How can I get short focus zoom lens for digital camera");
+      parser.printParseTree("How can I get short focus zoom lens for digital camera");
     } catch (Exception e) {
       // ignored when the models cannot be read
     }
-    parser.close();
   }
 
+  @Test
   public void testRelevanceAssessm() {
-    parser = ParserChunker2MatcherProcessor.getInstance();
     String phrase1 = "Its classy design and the Mercedes name make it a very cool vehicle to drive. "
         + "The engine makes it a powerful car. "
         + "The strong engine gives it enough power. "
@@ -87,28 +98,22 @@ public class ParserChunker2MatcherProcessorTest extends TestCase {
         + "This car has an amazingly good engine. "
         + "This car provides you a very good mileage.";
 
-    System.out.println(parser.assessRelevance(phrase1, phrase2)
-        .getMatchResult());
-    parser.close();
+    System.out.println(parser.assessRelevance(phrase1, phrase2).getMatchResult());
 
   }
 
+  @Test
   public void testCompareRelevanceAssessmWithBagOfWords() {
-    parser = ParserChunker2MatcherProcessor.getInstance();
     // we first demonstrate that similarity expressions for DIFFERENT cases
     // score too high with bagOfWords
     String phrase1 = "How to deduct rental expense from income ";
     String phrase2 = "How to deduct repair expense from rental income.";
-    List<List<ParseTreeChunk>> matchResult = parser.assessRelevance(phrase1,
-        phrase2).getMatchResult();
+    List<List<ParseTreeChunk>> matchResult = parser.assessRelevance(phrase1, phrase2).getMatchResult();
     assertEquals(      
         "[[ [NN-expense IN-from NN-income ],  [JJ-rental NN-* ]], [ [TO-to VB-deduct JJ-rental NN-* ],  [VB-deduct NN-expense IN-from NN-income ]]]", 
         matchResult.toString());
-    System.out.println(matchResult);
-    double matchScore = parseTreeChunkListScorer
-        .getParseTreeChunkListScore(matchResult);
-    double bagOfWordsScore = parserBOW.assessRelevanceAndGetScore(phrase1,
-        phrase2);
+    double matchScore = parseTreeChunkListScorer.getParseTreeChunkListScore(matchResult);
+    double bagOfWordsScore = parserBOW.assessRelevanceAndGetScore(phrase1, phrase2);
     assertTrue(matchScore + 2 < bagOfWordsScore);
     System.out.println("MatchScore is adequate ( = " + matchScore
         + ") and bagOfWordsScore = " + bagOfWordsScore + " is too high");
@@ -121,14 +126,10 @@ public class ParserChunker2MatcherProcessorTest extends TestCase {
     assertEquals(
         "[[ [JJ-* NN-expense IN-for PRP$-my NN-* ]], [ [TO-to VB-* JJ-* NN-expense IN-for PRP$-my NN-* ]]]", 
         matchResult.toString());
-    System.out.println(matchResult);
-    matchScore = parseTreeChunkListScorer
-        .getParseTreeChunkListScore(matchResult);
+    matchScore = parseTreeChunkListScorer.getParseTreeChunkListScore(matchResult);
     bagOfWordsScore = parserBOW.assessRelevanceAndGetScore(phrase1, phrase2);
     assertTrue(matchScore > 2 * bagOfWordsScore);
-    System.out.println("MatchScore is adequate ( = " + matchScore
-        + ") and bagOfWordsScore = " + bagOfWordsScore + " is too low");
-    parser.close();
+    System.out.println("MatchScore is adequate ( = " + matchScore + ") and bagOfWordsScore = " + bagOfWordsScore + " is too low");
 
   }
 }
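
For reference, the lifecycle change applied throughout this file reduces to the
following JUnit 4 pattern: acquisition and release move out of each test body and
into @Before/@After, so cleanup also happens when an assertion fails. This is a
minimal sketch, not OpenNLP code; SomeResource is a hypothetical stand-in for a
closeable singleton such as ParserChunker2MatcherProcessor:

    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    import static org.junit.Assert.assertNotNull;

    public class MigratedLifecycleTest {

      private SomeResource resource;

      @Before
      public void setup() {
        // acquired once per test instead of inside each test body
        resource = SomeResource.getInstance();
        assertNotNull(resource);
      }

      @After
      public void cleanUp() {
        // runs even when the test fails, so the resource is always released
        if (resource != null) {
          resource.close();
        }
      }

      @Test
      public void testSomething() {
        // the body no longer manages getInstance()/close() itself
      }

      /** Hypothetical stand-in for a closeable singleton; not an OpenNLP class. */
      static final class SomeResource {
        static SomeResource getInstance() { return new SomeResource(); }
        void close() { /* release models, readers, etc. */ }
      }
    }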
diff --git a/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/chunker2matcher/PhraseNodeTest.java b/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/chunker2matcher/PhraseNodeTest.java
index 62b2cf8..66b3756 100644
--- a/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/chunker2matcher/PhraseNodeTest.java
+++ b/opennlp-similarity/src/test/java/opennlp/tools/textsimilarity/chunker2matcher/PhraseNodeTest.java
@@ -19,12 +19,23 @@ package opennlp.tools.textsimilarity.chunker2matcher;
 
 import java.util.List;
 
-import junit.framework.TestCase;
+import org.junit.After;
+import org.junit.Test;
 
-public class PhraseNodeTest extends TestCase {
-  ParserChunker2MatcherProcessor proc = ParserChunker2MatcherProcessor
-      .getInstance();
+import static org.junit.Assert.assertEquals;
 
+public class PhraseNodeTest {
+  
+  private final ParserChunker2MatcherProcessor proc = ParserChunker2MatcherProcessor.getInstance();
+
+  @After
+  public void cleanUp() {
+    if (proc != null) {
+      proc.close();
+    }
+  }
+
+  @Test
   public void testPOSTagsExtraction() {
 
     SentenceNode node = proc.parseSentenceNode("How can I get there");
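
PhraseNodeTest takes a slightly different route: the processor is acquired in a
final field initializer rather than in a @Before method. Since JUnit 4 creates a
fresh instance of the test class for every test method, the field initializer
effectively acts as an implicit @Before. A minimal sketch of that equivalence,
again with a hypothetical SomeResource stand-in:

    import org.junit.After;
    import org.junit.Test;

    public class FieldInitLifecycleTest {

      // runs before every test, because JUnit 4 instantiates
      // the test class once per test method
      private final SomeResource resource = SomeResource.getInstance();

      @After
      public void cleanUp() {
        if (resource != null) {
          resource.close();
        }
      }

      @Test
      public void testUsesResource() {
        // 'resource' is already initialized here
      }

      /** Hypothetical closeable singleton stand-in; not an OpenNLP class. */
      static final class SomeResource {
        static SomeResource getInstance() { return new SomeResource(); }
        void close() { /* release underlying models */ }
      }
    }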
diff --git a/opennlp-similarity/src/test/resources/models/en-sent.bin b/opennlp-similarity/src/test/resources/models/en-sent.bin
new file mode 100644
index 0000000..e89076b
Binary files /dev/null and b/opennlp-similarity/src/test/resources/models/en-sent.bin differ