This is an automated email from the ASF dual-hosted git repository.

jzemerick pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/opennlp-sandbox.git


The following commit(s) were added to refs/heads/master by this push:
     new 305b5ff  improves resource management by using try-with-resources 
pattern more consistently (#94)
305b5ff is described below

commit 305b5ff3f02bad7646ffbcda70e3da565aba152b
Author: Martin Wiesner <[email protected]>
AuthorDate: Fri Apr 7 18:16:08 2023 +0200

    improves resource management by using try-with-resources pattern more 
consistently (#94)
    
    fixes several compiler warnings
    resolves several deprecation notices
    applies generics where useful (and obvious)
    removes redundant casts
    improves code formatting
---
 .../caseditor/SearchCorpusServerJob.java           |   2 +-
 .../connector/CSQueueCollectionReader.java         |   9 +-
 .../corpus_server/impl/DerbyCorporaStore.java      |  30 ++--
 .../corpus_server/impl/DerbyCorpusStore.java       | 117 ++++---------
 .../corpus_server/impl/LuceneSearchService.java    |  15 +-
 .../opennlp/corpus_server/tools/CorpusBackup.java  |  17 +-
 .../opennlp/corpus_server/tools/FileUtil.java      |  14 +-
 .../mahout/AbstractOnlineLearnerTrainer.java       |   6 +-
 .../addons/modelbuilder/KnownEntityProvider.java   |   4 +-
 .../modelbuilder/ModelGenerationValidator.java     |   4 +-
 .../addons/modelbuilder/ModelParameter.java        |   2 +-
 .../opennlp/addons/modelbuilder/Modelable.java     |   3 +-
 .../impls/FileKnownEntityProvider.java             |  14 +-
 .../modelbuilder/impls/FileModelValidatorImpl.java |  14 +-
 .../modelbuilder/impls/FileSentenceProvider.java   |  16 +-
 .../resolver/DefaultNonReferentialResolver.java    |   8 +-
 .../tools/coref/resolver/MaxentResolver.java       |   8 +-
 .../java/opennlp/tools/coref/sim/GenderModel.java  |   8 +-
 .../opennlp/tools/coref/sim/SimilarityModel.java   |   8 +-
 .../apps/relevanceVocabs/PhraseProcessor.java      | 182 ++++++++++-----------
 .../apps/relevanceVocabs/SynonymListFilter.java    |  21 +--
 .../review_builder/FBOpenGraphSearchManager.java   |  33 ++--
 .../ClassifierTrainingSetIndexer.java              |  20 +--
 .../tools/jsmlearning/ProfileReaderWriter.java     |  53 +-----
 .../pattern_structure/PatternStructureWriter.java  |  16 +-
 .../apps/solr/IterativeSearchRequestHandler.java   |   4 +-
 .../tools/similarity/apps/solr/WordDocBuilder.java |  45 +++--
 .../apps/taxo_builder/TaxonomySerializer.java      |  87 +++-------
 .../chunker2matcher/ParserCacheSerializer.java     |   4 +-
 .../ParserChunker2MatcherProcessor.java            |  17 +-
 .../disambiguator/DisambiguatorEvaluatorTool.java  |  26 +--
 .../opennlp/tools/disambiguator/WSDEvaluator.java  |   3 +-
 .../java/opennlp/tools/disambiguator/WSDModel.java |   2 +
 .../opennlp/tools/disambiguator/WSDSample.java     |   9 +
 .../main/java/opennlp/summarization/Sentence.java  |  20 ++-
 .../lexicalchaining/LexicalChain.java              |  15 +-
 .../wikinews_importer/AnnotatingMarkupParser.java  |   6 +-
 37 files changed, 314 insertions(+), 548 deletions(-)

diff --git 
a/caseditor-corpus-server-plugin/src/main/java/org/apache/opennlp/corpus_server/caseditor/SearchCorpusServerJob.java
 
b/caseditor-corpus-server-plugin/src/main/java/org/apache/opennlp/corpus_server/caseditor/SearchCorpusServerJob.java
index 51dfb37..8dda9aa 100644
--- 
a/caseditor-corpus-server-plugin/src/main/java/org/apache/opennlp/corpus_server/caseditor/SearchCorpusServerJob.java
+++ 
b/caseditor-corpus-server-plugin/src/main/java/org/apache/opennlp/corpus_server/caseditor/SearchCorpusServerJob.java
@@ -70,7 +70,7 @@ public class SearchCorpusServerJob extends Job {
       return new Status(IStatus.WARNING, CorpusServerPlugin.PLUGIN_ID, "Failed 
to connect to server!");
     }
     
-    if (response.getClientResponseStatus().getStatusCode() != 200) {
+    if (response.getStatusInfo().getStatusCode() != 200) {
       return new Status(IStatus.WARNING, CorpusServerPlugin.PLUGIN_ID, "Failed 
to retrieve results from server!");
     }
     
diff --git 
a/corpus-server/corpus-server-connector/src/main/java/org/apache/opennlp/corpus_server/connector/CSQueueCollectionReader.java
 
b/corpus-server/corpus-server-connector/src/main/java/org/apache/opennlp/corpus_server/connector/CSQueueCollectionReader.java
index fcded8c..ab6ef0c 100644
--- 
a/corpus-server/corpus-server-connector/src/main/java/org/apache/opennlp/corpus_server/connector/CSQueueCollectionReader.java
+++ 
b/corpus-server/corpus-server-connector/src/main/java/org/apache/opennlp/corpus_server/connector/CSQueueCollectionReader.java
@@ -167,21 +167,16 @@ public class CSQueueCollectionReader extends 
CollectionReader_ImplBase {
         .header("Content-Type", MediaType.TEXT_XML)
         .get(ClientResponse.class);
     
-    InputStream casIn = casResponse.getEntityInputStream();
-    
-    try {
+
+    try (InputStream casIn = casResponse.getEntityInputStream()) {
       UimaUtil.deserializeXmiCAS(cas, casIn);
     }
     catch (IOException e) {
       if (logger.isLoggable(Level.SEVERE)) {
         logger.log(Level.SEVERE, "Failed to load CAS: " +  casId + " code: " + 
casResponse.getStatus());
       }
-      
       throw e;
     }
-    finally {
-      casIn.close();
-    }
     
     if (idType != null && idFeature != null) {
       FeatureStructure idFS = cas.createFS(idType);
diff --git 
a/corpus-server/corpus-server-impl/src/main/java/org/apache/opennlp/corpus_server/impl/DerbyCorporaStore.java
 
b/corpus-server/corpus-server-impl/src/main/java/org/apache/opennlp/corpus_server/impl/DerbyCorporaStore.java
index 88ed6f9..45d8f6e 100644
--- 
a/corpus-server/corpus-server-impl/src/main/java/org/apache/opennlp/corpus_server/impl/DerbyCorporaStore.java
+++ 
b/corpus-server/corpus-server-impl/src/main/java/org/apache/opennlp/corpus_server/impl/DerbyCorporaStore.java
@@ -71,9 +71,10 @@ public class DerbyCorporaStore extends AbstractCorporaStore {
 
   private void readTable(Set<String> set, DatabaseMetaData md, String 
searchCriteria, String schema)
           throws SQLException {
-    ResultSet rs = md.getTables(null, schema, null, new String[]{ 
searchCriteria });
-    while (rs.next()) {
-      set.add(rs.getString("TABLE_NAME").toLowerCase());
+    try (ResultSet rs = md.getTables(null, schema, null, new String[]{ 
searchCriteria })) {
+      while (rs.next()) {
+        set.add(rs.getString("TABLE_NAME").toLowerCase());
+      }
     }
   }
 
@@ -117,29 +118,26 @@ public class DerbyCorporaStore extends 
AbstractCorporaStore {
       }
 
     } catch (SQLException e) {
-      
       if (LOGGER.isLoggable(Level.SEVERE)) {
         LOGGER.log(Level.SEVERE, "Failed to create corpus: " + corpusName, e);
       }
-      
       throw new IOException(e);
     }
     
     LOGGER.info("Created new corpus: " + corpusName);
-    
-    
+
     for (CorporaChangeListener listener : getListeners()) {
       // TODO: Maybe optimize this, or just pass the corpus id
       listener.addedCorpus(getCorpus(corpusName));
     }
   }
 
+  @Override
   public Set<String> getCorpusIds() throws IOException {
     
     Set<String> corpusIds = new HashSet<>();
     
-    try {
-      Connection conn = dataSource.getConnection();
+    try (Connection conn = dataSource.getConnection()) {
       DatabaseMetaData dbmd = conn.getMetaData();
 
       String[] types = { "TABLE" };
@@ -150,14 +148,10 @@ public class DerbyCorporaStore extends 
AbstractCorporaStore {
         corpusIds.add(tableName.toLowerCase());
       }
 
-      conn.close();
-
     } catch (SQLException e) {
-
       if (LOGGER.isLoggable(Level.SEVERE)) {
         LOGGER.log(Level.SEVERE, "Failed to retrieve corpus ids!", e);
       }
-
       throw new IOException(e);
     }
     
@@ -167,14 +161,12 @@ public class DerbyCorporaStore extends 
AbstractCorporaStore {
   @Override
   public void dropCorpus(String corpusName) throws IOException {
 
-    try {
-      Connection conn = dataSource.getConnection();
+    try (Connection conn = dataSource.getConnection()) {
       Statement s = conn.createStatement();
       s.execute("drop table " + corpusName);
       s.close();
 
       conn.commit();
-      conn.close();
     } catch (SQLException e) {
       if (LOGGER.isLoggable(Level.SEVERE)) {
         LOGGER.log(Level.SEVERE, "Failed to create corpus: " + corpusName, e);
@@ -199,9 +191,8 @@ public class DerbyCorporaStore extends AbstractCorporaStore 
{
     
     DerbyCorpusStore corpusStore = null;
     
-    try {
-      DatabaseMetaData metadata;
-      metadata = dataSource.getConnection().getMetaData();
+    try (Connection conn = dataSource.getConnection()) {
+      DatabaseMetaData metadata = conn.getMetaData();
       String[] names = { "TABLE" };
       ResultSet tableNames = metadata.getTables(null, null, null, names);
 
@@ -214,7 +205,6 @@ public class DerbyCorporaStore extends AbstractCorporaStore 
{
         }
       }
     } catch (SQLException e) {
-      
       if (LOGGER.isLoggable(Level.SEVERE)) {
         LOGGER.log(Level.SEVERE, "Failed to check if corpus exists!", e);
       }
diff --git 
a/corpus-server/corpus-server-impl/src/main/java/org/apache/opennlp/corpus_server/impl/DerbyCorpusStore.java
 
b/corpus-server/corpus-server-impl/src/main/java/org/apache/opennlp/corpus_server/impl/DerbyCorpusStore.java
index 5e48821..d287075 100644
--- 
a/corpus-server/corpus-server-impl/src/main/java/org/apache/opennlp/corpus_server/impl/DerbyCorpusStore.java
+++ 
b/corpus-server/corpus-server-impl/src/main/java/org/apache/opennlp/corpus_server/impl/DerbyCorpusStore.java
@@ -56,30 +56,22 @@ public class DerbyCorpusStore implements CorpusStore {
     
     byte[] casBytes  = null;
     
-    try {
-      Connection conn = dataSource.getConnection();
-      
-      PreparedStatement ps = conn.prepareStatement("select * from " + 
-          corpusName + " where name=?");
+    try (Connection conn = dataSource.getConnection();
+         PreparedStatement ps = conn.prepareStatement("select * from " + 
corpusName + " where name=?")) {
+
       ps.setString(1, casId);
       
       ResultSet casResult = ps.executeQuery();
-      
       if (casResult.next()) {
         casBytes = casResult.getBytes(2);
       }
       
       casResult.close();
-      ps.close();
-      conn.close();
-      
+
     } catch (SQLException e) {
-      
       if (LOGGER.isLoggable(Level.SEVERE)) {
-        LOGGER.log(Level.SEVERE, "Failed to retrieve CAS: " + 
-            casId, e);
+        LOGGER.log(Level.SEVERE, "Failed to retrieve CAS: " + casId, e);
       }
-      
       throw new IOException(e);
     }
     
@@ -89,30 +81,20 @@ public class DerbyCorpusStore implements CorpusStore {
   @Override
   public void addCAS(String casID, byte[] content) throws IOException {
     
-    try {
-      Connection conn = dataSource.getConnection();
-      PreparedStatement ps = conn.prepareStatement("insert into " + 
-          corpusName + " values (?, ?)");
-      
+    try (Connection conn = dataSource.getConnection();
+         PreparedStatement ps = conn.prepareStatement("insert into " + 
corpusName + " values (?, ?)")) {
       ps.setString(1, casID);
       
       Blob b = conn.createBlob();
       b.setBytes(1, content);
       ps.setBlob(2, b);
-      
       ps.executeUpdate();
-      
+
       conn.commit();
-      
-      ps.close();
-      conn.close();
     } catch (SQLException e) {
-      
       if (LOGGER.isLoggable(Level.SEVERE)) {
-        LOGGER.log(Level.SEVERE, "Failed to add CAS: " + 
-            casID, e);
+        LOGGER.log(Level.SEVERE, "Failed to add CAS: " + casID, e);
       }
-      
       throw new IOException(e);
     }
     
@@ -123,30 +105,21 @@ public class DerbyCorpusStore implements CorpusStore {
 
   @Override
   public void updateCAS(String casID, byte[] content) throws IOException {
-    try {
-      Connection conn = dataSource.getConnection();
-      PreparedStatement ps = conn.prepareStatement("update " + 
-          corpusName + " set cas = ? where name = ?");
-      
+    try (Connection conn = dataSource.getConnection();
+         PreparedStatement ps = conn.prepareStatement("update " + corpusName + 
" set cas = ? where name = ?")) {
+
       ps.setString(2, casID);
       
       Blob b = conn.createBlob();
       b.setBytes(1, content);
       ps.setBlob(1, b);
-      
       ps.executeUpdate();
       
       conn.commit();
-      
-      ps.close();
-      conn.close();
     } catch (SQLException e) {
-      
       if (LOGGER.isLoggable(Level.SEVERE)) {
-        LOGGER.log(Level.SEVERE, "Failed to add CAS: " + 
-            casID, e);
+        LOGGER.log(Level.SEVERE, "Failed to add CAS: " + casID, e);
       }
-      
       throw new IOException(e);
     }
     
@@ -158,26 +131,16 @@ public class DerbyCorpusStore implements CorpusStore {
   @Override
   public void removeCAS(String casID) throws IOException {
     
-    try {
-      Connection conn = dataSource.getConnection();
-      PreparedStatement ps = conn.prepareStatement("delete from " + 
-          corpusName + " where name = ?");
-      
+    try (Connection conn = dataSource.getConnection();
+         PreparedStatement ps = conn.prepareStatement("delete from " + 
corpusName + " where name = ?")) {
       ps.setString(1, casID);
-      
       ps.executeUpdate();
       
       conn.commit();
-      
-      ps.close();
-      conn.close();
     } catch (SQLException e) {
-      
       if (LOGGER.isLoggable(Level.SEVERE)) {
-        LOGGER.log(Level.SEVERE, "Failed to remove CAS: " + 
-            casID, e);
+        LOGGER.log(Level.SEVERE, "Failed to remove CAS: " + casID, e);
       }
-      
       throw new IOException(e);
     }
     
@@ -188,61 +151,44 @@ public class DerbyCorpusStore implements CorpusStore {
   
   @Override
   public void replaceTypeSystem(byte[] newTypeSystem) throws IOException {
-    try {
-      Connection conn = dataSource.getConnection();
+    try (Connection conn = dataSource.getConnection();
+         PreparedStatement typeSystemPS = conn.prepareStatement("update " +
+                 corpusName + " set cas = ? where name = ?")) {
       // Replace the type system
-      PreparedStatement typeSystemPS = conn.prepareStatement("update " + 
-          corpusName + " set cas = ? where name = ?");
-
       typeSystemPS.setString(2, "_typesystem");
 
       Blob typeSystemBlob = conn.createBlob();
       typeSystemBlob.setBytes(1, newTypeSystem);
       typeSystemPS.setBlob(1, typeSystemBlob);
-
       typeSystemPS.executeUpdate();
       
       conn.commit();
-      
-      typeSystemPS.close();
-      conn.close();
     } catch (SQLException e) {
-      
       if (LOGGER.isLoggable(Level.SEVERE)) {
         LOGGER.log(Level.SEVERE, "Failed to replace the Type System!", e);
       }
-      
       throw new IOException(e);
     }
   }
   
   @Override
   public byte[] getTypeSystem() throws IOException {
-    
     byte[] tsBytes;
     
-    try {
-      Connection conn = dataSource.getConnection();
-      Statement s = conn.createStatement();
-      ResultSet tsResult = s.executeQuery("select * FROM " + corpusName + 
-          " WHERE name='_typesystem'");
-      
+    try (Connection conn = dataSource.getConnection();
+         ResultSet tsResult = conn.createStatement().executeQuery(
+                 "select * FROM " + corpusName + " WHERE name='_typesystem'")) 
{
+
       if (tsResult.next()) {
         tsBytes = tsResult.getBytes(2);
-      }
-      else {
+      } else {
         throw new IOException("Failed to retrieve type system!");
       }
       
-      tsResult.close();
-      s.close();
-      conn.close();
     } catch (SQLException e) {
-      
       if (LOGGER.isLoggable(Level.SEVERE)) {
         LOGGER.log(Level.SEVERE, "Failed to retrieve type system", e);
       }
-      
       throw new IOException(e);
     }
     
@@ -253,25 +199,18 @@ public class DerbyCorpusStore implements CorpusStore {
   public byte[] getIndexMapping() throws IOException {
     byte[] indexMappingBytes = null;
     
-    try {
-      Connection conn = dataSource.getConnection();
-      Statement s = conn.createStatement();
-      ResultSet indexMappingResult = s.executeQuery("select * FROM " + 
corpusName + 
-          " WHERE name='_indexMapping'");
-      
+    try (Connection conn = dataSource.getConnection();
+         ResultSet indexMappingResult = conn.createStatement().executeQuery(
+                 "select * FROM " + corpusName + " WHERE 
name='_indexMapping'")) {
+
       if (indexMappingResult.next()) {
         indexMappingBytes = indexMappingResult.getBytes(2);
       }
       
-      indexMappingResult.close();
-      s.close();
-      conn.close();
     } catch (SQLException e) {
-      
       if (LOGGER.isLoggable(Level.SEVERE)) {
         LOGGER.log(Level.SEVERE, "Failed to retrieve type system", e);
       }
-      
       throw new IOException(e);
     }
     
diff --git 
a/corpus-server/corpus-server-impl/src/main/java/org/apache/opennlp/corpus_server/impl/LuceneSearchService.java
 
b/corpus-server/corpus-server-impl/src/main/java/org/apache/opennlp/corpus_server/impl/LuceneSearchService.java
index d216ac4..8d207c1 100644
--- 
a/corpus-server/corpus-server-impl/src/main/java/org/apache/opennlp/corpus_server/impl/LuceneSearchService.java
+++ 
b/corpus-server/corpus-server-impl/src/main/java/org/apache/opennlp/corpus_server/impl/LuceneSearchService.java
@@ -233,24 +233,19 @@ public class LuceneSearchService implements SearchService 
{
     String corpusId = store.getCorpusId();
     
     AnalysisEngine indexer = corpusIndexerMap.get(corpusId);
-    
-    InputStream indexTsIn = LuceneSearchService.class.getResourceAsStream(
-        "/org/apache/opennlp/corpus_server/impl/TypeSystem.xml");
-    
+
     TypeSystemDescription indexTypeDesc;
-    try {
+    try (InputStream indexTsIn = LuceneSearchService.class.getResourceAsStream(
+        "/org/apache/opennlp/corpus_server/impl/TypeSystem.xml")) {
       indexTypeDesc = UimaUtil.createTypeSystemDescription(indexTsIn);
     }
-    finally {
-      indexTsIn.close();
-    }
-    
+
     List<MetaDataObject> specs = new ArrayList<>();
     specs.add(indexTypeDesc);
     TypeSystemDescription tsDescription = UimaUtil.createTypeSystemDescription(
           new ByteArrayInputStream(store.getTypeSystem()));
     specs.add(tsDescription);
-    
+
     // Note: This might be a performance problem
     CAS cas;
     try {
diff --git 
a/corpus-server/corpus-server-tools/src/main/java/org/apache/opennlp/corpus_server/tools/CorpusBackup.java
 
b/corpus-server/corpus-server-tools/src/main/java/org/apache/opennlp/corpus_server/tools/CorpusBackup.java
index d07a548..66b7a65 100644
--- 
a/corpus-server/corpus-server-tools/src/main/java/org/apache/opennlp/corpus_server/tools/CorpusBackup.java
+++ 
b/corpus-server/corpus-server-tools/src/main/java/org/apache/opennlp/corpus_server/tools/CorpusBackup.java
@@ -79,11 +79,9 @@ public class CorpusBackup {
     File backupFile = new File(args[2]);
 
     // create zip file
-    OutputStream backupOut;
-        
-    try {
-      backupOut = new FileOutputStream(backupFile);
-      ZipOutputStream zipPackageOut = new ZipOutputStream(backupOut);
+
+    try (OutputStream backupOut = new FileOutputStream(backupFile);
+         ZipOutputStream zipPackageOut = new ZipOutputStream(backupOut)){
     
       WebResource corpusWebResource = c.resource(args[0] + "/corpora/" + 
corpusId);
       
@@ -96,10 +94,9 @@ public class CorpusBackup {
           .get(ClientResponse.class);
       
       zipPackageOut.putNextEntry(new ZipEntry("TypeSystem.xml"));
-      InputStream tsIn = tsResponse.getEntityInputStream();
-      
-      copyStream(tsIn, zipPackageOut);
-      tsIn.close();
+      try (InputStream tsIn = tsResponse.getEntityInputStream()) {
+        copyStream(tsIn, zipPackageOut);
+      }
       zipPackageOut.closeEntry();
       
       // consume task queue
@@ -139,8 +136,6 @@ public class CorpusBackup {
         
         System.out.println(casId);
       }
-      
-      zipPackageOut.close();
     }
     catch (IOException e) {
       e.printStackTrace();
diff --git 
a/corpus-server/corpus-server-tools/src/main/java/org/apache/opennlp/corpus_server/tools/FileUtil.java
 
b/corpus-server/corpus-server-tools/src/main/java/org/apache/opennlp/corpus_server/tools/FileUtil.java
index cff9781..8f8a4f4 100644
--- 
a/corpus-server/corpus-server-tools/src/main/java/org/apache/opennlp/corpus_server/tools/FileUtil.java
+++ 
b/corpus-server/corpus-server-tools/src/main/java/org/apache/opennlp/corpus_server/tools/FileUtil.java
@@ -27,23 +27,15 @@ public class FileUtil {
 
   static byte[] fileToBytes(File file) throws IOException {
 
-    ByteArrayOutputStream fileBytes = new ByteArrayOutputStream(
-        (int) file.length());
-
-    InputStream fileIn = new FileInputStream(file);
-
-    try {
+    try (ByteArrayOutputStream fileBytes = new ByteArrayOutputStream((int) 
file.length());
+         InputStream fileIn = new FileInputStream(file)) {
       byte[] buffer = new byte[1024];
       int length;
       while ((length = fileIn.read(buffer)) > 0) {
         fileBytes.write(buffer, 0, length);
       }
+      return fileBytes.toByteArray();
     }
-    finally {
-      fileIn.close();
-    }
-    
-    return fileBytes.toByteArray();
   }
 
 }
diff --git 
a/mahout-addon/src/main/java/opennlp/addons/mahout/AbstractOnlineLearnerTrainer.java
 
b/mahout-addon/src/main/java/opennlp/addons/mahout/AbstractOnlineLearnerTrainer.java
index 320415a..0929b84 100644
--- 
a/mahout-addon/src/main/java/opennlp/addons/mahout/AbstractOnlineLearnerTrainer.java
+++ 
b/mahout-addon/src/main/java/opennlp/addons/mahout/AbstractOnlineLearnerTrainer.java
@@ -22,12 +22,12 @@ package opennlp.addons.mahout;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.mahout.math.RandomAccessSparseVector;
+import org.apache.mahout.math.Vector;
+
 import opennlp.tools.ml.AbstractEventTrainer;
 import opennlp.tools.ml.model.DataIndexer;
-
 import opennlp.tools.util.TrainingParameters;
-import org.apache.mahout.math.RandomAccessSparseVector;
-import org.apache.mahout.math.Vector;
 
 abstract class AbstractOnlineLearnerTrainer extends AbstractEventTrainer {
 
diff --git 
a/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/KnownEntityProvider.java
 
b/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/KnownEntityProvider.java
index a905f02..1362d1b 100644
--- 
a/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/KnownEntityProvider.java
+++ 
b/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/KnownEntityProvider.java
@@ -15,12 +15,14 @@
  */
 package opennlp.addons.modelbuilder;
 
+import opennlp.addons.modelbuilder.impls.BaseModelBuilderParams;
+
 import java.util.Set;
 
 /**
  * Supplies a list of known entities (a list of names or locations)
  */
-public interface KnownEntityProvider extends ModelParameter {
+public interface KnownEntityProvider extends 
ModelParameter<BaseModelBuilderParams> {
 
   /**
    * Returns a list of known non-ambiguous entities.
diff --git 
a/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/ModelGenerationValidator.java
 
b/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/ModelGenerationValidator.java
index 047e8a3..6ef8a9e 100644
--- 
a/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/ModelGenerationValidator.java
+++ 
b/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/ModelGenerationValidator.java
@@ -15,12 +15,14 @@
  */
 package opennlp.addons.modelbuilder;
 
+import opennlp.addons.modelbuilder.impls.BaseModelBuilderParams;
+
 import java.util.Collection;
 
 /**
  * Validates results from the iterative namefinding
  */
-public interface ModelGenerationValidator extends ModelParameter {
+public interface ModelGenerationValidator extends 
ModelParameter<BaseModelBuilderParams> {
 
   Boolean validSentence(String sentence);
 
diff --git 
a/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/ModelParameter.java
 
b/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/ModelParameter.java
index 1285323..2a946d8 100644
--- 
a/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/ModelParameter.java
+++ 
b/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/ModelParameter.java
@@ -17,7 +17,7 @@ package opennlp.addons.modelbuilder;
 
 import opennlp.addons.modelbuilder.impls.BaseModelBuilderParams;
 
-public interface ModelParameter<T extends  BaseModelBuilderParams>{
+public interface ModelParameter<T extends  BaseModelBuilderParams> {
    
   void setParameters(T params);
 
diff --git 
a/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/Modelable.java 
b/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/Modelable.java
index 2f78254..37cb54f 100644
--- 
a/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/Modelable.java
+++ 
b/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/Modelable.java
@@ -17,9 +17,10 @@ package opennlp.addons.modelbuilder;
 
 import java.util.Set;
 
+import opennlp.addons.modelbuilder.impls.BaseModelBuilderParams;
 import opennlp.tools.namefind.TokenNameFinderModel;
 
-public interface Modelable extends ModelParameter {
+public interface Modelable extends ModelParameter<BaseModelBuilderParams> {
 
   String annotate(String sentence, String namedEntity, String entityType);
 
diff --git 
a/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/impls/FileKnownEntityProvider.java
 
b/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/impls/FileKnownEntityProvider.java
index 69fce62..c897532 100644
--- 
a/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/impls/FileKnownEntityProvider.java
+++ 
b/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/impls/FileKnownEntityProvider.java
@@ -18,7 +18,6 @@ package opennlp.addons.modelbuilder.impls;
 import java.io.BufferedReader;
 import java.io.FileInputStream;
 import java.io.IOException;
-import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.nio.charset.StandardCharsets;
 import java.util.HashSet;
@@ -36,21 +35,12 @@ public class FileKnownEntityProvider implements 
KnownEntityProvider {
   @Override
   public Set<String> getKnownEntities() {
     if (knownEntities.isEmpty()) {
-      try {
-        InputStream fis;
-        BufferedReader br;
+      try (BufferedReader br = new BufferedReader(new InputStreamReader(
+              new FileInputStream(params.getKnownEntitiesFile()), 
StandardCharsets.UTF_8))) {
         String line;
-
-        fis = new FileInputStream(params.getKnownEntitiesFile());
-        br = new BufferedReader(new InputStreamReader(fis, 
StandardCharsets.UTF_8));
         while ((line = br.readLine()) != null) {
           knownEntities.add(line);
         }
-
-        // Done with the file
-        br.close();
-        br = null;
-        fis = null;
       } catch (IOException ex) {
         
Logger.getLogger(FileKnownEntityProvider.class.getName()).log(Level.SEVERE, 
null, ex);
       }
diff --git 
a/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/impls/FileModelValidatorImpl.java
 
b/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/impls/FileModelValidatorImpl.java
index e005615..8c0703b 100644
--- 
a/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/impls/FileModelValidatorImpl.java
+++ 
b/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/impls/FileModelValidatorImpl.java
@@ -18,7 +18,6 @@ package opennlp.addons.modelbuilder.impls;
 import java.io.BufferedReader;
 import java.io.FileInputStream;
 import java.io.IOException;
-import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.nio.charset.StandardCharsets;
 import java.util.Collection;
@@ -30,7 +29,7 @@ import java.util.logging.Logger;
 import opennlp.addons.modelbuilder.ModelGenerationValidator;
 
 /**
- *Validates NER results input before inclusion into the model
+ * Validates NER results input before inclusion into the model.
  */
 public class FileModelValidatorImpl implements ModelGenerationValidator {
 
@@ -72,19 +71,12 @@ public class FileModelValidatorImpl implements 
ModelGenerationValidator {
       return badentities;
     }
     if (!badentities.isEmpty()) {
-      try {
-        InputStream fis;
-        BufferedReader br;
+      try (BufferedReader br = new BufferedReader(new InputStreamReader(
+              new FileInputStream(params.getKnownEntityBlacklist()), 
StandardCharsets.UTF_8))) {
         String line;
-
-        fis = new FileInputStream(params.getKnownEntityBlacklist());
-        br = new BufferedReader(new InputStreamReader(fis, 
StandardCharsets.UTF_8));
         while ((line = br.readLine()) != null) {
           badentities.add(line);
         }
-        br.close();
-        br = null;
-        fis = null;
       } catch (IOException ex) {
         
Logger.getLogger(FileKnownEntityProvider.class.getName()).log(Level.SEVERE, 
null, ex);
       }
diff --git 
a/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/impls/FileSentenceProvider.java
 
b/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/impls/FileSentenceProvider.java
index 481f421..ec0393f 100644
--- 
a/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/impls/FileSentenceProvider.java
+++ 
b/modelbuilder-addon/src/main/java/opennlp/addons/modelbuilder/impls/FileSentenceProvider.java
@@ -18,7 +18,6 @@ package opennlp.addons.modelbuilder.impls;
 import java.io.BufferedReader;
 import java.io.FileInputStream;
 import java.io.IOException;
-import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.nio.charset.StandardCharsets;
 import java.util.HashSet;
@@ -39,23 +38,12 @@ public class FileSentenceProvider implements 
SentenceProvider {
   @Override
   public Set<String> getSentences() {
      if (sentences.isEmpty()) {
-      try {
-        InputStream fis;
-        BufferedReader br;
+      try (BufferedReader br = new BufferedReader(new InputStreamReader(
+              new FileInputStream(params.getSentenceFile()), 
StandardCharsets.UTF_8))) {
         String line;
-
-        fis = new FileInputStream(params.getSentenceFile());
-        br = new BufferedReader(new InputStreamReader(fis, 
StandardCharsets.UTF_8));
-        int i=0;
         while ((line = br.readLine()) != null) {
-         
           sentences.add(line);
         }
-
-        // Done with the file
-        br.close();
-        br = null;
-        fis = null;
       } catch (IOException ex) {
         
Logger.getLogger(FileKnownEntityProvider.class.getName()).log(Level.SEVERE, 
null, ex);
       }
diff --git 
a/opennlp-coref/src/main/java/opennlp/tools/coref/resolver/DefaultNonReferentialResolver.java
 
b/opennlp-coref/src/main/java/opennlp/tools/coref/resolver/DefaultNonReferentialResolver.java
index 1e37977..6e23c3d 100644
--- 
a/opennlp-coref/src/main/java/opennlp/tools/coref/resolver/DefaultNonReferentialResolver.java
+++ 
b/opennlp-coref/src/main/java/opennlp/tools/coref/resolver/DefaultNonReferentialResolver.java
@@ -127,11 +127,11 @@ public class DefaultNonReferentialResolver implements 
NonReferentialResolver {
     if (ResolverMode.TRAIN == mode) {
       System.err.println(this + " referential");
       if (debugOn) {
-        FileWriter writer = new FileWriter(modelName + ".events");
-        for (Event e : events) {
-          writer.write(e.toString() + "\n");
+        try (FileWriter writer = new FileWriter(modelName + ".events")) {
+          for (Event e : events) {
+            writer.write(e.toString() + "\n");
+          }
         }
-        writer.close();
       }
       TrainingParameters params = TrainingParameters.defaultParams();
       params.put(TrainingParameters.ITERATIONS_PARAM, 100);
diff --git 
a/opennlp-coref/src/main/java/opennlp/tools/coref/resolver/MaxentResolver.java 
b/opennlp-coref/src/main/java/opennlp/tools/coref/resolver/MaxentResolver.java
index f0ead35..858765d 100644
--- 
a/opennlp-coref/src/main/java/opennlp/tools/coref/resolver/MaxentResolver.java
+++ 
b/opennlp-coref/src/main/java/opennlp/tools/coref/resolver/MaxentResolver.java
@@ -340,11 +340,11 @@ public abstract class MaxentResolver extends 
AbstractResolver {
     if (ResolverMode.TRAIN == mode) {
       if (DEBUG) {
         System.err.println(this + " referential");
-        FileWriter writer = new FileWriter(modelName + ".events");
-        for (Event e : events) {
-          writer.write(e.toString() + "\n");
+        try (FileWriter writer = new FileWriter(modelName + ".events")) {
+          for (Event e : events) {
+            writer.write(e.toString() + "\n");
+          }
         }
-        writer.close();
       }
       TrainingParameters params = TrainingParameters.defaultParams();
       params.put(TrainingParameters.ITERATIONS_PARAM, 100);
diff --git 
a/opennlp-coref/src/main/java/opennlp/tools/coref/sim/GenderModel.java 
b/opennlp-coref/src/main/java/opennlp/tools/coref/sim/GenderModel.java
index c1ffb6e..39f3a61 100644
--- a/opennlp-coref/src/main/java/opennlp/tools/coref/sim/GenderModel.java
+++ b/opennlp-coref/src/main/java/opennlp/tools/coref/sim/GenderModel.java
@@ -251,11 +251,11 @@ public class GenderModel implements TestGenderModel, 
TrainSimilarityModel {
   @Override
   public void trainModel() throws IOException {
     if (debugOn) {
-      FileWriter writer = new FileWriter(modelName + ".events");
-      for (Event e : events) {
-        writer.write(e.toString() + "\n");
+      try (FileWriter writer = new FileWriter(modelName + ".events")) {
+        for (Event e : events) {
+          writer.write(e.toString() + "\n");
+        }
       }
-      writer.close();
     }
     GISTrainer trainer = new GISTrainer();
     trainer.init(TrainingParameters.defaultParams(), null);
diff --git 
a/opennlp-coref/src/main/java/opennlp/tools/coref/sim/SimilarityModel.java 
b/opennlp-coref/src/main/java/opennlp/tools/coref/sim/SimilarityModel.java
index 8ba4887..ce5ae5d 100644
--- a/opennlp-coref/src/main/java/opennlp/tools/coref/sim/SimilarityModel.java
+++ b/opennlp-coref/src/main/java/opennlp/tools/coref/sim/SimilarityModel.java
@@ -356,11 +356,11 @@ public class SimilarityModel implements 
TestSimilarityModel, TrainSimilarityMode
   @Override
   public void trainModel() throws IOException {
     if (debugOn) {
-      FileWriter writer = new FileWriter(modelName + ".events");
-      for (Event e : events) {
-        writer.write(e.toString() + "\n");
+      try (FileWriter writer = new FileWriter(modelName + ".events")) {
+        for (Event e : events) {
+          writer.write(e.toString() + "\n");
+        }
       }
-      writer.close();
     }
     TrainingParameters params = TrainingParameters.defaultParams();
     params.put(TrainingParameters.ITERATIONS_PARAM, 100);
diff --git 
a/opennlp-similarity/src/main/java/opennlp/tools/apps/relevanceVocabs/PhraseProcessor.java
 
b/opennlp-similarity/src/main/java/opennlp/tools/apps/relevanceVocabs/PhraseProcessor.java
index 9a05642..df48cce 100644
--- 
a/opennlp-similarity/src/main/java/opennlp/tools/apps/relevanceVocabs/PhraseProcessor.java
+++ 
b/opennlp-similarity/src/main/java/opennlp/tools/apps/relevanceVocabs/PhraseProcessor.java
@@ -34,7 +34,7 @@ public class PhraseProcessor {
        public static boolean allChildNodesArePOSTags(Parse p) {
                Parse[] subParses = p.getChildren();
                for (Parse subPars : subParses)
-                       if (!((Parse) subPars).isPosTag())
+                       if (!subPars.isPosTag())
                                return false;
                return true;
        }
@@ -49,7 +49,7 @@ public class PhraseProcessor {
                        if (subpars.getType().equals("NP") && 
allChildNodesArePOSTags(subpars)) {
                                Span _span = subpars.getSpan();
                                
nounphrases.add(p.getText().substring(_span.getStart(), _span.getEnd()));
-                       } else if (!((Parse) subpars).isPosTag())
+                       } else if (!subpars.isPosTag())
                                nounphrases.addAll(getNounPhrases(subpars));
                }
 
@@ -66,7 +66,7 @@ public class PhraseProcessor {
                        if (subpars.getType().startsWith("VB") && 
allChildNodesArePOSTags(subpars)) {
                                Span _span = subpars.getSpan();
                                
verbPhrases.add(p.getText().substring(_span.getStart(), _span.getEnd()));
-                       } else if (!((Parse) subpars).isPosTag())
+                       } else if (!subpars.isPosTag())
                                verbPhrases.addAll(getNounPhrases(subpars));
                }
 
@@ -89,9 +89,7 @@ public class PhraseProcessor {
                if (groupedChunks.size()<1)
                        return null;
 
-               List<ParseTreeChunk> vPhrases = groupedChunks.get(1);
-
-               return vPhrases;
+               return groupedChunks.get(1);
        }
 
        public List<List<ParseTreeChunk>> getPhrasesOfAllTypes(String sentence) 
{
@@ -112,102 +110,102 @@ public class PhraseProcessor {
                return groupedChunks;
        }
 
-// forms phrases from text which are candidate expressions for events lookup
-public List<String> extractNounPhraseProductNameCandidate(String sentence) {
-
-       List<String> queryArrayStr = new ArrayList<>();
+       // forms phrases from text which are candidate expressions for events 
lookup
+       public List<String> extractNounPhraseProductNameCandidate(String 
sentence) {
 
-       if (sentence.split(" ").length ==1) { // this is a word, return empty
-               //queryArrayStr.add( sentence);
-               return queryArrayStr;
-       }
-       String quoted1 = StringUtils.substringBetween(sentence, "\"", "\"");
-       String quoted2 = StringUtils.substringBetween(sentence, "\'", "\'");
-       List<List<ParseTreeChunk>> groupedChunks = 
nlProc.formGroupedPhrasesFromChunksForPara(sentence);
-       if (groupedChunks.size()<1)
-               return queryArrayStr;
+               List<String> queryArrayStr = new ArrayList<>();
 
-       List<ParseTreeChunk> nPhrases = groupedChunks.get(0);
-
-       for (ParseTreeChunk ch : nPhrases) {
-               String query = "";
-               int size = ch.getLemmas().size();
-               boolean phraseBeingFormed = false;
-               for (int i = 0; i < size; i++) {
-                       if ((ch.getPOSs().get(i).startsWith("N") || 
ch.getPOSs().get(i)
-                                       .startsWith("J") || 
ch.getPOSs().get(i).startsWith("CD") ) )
-                       //              && 
StringUtils.isAlpha(ch.getLemmas().get(i)))
-                       {
-                               query += ch.getLemmas().get(i) + " ";
-                               phraseBeingFormed = true;
-                       } else
-                               if ((ch.getPOSs().get(i).startsWith("PR") || 
ch.getPOSs().get(i).startsWith("IN") || ch.getPOSs().get(i).startsWith("TO")  )
-                                               && phraseBeingFormed )
-                                       break;
-                               else if (ch.getPOSs().get(i).startsWith("DT") 
|| ch.getPOSs().get(i).startsWith("CC"))
-                               continue;
+               if (sentence.split(" ").length ==1) { // this is a word, return 
empty
+                       //queryArrayStr.add( sentence);
+                       return queryArrayStr;
                }
-               query = query.trim();
-               int len = query.split(" ").length;
-               if (len > 5 || len < 2) // too long or too short
-                       continue;
-
-               /*
-               if (len < 4 && len>1) { // every word should start with capital
-                       String[] qs = query.split(" ");
-                       boolean bAccept = true;
-                       for (String w : qs) {
-                               if (w.toLowerCase().equals(w)) // idf only two 
words then
-                                                                               
                // has to be person name,
-                                                                               
                // title or geolocation
-                                       bAccept = false;
+               String quoted1 = StringUtils.substringBetween(sentence, "\"", 
"\"");
+               String quoted2 = StringUtils.substringBetween(sentence, "\'", 
"\'");
+               List<List<ParseTreeChunk>> groupedChunks = 
nlProc.formGroupedPhrasesFromChunksForPara(sentence);
+               if (groupedChunks.size()<1)
+                       return queryArrayStr;
+
+               List<ParseTreeChunk> nPhrases = groupedChunks.get(0);
+
+               for (ParseTreeChunk ch : nPhrases) {
+                       String query = "";
+                       int size = ch.getLemmas().size();
+                       boolean phraseBeingFormed = false;
+                       for (int i = 0; i < size; i++) {
+                               if ((ch.getPOSs().get(i).startsWith("N") || 
ch.getPOSs().get(i)
+                                               .startsWith("J") || 
ch.getPOSs().get(i).startsWith("CD") ) )
+                               //              && 
StringUtils.isAlpha(ch.getLemmas().get(i)))
+                               {
+                                       query += ch.getLemmas().get(i) + " ";
+                                       phraseBeingFormed = true;
+                               } else
+                                       if 
((ch.getPOSs().get(i).startsWith("PR") || ch.getPOSs().get(i).startsWith("IN") 
|| ch.getPOSs().get(i).startsWith("TO")  )
+                                                       && phraseBeingFormed )
+                                               break;
+                                       else if 
(ch.getPOSs().get(i).startsWith("DT") || ch.getPOSs().get(i).startsWith("CC"))
+                                       continue;
                        }
-                       if (!bAccept)
+                       query = query.trim();
+                       int len = query.split(" ").length;
+                       if (len > 5 || len < 2) // too long or too short
                                continue;
-               }
-               */
-                // individual word, possibly a frequent word
-               // if len==1 do nothing
-
-               query = query.trim();
-               queryArrayStr.add(query);
 
-               }
-               /*
-               if (queryArrayStr.size() < 1) { // release constraints on NP 
down to 2
-                                                                               
// keywords
-                       for (ParseTreeChunk ch : nPhrases) {
-                               String query = "";
-                               int size = ch.getLemmas().size();
-
-                               for (int i = 0; i < size; i++) {
-                                       if (ch.getPOSs().get(i).startsWith("N")
-                                                       || 
ch.getPOSs().get(i).startsWith("J")) {
-                                               query += ch.getLemmas().get(i) 
+ " ";
-                                       }
+                       /*
+                       if (len < 4 && len>1) { // every word should start with 
capital
+                               String[] qs = query.split(" ");
+                               boolean bAccept = true;
+                               for (String w : qs) {
+                                       if (w.toLowerCase().equals(w)) // idf 
only two words then
+                                                                               
                        // has to be person name,
+                                                                               
                        // title or geolocation
+                                               bAccept = false;
                                }
-                               query = query.trim();
-                               int len = query.split(" ").length;
-                               if (len < 2)
+                               if (!bAccept)
                                        continue;
+                       }
+                       */
+                        // individual word, possibly a frequent word
+                       // if len==1 do nothing
+
+                       query = query.trim();
+                       queryArrayStr.add(query);
 
-                               query = 
TextProcessor.fastTokenize(query.toLowerCase(), false)
-                                               .toString().replace('[', ' 
').replace(']', ' ').trim();
-                               if (query.length() > 6)
-                                       queryArrayStr.add(query);
                        }
-               }
-               //queryArrayStr = Utils
-               //              .removeDuplicatesFromQueries(queryArrayStr);
-               if (quoted1 != null
-                               && ((quoted1.length() > 5 && 
!stopList.isCommonWord(quoted1)) || quoted1
-                                               .length() > 10))
-                       queryArrayStr.add(quoted1);
-               if (quoted2 != null
-                               && ((quoted2.length() > 5 && 
!stopList.isCommonWord(quoted2)) || quoted2
-                                               .length() > 10))
-                       queryArrayStr.add(quoted2);
-               */
+                       /*
+                       if (queryArrayStr.size() < 1) { // release constraints 
on NP down to 2
+                                                                               
        // keywords
+                               for (ParseTreeChunk ch : nPhrases) {
+                                       String query = "";
+                                       int size = ch.getLemmas().size();
+
+                                       for (int i = 0; i < size; i++) {
+                                               if 
(ch.getPOSs().get(i).startsWith("N")
+                                                               || 
ch.getPOSs().get(i).startsWith("J")) {
+                                                       query += 
ch.getLemmas().get(i) + " ";
+                                               }
+                                       }
+                                       query = query.trim();
+                                       int len = query.split(" ").length;
+                                       if (len < 2)
+                                               continue;
+
+                                       query = 
TextProcessor.fastTokenize(query.toLowerCase(), false)
+                                                       
.toString().replace('[', ' ').replace(']', ' ').trim();
+                                       if (query.length() > 6)
+                                               queryArrayStr.add(query);
+                               }
+                       }
+                       //queryArrayStr = Utils
+                       //              
.removeDuplicatesFromQueries(queryArrayStr);
+                       if (quoted1 != null
+                                       && ((quoted1.length() > 5 && 
!stopList.isCommonWord(quoted1)) || quoted1
+                                                       .length() > 10))
+                               queryArrayStr.add(quoted1);
+                       if (quoted2 != null
+                                       && ((quoted2.length() > 5 && 
!stopList.isCommonWord(quoted2)) || quoted2
+                                                       .length() > 10))
+                               queryArrayStr.add(quoted2);
+                       */
                return queryArrayStr;
        }
 
diff --git 
a/opennlp-similarity/src/main/java/opennlp/tools/apps/relevanceVocabs/SynonymListFilter.java
 
b/opennlp-similarity/src/main/java/opennlp/tools/apps/relevanceVocabs/SynonymListFilter.java
index e40f384..d5fba63 100644
--- 
a/opennlp-similarity/src/main/java/opennlp/tools/apps/relevanceVocabs/SynonymListFilter.java
+++ 
b/opennlp-similarity/src/main/java/opennlp/tools/apps/relevanceVocabs/SynonymListFilter.java
@@ -48,22 +48,15 @@ public class SynonymListFilter {
                List<String> items = new ArrayList<>();
 
                StringBuilder contents = new StringBuilder();               
-               try {
-
-                       BufferedReader input =  new BufferedReader(new 
FileReader(aFile));
-                       try {
-                               String line; //not declared within while loop
-                               while (( line = input.readLine()) != null){
-                                       int endOfWord = line.indexOf(';');
-                                       if (endOfWord>2)
-                                               line = line.substring(1, 
endOfWord -1 );
+               try (BufferedReader input =  new BufferedReader(new 
FileReader(aFile))) {
+                       String line; //not declared within while loop
+                       while (( line = input.readLine()) != null){
+                               int endOfWord = line.indexOf(';');
+                               if (endOfWord>2)
+                                       line = line.substring(1, endOfWord -1 );
 
-                                       items.add(line);
+                               items.add(line);
 
-                               }
-                       }
-                       finally {
-                               input.close();
                        }
                }
                catch (IOException ex){
diff --git 
a/opennlp-similarity/src/main/java/opennlp/tools/apps/review_builder/FBOpenGraphSearchManager.java
 
b/opennlp-similarity/src/main/java/opennlp/tools/apps/review_builder/FBOpenGraphSearchManager.java
index 4a9b14e..f2a130a 100644
--- 
a/opennlp-similarity/src/main/java/opennlp/tools/apps/review_builder/FBOpenGraphSearchManager.java
+++ 
b/opennlp-similarity/src/main/java/opennlp/tools/apps/review_builder/FBOpenGraphSearchManager.java
@@ -39,14 +39,11 @@ public class FBOpenGraphSearchManager {
        protected final PageFetcher pageFetcher = new PageFetcher();
        protected static final int NUM_TRIES = 5;
        protected static final long WAIT_BTW_TRIES=1000; //milliseconds between 
re-tries
-       
-               
+
        public FBOpenGraphSearchManager(){
                profiles = 
ProfileReaderWriter.readProfiles("C:\\nc\\features\\analytics\\dealanalyzer\\sweetjack-localcoupon-may12012tooct302012.csv");
-               
        }
-       
-               
+
        public void setFacebookClient(FacebookClient c){
                this.mFBClient=c;
        }
@@ -126,9 +123,9 @@ public class FBOpenGraphSearchManager {
                String likes = StringUtils.substringBetween(content, "stats 
fwb\">", "<" );
                if (likes==null)
                        continue;
-               Integer nLikes =0;
+               int nLikes =0;
                try {
-               nLikes = Integer.parseInt(likes);
+                       nLikes = Integer.parseInt(likes);
                } catch (Exception e){
                        
                }
@@ -137,21 +134,15 @@ public class FBOpenGraphSearchManager {
                }
                
         }
-        
-        
         return null;
        }
-       
-
-    // 
     
-    public static void main(String[] args){
-       FBOpenGraphSearchManager man = new FBOpenGraphSearchManager ();
-       man.setFacebookClient(new DefaultFacebookClient());
-               
-       
-       long res = man.getFBPageLikes("chain saw");
-       System.out.println(res);
-               
-    }
+       public static void main(String[] args){
+               FBOpenGraphSearchManager man = new FBOpenGraphSearchManager ();
+               man.setFacebookClient(new DefaultFacebookClient());
+
+               long res = man.getFBPageLikes("chain saw");
+               System.out.println(res);
+
+       }
 }
diff --git 
a/opennlp-similarity/src/main/java/opennlp/tools/doc_classifier/ClassifierTrainingSetIndexer.java
 
b/opennlp-similarity/src/main/java/opennlp/tools/doc_classifier/ClassifierTrainingSetIndexer.java
index 7fcd9ce..e430177 100644
--- 
a/opennlp-similarity/src/main/java/opennlp/tools/doc_classifier/ClassifierTrainingSetIndexer.java
+++ 
b/opennlp-similarity/src/main/java/opennlp/tools/doc_classifier/ClassifierTrainingSetIndexer.java
@@ -148,30 +148,18 @@ public class ClassifierTrainingSetIndexer {
           ee.printStackTrace();
         }
       } else { // for xml files
-        try {
+        try (FileReader fr = new FileReader(f)) {
           Document doc = new Document();
 
-          String name = new String(f.getPath());
+          String name = f.getPath();
           String[] nparts = name.split("/");
           int len = nparts.length;
           name = nparts[len - 2];
 
-          FileReader fr = new FileReader(f);
           doc.add(new TextField("text", fr));
-
-          doc.add(new StringField("path", f.getPath(),
-                  Field.Store.YES));
+          doc.add(new StringField("path", f.getPath(), Field.Store.YES));
           doc.add(new StringField("class", name, Field.Store.YES));
-          try {
-
-            indexWriter.addDocument(doc);
-
-          } catch (Exception e) {
-            e.printStackTrace();
-            System.out.println("Could not add: " + f);
-          } finally {
-            fr.close();
-          }
+          indexWriter.addDocument(doc);
         } catch (Exception ee) {
           ee.printStackTrace();
         }
diff --git 
a/opennlp-similarity/src/main/java/opennlp/tools/jsmlearning/ProfileReaderWriter.java
 
b/opennlp-similarity/src/main/java/opennlp/tools/jsmlearning/ProfileReaderWriter.java
index ddf99af..c509552 100644
--- 
a/opennlp-similarity/src/main/java/opennlp/tools/jsmlearning/ProfileReaderWriter.java
+++ 
b/opennlp-similarity/src/main/java/opennlp/tools/jsmlearning/ProfileReaderWriter.java
@@ -58,35 +58,18 @@ public class ProfileReaderWriter {
        }
 
        public static void writeReport( List<String[]> allLines, String 
reportName){
-               CSVWriter writer = null;
-               try {   
-                       writer = new CSVWriter(new PrintWriter(reportName));    
                
-               } catch (FileNotFoundException e) {
-                       e.printStackTrace();
-               }               
-               writer.writeAll(allLines);
-
-               try {
+               try (CSVWriter writer = new CSVWriter(new 
PrintWriter(reportName))) {
+                       writer.writeAll(allLines);
                        writer.flush();
-                       writer.close();
                } catch (IOException e) {
                        e.printStackTrace();
                }
        }
 
        public static void writeReport( List<String[]> allLines, String 
reportName, char delimiter){
-               CSVWriter writer = null;
-               try {   
-                       writer = new CSVWriter(new PrintWriter(reportName), 
delimiter, delimiter, delimiter);                   
-               } catch (FileNotFoundException e) {
-                       e.printStackTrace();
-               }       
-
-               writer.writeAll(allLines);
-
-               try {
+               try (CSVWriter writer = new CSVWriter(new 
PrintWriter(reportName), delimiter, delimiter, delimiter)) {
+                       writer.writeAll(allLines);
                        writer.flush();
-                       writer.close();
                } catch (IOException e) {
                        e.printStackTrace();
                }
@@ -101,18 +84,9 @@ public class ProfileReaderWriter {
                        System.out.println("Creating file "+reportName);
                }
                
-               CSVWriter writer = null;
-               try {   
-                       writer = new CSVWriter(new PrintWriter(reportName), 
delimiter, delimiter, delimiter);                   
-               } catch (FileNotFoundException e) {
-                       e.printStackTrace();
-               }       
-
-               writer.writeAll(allLines);
-
-               try {
+               try (CSVWriter writer = new CSVWriter(new 
PrintWriter(reportName), delimiter, delimiter, delimiter)) {
+                       writer.writeAll(allLines);
                        writer.flush();
-                       writer.close();
                } catch (IOException e) {
                        e.printStackTrace();
                }
@@ -126,18 +100,9 @@ public class ProfileReaderWriter {
                        System.out.println("Creating file "+reportName);
                }
                
-               CSVWriter writer = null;
-               try {   
-                       writer = new CSVWriter(new PrintWriter(reportName));    
                
-               } catch (FileNotFoundException e) {
-                       e.printStackTrace();
-               }       
-
-               writer.writeAll(allLines);
-
-               try {
+               try (CSVWriter writer = new CSVWriter(new 
PrintWriter(reportName))) {
+                       writer.writeAll(allLines);
                        writer.flush();
-                       writer.close();
                } catch (IOException e) {
                        e.printStackTrace();
                }
@@ -151,8 +116,6 @@ public class ProfileReaderWriter {
                List<String[]> allLines = new ArrayList<>();
                allLines.add(new String[] {"aa " , "  bb", "ccc" });
                ProfileReaderWriter.writeReport( allLines, "reportName.txt", ' 
');
-
        }
 
-
 }
\ No newline at end of file
diff --git 
a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/pattern_structure/PatternStructureWriter.java
 
b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/pattern_structure/PatternStructureWriter.java
index 548c740..28cdf77 100755
--- 
a/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/pattern_structure/PatternStructureWriter.java
+++ 
b/opennlp-similarity/src/main/java/opennlp/tools/parse_thicket/pattern_structure/PatternStructureWriter.java
@@ -28,28 +28,18 @@ public class PatternStructureWriter {
        public void WriteStatsToTxt(String filename, PhrasePatternStructure ps){
                        
                String formatStr = "[%5.2f; %5.2f]  %s   %s%n";
-               Writer writer = null;
-
-               try {
-                   writer = new BufferedWriter(new OutputStreamWriter(
-                         new FileOutputStream(filename), 
StandardCharsets.UTF_8));
+               try (Writer writer = new BufferedWriter(new OutputStreamWriter(
+                                               new FileOutputStream(filename), 
StandardCharsets.UTF_8))) {
                    writer.write("PatternStructure size: " + 
ps.conceptList.size()+ " with " + ps.objectCount + "objects\n");
                    
                    for (PhraseConcept c : ps.conceptList){
                        
writer.write(String.format(formatStr,c.intLogStabilityBottom, 
c.intLogStabilityUp, c.extent, c.intent));
                    }
-                   writer.close();
-                   
+
                } catch (IOException ex) {
                        System.err.println(ex.getMessage());
-               } finally {
-                  try {writer.close();} catch (Exception ex) {}
                }
        }
        
-
-               public static void main(String[] args) {
-                       
-               }
 }
                        
\ No newline at end of file
diff --git 
a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/IterativeSearchRequestHandler.java
 
b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/IterativeSearchRequestHandler.java
index b1ece30..6391d25 100644
--- 
a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/IterativeSearchRequestHandler.java
+++ 
b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/IterativeSearchRequestHandler.java
@@ -174,10 +174,10 @@ public class IterativeSearchRequestHandler extends 
SearchHandler {
                                float syntMatchScore =  
Double.valueOf(parseTreeChunkListScorer.getParseTreeChunkListScore(matchResult.getMatchResult())).floatValue();
                                bestMatchesDocIds.add(docId);
                                bestMatchesScore.add(syntMatchScore);
-                               syntMatchScoreArr[i] = (float)syntMatchScore; 
//*iter.score();
+                               syntMatchScoreArr[i] = syntMatchScore; 
//*iter.score();
                                System.out.println(" Matched query = 
'"+requestExpression + "' with answer = '"+answerText +"' | doc_id = '"+docId);
                                System.out.println(" Match result = 
'"+matchResult.getMatchResult() + "' with score = '"+syntMatchScore +"';" );
-                               docIdsScores.add(new Pair(docId, 
syntMatchScore));
+                               docIdsScores.add(new Pair<>(docId, 
syntMatchScore));
                        }
 
                } catch (CorruptIndexException e1) {
diff --git 
a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/WordDocBuilder.java
 
b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/WordDocBuilder.java
index d781f9e..461dcd0 100644
--- 
a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/WordDocBuilder.java
+++ 
b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/solr/WordDocBuilder.java
@@ -182,31 +182,28 @@ public class WordDocBuilder{
      * @throws FileNotFoundException
      * @throws IOException
      */
-    protected static byte[] convertImageToByteArray(File file)
-            throws FileNotFoundException, IOException {
-        InputStream is = new FileInputStream(file );
-        long length = file.length();
-        // You cannot create an array using a long, it needs to be an int.
-        if (length > Integer.MAX_VALUE) {
-            System.out.println("File too large!!");
-        }
-        byte[] bytes = new byte[(int)length];
-        int offset = 0;
-        int numRead;
-        while (offset < bytes.length && (numRead=is.read(bytes, offset, 
bytes.length-offset)) >= 0) {
-            offset += numRead;
-        }
-        // Ensure all the bytes have been read
-        if (offset < bytes.length) {
-            System.out.println("Could not completely read file "
-                        +file.getName());
-        }
-        is.close();
-        return bytes;
+    protected static byte[] convertImageToByteArray(File file) throws 
FileNotFoundException, IOException {
+        try (InputStream is = new FileInputStream(file)) {
+                                       long length = file.length();
+                                       // You cannot create an array using a 
long, it needs to be an int.
+                                       if (length > Integer.MAX_VALUE) {
+                                                       
System.out.println("File too large!!");
+                                       }
+                                       byte[] bytes = new byte[(int)length];
+                                       int offset = 0;
+                                       int numRead;
+                                       while (offset < bytes.length && 
(numRead=is.read(bytes, offset, bytes.length-offset)) >= 0) {
+                                                       offset += numRead;
+                                       }
+                                       // Ensure all the bytes have been read
+                                       if (offset < bytes.length) {
+                                                       
System.out.println("Could not completely read file "
+                                                                               
                        +file.getName());
+                                       }
+                                       return bytes;
+                               }
     }
-    
-    
-    
+
     public static void saveImageFromTheWeb(String imageUrl, String 
destinationFile) {
                        File f = new File(destinationFile);
                        if (!f.exists()) {
diff --git 
a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/taxo_builder/TaxonomySerializer.java
 
b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/taxo_builder/TaxonomySerializer.java
index 445b62c..22f63c9 100644
--- 
a/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/taxo_builder/TaxonomySerializer.java
+++ 
b/opennlp-similarity/src/main/java/opennlp/tools/similarity/apps/taxo_builder/TaxonomySerializer.java
@@ -23,7 +23,6 @@ import java.io.ObjectInputStream;
 import java.io.ObjectOutputStream;
 import java.io.Serializable;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -38,92 +37,60 @@ import opennlp.tools.jsmlearning.ProfileReaderWriter;
 public class TaxonomySerializer implements Serializable {
 
   private static final long serialVersionUID = 7431412616514648388L;
-  private Map<String, List<List<String>>> lemma_ExtendedAssocWords = new 
HashMap<>();
-  private Map<List<String>, List<List<String>>> assocWords_ExtendedAssocWords 
= new HashMap<>();
+  private final Map<String, List<List<String>>> lemma_ExtendedAssocWords;
+  private final Map<List<String>, List<List<String>>> 
assocWords_ExtendedAssocWords;
 
-  public TaxonomySerializer(
-      Map<String, List<List<String>>> lemma_ExtendedAssocWords,
+  public TaxonomySerializer(Map<String, List<List<String>>> 
lemma_ExtendedAssocWords,
       Map<List<String>, List<List<String>>> assocWords_ExtendedAssocWords) {
 
     this.lemma_ExtendedAssocWords = lemma_ExtendedAssocWords;
     this.assocWords_ExtendedAssocWords = assocWords_ExtendedAssocWords;
   }
 
-  public TaxonomySerializer() {
-  }
-
-  public Map<List<String>, List<List<String>>> 
getAssocWords_ExtendedAssocWords() {
-    return assocWords_ExtendedAssocWords;
-  }
-
   public Map<String, List<List<String>>> getLemma_ExtendedAssocWords() {
     return lemma_ExtendedAssocWords;
   }
 
-  public void setLemma_ExtendedAssocWords(
-      Map<String, List<List<String>>> lemma_ExtendedAssocWords) {
-    this.lemma_ExtendedAssocWords = lemma_ExtendedAssocWords;
-  }
-
-  public void setAssocWords_ExtendedAssocWords(
-      Map<List<String>, List<List<String>>> assocWords_ExtendedAssocWords) {
-    this.assocWords_ExtendedAssocWords = assocWords_ExtendedAssocWords;
-  }
-
   public void writeTaxonomy(String filename) {
-    FileOutputStream fos;
-    ObjectOutputStream out;
-    try {
-      fos = new FileOutputStream(filename);
-      out = new ObjectOutputStream(fos);
+    try (ObjectOutputStream out = new ObjectOutputStream(new 
FileOutputStream(filename))) {
       out.writeObject(this);
-      out.close();
     } catch (IOException ex) {
       ex.printStackTrace();
     }
 
-     String csvFilename = filename+".csv";
-     List<String[]> taxo_list = new ArrayList<>();
-     List<String> entries = new ArrayList<>(lemma_ExtendedAssocWords.keySet());
-     for(String e: entries){
-        List<String> lines = new ArrayList<>();
-        lines.add(e);
-        for(List<String> ls: lemma_ExtendedAssocWords.get(e)){
-                lines.add(ls.toString());
-        }
-        taxo_list.add((String[])lines.toArray(new String[0]));
+    String csvFilename = filename+".csv";
+    List<String[]> taxo_list = new ArrayList<>();
+    List<String> entries = new ArrayList<>(lemma_ExtendedAssocWords.keySet());
+    for(String e: entries){
+     List<String> lines = new ArrayList<>();
+     lines.add(e);
+     for(List<String> ls: lemma_ExtendedAssocWords.get(e)){
+       lines.add(ls.toString());
      }
-     ProfileReaderWriter.writeReport(taxo_list, csvFilename);
-     
-     String csvFilenameListEntries = filename+"_ListEntries.csv";
-     taxo_list = new ArrayList<>();
-     List<List<String>> entriesList = new 
ArrayList<>(assocWords_ExtendedAssocWords.keySet());
-     for(List<String> e: entriesList){
-       List<String> lines = new ArrayList<>(e);
-        for(List<String> ls: assocWords_ExtendedAssocWords.get(e)){
-                lines.add(ls.toString());
-        }
-        taxo_list.add((String[])lines.toArray(new String[0]));
+     taxo_list.add(lines.toArray(new String[0]));
+    }
+    ProfileReaderWriter.writeReport(taxo_list, csvFilename);
+
+    String csvFilenameListEntries = filename+"_ListEntries.csv";
+    taxo_list = new ArrayList<>();
+    List<List<String>> entriesList = new 
ArrayList<>(assocWords_ExtendedAssocWords.keySet());
+    for(List<String> e: entriesList){
+     List<String> lines = new ArrayList<>(e);
+     for(List<String> ls: assocWords_ExtendedAssocWords.get(e)){
+       lines.add(ls.toString());
      }
-     ProfileReaderWriter.writeReport(taxo_list, csvFilenameListEntries);
+     taxo_list.add(lines.toArray(new String[0]));
+    }
+    ProfileReaderWriter.writeReport(taxo_list, csvFilenameListEntries);
   }
 
   public static TaxonomySerializer readTaxonomy(String filename) {
     TaxonomySerializer data = null;
-    FileInputStream fis;
-    ObjectInputStream in;
-    try {
-      fis = new FileInputStream(filename);
-      in = new ObjectInputStream(fis);
+    try (ObjectInputStream in = new ObjectInputStream(new 
FileInputStream(filename))) {
       data = (TaxonomySerializer) in.readObject();
-      in.close();
     } catch (IOException | ClassNotFoundException ex) {
       ex.printStackTrace();
     }
-
-    // System.out.print(data.lemma_ExtendedAssocWords);
-
     return data;
-
   }
 }
diff --git 
a/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/chunker2matcher/ParserCacheSerializer.java
 
b/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/chunker2matcher/ParserCacheSerializer.java
index f1b4ea2..b7e4611 100644
--- 
a/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/chunker2matcher/ParserCacheSerializer.java
+++ 
b/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/chunker2matcher/ParserCacheSerializer.java
@@ -72,8 +72,8 @@ public class ParserCacheSerializer {
 
       Map<String, String[][]> sentence_parseObject = (Map<String, String[][]>) 
objectToSerialize;
       List<String> keys = new ArrayList<>(sentence_parseObject.keySet());
-      try (CSVWriter writer = new CSVWriter(
-              new FileWriter(RESOURCE_DIR + PARSE_CACHE_FILE_NAME_CSV, 
false))) {
+      try (CSVWriter writer = new CSVWriter(new FileWriter(
+              RESOURCE_DIR + PARSE_CACHE_FILE_NAME_CSV, false))) {
         for (String k : keys) {
           String[][] triplet = sentence_parseObject.get(k);
           writer.writeNext(new String[] { k });
diff --git 
a/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/chunker2matcher/ParserChunker2MatcherProcessor.java
 
b/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/chunker2matcher/ParserChunker2MatcherProcessor.java
index e2bb275..3dc6b30 100644
--- 
a/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/chunker2matcher/ParserChunker2MatcherProcessor.java
+++ 
b/opennlp-similarity/src/main/java/opennlp/tools/textsimilarity/chunker2matcher/ParserChunker2MatcherProcessor.java
@@ -581,25 +581,12 @@ public class ParserChunker2MatcherProcessor {
   }
 
   protected void initializeSentenceDetector() {
-    InputStream is = null;
-    try {
-      is = new FileInputStream(MODEL_DIR + "/en-sent.bin"
-
-      );
+    try (InputStream is = new FileInputStream(MODEL_DIR + "/en-sent.bin")) {
       SentenceModel model = new SentenceModel(is);
       sentenceDetector = new SentenceDetectorME(model);
     } catch (IOException e) {
       e.printStackTrace();
-    } finally {
-      if (is != null) {
-        try {
-          is.close();
-        } catch (IOException e) {
-           // we swallow exception to support the cached run
-               e.printStackTrace();
-        }
-      }
-    }
+    } 
   }
 
   protected void initializeTokenizer() {
diff --git 
a/opennlp-wsd/src/main/java/opennlp/tools/cmdline/disambiguator/DisambiguatorEvaluatorTool.java
 
b/opennlp-wsd/src/main/java/opennlp/tools/cmdline/disambiguator/DisambiguatorEvaluatorTool.java
index 5b6451e..453db86 100644
--- 
a/opennlp-wsd/src/main/java/opennlp/tools/cmdline/disambiguator/DisambiguatorEvaluatorTool.java
+++ 
b/opennlp-wsd/src/main/java/opennlp/tools/cmdline/disambiguator/DisambiguatorEvaluatorTool.java
@@ -19,7 +19,6 @@ package opennlp.tools.cmdline.disambiguator;
 
 import java.io.File;
 import java.io.IOException;
-import java.nio.charset.Charset;
 
 import opennlp.tools.cmdline.ArgumentParser;
 import opennlp.tools.cmdline.CLI;
@@ -62,35 +61,22 @@ public final class DisambiguatorEvaluatorTool extends 
CmdLineTool {
     File testData = params.getData();
     CmdLineUtil.checkInputFile("Test data", testData);
 
-    Charset encoding = params.getEncoding();
-
     WSDisambiguator disambiguator = DisambiguatorTool.makeTool(params);
-
     WSDEvaluator evaluator = new WSDEvaluator(disambiguator);
 
     System.out.print("Evaluating ... ");
 
-    ObjectStream<WSDSample> sampleStream = DisambiguatorTool.openSampleData(
-        "Test", testData, encoding);
-
-    try {
-      evaluator.evaluate(sampleStream);
-    } catch (IOException e) {
-      System.err.println("failed");
-      System.err.println("Reading test data error " + e.getMessage());
-      throw new TerminateToolException(-1);
-    } finally {
-      try {
-        sampleStream.close();
+    try (ObjectStream<WSDSample> sampleStream = 
DisambiguatorTool.openSampleData(
+            "Test", testData, params.getEncoding())) {
+        evaluator.evaluate(sampleStream);
       } catch (IOException e) {
-        // sorry that this can fail
-      }
+        System.err.println("failed");
+        System.err.println("Reading test data error " + e.getMessage());
+        throw new TerminateToolException(-1);
     }
 
     System.out.println("done");
-
     System.out.println();
-
     System.out.println("Accuracy: " + evaluator.getAccuracy());
   }
 }
\ No newline at end of file
diff --git 
a/opennlp-wsd/src/main/java/opennlp/tools/disambiguator/WSDEvaluator.java 
b/opennlp-wsd/src/main/java/opennlp/tools/disambiguator/WSDEvaluator.java
index 13d1e67..eeab5c1 100644
--- a/opennlp-wsd/src/main/java/opennlp/tools/disambiguator/WSDEvaluator.java
+++ b/opennlp-wsd/src/main/java/opennlp/tools/disambiguator/WSDEvaluator.java
@@ -22,8 +22,7 @@ import opennlp.tools.util.eval.Mean;
 
 /**
  * The {@link WSDEvaluator} measures the performance of the given
- * {@link WSDisambiguator} with the provided reference
- * {@link WordToDisambiguate}.
+ * {@link WSDisambiguator} with the provided reference {@code 
WordToDisambiguate}.
  *
  * @see Evaluator
  * @see WSDisambiguator
diff --git 
a/opennlp-wsd/src/main/java/opennlp/tools/disambiguator/WSDModel.java 
b/opennlp-wsd/src/main/java/opennlp/tools/disambiguator/WSDModel.java
index 90afbbf..dbb0a71 100644
--- a/opennlp-wsd/src/main/java/opennlp/tools/disambiguator/WSDModel.java
+++ b/opennlp-wsd/src/main/java/opennlp/tools/disambiguator/WSDModel.java
@@ -38,6 +38,8 @@ import opennlp.tools.util.model.BaseModel;
 // TODO unify both supervised models
 public class WSDModel extends BaseModel {
 
+  private static final long serialVersionUID = 8597537955427934846L;
+  
   private static final String COMPONENT_NAME = "WSD";
   private static final String WSD_MODEL_ENTRY_NAME = "WSD.model";
 
diff --git 
a/opennlp-wsd/src/main/java/opennlp/tools/disambiguator/WSDSample.java 
b/opennlp-wsd/src/main/java/opennlp/tools/disambiguator/WSDSample.java
index cb9365e..66d613e 100644
--- a/opennlp-wsd/src/main/java/opennlp/tools/disambiguator/WSDSample.java
+++ b/opennlp-wsd/src/main/java/opennlp/tools/disambiguator/WSDSample.java
@@ -19,6 +19,7 @@ package opennlp.tools.disambiguator;
 
 import java.util.Arrays;
 import java.util.List;
+import java.util.Objects;
 
 import net.sf.extjwnl.JWNLException;
 import net.sf.extjwnl.data.POS;
@@ -188,6 +189,14 @@ public class WSDSample {
     }
   }
 
+  @Override
+  public int hashCode() {
+    int result = Objects.hash(targetPosition);
+    result = 31 * result + Arrays.hashCode(sentence);
+    result = 31 * result + Arrays.hashCode(tags);
+    return result;
+  }
+
   // Return the synsets (thus the senses) of the current target word
   public List<Synset> getSynsets() {
     try {
diff --git a/summarizer/src/main/java/opennlp/summarization/Sentence.java 
b/summarizer/src/main/java/opennlp/summarization/Sentence.java
index 312e6d4..fad8cf1 100755
--- a/summarizer/src/main/java/opennlp/summarization/Sentence.java
+++ b/summarizer/src/main/java/opennlp/summarization/Sentence.java
@@ -21,6 +21,7 @@ import java.text.BreakIterator;
 import java.util.List;
 import java.util.ArrayList;
 import java.util.Locale;
+import java.util.Objects;
 
 import opennlp.summarization.preprocess.PorterStemmer;
 import opennlp.summarization.preprocess.StopWords;
@@ -28,7 +29,8 @@ import opennlp.summarization.preprocess.StopWords;
 /**
  * A representation of a sentence geared toward pagerank and summarization.
  */
-public class Sentence {        
+public class Sentence {
+
        //sentId is always position of sentence in doc.
        private int sentId;
        private String stringVal;
@@ -132,12 +134,18 @@ public class Sentence {
                return wordCnt==0? this.getStringVal().split(" ").length: 
wordCnt;
        }
 
-       //Should add an article id to the sentence class. For now returns true 
if the ids are the same.
+       // Should add an article id to the sentence class. For now returns true 
if the ids are the same.
+       @Override
+       public boolean equals(Object o) {
+               if (this == o) return true;
+               if (o == null || getClass() != o.getClass()) return false;
+               Sentence sentence = (Sentence) o;
+               return sentId == sentence.sentId;
+       }
+
        @Override
-       public boolean equals(Object o){
-               if(! (o instanceof Sentence)) return false;
-               Sentence s = (Sentence)o;
-               return s.sentId == this.sentId;
+       public int hashCode() {
+               return Objects.hash(sentId);
        }
 
        private static final String SPACE = " ";
diff --git 
a/summarizer/src/main/java/opennlp/summarization/lexicalchaining/LexicalChain.java
 
b/summarizer/src/main/java/opennlp/summarization/lexicalchaining/LexicalChain.java
index f6f3595..86e5ba8 100644
--- 
a/summarizer/src/main/java/opennlp/summarization/lexicalchaining/LexicalChain.java
+++ 
b/summarizer/src/main/java/opennlp/summarization/lexicalchaining/LexicalChain.java
@@ -19,6 +19,7 @@ package opennlp.summarization.lexicalchaining;
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Objects;
 
 import opennlp.summarization.Sentence;
 
@@ -73,11 +74,19 @@ public class LexicalChain implements 
Comparable<LexicalChain>{
   @Override
   public int compareTo(LexicalChain o) {
     double diff = (score() - o.score());
-    return diff ==0? 0: diff > 0 ?1:-1;
+    return diff == 0 ? 0: diff > 0 ? 1:-1;
   }
 
   @Override
-  public boolean equals(Object o){
-    return super.equals(o);
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    LexicalChain that = (LexicalChain) o;
+    return start == that.start && last == that.last && score == that.score && 
occurrences == that.occurrences;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(start, last, score, occurrences);
   }
 }
diff --git 
a/wikinews-importer/src/main/java/org/apache/opennlp/wikinews_importer/AnnotatingMarkupParser.java
 
b/wikinews-importer/src/main/java/org/apache/opennlp/wikinews_importer/AnnotatingMarkupParser.java
index cb39418..90f1721 100644
--- 
a/wikinews-importer/src/main/java/org/apache/opennlp/wikinews_importer/AnnotatingMarkupParser.java
+++ 
b/wikinews-importer/src/main/java/org/apache/opennlp/wikinews_importer/AnnotatingMarkupParser.java
@@ -122,10 +122,8 @@ public class AnnotatingMarkupParser implements 
ITextConverter {
                     if (node instanceof WPATag) {
                         // extract wikilink annotations
                         WPATag tag = (WPATag) node;
-                        String wikilinkLabel = (String) 
tag.getAttributes().get(
-                                WIKILINK_TITLE_ATTR_KEY);
-                        String wikilinkTarget = (String) 
tag.getAttributes().get(
-                                WIKILINK_TARGET_ATTR_KEY);
+                        String wikilinkLabel = 
tag.getAttributes().get(WIKILINK_TITLE_ATTR_KEY);
+                        String wikilinkTarget = 
tag.getAttributes().get(WIKILINK_TARGET_ATTR_KEY);
                         if (wikilinkLabel != null) {
                             int colonIdx = -1; // wikilinkLabel.indexOf(':');
                             if (colonIdx == -1) {

Reply via email to