edu.stanford.nlp.util.Generics → java.util.HashSet



Project stanfordnlp/CoreNLP in file ...ading.domains.ace.reader.AceSentenceSegmenter.java (2015-03-14)
@@ -3,6 +3,7 @@ package edu.stanford.nlp.ie.machinereading.domains.ace.reader;
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
@@ -14,13 +15,12 @@ import edu.stanford.nlp.ie.machinereading.common.DomReader;
 import edu.stanford.nlp.ie.machinereading.domains.ace.reader.RobustTokenizer.WordToken;
 import edu.stanford.nlp.io.IOUtils;
 import edu.stanford.nlp.ling.Word;
-import edu.stanford.nlp.util.Generics;
 
 public class AceSentenceSegmenter extends DomReader {
   // list of tokens which mark sentence boundaries
   private final static String[] sentenceFinalPunc = new String[] { ".", "!",
       "?" };
-  private static Set<String> sentenceFinalPuncSet = Generics.newHashSet();
+  private static Set<String> sentenceFinalPuncSet = new HashSet<String>();
 
   static {
     // set up sentenceFinalPuncSet
Project stanfordnlp/CoreNLP in file ...inereading.domains.ace.reader.RobustTokenizer.java (2015-03-14)
@@ -9,6 +9,7 @@ package edu.stanford.nlp.ie.machinereading.domains.ace.reader;
 
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
@@ -19,7 +20,6 @@ import java.io.BufferedReader;
 
 import edu.stanford.nlp.ling.Word;
 import edu.stanford.nlp.process.AbstractTokenizer;
-import edu.stanford.nlp.util.Generics;
 
 public class RobustTokenizer<T extends Word> extends AbstractTokenizer<Word> {
   
@@ -598,7 +598,7 @@ public class RobustTokenizer<T extends Word> extends AbstractTokenizer<Word> {
 
     /** Creates a new instance of AbreviationMap with some know abbreviations */
     public AbbreviationMap(boolean caseInsensitive)  {
-      mAbbrevSet = Generics.newHashSet(normalizeCase(caseInsensitive, Arrays.asList(new String[]{
+      mAbbrevSet = new HashSet<String>(normalizeCase(caseInsensitive, Arrays.asList(new String[]{
           "1.",
           "10.",
           "11.",
Project stanfordnlp/CoreNLP in file ...tional.arabic.pipeline.MWETreeVisitorExternal.java (2015-03-14)
@@ -7,6 +7,7 @@ import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.UnsupportedEncodingException;
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
@@ -16,7 +17,6 @@ import edu.stanford.nlp.trees.TreeReader;
 import edu.stanford.nlp.trees.TreeReaderFactory;
 import edu.stanford.nlp.trees.TreeVisitor;
 import edu.stanford.nlp.trees.international.arabic.ArabicTreeReaderFactory;
-import edu.stanford.nlp.util.Generics;
 
 /**
  * Converts all contiguous MWEs listed in an MWE list to flattened trees.
@@ -35,7 +35,7 @@ public class MWETreeVisitorExternal implements TreeVisitor {
   }
   
   private Set<String> loadMWEs() {
-    Set<String> mweSet = Generics.newHashSet();  
+    Set<String> mweSet = new HashSet<String>();  
     try {
       BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(mweFile), "UTF-8"));
       for (String line; (line = br.readLine()) != null;) {
Project stanfordnlp/CoreNLP in file ...nternational.morph.MorphoFeatureSpecification.java (2015-03-14)
@@ -2,11 +2,11 @@ package edu.stanford.nlp.international.morph;
 
 import java.io.Serializable;
 
+import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 import java.util.regex.Pattern;
 
-import edu.stanford.nlp.util.Generics;
 import edu.stanford.nlp.util.Pair;
 
 /**
@@ -37,7 +37,7 @@ public abstract class MorphoFeatureSpecification implements Serializable {
   protected final Set<MorphoFeatureType> activeFeatures;
   
   public MorphoFeatureSpecification() {
-    activeFeatures = Generics.newHashSet();
+    activeFeatures = new HashSet<MorphoFeatureType>();
   }
   
   public void activate(MorphoFeatureType feat) {
Project stanfordnlp/CoreNLP in file ...c.edu.stanford.nlp.process.StripTagsProcessor.java (2015-03-14)
@@ -3,13 +3,13 @@ package edu.stanford.nlp.process;
 
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
 import edu.stanford.nlp.ling.BasicDocument;
 import edu.stanford.nlp.ling.Document;
 import edu.stanford.nlp.ling.Word;
-import edu.stanford.nlp.util.Generics;
 
 /**
  * A <code>Processor</code> whose <code>process</code> method deletes all
@@ -26,7 +26,7 @@ import edu.stanford.nlp.util.Generics;
  */
 public class StripTagsProcessor<L, F> extends AbstractListProcessor<Word, Word, L, F> {
 
-  private static final Set<String> BLOCKTAGS = Generics.newHashSet(Arrays.asList(
+  private static final HashSet<String> BLOCKTAGS = new HashSet<String>(Arrays.asList(
           "blockquote", "br", "div", "h1", "h2", "h3", "h4", "h5", "h6", "hr", "li", "ol", "p", "pre", "table", "tr", "ul"));
 
   /**