java.util.HashSet → edu.stanford.nlp.util.Generics



Project stanfordnlp/CoreNLP in file ...rc.edu.stanford.nlp.graph.ConnectedComponents.java (2013-08-19)
@@ -1,13 +1,13 @@
 package edu.stanford.nlp.graph;
 
 import java.util.ArrayList;
-import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
 import edu.stanford.nlp.util.CollectionUtils;
+import edu.stanford.nlp.util.Generics;
 
 /**
  * Finds connected components in the graph, currently uses inefficient list for
@@ -31,7 +31,7 @@ public class ConnectedComponents<V, E> {
   }
 
   private static <V, E> Set<V> bfs(LinkedList<V> todo, DirectedMultiGraph<V, E> graph, List<V> verticesLeft) {
-    Set<V> cc = new HashSet<V>();
+    Set<V> cc = Generics.newHashSet();
     while (todo.size() > 0) {
       V node = todo.removeFirst();
       cc.add(node);
Project stanfordnlp/CoreNLP in file ...66/src.edu.stanford.nlp.graph.NetworkFeatures.java (2013-08-19)
@@ -1,11 +1,11 @@
 package edu.stanford.nlp.graph;
 
-import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.Queue;
 import java.util.Set;
 
 import edu.stanford.nlp.util.CollectionUtils;
+import edu.stanford.nlp.util.Generics;
 
 /**
  * get some network statistics
@@ -29,7 +29,7 @@ public class NetworkFeatures {
   };
 
   public static <V, E> Set<ThreeNodeStat> find3NodeStats(Graph<V, E> graph, V a, V b, V c) {
-    Set<ThreeNodeStat> stats = new HashSet<ThreeNodeStat>();
+    Set<ThreeNodeStat> stats = Generics.newHashSet();
 
     if (graph.isEdge(a, b)) {
       stats.add(ThreeNodeStat.GENEROSITY);
Project stanfordnlp/CoreNLP in file ...ading.domains.ace.reader.AceSentenceSegmenter.java (2013-08-19)
@@ -3,7 +3,6 @@ package edu.stanford.nlp.ie.machinereading.domains.ace.reader;
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
@@ -15,12 +14,13 @@ import edu.stanford.nlp.ie.machinereading.common.DomReader;
 import edu.stanford.nlp.ie.machinereading.domains.ace.reader.RobustTokenizer.WordToken;
 import edu.stanford.nlp.io.IOUtils;
 import edu.stanford.nlp.ling.Word;
+import edu.stanford.nlp.util.Generics;
 
 public class AceSentenceSegmenter extends DomReader {
   // list of tokens which mark sentence boundaries
   private final static String[] sentenceFinalPunc = new String[] { ".", "!",
       "?" };
-  private static Set<String> sentenceFinalPuncSet = new HashSet<String>();
+  private static Set<String> sentenceFinalPuncSet = Generics.newHashSet();
 
   static {
     // set up sentenceFinalPuncSet
Project stanfordnlp/CoreNLP in file ...inereading.domains.ace.reader.RobustTokenizer.java (2013-08-19)
@@ -9,7 +9,6 @@ package edu.stanford.nlp.ie.machinereading.domains.ace.reader;
 
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
@@ -20,6 +19,7 @@ import java.io.BufferedReader;
 
 import edu.stanford.nlp.ling.Word;
 import edu.stanford.nlp.process.AbstractTokenizer;
+import edu.stanford.nlp.util.Generics;
 
 public class RobustTokenizer<T extends Word> extends AbstractTokenizer<Word> {
   
@@ -598,7 +598,7 @@ public class RobustTokenizer<T extends Word> extends AbstractTokenizer<Word> {
 
     /** Creates a new instance of AbreviationMap with some know abbreviations */
     public AbbreviationMap(boolean caseInsensitive)  {
-      mAbbrevSet = new HashSet<String>(normalizeCase(caseInsensitive, Arrays.asList(new String[]{
+      mAbbrevSet = Generics.newHashSet(normalizeCase(caseInsensitive, Arrays.asList(new String[]{
           "1.",
           "10.",
           "11.",
Project stanfordnlp/CoreNLP in file ...tional.arabic.pipeline.MWETreeVisitorExternal.java (2013-08-19)
@@ -7,7 +7,6 @@ import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.UnsupportedEncodingException;
 import java.util.ArrayList;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
@@ -17,6 +16,7 @@ import edu.stanford.nlp.trees.TreeReader;
 import edu.stanford.nlp.trees.TreeReaderFactory;
 import edu.stanford.nlp.trees.TreeVisitor;
 import edu.stanford.nlp.trees.international.arabic.ArabicTreeReaderFactory;
+import edu.stanford.nlp.util.Generics;
 
 /**
  * Converts all contiguous MWEs listed in an MWE list to flattened trees.
@@ -35,7 +35,7 @@ public class MWETreeVisitorExternal implements TreeVisitor {
   }
   
   private Set<String> loadMWEs() {
-    Set<String> mweSet = new HashSet<String>();  
+    Set<String> mweSet = Generics.newHashSet();  
     try {
       BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(mweFile), "UTF-8"));
       for (String line; (line = br.readLine()) != null;) {