org.apache.lucene.analysis.TokenStream



API Popularity

15 Client projects

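org.apache.lucene.analysis.TokenStream is the abstract base of Lucene's analysis chain: a Tokenizer produces a stream of tokens from input text, TokenFilters transform it, and consumers read token data through per-stream attributes. Before the client examples below, here is a minimal sketch of the standard consume loop following the reset()/incrementToken()/end()/close() contract (the "body" field name and sample text are placeholders; the no-arg StandardAnalyzer constructor and the String overload of tokenStream() assume Lucene 5.x or later):

import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;

public class ConsumeTokenStream {
  public static void main(String[] args) throws IOException {
    Analyzer analyzer = new StandardAnalyzer();
    // The contract every consumer must follow:
    // reset() -> incrementToken() until false -> end() -> close().
    try (TokenStream ts = analyzer.tokenStream("body", "1 2 3 4 5")) {
      CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
      OffsetAttribute offsets = ts.addAttribute(OffsetAttribute.class);
      ts.reset();
      while (ts.incrementToken()) {
        System.out.println(term + " [" + offsets.startOffset() + "," + offsets.endOffset() + "]");
      }
      ts.end(); // records the final offset, the value asserted in the tests below
    }
  }
}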

Project apache/lucene-solr in file ...eryparser.classic.TestMultiPhraseQueryParsing.java (2012-06-28)
@@ -18,7 +18,6 @@ package org.apache.lucene.queryparser.classic;
  */
 
 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
@@ -67,7 +66,7 @@ public class TestMultiPhraseQueryParsing extends LuceneTestCase {
     }
 
     @Override
-    public final boolean incrementToken() throws IOException {
+    public final boolean incrementToken() {
       clearAttributes();
       if (upto < tokens.length) {
         final TokenAndPos token = tokens[upto++];
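Two things change in this diff: the first hunk removes the TokenStream import, presumably because the test no longer references the type by name at that level, and the second drops throws IOException from incrementToken(), which Java permits in an override when the body, reading canned tokens from an in-memory array, can no longer throw one. A self-contained sketch of such a canned stream (CannedTokenStream and the TokenAndPos helper are hypothetical names modeled on the diff, not the actual test code):

import java.io.IOException;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;

final class CannedTokenStream extends TokenStream {
  // Hypothetical pair mirroring the test's TokenAndPos.
  static final class TokenAndPos {
    final String token;
    final int pos;
    TokenAndPos(String token, int pos) { this.token = token; this.pos = pos; }
  }

  private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
  private final PositionIncrementAttribute posIncrAtt =
      addAttribute(PositionIncrementAttribute.class);
  private final TokenAndPos[] tokens;
  private int upto;
  private int lastPos = -1;

  CannedTokenStream(TokenAndPos... tokens) {
    this.tokens = tokens;
  }

  @Override
  public boolean incrementToken() { // no "throws IOException": nothing here can throw it
    clearAttributes();
    if (upto < tokens.length) {
      final TokenAndPos token = tokens[upto++];
      termAtt.setEmpty().append(token.token);
      // Convert the absolute position into the increment the API expects;
      // two tokens at the same position yield an increment of 0.
      posIncrAtt.setPositionIncrement(token.pos - lastPos);
      lastPos = token.pos;
      return true;
    }
    return false;
  }

  @Override
  public void reset() throws IOException {
    super.reset();
    upto = 0;
    lastPos = -1;
  }
}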
Project apache/lucene-solr in file ...ne.spatial.prefix.RecursivePrefixTreeStrategy.java (2014-04-18)
@@ -19,6 +19,7 @@ package org.apache.lucene.spatial.prefix;
 
 import com.spatial4j.core.shape.Point;
 import com.spatial4j.core.shape.Shape;
+import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.spatial.DisjointSpatialFilter;
 import org.apache.lucene.spatial.prefix.tree.Cell;
@@ -112,7 +113,7 @@ public class RecursivePrefixTreeStrategy extends PrefixTreeStrategy {
   }
 
   @Override
-  protected CellTokenStream createTokenStream(Shape shape, int detailLevel) {
+  protected TokenStream createTokenStream(Shape shape, int detailLevel) {
     if (shape instanceof Point || !pruneLeafyBranches)
       return super.createTokenStream(shape, detailLevel);
 
Project apache/lucene-solr in file ...sis.miscellaneous.TestLimitTokenCountAnalyzer.java (2011-09-26)
@@ -23,6 +23,7 @@ import java.io.StringReader;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.document.Document;
@@ -38,12 +39,11 @@ public class TestLimitTokenCountAnalyzer extends BaseTokenStreamTestCase {
   public void testLimitTokenCountAnalyzer() throws IOException {
     Analyzer a = new LimitTokenCountAnalyzer(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2);
     // dont use assertAnalyzesTo here, as the end offset is not the end of the string!
-    assertTokenStreamContents(a.tokenStream("dummy", new StringReader("1  2     3  4  5")), new String[] { "1", "2" }, new int[] { 0, 3 }, new int[] { 1, 4 }, 4);
+    assertTokenStreamContents(a.reusableTokenStream("dummy", new StringReader("1  2     3  4  5")), new String[] { "1", "2" }, new int[] { 0, 3 }, new int[] { 1, 4 }, 4);
     assertTokenStreamContents(a.reusableTokenStream("dummy", new StringReader("1 2 3 4 5")), new String[] { "1", "2" }, new int[] { 0, 2 }, new int[] { 1, 3 }, 3);
     
     a = new LimitTokenCountAnalyzer(new StandardAnalyzer(TEST_VERSION_CURRENT), 2);
     // dont use assertAnalyzesTo here, as the end offset is not the end of the string!
-    assertTokenStreamContents(a.tokenStream("dummy", new StringReader("1 2 3 4 5")), new String[] { "1", "2" }, new int[] { 0, 2 }, new int[] { 1, 3 }, 3);
     assertTokenStreamContents(a.reusableTokenStream("dummy", new StringReader("1 2 3 4 5")), new String[] { "1", "2" }, new int[] { 0, 2 }, new int[] { 1, 3 }, 3);
   }
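For context on this swap: in Lucene 3.x, Analyzer.tokenStream() built a fresh stream on every call while reusableTokenStream() recycled a per-thread instance, so the test migrates to the cheaper reusing variant. From Lucene 4.0 onward reusableTokenStream() is gone and tokenStream() itself reuses streams, so the first assertion would read roughly as below (a sketch inside a BaseTokenStreamTestCase subclass, keeping the expected values from the diff above; the no-arg WhitespaceAnalyzer assumes Lucene 5.x or later):

// Modern equivalent: tokenStream() is now the reusing call, and the
// String overload replaces the explicit StringReader.
Analyzer a = new LimitTokenCountAnalyzer(new WhitespaceAnalyzer(), 2);
assertTokenStreamContents(a.tokenStream("dummy", "1  2     3  4  5"),
    new String[] { "1", "2" }, new int[] { 0, 3 }, new int[] { 1, 4 }, 4);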