org.apache.lucene.analysis.compound
Class CompoundWordTokenFilterBase

java.lang.Object
  extended by org.apache.lucene.analysis.TokenStream
      extended by org.apache.lucene.analysis.TokenFilter
          extended by org.apache.lucene.analysis.compound.CompoundWordTokenFilterBase
Direct Known Subclasses:
DictionaryCompoundWordTokenFilter, HyphenationCompoundWordTokenFilter

public abstract class CompoundWordTokenFilterBase
extends TokenFilter

Base class for decomposition token filters.
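
Concrete subclasses split compound words in the incoming token stream into dictionary subwords; the original token is emitted first, followed by the subwords it decomposes into. A minimal wiring sketch, assuming the Lucene 2.x contrib-analyzers API shown on this page and the matching String[] constructor of the DictionaryCompoundWordTokenFilter subclass (the dictionary words and class name below are illustrative only):

import java.io.Reader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.compound.DictionaryCompoundWordTokenFilter;

public class CompoundAnalysisSketch {
  // Tokenize on whitespace, then decompose compounds against a tiny sample
  // dictionary; the DEFAULT_* size limits defined by this base class apply.
  public static TokenStream buildStream(Reader reader) {
    TokenStream stream = new WhitespaceTokenizer(reader);
    return new DictionaryCompoundWordTokenFilter(stream,
        new String[] { "fuss", "ball", "verein" });
  }
}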


Field Summary
static int DEFAULT_MAX_SUBWORD_SIZE
          The default maximum length of subwords that are propagated to the output of this filter.
static int DEFAULT_MIN_SUBWORD_SIZE
          The default minimum length of subwords that are propagated to the output of this filter.
static int DEFAULT_MIN_WORD_SIZE
          The default minimum length a word must have in order to be decomposed.
protected  CharArraySet dictionary
           
protected  int maxSubwordSize
           
protected  int minSubwordSize
           
protected  int minWordSize
           
protected  boolean onlyLongestMatch
           
protected  java.util.LinkedList tokens
           
 
Fields inherited from class org.apache.lucene.analysis.TokenFilter
input
 
Constructor Summary
protected CompoundWordTokenFilterBase(TokenStream input, java.util.Set dictionary)
           
protected CompoundWordTokenFilterBase(TokenStream input, java.util.Set dictionary, boolean onlyLongestMatch)
           
protected CompoundWordTokenFilterBase(TokenStream input, java.util.Set dictionary, int minWordSize, int minSubwordSize, int maxSubwordSize, boolean onlyLongestMatch)
           
protected CompoundWordTokenFilterBase(TokenStream input, java.lang.String[] dictionary)
           
protected CompoundWordTokenFilterBase(TokenStream input, java.lang.String[] dictionary, boolean onlyLongestMatch)
           
protected CompoundWordTokenFilterBase(TokenStream input, java.lang.String[] dictionary, int minWordSize, int minSubwordSize, int maxSubwordSize, boolean onlyLongestMatch)
           
 
Method Summary
protected static void addAllLowerCase(java.util.Set target, java.util.Collection col)
           
protected  Token createToken(int offset, int length, Token prototype)
           
protected  void decompose(Token token)
           
protected abstract  void decomposeInternal(Token token)
           
static java.util.Set makeDictionary(java.lang.String[] dictionary)
          Creates a Set of words from an array. The resulting Set performs case-insensitive matching. TODO: look for a faster dictionary lookup approach.
protected static char[] makeLowerCaseCopy(char[] buffer)
           
 Token next(Token reusableToken)
           
 
Methods inherited from class org.apache.lucene.analysis.TokenFilter
close, reset
 
Methods inherited from class org.apache.lucene.analysis.TokenStream
next
 
Methods inherited from class java.lang.Object
clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
 

Field Detail

DEFAULT_MIN_WORD_SIZE

public static final int DEFAULT_MIN_WORD_SIZE
The default minimum length a word must have in order to be decomposed.

See Also:
Constant Field Values

DEFAULT_MIN_SUBWORD_SIZE

public static final int DEFAULT_MIN_SUBWORD_SIZE
The default minimum length of subwords that are propagated to the output of this filter.

See Also:
Constant Field Values

DEFAULT_MAX_SUBWORD_SIZE

public static final int DEFAULT_MAX_SUBWORD_SIZE
The default maximum length of subwords that are propagated to the output of this filter.

See Also:
Constant Field Values

dictionary

protected final CharArraySet dictionary

tokens

protected final java.util.LinkedList tokens

minWordSize

protected final int minWordSize

minSubwordSize

protected final int minSubwordSize

maxSubwordSize

protected final int maxSubwordSize

onlyLongestMatch

protected final boolean onlyLongestMatch
Constructor Detail

CompoundWordTokenFilterBase

protected CompoundWordTokenFilterBase(TokenStream input,
                                      java.lang.String[] dictionary,
                                      int minWordSize,
                                      int minSubwordSize,
                                      int maxSubwordSize,
                                      boolean onlyLongestMatch)

CompoundWordTokenFilterBase

protected CompoundWordTokenFilterBase(TokenStream input,
                                      java.lang.String[] dictionary,
                                      boolean onlyLongestMatch)

CompoundWordTokenFilterBase

protected CompoundWordTokenFilterBase(TokenStream input,
                                      java.util.Set dictionary,
                                      boolean onlyLongestMatch)

CompoundWordTokenFilterBase

protected CompoundWordTokenFilterBase(TokenStream input,
                                      java.lang.String[] dictionary)

CompoundWordTokenFilterBase

protected CompoundWordTokenFilterBase(TokenStream input,
                                      java.util.Set dictionary)

CompoundWordTokenFilterBase

protected CompoundWordTokenFilterBase(TokenStream input,
                                      java.util.Set dictionary,
                                      int minWordSize,
                                      int minSubwordSize,
                                      int maxSubwordSize,
                                      boolean onlyLongestMatch)
Method Detail

makeDictionary

public static final java.util.Set makeDictionary(java.lang.String[] dictionary)
Creates a Set of words from an array. The resulting Set performs case-insensitive matching. TODO: look for a faster dictionary lookup approach.

Parameters:
dictionary -
Returns:
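
A usage sketch for this helper, assuming DictionaryCompoundWordTokenFilter also accepts the resulting Set (as the Set-based constructors above suggest); building the dictionary once lets several filter instances share one case-insensitive word set:

import java.util.Set;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.compound.CompoundWordTokenFilterBase;
import org.apache.lucene.analysis.compound.DictionaryCompoundWordTokenFilter;

public class SharedDictionarySketch {
  // Build the case-insensitive dictionary Set once and reuse it for every stream.
  private static final Set DICTIONARY = CompoundWordTokenFilterBase.makeDictionary(
      new String[] { "Donau", "Dampf", "Schiff" });

  public static TokenStream wrap(TokenStream input) {
    return new DictionaryCompoundWordTokenFilter(input, DICTIONARY);
  }
}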

next

public Token next(Token reusableToken)
           throws java.io.IOException
Overrides:
next in class TokenStream
Throws:
java.io.IOException
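
A sketch of the caller-side loop this override participates in, using the pre-attribute reusable-token API of TokenStream (names are illustrative):

import java.io.IOException;

import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;

public class ConsumeSketch {
  // Pass one reusable Token in, use whatever Token comes back, stop at null.
  // For this filter, the original token is returned before its buffered subwords.
  public static void printTerms(TokenStream stream) throws IOException {
    final Token reusableToken = new Token();
    for (Token t = stream.next(reusableToken); t != null; t = stream.next(reusableToken)) {
      System.out.println(new String(t.termBuffer(), 0, t.termLength()));
    }
  }
}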

addAllLowerCase

protected static final void addAllLowerCase(java.util.Set target,
                                            java.util.Collection col)

makeLowerCaseCopy

protected static char[] makeLowerCaseCopy(char[] buffer)

createToken

protected final Token createToken(int offset,
                                  int length,
                                  Token prototype)

decompose

protected void decompose(Token token)

decomposeInternal

protected abstract void decomposeInternal(Token token)
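
decompose(Token) buffers the original token and then calls this method, which is expected to append subword Tokens to the protected tokens list, typically via createToken(int, int, Token). A minimal illustrative subclass, using a brute-force dictionary scan rather than the strategy of any shipped subclass:

import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.compound.CompoundWordTokenFilterBase;

public class BruteForceCompoundTokenFilter extends CompoundWordTokenFilterBase {

  protected BruteForceCompoundTokenFilter(TokenStream input, String[] dictionary) {
    super(input, dictionary);
  }

  // Check every candidate substring within the configured subword bounds
  // against the dictionary and buffer a subword token for each hit.
  protected void decomposeInternal(final Token token) {
    final char[] lowerCased = makeLowerCaseCopy(token.termBuffer());
    for (int start = 0; start <= token.termLength() - minSubwordSize; start++) {
      for (int len = minSubwordSize;
           len <= maxSubwordSize && start + len <= token.termLength(); len++) {
        if (dictionary.contains(lowerCased, start, len)) {
          // createToken derives the subword's term text and offsets from the prototype token.
          tokens.add(createToken(start, len, token));
        }
      }
    }
  }
}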


Copyright © 2000-2009 Apache Software Foundation. All Rights Reserved.