ehatcher    2004/01/19 13:02:31

  Modified:    contributions/analyzers/src/java/org/apache/lucene/analysis/br
                        BrazilianAnalyzer.java
  Log:
  Duplicate WordlistLoader removal.  It exists in Lucene's core also.  There was one code difference and the core implementation looked correct - please let me know if this is not correct

  Revision  Changes    Path
  1.2       +3 -2      jakarta-lucene-sandbox/contributions/analyzers/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java

  Index: BrazilianAnalyzer.java
  ===================================================================
  RCS file: /home/cvs/jakarta-lucene-sandbox/contributions/analyzers/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java,v
  retrieving revision 1.1
  retrieving revision 1.2
  diff -u -r1.1 -r1.2
  --- BrazilianAnalyzer.java    23 Dec 2003 18:46:34 -0000    1.1
  +++ BrazilianAnalyzer.java    19 Jan 2004 21:02:31 -0000    1.2
  @@ -4,6 +4,7 @@
   import org.apache.lucene.analysis.LowerCaseFilter;
   import org.apache.lucene.analysis.StopFilter;
   import org.apache.lucene.analysis.TokenStream;
  +import org.apache.lucene.analysis.de.WordlistLoader;
   import org.apache.lucene.analysis.standard.StandardFilter;
   import org.apache.lucene.analysis.standard.StandardTokenizer;
   import java.io.File;
  @@ -43,7 +44,7 @@
        "seja","sem","sendo","seu","seus","sob","sobre","sua",
        "suas","tal","tambem","teu","teus","toda","todas","todo",
        "todos","tua","tuas","tudo","um","uma","umas","uns"};
  -
  +
     /**
      * Contains the stopwords used with the StopFilter.
  @@ -100,7 +101,7 @@
     public void setStemExclusionTable( File exclusionlist ) {
       excltable = WordlistLoader.getWordtable( exclusionlist );
     }
  -
  +
     /**
      * Creates a TokenStream which tokenizes all the text in the provided Reader.
      *
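  For reference, a minimal usage sketch of the class touched above. It sticks to the API visible in the diff (setStemExclusionTable, which delegates to the shared WordlistLoader.getWordtable) plus the stock Lucene 1.x Analyzer/TokenStream calls; the default constructor, the field name "contents", the file "exclusions.txt", and the sample sentence are illustrative assumptions, not part of this commit.

    import java.io.File;
    import java.io.StringReader;
    import org.apache.lucene.analysis.Token;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.br.BrazilianAnalyzer;

    public class BrazilianAnalyzerDemo {
      public static void main(String[] args) throws Exception {
        BrazilianAnalyzer analyzer = new BrazilianAnalyzer();

        // The exclusion list is read via the shared WordlistLoader.getWordtable(File)
        // shown in the diff above; "exclusions.txt" is a hypothetical word-list file.
        analyzer.setStemExclusionTable(new File("exclusions.txt"));

        // Tokenize a sample sentence and print each resulting token.
        TokenStream stream = analyzer.tokenStream("contents",
            new StringReader("Os livros foram escritos em portugues"));
        for (Token token = stream.next(); token != null; token = stream.next()) {
          System.out.println(token.termText());
        }
      }
    }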