Replaced deprecated constructors which took a Version parameter.

When looking into the code, these parameters only ended up toggling behaviour if the Lucene version was later than 3.1.


Former-commit-id: b7641118b16ccfc904c8aaab3b2636d909d5b1d9
This commit is contained in:
Hans Joachim Desserud
2015-01-05 21:46:38 +01:00
parent 9c55b889cb
commit 754c2fc9bf
8 changed files with 16 additions and 34 deletions

View File

@@ -125,7 +125,7 @@ public final class CpeMemoryIndex {
} }
indexSearcher = new IndexSearcher(indexReader); indexSearcher = new IndexSearcher(indexReader);
searchingAnalyzer = createSearchingAnalyzer(); searchingAnalyzer = createSearchingAnalyzer();
queryParser = new QueryParser(LuceneUtils.CURRENT_VERSION, Fields.DOCUMENT_KEY, searchingAnalyzer); queryParser = new QueryParser(Fields.DOCUMENT_KEY, searchingAnalyzer);
openState = true; openState = true;
} }
} }

View File

@@ -19,7 +19,6 @@ package org.owasp.dependencycheck.data.lucene;
import java.io.Reader; import java.io.Reader;
import org.apache.lucene.analysis.util.CharTokenizer; import org.apache.lucene.analysis.util.CharTokenizer;
import org.apache.lucene.util.Version;
/** /**
* Tokenizes the input breaking it into tokens when non-alpha/numeric characters are found. * Tokenizes the input breaking it into tokens when non-alpha/numeric characters are found.
@@ -31,11 +30,10 @@ public class AlphaNumericTokenizer extends CharTokenizer {
/** /**
* Constructs a new AlphaNumericTokenizer. * Constructs a new AlphaNumericTokenizer.
* *
* @param matchVersion the lucene version
* @param in the Reader * @param in the Reader
*/ */
public AlphaNumericTokenizer(Version matchVersion, Reader in) { public AlphaNumericTokenizer(Reader in) {
super(matchVersion, in); super(in);
} }
/** /**

View File

@@ -59,7 +59,7 @@ public class FieldAnalyzer extends Analyzer {
*/ */
@Override @Override
protected TokenStreamComponents createComponents(String fieldName, Reader reader) { protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
final Tokenizer source = new AlphaNumericTokenizer(version, reader); final Tokenizer source = new AlphaNumericTokenizer(reader);
TokenStream stream = source; TokenStream stream = source;
@@ -72,7 +72,7 @@ public class FieldAnalyzer extends Analyzer {
| WordDelimiterFilter.SPLIT_ON_NUMERICS | WordDelimiterFilter.SPLIT_ON_NUMERICS
| WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null); | WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null);
stream = new LowerCaseFilter(version, stream); stream = new LowerCaseFilter(stream);
stream = new StopFilter(version, stream, StopAnalyzer.ENGLISH_STOP_WORDS_SET); stream = new StopFilter(version, stream, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
return new TokenStreamComponents(source, stream); return new TokenStreamComponents(source, stream);

View File

@@ -62,7 +62,7 @@ public class SearchFieldAnalyzer extends Analyzer {
*/ */
@Override @Override
protected TokenStreamComponents createComponents(String fieldName, Reader reader) { protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
final Tokenizer source = new AlphaNumericTokenizer(version, reader); final Tokenizer source = new AlphaNumericTokenizer(reader);
TokenStream stream = source; TokenStream stream = source;
@@ -74,7 +74,7 @@ public class SearchFieldAnalyzer extends Analyzer {
| WordDelimiterFilter.SPLIT_ON_NUMERICS | WordDelimiterFilter.SPLIT_ON_NUMERICS
| WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null); | WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null);
stream = new LowerCaseFilter(version, stream); stream = new LowerCaseFilter(stream);
stream = new UrlTokenizingFilter(stream); stream = new UrlTokenizingFilter(stream);
concatenatingFilter = new TokenPairConcatenatingFilter(stream); concatenatingFilter = new TokenPairConcatenatingFilter(stream);
stream = concatenatingFilter; stream = concatenatingFilter;

View File

@@ -40,18 +40,11 @@ public class SearchVersionAnalyzer extends Analyzer {
// look at this article to implement // look at this article to implement
// http://www.codewrecks.com/blog/index.php/2012/08/25/index-your-blog-using-tags-and-lucene-net/ // http://www.codewrecks.com/blog/index.php/2012/08/25/index-your-blog-using-tags-and-lucene-net/
/**
* The Lucene Version used.
*/
private final Version version;
/** /**
* Creates a new SearchVersionAnalyzer. * Creates a new SearchVersionAnalyzer.
* *
* @param version the Lucene version
*/ */
public SearchVersionAnalyzer(Version version) { public SearchVersionAnalyzer() {
this.version = version;
} }
/** /**
@@ -63,9 +56,9 @@ public class SearchVersionAnalyzer extends Analyzer {
*/ */
@Override @Override
protected TokenStreamComponents createComponents(String fieldName, Reader reader) { protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
final Tokenizer source = new WhitespaceTokenizer(version, reader); final Tokenizer source = new WhitespaceTokenizer(reader);
TokenStream stream = source; TokenStream stream = source;
stream = new LowerCaseFilter(version, stream); stream = new LowerCaseFilter(stream);
stream = new VersionTokenizingFilter(stream); stream = new VersionTokenizingFilter(stream);
return new TokenStreamComponents(source, stream); return new TokenStreamComponents(source, stream);
} }

View File

@@ -23,7 +23,6 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.LowerCaseFilter; import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.util.Version;
/** /**
* VersionAnalyzer is a Lucene Analyzer used to analyze version information. * VersionAnalyzer is a Lucene Analyzer used to analyze version information.
@@ -40,19 +39,11 @@ public class VersionAnalyzer extends Analyzer {
// look at this article to implement // look at this article to implement
// http://www.codewrecks.com/blog/index.php/2012/08/25/index-your-blog-using-tags-and-lucene-net/ // http://www.codewrecks.com/blog/index.php/2012/08/25/index-your-blog-using-tags-and-lucene-net/
/**
* The Lucene Version used.
*/
private final Version version;
/** /**
* Creates a new VersionAnalyzer. * Creates a new VersionAnalyzer.
* *
* @param version the Lucene version
*/ */
public VersionAnalyzer(Version version) { public VersionAnalyzer() {}
this.version = version;
}
/** /**
* Creates the TokenStreamComponents * Creates the TokenStreamComponents
@@ -63,9 +54,9 @@ public class VersionAnalyzer extends Analyzer {
*/ */
@Override @Override
protected TokenStreamComponents createComponents(String fieldName, Reader reader) { protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
final Tokenizer source = new WhitespaceTokenizer(version, reader); final Tokenizer source = new WhitespaceTokenizer(reader);
TokenStream stream = source; TokenStream stream = source;
stream = new LowerCaseFilter(version, stream); stream = new LowerCaseFilter(stream);
return new TokenStreamComponents(source, stream); return new TokenStreamComponents(source, stream);
} }
} }

View File

@@ -89,7 +89,7 @@ public class FieldAnalyzerTest {
map.put(field1, searchAnalyzerProduct); map.put(field1, searchAnalyzerProduct);
map.put(field2, searchAnalyzerVendor); map.put(field2, searchAnalyzerVendor);
PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new StandardAnalyzer(LuceneUtils.CURRENT_VERSION), map); PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new StandardAnalyzer(LuceneUtils.CURRENT_VERSION), map);
QueryParser parser = new QueryParser(LuceneUtils.CURRENT_VERSION, field1, wrapper); QueryParser parser = new QueryParser(field1, wrapper);
Query q = parser.parse(querystr); Query q = parser.parse(querystr);
//System.out.println(q.toString()); //System.out.println(q.toString());

View File

@@ -61,7 +61,7 @@ public class TokenPairConcatenatingFilterTest extends BaseTokenStreamTestCase {
* test some examples * test some examples
*/ */
public void testExamples() throws IOException { public void testExamples() throws IOException {
Tokenizer wsTokenizer = new WhitespaceTokenizer(LuceneUtils.CURRENT_VERSION, new StringReader("one two three")); Tokenizer wsTokenizer = new WhitespaceTokenizer(new StringReader("one two three"));
TokenStream filter = new TokenPairConcatenatingFilter(wsTokenizer); TokenStream filter = new TokenPairConcatenatingFilter(wsTokenizer);
assertTokenStreamContents(filter, assertTokenStreamContents(filter,
new String[]{"one", "onetwo", "two", "twothree", "three"}); new String[]{"one", "onetwo", "two", "twothree", "three"});
@@ -75,7 +75,7 @@ public class TokenPairConcatenatingFilterTest extends BaseTokenStreamTestCase {
@Test @Test
public void testClear() throws IOException { public void testClear() throws IOException {
TokenStream ts = new WhitespaceTokenizer(LuceneUtils.CURRENT_VERSION, new StringReader("one two three")); TokenStream ts = new WhitespaceTokenizer(new StringReader("one two three"));
TokenPairConcatenatingFilter filter = new TokenPairConcatenatingFilter(ts); TokenPairConcatenatingFilter filter = new TokenPairConcatenatingFilter(ts);
assertTokenStreamContents(filter, new String[]{"one", "onetwo", "two", "twothree", "three"}); assertTokenStreamContents(filter, new String[]{"one", "onetwo", "two", "twothree", "three"});