Reverted to Lucene 4.7.2 - new versions of Lucene are built using JDK 1.7+ and cause issues for the dependency-check Maven Plugin

Former-commit-id: 514cc4922c7f00f55b1dcd102f6d45491e90a5d8
Jeremy Long
2015-03-01 21:43:52 -05:00
parent 09f1a0ac92
commit ff0daa8d66
8 changed files with 65 additions and 34 deletions

pom.xml

@@ -340,6 +340,7 @@ Copyright (c) 2012 Jeremy Long. All Rights Reserved.
</plugins>
</reporting>
<dependencies>
+<!-- Note, to stay compatible with Jenkins installations only JARs compiled to 1.6 can be used -->
<dependency>
<groupId>org.owasp</groupId>
<artifactId>dependency-check-utils</artifactId>
@@ -679,6 +680,8 @@ Copyright (c) 2012 Jeremy Long. All Rights Reserved.
</profile>
</profiles>
<properties>
-<apache.lucene.version>4.10.3</apache.lucene.version>
+<!-- new versions of lucene are compiled with JDK 1.7 and cannot be used ubiquitously in Jenkins;
+thus, we cannot upgrade beyond 4.7.2 -->
+<apache.lucene.version>4.7.2</apache.lucene.version>
</properties>
</project>

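Background for both pom comments: classes compiled by JDK 1.7 carry class-file major version 51, which the Java 6 runtime (major version 50) used by some Jenkins installations refuses to load; that is why the Lucene dependency must stay at 4.7.2. A quick way to check what a dependency JAR was compiled for is to read the version bytes of a .class entry. The standalone snippet below is illustrative only, not part of this commit; it is deliberately written in Java 6 style.

import java.io.DataInputStream;
import java.io.IOException;
import java.util.Enumeration;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;

// Illustrative check (not project code): prints the class-file major version
// of the first .class entry in a JAR. 50 = Java 6, 51 = Java 7, 52 = Java 8.
public class BytecodeVersionCheck {
    public static void main(String[] args) throws IOException {
        JarFile jar = new JarFile(args[0]);
        try {
            Enumeration<JarEntry> entries = jar.entries();
            while (entries.hasMoreElements()) {
                JarEntry entry = entries.nextElement();
                if (!entry.getName().endsWith(".class")) {
                    continue;
                }
                DataInputStream data = new DataInputStream(jar.getInputStream(entry));
                int magic = data.readInt();           // class files start with 0xCAFEBABE
                if (magic != 0xCAFEBABE) {
                    data.close();
                    continue;
                }
                data.readUnsignedShort();             // minor version
                int major = data.readUnsignedShort(); // major version
                System.out.println(entry.getName() + " -> major version " + major);
                data.close();
                break;
            }
        } finally {
            jar.close();
        }
    }
}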
CpeMemoryIndex.java

@@ -48,8 +48,8 @@ import org.owasp.dependencycheck.data.nvdcve.DatabaseException;
import org.owasp.dependencycheck.utils.Pair;
/**
-* An in memory lucene index that contains the vendor/product combinations from the CPE (application) identifiers within
-* the NVD CVE data.
+* An in memory lucene index that contains the vendor/product combinations from the CPE (application) identifiers within the NVD
+* CVE data.
*
* @author Jeremy Long <jeremy.long@owasp.org>
*/
@@ -125,7 +125,7 @@ public final class CpeMemoryIndex {
}
indexSearcher = new IndexSearcher(indexReader);
searchingAnalyzer = createSearchingAnalyzer();
-queryParser = new QueryParser(Fields.DOCUMENT_KEY, searchingAnalyzer);
+queryParser = new QueryParser(LuceneUtils.CURRENT_VERSION, Fields.DOCUMENT_KEY, searchingAnalyzer);
openState = true;
}
}
@@ -153,7 +153,7 @@ public final class CpeMemoryIndex {
private Analyzer createIndexingAnalyzer() {
final Map fieldAnalyzers = new HashMap();
fieldAnalyzers.put(Fields.DOCUMENT_KEY, new KeywordAnalyzer());
-return new PerFieldAnalyzerWrapper(new FieldAnalyzer(), fieldAnalyzers);
+return new PerFieldAnalyzerWrapper(new FieldAnalyzer(LuceneUtils.CURRENT_VERSION), fieldAnalyzers);
}
/**
@@ -165,12 +165,12 @@ public final class CpeMemoryIndex {
private Analyzer createSearchingAnalyzer() {
final Map<String, Analyzer> fieldAnalyzers = new HashMap<String, Analyzer>();
fieldAnalyzers.put(Fields.DOCUMENT_KEY, new KeywordAnalyzer());
-productSearchFieldAnalyzer = new SearchFieldAnalyzer();
-vendorSearchFieldAnalyzer = new SearchFieldAnalyzer();
+productSearchFieldAnalyzer = new SearchFieldAnalyzer(LuceneUtils.CURRENT_VERSION);
+vendorSearchFieldAnalyzer = new SearchFieldAnalyzer(LuceneUtils.CURRENT_VERSION);
fieldAnalyzers.put(Fields.PRODUCT, productSearchFieldAnalyzer);
fieldAnalyzers.put(Fields.VENDOR, vendorSearchFieldAnalyzer);
-return new PerFieldAnalyzerWrapper(new FieldAnalyzer(), fieldAnalyzers);
+return new PerFieldAnalyzerWrapper(new FieldAnalyzer(LuceneUtils.CURRENT_VERSION), fieldAnalyzers);
}
/**

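For context: the Lucene 4.x API threads an explicit Version constant through analyzers, wrappers, and the query parser, which is why this revert has to touch every constructor call above. A minimal sketch of that pattern follows; the "docKey" field name is hypothetical, not the project's Fields constants.

import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Version;

// A minimal sketch (not project code) of Lucene 4.x version threading.
public class VersionThreadingSketch {
    public static void main(String[] args) throws Exception {
        Version version = Version.LUCENE_47; // pinned so the bytecode target stays JDK 1.6 compatible
        Map<String, Analyzer> fieldAnalyzers = new HashMap<String, Analyzer>();
        fieldAnalyzers.put("docKey", new KeywordAnalyzer());
        Analyzer analyzer = new PerFieldAnalyzerWrapper(new StandardAnalyzer(version), fieldAnalyzers);
        QueryParser parser = new QueryParser(version, "docKey", analyzer);
        Query query = parser.parse("vendor:apache AND product:lucene");
        System.out.println(query);
    }
}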
AlphaNumericTokenizer.java

@@ -19,6 +19,7 @@ package org.owasp.dependencycheck.data.lucene;
import java.io.Reader;
import org.apache.lucene.analysis.util.CharTokenizer;
+import org.apache.lucene.util.Version;
/**
* Tokenizes the input breaking it into tokens when non-alpha/numeric characters are found.
@@ -30,10 +31,22 @@ public class AlphaNumericTokenizer extends CharTokenizer {
/**
* Constructs a new AlphaNumericTokenizer.
*
+* @param matchVersion the lucene version
* @param in the Reader
*/
-public AlphaNumericTokenizer(Reader in) {
-super(in);
+public AlphaNumericTokenizer(Version matchVersion, Reader in) {
+super(matchVersion, in);
}
+/**
+* Constructs a new AlphaNumericTokenizer.
+*
+* @param matchVersion the lucene version
+* @param factory the AttributeFactory
+* @param in the Reader
+*/
+public AlphaNumericTokenizer(Version matchVersion, AttributeFactory factory, Reader in) {
+super(matchVersion, factory, in);
+}
/**

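The hunk above shows only the constructors; the tokenization behavior of a CharTokenizer subclass lives in isTokenChar, which is outside this diff. Given the class javadoc (break into tokens on non-alpha/numeric characters), the predicate presumably looks like this sketch:

// Presumed shape of the predicate (assumption, not shown in this commit):
// CharTokenizer emits a token for each maximal run of characters for which
// isTokenChar returns true, splitting wherever it returns false.
@Override
protected boolean isTokenChar(int c) {
    return Character.isLetterOrDigit(c);
}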
FieldAnalyzer.java

@@ -25,21 +25,30 @@ import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
+import org.apache.lucene.util.Version;
/**
* <p>
-* A Lucene Analyzer that utilizes the WhitespaceTokenizer, WordDelimiterFilter, LowerCaseFilter, and StopFilter. The
-* intended purpose of this Analyzer is to index the CPE fields vendor and product.</p>
+* A Lucene Analyzer that utilizes the WhitespaceTokenizer, WordDelimiterFilter, LowerCaseFilter, and StopFilter. The intended
+* purpose of this Analyzer is to index the CPE fields vendor and product.</p>
*
* @author Jeremy Long <jeremy.long@owasp.org>
*/
public class FieldAnalyzer extends Analyzer {
+/**
+* The Lucene Version used.
+*/
+private final Version version;
/**
* Creates a new FieldAnalyzer.
+*
+* @param version the Lucene version
*/
-public FieldAnalyzer() { }
+public FieldAnalyzer(Version version) {
+this.version = version;
+}
/**
* Creates the TokenStreamComponents
@@ -50,7 +59,7 @@ public class FieldAnalyzer extends Analyzer {
*/
@Override
protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
-final Tokenizer source = new AlphaNumericTokenizer(reader);
+final Tokenizer source = new AlphaNumericTokenizer(version, reader);
TokenStream stream = source;
@@ -63,8 +72,8 @@ public class FieldAnalyzer extends Analyzer {
| WordDelimiterFilter.SPLIT_ON_NUMERICS
| WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null);
-stream = new LowerCaseFilter(stream);
-stream = new StopFilter(stream, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+stream = new LowerCaseFilter(version, stream);
+stream = new StopFilter(version, stream, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
return new TokenStreamComponents(source, stream);
}

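To see what the version-parameterized chain produces, the analyzer can be run directly against a string using the standard Lucene 4.x consumption loop (reset, incrementToken, end, close). FieldAnalyzer and LuceneUtils below are the project classes from this commit (assumed importable); the input string is arbitrary.

import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

// Standard Lucene 4.x token-stream consumption loop (sketch, not project code).
public class PrintTokens {
    public static void main(String[] args) throws Exception {
        Analyzer analyzer = new FieldAnalyzer(LuceneUtils.CURRENT_VERSION);
        TokenStream stream = analyzer.tokenStream("product", new StringReader("Spring-Framework Core"));
        CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
            System.out.println(term.toString()); // one line per emitted token
        }
        stream.end();
        stream.close();
    }
}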
LuceneUtils.java

@@ -29,10 +29,10 @@ import org.apache.lucene.util.Version;
public final class LuceneUtils {
/**
-* The current version of Lucene being used. Declaring this one place so an upgrade doesn't require hunting through
-* the code base.
+* The current version of Lucene being used. Declaring this one place so an upgrade doesn't require hunting through the code
+* base.
*/
-public static final Version CURRENT_VERSION = Version.LATEST;
+public static final Version CURRENT_VERSION = Version.LUCENE_47;
/**
* Private constructor as this is a utility class.

SearchFieldAnalyzer.java

@@ -25,6 +25,7 @@ import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
+import org.apache.lucene.util.Version;
/**
* A Lucene field analyzer used to analyze queries against the CPE data.
@@ -34,16 +35,22 @@ import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
public class SearchFieldAnalyzer extends Analyzer {
/**
-* A local reference to the TokenPairConcatenatingFilter so that we can clear any left over state if this analyzer
-* is re-used.
+* The Lucene Version used.
*/
+private final Version version;
+/**
+* A local reference to the TokenPairConcatenatingFilter so that we can clear any left over state if this analyzer is re-used.
+*/
private TokenPairConcatenatingFilter concatenatingFilter;
/**
* Constructs a new SearchFieldAnalyzer.
+*
+* @param version the Lucene version
*/
-public SearchFieldAnalyzer() { }
+public SearchFieldAnalyzer(Version version) {
+this.version = version;
+}
/**
* Creates the TokenStreamComponents used to analyze the stream.
@@ -54,7 +61,7 @@ public class SearchFieldAnalyzer extends Analyzer {
*/
@Override
protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
-final Tokenizer source = new AlphaNumericTokenizer(reader);
+final Tokenizer source = new AlphaNumericTokenizer(version, reader);
TokenStream stream = source;
@@ -66,19 +73,18 @@ public class SearchFieldAnalyzer extends Analyzer {
| WordDelimiterFilter.SPLIT_ON_NUMERICS
| WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null);
-stream = new LowerCaseFilter(stream);
+stream = new LowerCaseFilter(version, stream);
stream = new UrlTokenizingFilter(stream);
concatenatingFilter = new TokenPairConcatenatingFilter(stream);
stream = concatenatingFilter;
-stream = new StopFilter(stream, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+stream = new StopFilter(version, stream, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
return new TokenStreamComponents(source, stream);
}
/**
* <p>
-* Resets the analyzer and clears any internal state data that may have been left-over from previous uses of the
-* analyzer.</p>
+* Resets the analyzer and clears any internal state data that may have been left-over from previous uses of the analyzer.</p>
* <p>
* <b>If this analyzer is re-used this method must be called between uses.</b></p>
*/

FieldAnalyzerTest.java

@@ -69,7 +69,7 @@ public class FieldAnalyzerTest {
@Test
public void testAnalyzers() throws Exception {
-Analyzer analyzer = new FieldAnalyzer();
+Analyzer analyzer = new FieldAnalyzer(LuceneUtils.CURRENT_VERSION);
Directory index = new RAMDirectory();
String field1 = "product";
@@ -83,13 +83,13 @@ public class FieldAnalyzerTest {
//Analyzer searchingAnalyzer = new SearchFieldAnalyzer(LuceneUtils.CURRENT_VERSION);
String querystr = "product:\"(Spring Framework Core)\" vendor:(SpringSource)";
-SearchFieldAnalyzer searchAnalyzerProduct = new SearchFieldAnalyzer();
-SearchFieldAnalyzer searchAnalyzerVendor = new SearchFieldAnalyzer();
+SearchFieldAnalyzer searchAnalyzerProduct = new SearchFieldAnalyzer(LuceneUtils.CURRENT_VERSION);
+SearchFieldAnalyzer searchAnalyzerVendor = new SearchFieldAnalyzer(LuceneUtils.CURRENT_VERSION);
HashMap<String, Analyzer> map = new HashMap<String, Analyzer>();
map.put(field1, searchAnalyzerProduct);
map.put(field2, searchAnalyzerVendor);
-PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new StandardAnalyzer(), map);
-QueryParser parser = new QueryParser(field1, wrapper);
+PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new StandardAnalyzer(LuceneUtils.CURRENT_VERSION), map);
+QueryParser parser = new QueryParser(LuceneUtils.CURRENT_VERSION, field1, wrapper);
Query q = parser.parse(querystr);
//System.out.println(q.toString());

TokenPairConcatenatingFilterTest.java

@@ -61,7 +61,7 @@ public class TokenPairConcatenatingFilterTest extends BaseTokenStreamTestCase {
* test some examples
*/
public void testExamples() throws IOException {
-Tokenizer wsTokenizer = new WhitespaceTokenizer(new StringReader("one two three"));
+Tokenizer wsTokenizer = new WhitespaceTokenizer(LuceneUtils.CURRENT_VERSION, new StringReader("one two three"));
TokenStream filter = new TokenPairConcatenatingFilter(wsTokenizer);
assertTokenStreamContents(filter,
new String[]{"one", "onetwo", "two", "twothree", "three"});
@@ -75,7 +75,7 @@ public class TokenPairConcatenatingFilterTest extends BaseTokenStreamTestCase {
@Test
public void testClear() throws IOException {
-TokenStream ts = new WhitespaceTokenizer(new StringReader("one two three"));
+TokenStream ts = new WhitespaceTokenizer(LuceneUtils.CURRENT_VERSION, new StringReader("one two three"));
TokenPairConcatenatingFilter filter = new TokenPairConcatenatingFilter(ts);
assertTokenStreamContents(filter, new String[]{"one", "onetwo", "two", "twothree", "three"});
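The expected token sequence in these tests also documents the filter's algorithm: pass each original token through, and between consecutive originals emit their concatenation. Below is a self-contained sketch of such a filter. It is not the project's actual TokenPairConcatenatingFilter, which may handle offsets and position increments differently, but its buffered state illustrates exactly why SearchFieldAnalyzer must be reset between uses.

import java.io.IOException;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

// Sketch of a pair-concatenating filter producing, for input "one two three":
// one, onetwo, two, twothree, three. Not the project's actual implementation.
public final class PairConcatenatingFilterSketch extends TokenFilter {

    private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
    private String previous; // last original token emitted
    private String pending;  // original token queued behind a concatenated pair

    public PairConcatenatingFilterSketch(TokenStream input) {
        super(input);
    }

    @Override
    public boolean incrementToken() throws IOException {
        if (pending != null) {            // flush the buffered original token
            termAtt.setEmpty().append(pending);
            previous = pending;
            pending = null;
            return true;
        }
        if (!input.incrementToken()) {
            return false;
        }
        final String current = termAtt.toString();
        if (previous == null) {           // first token passes through unchanged
            previous = current;
            return true;
        }
        pending = current;                // emit "previous + current" now, "current" next
        termAtt.setEmpty().append(previous).append(current);
        return true;
    }

    @Override
    public void reset() throws IOException {
        super.reset();
        previous = null;                  // leftover state here is exactly why the
        pending = null;                   // search analyzer must be cleared between uses
    }
}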