From ff0daa8d66b0c9445494fdf330ffcaae12eda529 Mon Sep 17 00:00:00 2001
From: Jeremy Long
Date: Sun, 1 Mar 2015 21:43:52 -0500
Subject: [PATCH] reverted to lucene 4.7.2 - new versions of lucene are built
using JDK 1.7+ and cause issues for the dependency-check Maven Plugin
Former-commit-id: 514cc4922c7f00f55b1dcd102f6d45491e90a5d8
---
dependency-check-core/pom.xml | 5 ++++-
.../data/cpe/CpeMemoryIndex.java | 14 ++++++------
.../data/lucene/AlphaNumericTokenizer.java | 17 ++++++++++++--
.../data/lucene/FieldAnalyzer.java | 21 +++++++++++++-----
.../data/lucene/LuceneUtils.java | 6 ++---
.../data/lucene/SearchFieldAnalyzer.java | 22 ++++++++++++-------
.../data/lucene/FieldAnalyzerTest.java | 10 ++++-----
.../TokenPairConcatenatingFilterTest.java | 4 ++--
8 files changed, 65 insertions(+), 34 deletions(-)
diff --git a/dependency-check-core/pom.xml b/dependency-check-core/pom.xml
index afe4144ad..66485cfa2 100644
--- a/dependency-check-core/pom.xml
+++ b/dependency-check-core/pom.xml
@@ -340,6 +340,7 @@ Copyright (c) 2012 Jeremy Long. All Rights Reserved.
+
            <groupId>org.owasp</groupId>
            <artifactId>dependency-check-utils</artifactId>
@@ -679,6 +680,8 @@ Copyright (c) 2012 Jeremy Long. All Rights Reserved.
-        <lucene.version>4.10.3</lucene.version>
+
+        <lucene.version>4.7.2</lucene.version>
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/cpe/CpeMemoryIndex.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/cpe/CpeMemoryIndex.java
index e3fc75bae..355d82506 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/cpe/CpeMemoryIndex.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/cpe/CpeMemoryIndex.java
@@ -48,8 +48,8 @@ import org.owasp.dependencycheck.data.nvdcve.DatabaseException;
import org.owasp.dependencycheck.utils.Pair;
/**
- * An in memory lucene index that contains the vendor/product combinations from the CPE (application) identifiers within
- * the NVD CVE data.
+ * An in memory lucene index that contains the vendor/product combinations from the CPE (application) identifiers within the NVD
+ * CVE data.
*
* @author Jeremy Long
*/
@@ -125,7 +125,7 @@ public final class CpeMemoryIndex {
}
indexSearcher = new IndexSearcher(indexReader);
searchingAnalyzer = createSearchingAnalyzer();
- queryParser = new QueryParser(Fields.DOCUMENT_KEY, searchingAnalyzer);
+ queryParser = new QueryParser(LuceneUtils.CURRENT_VERSION, Fields.DOCUMENT_KEY, searchingAnalyzer);
openState = true;
}
}
@@ -153,7 +153,7 @@ public final class CpeMemoryIndex {
private Analyzer createIndexingAnalyzer() {
final Map<String, Analyzer> fieldAnalyzers = new HashMap<String, Analyzer>();
fieldAnalyzers.put(Fields.DOCUMENT_KEY, new KeywordAnalyzer());
- return new PerFieldAnalyzerWrapper(new FieldAnalyzer(), fieldAnalyzers);
+ return new PerFieldAnalyzerWrapper(new FieldAnalyzer(LuceneUtils.CURRENT_VERSION), fieldAnalyzers);
}
/**
@@ -165,12 +165,12 @@ public final class CpeMemoryIndex {
private Analyzer createSearchingAnalyzer() {
final Map<String, Analyzer> fieldAnalyzers = new HashMap<String, Analyzer>();
fieldAnalyzers.put(Fields.DOCUMENT_KEY, new KeywordAnalyzer());
- productSearchFieldAnalyzer = new SearchFieldAnalyzer();
- vendorSearchFieldAnalyzer = new SearchFieldAnalyzer();
+ productSearchFieldAnalyzer = new SearchFieldAnalyzer(LuceneUtils.CURRENT_VERSION);
+ vendorSearchFieldAnalyzer = new SearchFieldAnalyzer(LuceneUtils.CURRENT_VERSION);
fieldAnalyzers.put(Fields.PRODUCT, productSearchFieldAnalyzer);
fieldAnalyzers.put(Fields.VENDOR, vendorSearchFieldAnalyzer);
- return new PerFieldAnalyzerWrapper(new FieldAnalyzer(), fieldAnalyzers);
+ return new PerFieldAnalyzerWrapper(new FieldAnalyzer(LuceneUtils.CURRENT_VERSION), fieldAnalyzers);
}
/**
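
For reference, a minimal, self-contained sketch of the version-parameterized wiring these hunks revert to. It mirrors createSearchingAnalyzer(), but the "documentKey" field name and the use of StandardAnalyzer in place of the project's FieldAnalyzer are illustrative assumptions, not code from this patch.

```java
import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.util.Version;

public class AnalyzerWiringSketch {

    // Lucene 4.7.x analyzers and parsers take the match version explicitly
    private static final Version MATCH_VERSION = Version.LUCENE_47;

    public static QueryParser buildParser() {
        final Map<String, Analyzer> fieldAnalyzers = new HashMap<String, Analyzer>();
        // the document key is matched exactly, so it gets a KeywordAnalyzer
        fieldAnalyzers.put("documentKey", new KeywordAnalyzer());
        // every other field falls back to the default analyzer
        // (StandardAnalyzer stands in for the project's FieldAnalyzer here)
        final Analyzer wrapper = new PerFieldAnalyzerWrapper(
                new StandardAnalyzer(MATCH_VERSION), fieldAnalyzers);
        // QueryParser in Lucene 4.7 also takes the version as its first argument
        return new QueryParser(MATCH_VERSION, "documentKey", wrapper);
    }
}
```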
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/AlphaNumericTokenizer.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/AlphaNumericTokenizer.java
index a8f0c07ba..1a58f22a7 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/AlphaNumericTokenizer.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/AlphaNumericTokenizer.java
@@ -19,6 +19,7 @@ package org.owasp.dependencycheck.data.lucene;
import java.io.Reader;
import org.apache.lucene.analysis.util.CharTokenizer;
+import org.apache.lucene.util.Version;
/**
* Tokenizes the input breaking it into tokens when non-alpha/numeric characters are found.
@@ -30,10 +31,22 @@ public class AlphaNumericTokenizer extends CharTokenizer {
/**
* Constructs a new AlphaNumericTokenizer.
*
+ * @param matchVersion the lucene version
* @param in the Reader
*/
- public AlphaNumericTokenizer(Reader in) {
- super(in);
+ public AlphaNumericTokenizer(Version matchVersion, Reader in) {
+ super(matchVersion, in);
+ }
+
+ /**
+ * Constructs a new AlphaNumericTokenizer.
+ *
+ * @param matchVersion the lucene version
+ * @param factory the AttributeFactory
+ * @param in the Reader
+ */
+ public AlphaNumericTokenizer(Version matchVersion, AttributeFactory factory, Reader in) {
+ super(matchVersion, factory, in);
}
/**
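
The two constructors follow the Lucene 4.7 CharTokenizer contract: forward the match version to the superclass and decide token membership in isTokenChar. A self-contained sketch of that pattern; the class name and the keep-letters-and-digits rule mirror the tokenizer above but are illustrative, not the project source.

```java
import java.io.Reader;

import org.apache.lucene.analysis.util.CharTokenizer;
import org.apache.lucene.util.Version;

// A CharTokenizer subclass in the Lucene 4.7 style: the match version is
// forwarded to the superclass and isTokenChar defines token boundaries.
public class AlphaNumericTokenizerSketch extends CharTokenizer {

    public AlphaNumericTokenizerSketch(Version matchVersion, Reader in) {
        super(matchVersion, in);
    }

    @Override
    protected boolean isTokenChar(int c) {
        // keep letters and digits; anything else ends the current token
        return Character.isLetterOrDigit(c);
    }
}
```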
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/FieldAnalyzer.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/FieldAnalyzer.java
index 797733edb..700034c9d 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/FieldAnalyzer.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/FieldAnalyzer.java
@@ -25,21 +25,30 @@ import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
+import org.apache.lucene.util.Version;
/**
*
- * A Lucene Analyzer that utilizes the WhitespaceTokenizer, WordDelimiterFilter, LowerCaseFilter, and StopFilter. The
- * intended purpose of this Analyzer is to index the CPE fields vendor and product.
+ * A Lucene Analyzer that utilizes the WhitespaceTokenizer, WordDelimiterFilter, LowerCaseFilter, and StopFilter. The intended
+ * purpose of this Analyzer is to index the CPE fields vendor and product.
*
* @author Jeremy Long
*/
public class FieldAnalyzer extends Analyzer {
+ /**
+ * The Lucene Version used.
+ */
+ private final Version version;
+
/**
* Creates a new FieldAnalyzer.
*
+ * @param version the Lucene version
*/
- public FieldAnalyzer() { }
+ public FieldAnalyzer(Version version) {
+ this.version = version;
+ }
/**
* Creates the TokenStreamComponents
@@ -50,7 +59,7 @@ public class FieldAnalyzer extends Analyzer {
*/
@Override
protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
- final Tokenizer source = new AlphaNumericTokenizer(reader);
+ final Tokenizer source = new AlphaNumericTokenizer(version, reader);
TokenStream stream = source;
@@ -63,8 +72,8 @@ public class FieldAnalyzer extends Analyzer {
| WordDelimiterFilter.SPLIT_ON_NUMERICS
| WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null);
- stream = new LowerCaseFilter(stream);
- stream = new StopFilter(stream, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+ stream = new LowerCaseFilter(version, stream);
+ stream = new StopFilter(version, stream, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
return new TokenStreamComponents(source, stream);
}
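
As a usage note, any analyzer built this way can be exercised directly through the TokenStream API. A minimal sketch follows; StandardAnalyzer stands in for FieldAnalyzer so the snippet compiles against stock Lucene 4.7, and the field name and input text are made up.

```java
import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;

public class TokenStreamSketch {

    public static void main(String[] args) throws IOException {
        // StandardAnalyzer stands in for the project's FieldAnalyzer;
        // both take the Lucene match version in the 4.7 API
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_47);
        TokenStream stream = analyzer.tokenStream("product", new StringReader("Spring Framework Core"));
        CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
            System.out.println(term.toString());
        }
        stream.end();
        stream.close();
    }
}
```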
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/LuceneUtils.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/LuceneUtils.java
index 3a44c6485..cbe6f1d32 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/LuceneUtils.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/LuceneUtils.java
@@ -29,10 +29,10 @@ import org.apache.lucene.util.Version;
public final class LuceneUtils {
/**
- * The current version of Lucene being used. Declaring this one place so an upgrade doesn't require hunting through
- * the code base.
+ * The current version of Lucene being used. Declaring this one place so an upgrade doesn't require hunting through the code
+ * base.
*/
- public static final Version CURRENT_VERSION = Version.LATEST;
+ public static final Version CURRENT_VERSION = Version.LUCENE_47;
/**
* Private constructor as this is a utility class.
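
A short sketch of why declaring the version in one place pays off: every version-aware constructor in a Lucene 4.7 token chain references the same constant, so upgrading Lucene is a one-line change. The chain below uses stock Lucene classes only; it is an illustration, not the project's filter chain.

```java
import java.io.Reader;
import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.util.Version;

public class VersionPinSketch {

    // declared once, referenced by every version-aware constructor
    public static final Version CURRENT_VERSION = Version.LUCENE_47;

    public static TokenStream buildChain(Reader reader) {
        TokenStream stream = new WhitespaceTokenizer(CURRENT_VERSION, reader);
        stream = new LowerCaseFilter(CURRENT_VERSION, stream);
        stream = new StopFilter(CURRENT_VERSION, stream, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
        return stream;
    }

    public static void main(String[] args) {
        // bumping CURRENT_VERSION is the only change needed to upgrade the chain
        buildChain(new StringReader("The Spring Framework Core"));
    }
}
```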
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/SearchFieldAnalyzer.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/SearchFieldAnalyzer.java
index 72880fedf..634287f5f 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/SearchFieldAnalyzer.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/SearchFieldAnalyzer.java
@@ -25,6 +25,7 @@ import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
+import org.apache.lucene.util.Version;
/**
* A Lucene field analyzer used to analyze queries against the CPE data.
@@ -34,16 +35,22 @@ import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
public class SearchFieldAnalyzer extends Analyzer {
/**
- * A local reference to the TokenPairConcatenatingFilter so that we can clear any left over state if this analyzer
- * is re-used.
+ * The Lucene Version used.
+ */
+ private final Version version;
+ /**
+ * A local reference to the TokenPairConcatenatingFilter so that we can clear any left over state if this analyzer is re-used.
*/
private TokenPairConcatenatingFilter concatenatingFilter;
/**
* Constructs a new SearchFieldAnalyzer.
*
+ * @param version the Lucene version
*/
- public SearchFieldAnalyzer() { }
+ public SearchFieldAnalyzer(Version version) {
+ this.version = version;
+ }
/**
* Creates the TokenStreamComponents used to analyze the stream.
@@ -54,7 +61,7 @@ public class SearchFieldAnalyzer extends Analyzer {
*/
@Override
protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
- final Tokenizer source = new AlphaNumericTokenizer(reader);
+ final Tokenizer source = new AlphaNumericTokenizer(version, reader);
TokenStream stream = source;
@@ -66,19 +73,18 @@ public class SearchFieldAnalyzer extends Analyzer {
| WordDelimiterFilter.SPLIT_ON_NUMERICS
| WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null);
- stream = new LowerCaseFilter(stream);
+ stream = new LowerCaseFilter(version, stream);
stream = new UrlTokenizingFilter(stream);
concatenatingFilter = new TokenPairConcatenatingFilter(stream);
stream = concatenatingFilter;
- stream = new StopFilter(stream, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+ stream = new StopFilter(version, stream, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
return new TokenStreamComponents(source, stream);
}
/**
*
- * Resets the analyzer and clears any internal state data that may have been left-over from previous uses of the
- * analyzer.
+ * Resets the analyzer and clears any internal state data that may have been left-over from previous uses of the analyzer.
*
* If this analyzer is re-used this method must be called between uses.
*/
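
The reuse note in that Javadoc exists because Lucene analyzers cache their TokenStreamComponents per thread, so a stateful filter such as TokenPairConcatenatingFilter can carry tokens from one query into the next. A hedged sketch of the reset pattern follows; the method name clear() is assumed from the description above and is not shown in this patch.

```java
import java.io.IOException;

import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.Query;
import org.owasp.dependencycheck.data.lucene.SearchFieldAnalyzer;

public class SearchAnalyzerReuseSketch {

    public static Query[] parseTwice(QueryParser parser,
                                     SearchFieldAnalyzer productAnalyzer,
                                     SearchFieldAnalyzer vendorAnalyzer)
            throws ParseException, IOException {
        final Query first = parser.parse("product:(spring framework) vendor:(springsource)");
        // assumed reset method per the Javadoc above: drops token-pair state
        // left over from the previous parse before the analyzers are reused
        productAnalyzer.clear();
        vendorAnalyzer.clear();
        final Query second = parser.parse("product:(struts) vendor:(apache)");
        return new Query[]{first, second};
    }
}
```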
diff --git a/dependency-check-core/src/test/java/org/owasp/dependencycheck/data/lucene/FieldAnalyzerTest.java b/dependency-check-core/src/test/java/org/owasp/dependencycheck/data/lucene/FieldAnalyzerTest.java
index 13ccded65..6fae2382f 100644
--- a/dependency-check-core/src/test/java/org/owasp/dependencycheck/data/lucene/FieldAnalyzerTest.java
+++ b/dependency-check-core/src/test/java/org/owasp/dependencycheck/data/lucene/FieldAnalyzerTest.java
@@ -69,7 +69,7 @@ public class FieldAnalyzerTest {
@Test
public void testAnalyzers() throws Exception {
- Analyzer analyzer = new FieldAnalyzer();
+ Analyzer analyzer = new FieldAnalyzer(LuceneUtils.CURRENT_VERSION);
Directory index = new RAMDirectory();
String field1 = "product";
@@ -83,13 +83,13 @@ public class FieldAnalyzerTest {
//Analyzer searchingAnalyzer = new SearchFieldAnalyzer(LuceneUtils.CURRENT_VERSION);
String querystr = "product:\"(Spring Framework Core)\" vendor:(SpringSource)";
- SearchFieldAnalyzer searchAnalyzerProduct = new SearchFieldAnalyzer();
- SearchFieldAnalyzer searchAnalyzerVendor = new SearchFieldAnalyzer();
+ SearchFieldAnalyzer searchAnalyzerProduct = new SearchFieldAnalyzer(LuceneUtils.CURRENT_VERSION);
+ SearchFieldAnalyzer searchAnalyzerVendor = new SearchFieldAnalyzer(LuceneUtils.CURRENT_VERSION);
HashMap<String, Analyzer> map = new HashMap<String, Analyzer>();
map.put(field1, searchAnalyzerProduct);
map.put(field2, searchAnalyzerVendor);
- PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new StandardAnalyzer(), map);
- QueryParser parser = new QueryParser(field1, wrapper);
+ PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new StandardAnalyzer(LuceneUtils.CURRENT_VERSION), map);
+ QueryParser parser = new QueryParser(LuceneUtils.CURRENT_VERSION, field1, wrapper);
Query q = parser.parse(querystr);
//System.out.println(q.toString());
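
Rounding out the test fragment above, a minimal end-to-end sketch of indexing and querying with the version-parameterized Lucene 4.7 API. StandardAnalyzer stands in for the project's analyzers, and the field names and sample values are illustrative.

```java
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class IndexAndSearchSketch {

    public static void main(String[] args) throws Exception {
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_47);
        Directory index = new RAMDirectory();

        // index a single document with product and vendor fields
        IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_47, analyzer);
        IndexWriter writer = new IndexWriter(index, conf);
        Document doc = new Document();
        doc.add(new TextField("product", "spring framework core", Field.Store.YES));
        doc.add(new TextField("vendor", "springsource", Field.Store.YES));
        writer.addDocument(doc);
        writer.close();

        // parse and run a query against the same analyzer
        QueryParser parser = new QueryParser(Version.LUCENE_47, "product", analyzer);
        Query q = parser.parse("product:(spring framework core) vendor:(springsource)");
        IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(index));
        TopDocs hits = searcher.search(q, 10);
        System.out.println("hits: " + hits.totalHits);
    }
}
```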
diff --git a/dependency-check-core/src/test/java/org/owasp/dependencycheck/data/lucene/TokenPairConcatenatingFilterTest.java b/dependency-check-core/src/test/java/org/owasp/dependencycheck/data/lucene/TokenPairConcatenatingFilterTest.java
index 90c43829b..c2ac23d79 100644
--- a/dependency-check-core/src/test/java/org/owasp/dependencycheck/data/lucene/TokenPairConcatenatingFilterTest.java
+++ b/dependency-check-core/src/test/java/org/owasp/dependencycheck/data/lucene/TokenPairConcatenatingFilterTest.java
@@ -61,7 +61,7 @@ public class TokenPairConcatenatingFilterTest extends BaseTokenStreamTestCase {
* test some examples
*/
public void testExamples() throws IOException {
- Tokenizer wsTokenizer = new WhitespaceTokenizer(new StringReader("one two three"));
+ Tokenizer wsTokenizer = new WhitespaceTokenizer(LuceneUtils.CURRENT_VERSION, new StringReader("one two three"));
TokenStream filter = new TokenPairConcatenatingFilter(wsTokenizer);
assertTokenStreamContents(filter,
new String[]{"one", "onetwo", "two", "twothree", "three"});
@@ -75,7 +75,7 @@ public class TokenPairConcatenatingFilterTest extends BaseTokenStreamTestCase {
@Test
public void testClear() throws IOException {
- TokenStream ts = new WhitespaceTokenizer(new StringReader("one two three"));
+ TokenStream ts = new WhitespaceTokenizer(LuceneUtils.CURRENT_VERSION, new StringReader("one two three"));
TokenPairConcatenatingFilter filter = new TokenPairConcatenatingFilter(ts);
assertTokenStreamContents(filter, new String[]{"one", "onetwo", "two", "twothree", "three"});
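
For readers without the Lucene test framework on the classpath, the same expectation can be checked by consuming the filter by hand. TokenPairConcatenatingFilter is the project's own class as used above; everything else is stock Lucene 4.7.

```java
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;
import org.owasp.dependencycheck.data.lucene.TokenPairConcatenatingFilter;

public class TokenPairSketch {

    public static void main(String[] args) throws Exception {
        TokenStream ts = new WhitespaceTokenizer(Version.LUCENE_47, new StringReader("one two three"));
        TokenStream filter = new TokenPairConcatenatingFilter(ts);
        CharTermAttribute term = filter.addAttribute(CharTermAttribute.class);

        List<String> tokens = new ArrayList<String>();
        filter.reset();
        while (filter.incrementToken()) {
            tokens.add(term.toString());
        }
        filter.end();
        filter.close();

        // expected: [one, onetwo, two, twothree, three]
        System.out.println(tokens);
    }
}
```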