Reverted to Lucene 4.7.2: newer Lucene releases are built with JDK 1.7+ and break the dependency-check Maven Plugin.

Former-commit-id: 514cc4922c7f00f55b1dcd102f6d45491e90a5d8
Author: Jeremy Long
Date: 2015-03-01 21:43:52 -05:00
parent 09f1a0ac92
commit ff0daa8d66
8 changed files with 65 additions and 34 deletions
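
Background: Lucene 4.x analyzers, tokenizers, and query parsers take an explicit Version argument so tokenization behavior can be pinned across upgrades; Lucene 4.8 was the first release to require Java 7 and to offer Version-free constructors, which is why this revert adds LuceneUtils.CURRENT_VERSION back at every call site below. A minimal sketch of the 4.x pattern follows, assuming LuceneUtils.CURRENT_VERSION resolves to Version.LUCENE_47 after the revert (an illustration, not code from this commit):

import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Version;

public class VersionedAnalyzerSketch {

    // Assumption: LuceneUtils.CURRENT_VERSION in dependency-check resolves
    // to this constant after the revert to Lucene 4.7.2.
    private static final Version LUCENE_VERSION = Version.LUCENE_47;

    public static void main(String[] args) throws IOException, ParseException {
        // Lucene 4.7 constructors take the Version explicitly; the no-arg
        // forms removed by this commit only exist in later releases.
        StandardAnalyzer analyzer = new StandardAnalyzer(LUCENE_VERSION);

        // Tokenize a sample field value and print each term. Plain close()
        // calls instead of try-with-resources keep this valid on JDK 1.6,
        // the compatibility constraint driving the revert.
        TokenStream ts = analyzer.tokenStream("product", new StringReader("Spring Framework Core"));
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
            System.out.println(term.toString());
        }
        ts.end();
        ts.close();

        // The classic QueryParser likewise takes the Version first in 4.x.
        QueryParser parser = new QueryParser(LUCENE_VERSION, "product", analyzer);
        Query q = parser.parse("product:\"Spring Framework Core\"");
        System.out.println(q.toString());

        analyzer.close();
    }
}

The Version pin is what keeps index- and query-time tokenization reproducible when the library is upgraded underneath an existing index.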

FieldAnalyzerTest.java

@@ -69,7 +69,7 @@ public class FieldAnalyzerTest {
     @Test
     public void testAnalyzers() throws Exception {
-        Analyzer analyzer = new FieldAnalyzer();
+        Analyzer analyzer = new FieldAnalyzer(LuceneUtils.CURRENT_VERSION);
         Directory index = new RAMDirectory();
         String field1 = "product";
@@ -83,13 +83,13 @@ public class FieldAnalyzerTest {
         //Analyzer searchingAnalyzer = new SearchFieldAnalyzer(LuceneUtils.CURRENT_VERSION);
         String querystr = "product:\"(Spring Framework Core)\" vendor:(SpringSource)";
-        SearchFieldAnalyzer searchAnalyzerProduct = new SearchFieldAnalyzer();
-        SearchFieldAnalyzer searchAnalyzerVendor = new SearchFieldAnalyzer();
+        SearchFieldAnalyzer searchAnalyzerProduct = new SearchFieldAnalyzer(LuceneUtils.CURRENT_VERSION);
+        SearchFieldAnalyzer searchAnalyzerVendor = new SearchFieldAnalyzer(LuceneUtils.CURRENT_VERSION);
         HashMap<String, Analyzer> map = new HashMap<String, Analyzer>();
         map.put(field1, searchAnalyzerProduct);
         map.put(field2, searchAnalyzerVendor);
-        PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new StandardAnalyzer(), map);
-        QueryParser parser = new QueryParser(field1, wrapper);
+        PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new StandardAnalyzer(LuceneUtils.CURRENT_VERSION), map);
+        QueryParser parser = new QueryParser(LuceneUtils.CURRENT_VERSION, field1, wrapper);
         Query q = parser.parse(querystr);
         //System.out.println(q.toString());

TokenPairConcatenatingFilterTest.java

@@ -61,7 +61,7 @@ public class TokenPairConcatenatingFilterTest extends BaseTokenStreamTestCase {
      * test some examples
      */
     public void testExamples() throws IOException {
-        Tokenizer wsTokenizer = new WhitespaceTokenizer(new StringReader("one two three"));
+        Tokenizer wsTokenizer = new WhitespaceTokenizer(LuceneUtils.CURRENT_VERSION, new StringReader("one two three"));
         TokenStream filter = new TokenPairConcatenatingFilter(wsTokenizer);
         assertTokenStreamContents(filter,
                 new String[]{"one", "onetwo", "two", "twothree", "three"});
@@ -75,7 +75,7 @@ public class TokenPairConcatenatingFilterTest extends BaseTokenStreamTestCase {
     @Test
     public void testClear() throws IOException {
-        TokenStream ts = new WhitespaceTokenizer(new StringReader("one two three"));
+        TokenStream ts = new WhitespaceTokenizer(LuceneUtils.CURRENT_VERSION, new StringReader("one two three"));
         TokenPairConcatenatingFilter filter = new TokenPairConcatenatingFilter(ts);
         assertTokenStreamContents(filter, new String[]{"one", "onetwo", "two", "twothree", "three"});