Mirror of https://github.com/ysoftdevs/DependencyCheck.git, synced 2026-01-14 15:53:36 +01:00
Updated Lucene version handling: replaced hard-coded Version.LUCENE_43 references with LuceneUtils.CURRENT_VERSION in the test classes
Former-commit-id: cb826e6fac1b2ba1bd04b68b0929b3dc7ec0b22f
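The hunks below (from CPEAnalyzerTest, FieldAnalyzerTest, and TokenPairConcatenatingFilterTest) replace hard-coded Version.LUCENE_43 references with a shared LuceneUtils.CURRENT_VERSION constant, so the Lucene compatibility version is declared in exactly one place. As a minimal sketch, assuming LuceneUtils is a plain utility holder for that constant (the repository's actual class, package, and target Lucene release may differ):

// Hypothetical sketch -- not the repository's actual implementation.
import org.apache.lucene.util.Version;

public final class LuceneUtils {

    /**
     * The Lucene compatibility version used throughout the project.
     * Upgrading Lucene only requires bumping this one constant instead of
     * editing every analyzer, IndexWriterConfig, and QueryParser call site.
     * LUCENE_43 is a placeholder here; the project may target a newer release.
     */
    public static final Version CURRENT_VERSION = Version.LUCENE_43;

    private LuceneUtils() {
        // static utility holder, never instantiated
    }
}

With such a constant in place, moving to a newer Lucene release becomes a one-line change rather than an edit at every call site touched by the diff below.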
@@ -101,21 +101,6 @@ public class CPEAnalyzerTest extends BaseIndexTestCase {
         Assert.assertTrue(expResult.equals(queryText));
     }
 
-    /**
-     * Test of open method, of class CPEAnalyzer.
-     *
-     * @throws Exception is thrown when an exception occurs
-     */
-    @Test
-    public void testOpen() throws Exception {
-        CPEAnalyzer instance = new CPEAnalyzer();
-        Assert.assertFalse(instance.isOpen());
-        instance.open();
-        Assert.assertTrue(instance.isOpen());
-        instance.close();
-        Assert.assertFalse(instance.isOpen());
-    }
-
     /**
      * Test of determineCPE method, of class CPEAnalyzer.
      *
@@ -72,7 +72,7 @@ public class FieldAnalyzerTest {
     @Test
     public void testAnalyzers() throws Exception {
 
-        Analyzer analyzer = new FieldAnalyzer(Version.LUCENE_43);
+        Analyzer analyzer = new FieldAnalyzer(LuceneUtils.CURRENT_VERSION);
         Directory index = new RAMDirectory();
 
         String field1 = "product";
@@ -83,16 +83,16 @@ public class FieldAnalyzerTest {
 
         createIndex(analyzer, index, field1, text1, field2, text2);
 
-        //Analyzer searchingAnalyzer = new SearchFieldAnalyzer(Version.LUCENE_43);
+        //Analyzer searchingAnalyzer = new SearchFieldAnalyzer(LuceneUtils.CURRENT_VERSION);
         String querystr = "product:\"(Spring Framework Core)\" vendor:(SpringSource)";
 
-        SearchFieldAnalyzer searchAnalyzerProduct = new SearchFieldAnalyzer(Version.LUCENE_43);
-        SearchFieldAnalyzer searchAnalyzerVendor = new SearchFieldAnalyzer(Version.LUCENE_43);
+        SearchFieldAnalyzer searchAnalyzerProduct = new SearchFieldAnalyzer(LuceneUtils.CURRENT_VERSION);
+        SearchFieldAnalyzer searchAnalyzerVendor = new SearchFieldAnalyzer(LuceneUtils.CURRENT_VERSION);
         HashMap<String, Analyzer> map = new HashMap<String, Analyzer>();
         map.put(field1, searchAnalyzerProduct);
         map.put(field2, searchAnalyzerVendor);
-        PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new StandardAnalyzer(Version.LUCENE_43), map);
-        QueryParser parser = new QueryParser(Version.LUCENE_43, field1, wrapper);
+        PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new StandardAnalyzer(LuceneUtils.CURRENT_VERSION), map);
+        QueryParser parser = new QueryParser(LuceneUtils.CURRENT_VERSION, field1, wrapper);
 
         Query q = parser.parse(querystr);
         //System.out.println(q.toString());
@@ -116,7 +116,7 @@ public class FieldAnalyzerTest {
     }
 
     private void createIndex(Analyzer analyzer, Directory index, String field1, String text1, String field2, String text2) throws IOException {
-        IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_43, analyzer);
+        IndexWriterConfig config = new IndexWriterConfig(LuceneUtils.CURRENT_VERSION, analyzer);
         IndexWriter w = new IndexWriter(index, config);
         addDoc(w, field1, text1, field2, text2);
         w.close();
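The FieldAnalyzerTest hunks above pass the shared constant into analyzers, the IndexWriterConfig, and the QueryParser. For context, here is a hypothetical, self-contained Lucene 4.x analyzer showing why those constructors take a Version argument at all: the value is threaded through to the tokenizer/filter chain. This is an illustration only, not the repository's actual FieldAnalyzer or SearchFieldAnalyzer:

// Hypothetical illustration only -- NOT DependencyCheck's FieldAnalyzer.
import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.util.Version;

public final class VersionedAnalyzer extends Analyzer {

    private final Version version;

    public VersionedAnalyzer(Version version) {
        this.version = version;
    }

    @Override
    protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
        // The Version constant controls the backwards-compatibility behavior
        // of the tokenizer/filter chain; passing one shared constant keeps
        // every analyzer, writer, and parser in the project on the same level.
        Tokenizer source = new WhitespaceTokenizer(version, reader);
        TokenStream result = new LowerCaseFilter(version, source);
        return new TokenStreamComponents(source, result);
    }
}

Threading the version through the constructor rather than hard-coding it inside createComponents is what makes a single-constant change in LuceneUtils sufficient for a Lucene upgrade.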
@@ -53,7 +53,7 @@ public class TokenPairConcatenatingFilterTest extends BaseTokenStreamTestCase {
      * test some examples
      */
     public void testExamples() throws IOException {
-        Tokenizer wsTokenizer = new WhitespaceTokenizer(Version.LUCENE_43, new StringReader("one two three"));
+        Tokenizer wsTokenizer = new WhitespaceTokenizer(LuceneUtils.CURRENT_VERSION, new StringReader("one two three"));
         TokenStream filter = new TokenPairConcatenatingFilter(wsTokenizer);
         assertTokenStreamContents(filter,
                 new String[]{"one", "onetwo", "two", "twothree", "three"});
@@ -65,7 +65,7 @@ public class TokenPairConcatenatingFilterTest extends BaseTokenStreamTestCase {
     @Test
     public void testClear() throws IOException {
 
-        TokenStream ts = new WhitespaceTokenizer(Version.LUCENE_43, new StringReader("one two three"));
+        TokenStream ts = new WhitespaceTokenizer(LuceneUtils.CURRENT_VERSION, new StringReader("one two three"));
        TokenPairConcatenatingFilter filter = new TokenPairConcatenatingFilter(ts);
         assertTokenStreamContents(filter, new String[]{"one", "onetwo", "two", "twothree", "three"});
 