Mirror of https://github.com/ysoftdevs/DependencyCheck.git (synced 2026-01-14 15:53:36 +01:00)

updates and bug fixes for CPE data

Former-commit-id: 367da7219f76f370e87aa410d70a83553e71d828
@@ -18,7 +18,6 @@ package org.codesecure.dependencycheck;
|
||||
* Copyright (c) 2012 Jeremy Long. All Rights Reserved.
|
||||
*/
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
@@ -26,13 +25,10 @@ import java.util.List;
|
||||
import java.util.logging.Level;
|
||||
import java.util.logging.LogManager;
|
||||
import java.util.logging.Logger;
|
||||
import javax.xml.parsers.ParserConfigurationException;
|
||||
import org.apache.commons.cli.ParseException;
|
||||
import org.codesecure.dependencycheck.data.cpe.xml.Importer;
|
||||
import org.codesecure.dependencycheck.reporting.ReportGenerator;
|
||||
import org.codesecure.dependencycheck.dependency.Dependency;
|
||||
import org.codesecure.dependencycheck.utils.CliParser;
|
||||
import org.xml.sax.SAXException;
|
||||
|
||||
/*
|
||||
* This file is part of App.
|
||||
@@ -73,11 +69,12 @@ public class App {
|
||||
//while the JavaDoc for JUL says to use the Preferences API - it throws an exception...
//Preferences.systemRoot().put("java.util.logging.config.file", "log.properties");
|
||||
//System.getProperties().put("java.util.logging.config.file", "configuration/log.properties");
|
||||
File dir = new File("logs");
|
||||
|
||||
if (!dir.exists()) {
|
||||
dir.mkdir();
|
||||
}
|
||||
//removed the file handler. since this is a console app - just write to console.
|
||||
// File dir = new File("logs");
|
||||
// if (!dir.exists()) {
|
||||
// dir.mkdir();
|
||||
// }
|
||||
try {
|
||||
InputStream in = App.class.getClassLoader().getResourceAsStream(LOG_PROPERTIES_FILE);
|
||||
LogManager.getLogManager().reset();
|
||||
@@ -114,8 +111,6 @@ public class App {
|
||||
|
||||
if (cli.isGetVersion()) {
|
||||
cli.printVersionInfo();
|
||||
} else if (cli.isLoadCPE()) {
|
||||
loadCPE(cli.getCpeFile());
|
||||
} else if (cli.isRunScan()) {
|
||||
runScan(cli.getReportDirectory(), cli.getApplicationName(), cli.getScanFiles(), cli.isAutoUpdate());
|
||||
} else {
|
||||
@@ -124,23 +119,6 @@ public class App {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads the specified CPE XML file into the Lucene index.
*
* @param cpePath the path to the CPE XML file to import
*/
|
||||
private void loadCPE(String cpePath) {
|
||||
try {
|
||||
Importer.importXML(cpePath);
|
||||
} catch (ParserConfigurationException ex) {
|
||||
Logger.getLogger(App.class.getName()).log(Level.SEVERE, null, ex);
|
||||
} catch (SAXException ex) {
|
||||
Logger.getLogger(App.class.getName()).log(Level.SEVERE, null, ex);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(App.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Scans the specified directories and writes the dependency reports to the
|
||||
* reportDirectory.
|
||||
|
||||
@@ -21,7 +21,6 @@ package org.codesecure.dependencycheck.analyzer;
|
||||
import org.codesecure.dependencycheck.dependency.Dependency;
|
||||
import org.codesecure.dependencycheck.dependency.Evidence;
|
||||
import java.util.Set;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
/**
|
||||
*
|
||||
@@ -83,38 +82,7 @@ public class FileNameAnalyzer implements Analyzer {
|
||||
}
|
||||
|
||||
/**
|
||||
* An enumeration to keep track of the characters in a string as it is being
|
||||
* read in one character at a time.
|
||||
*/
|
||||
private enum STRING_STATE {
|
||||
|
||||
ALPHA,
|
||||
NUMBER,
|
||||
PERIOD,
|
||||
OTHER
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines the type of the character passed in.
*
|
||||
* @param c a character
|
||||
* @return a STRING_STATE representing whether the character is number,
|
||||
* alpha, or other.
|
||||
*/
|
||||
private STRING_STATE determineState(char c) {
|
||||
if (c >= '0' && c <= '9') {
|
||||
return STRING_STATE.NUMBER;
|
||||
} else if (c == '.') {
|
||||
return STRING_STATE.PERIOD;
|
||||
} else if (c >= 'a' && c <= 'z') {
|
||||
return STRING_STATE.ALPHA;
|
||||
} else {
|
||||
return STRING_STATE.OTHER;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Collects information about the file such as hashsums.
|
||||
* Collects information about the file name.
|
||||
*
|
||||
* @param dependency the dependency to analyze.
|
||||
* @throws AnalysisException is thrown if there is an error reading the JAR
|
||||
@@ -122,48 +90,21 @@ public class FileNameAnalyzer implements Analyzer {
|
||||
*/
|
||||
public void analyze(Dependency dependency) throws AnalysisException {
|
||||
|
||||
analyzeFileName(dependency);
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Analyzes the filename of the dependency and adds it to the evidence
|
||||
* collections.
|
||||
*
|
||||
* @param dependency the dependency to analyze.
|
||||
*/
|
||||
private void analyzeFileName(Dependency dependency) {
|
||||
String fileName = dependency.getFileName();
|
||||
//slightly process the filename to chunk it into distinct words, numbers.
|
||||
// Yes, the lucene analyzer might do this, but I want a little better control
|
||||
// over the process.
|
||||
String fileNameEvidence = fileName.substring(0, fileName.length() - 4).toLowerCase().replace('-', ' ').replace('_', ' ');
|
||||
StringBuilder sb = new StringBuilder(fileNameEvidence.length());
|
||||
STRING_STATE state = determineState(fileNameEvidence.charAt(0));
|
||||
|
||||
for (int i = 0; i < fileNameEvidence.length(); i++) {
|
||||
char c = fileNameEvidence.charAt(i);
|
||||
STRING_STATE newState = determineState(c);
|
||||
if (newState != state) {
|
||||
if ((state != STRING_STATE.NUMBER && newState == STRING_STATE.PERIOD)
|
||||
|| (state == STRING_STATE.PERIOD && newState != STRING_STATE.NUMBER)
|
||||
|| (state == STRING_STATE.ALPHA || newState == STRING_STATE.ALPHA)
|
||||
|| ((state == STRING_STATE.OTHER || newState == STRING_STATE.OTHER) && c != ' ')) {
|
||||
sb.append(' ');
|
||||
}
|
||||
}
|
||||
state = newState;
|
||||
sb.append(c);
|
||||
int pos = fileName.lastIndexOf(".");
|
||||
if (pos > 0) {
|
||||
fileName = fileName.substring(0, pos - 1);
|
||||
}
|
||||
Pattern rx = Pattern.compile("\\s\\s+");
|
||||
fileNameEvidence = rx.matcher(sb.toString()).replaceAll(" ");
|
||||
|
||||
dependency.getProductEvidence().addEvidence("file", "name",
|
||||
fileNameEvidence, Evidence.Confidence.HIGH);
|
||||
fileName, Evidence.Confidence.HIGH);
|
||||
|
||||
dependency.getVendorEvidence().addEvidence("file", "name",
|
||||
fileNameEvidence, Evidence.Confidence.HIGH);
|
||||
if (fileNameEvidence.matches(".*\\d.*")) {
|
||||
fileName, Evidence.Confidence.HIGH);
|
||||
|
||||
if (fileName.matches(".*\\d.*")) {
|
||||
dependency.getVersionEvidence().addEvidence("file", "name",
|
||||
fileNameEvidence, Evidence.Confidence.HIGH);
|
||||
fileName, Evidence.Confidence.HIGH);
|
||||
}
|
||||
}
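For illustration, a rough standalone sketch of the filename chunking described in the comments above. This is not the analyzer's exact code - the real implementation uses the STRING_STATE machine shown in this hunk; the regex-based split and the example file name below are simplifying assumptions.

import java.util.regex.Pattern;

public class FileNameChunkSketch {

    public static void main(String[] args) {
        String fileName = "log4j-1.2.17.jar"; // hypothetical input
        // drop the extension, lower case, and treat '-' and '_' as word separators
        String text = fileName.substring(0, fileName.lastIndexOf('.'))
                .toLowerCase().replace('-', ' ').replace('_', ' ');
        // insert a space wherever a letter touches a digit (approximates the state machine)
        text = text.replaceAll("(?<=[a-z])(?=[0-9])|(?<=[0-9])(?=[a-z])", " ");
        // collapse runs of whitespace, as the Pattern("\\s\\s+") step in the analyzer does
        text = Pattern.compile("\\s\\s+").matcher(text).replaceAll(" ");
        System.out.println(text); // prints: log 4 j 1.2.17
    }
}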
|
||||
|
||||
|
||||
@@ -24,18 +24,11 @@ import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.StringTokenizer;
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.index.CorruptIndexException;
|
||||
import org.apache.lucene.queryparser.classic.ParseException;
|
||||
import org.apache.lucene.queryparser.classic.QueryParser;
|
||||
//TODO convert to the analyzing query parser
|
||||
//import org.apache.lucene.queryparser.analyzing.AnalyzingQueryParser;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.apache.lucene.search.TopDocs;
|
||||
import org.apache.lucene.util.Version;
|
||||
import org.codesecure.dependencycheck.analyzer.AnalysisException;
|
||||
import org.codesecure.dependencycheck.analyzer.AnalysisPhase;
|
||||
import org.codesecure.dependencycheck.data.lucene.LuceneUtils;
|
||||
@@ -80,14 +73,6 @@ public class CPEAnalyzer implements org.codesecure.dependencycheck.analyzer.Anal
|
||||
* The CPE Index.
|
||||
*/
|
||||
protected Index cpe = null;
|
||||
/**
|
||||
* The Lucene IndexSearcher.
|
||||
*/
|
||||
private IndexSearcher indexSearcher = null;
|
||||
/**
|
||||
* The Lucene QueryParser.
|
||||
*/
|
||||
private QueryParser queryParser = null;
|
||||
|
||||
/**
|
||||
* Opens the data source.
|
||||
@@ -98,17 +83,12 @@ public class CPEAnalyzer implements org.codesecure.dependencycheck.analyzer.Anal
|
||||
public void open() throws IOException {
|
||||
cpe = new Index();
|
||||
cpe.open();
|
||||
indexSearcher = cpe.getIndexSearcher();
|
||||
Analyzer analyzer = cpe.getAnalyzer();
|
||||
queryParser = new QueryParser(Version.LUCENE_40, Fields.NAME, analyzer);
|
||||
}
|
||||
|
||||
/**
|
||||
* Closes the data source.
|
||||
*/
|
||||
public void close() {
|
||||
queryParser = null;
|
||||
indexSearcher = null;
|
||||
cpe.close();
|
||||
}
|
||||
|
||||
@@ -232,11 +212,7 @@ public class CPEAnalyzer implements org.codesecure.dependencycheck.analyzer.Anal
|
||||
value = value.substring(8).replaceAll("\\.", " ");
|
||||
}
|
||||
if (sb.indexOf(value) < 0) {
|
||||
// if (value.length() > 200) {
|
||||
// sb.append(value.substring(0, 200)).append(' ');
|
||||
// } else {
|
||||
sb.append(value).append(' ');
|
||||
// }
|
||||
}
|
||||
}
|
||||
return sb.toString();
|
||||
@@ -257,23 +233,6 @@ public class CPEAnalyzer implements org.codesecure.dependencycheck.analyzer.Anal
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Searches the Lucene CPE index to identify possible CPE entries associated
|
||||
* with the supplied vendor, product, and version.
|
||||
*
|
||||
* @param vendor the text used to search the vendor field.
|
||||
* @param product the text used to search the product field.
|
||||
* @param version the text used to search the version field.
|
||||
* @return a list of possible CPE values.
|
||||
* @throws CorruptIndexException when the Lucene index is corrupt.
|
||||
* @throws IOException when the Lucene index is not found.
|
||||
* @throws ParseException when the generated query is not valid.
|
||||
*/
|
||||
protected List<Entry> searchCPE(String vendor, String product, String version)
|
||||
throws CorruptIndexException, IOException, ParseException {
|
||||
return searchCPE(vendor, product, version, null, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* <p>Searches the Lucene CPE index to identify possible CPE entries
|
||||
* associated with the supplied vendor, product, and version.</p>
|
||||
@@ -302,10 +261,9 @@ public class CPEAnalyzer implements org.codesecure.dependencycheck.analyzer.Anal
|
||||
if (searchString == null) {
|
||||
return ret;
|
||||
}
|
||||
Query query = queryParser.parse(searchString);
|
||||
TopDocs docs = indexSearcher.search(query, MAX_QUERY_RESULTS);
|
||||
TopDocs docs = cpe.search(searchString, MAX_QUERY_RESULTS);
|
||||
for (ScoreDoc d : docs.scoreDocs) {
|
||||
Document doc = indexSearcher.doc(d.doc);
|
||||
Document doc = cpe.getDocument(d.doc);
|
||||
Entry entry = Entry.parse(doc);
|
||||
entry.setSearchScore(d.score);
|
||||
if (!ret.contains(entry)) {
|
||||
@@ -343,11 +301,11 @@ public class CPEAnalyzer implements org.codesecure.dependencycheck.analyzer.Anal
|
||||
return null;
|
||||
}
|
||||
|
||||
if (!appendWeightedSearch(sb, Fields.PRODUCT, product.toLowerCase(), produdctWeightings)) {
|
||||
if (!appendWeightedSearch(sb, Fields.PRODUCT, product, produdctWeightings)) {
|
||||
return null;
|
||||
}
|
||||
sb.append(" AND ");
|
||||
if (!appendWeightedSearch(sb, Fields.VENDOR, vendor.toLowerCase(), vendorWeighting)) {
|
||||
if (!appendWeightedSearch(sb, Fields.VENDOR, vendor, vendorWeighting)) {
|
||||
return null;
|
||||
}
|
||||
sb.append(" AND ");
|
||||
|
||||
@@ -19,54 +19,28 @@ package org.codesecure.dependencycheck.data.cpe;
|
||||
*/
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
import java.io.OutputStreamWriter;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.net.URLDecoder;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.logging.Level;
|
||||
import java.util.logging.Logger;
|
||||
import javax.xml.parsers.ParserConfigurationException;
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.core.KeywordAnalyzer;
|
||||
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
|
||||
import org.apache.lucene.analysis.standard.StandardAnalyzer;
|
||||
import org.apache.lucene.queryparser.classic.QueryParser;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.FSDirectory;
|
||||
import org.apache.lucene.util.Version;
|
||||
import org.codesecure.dependencycheck.data.lucene.AbstractIndex;
|
||||
import org.codesecure.dependencycheck.data.CachedWebDataSource;
|
||||
import org.codesecure.dependencycheck.data.UpdateException;
|
||||
import org.codesecure.dependencycheck.utils.Downloader;
|
||||
import org.codesecure.dependencycheck.utils.Settings;
|
||||
import org.codesecure.dependencycheck.data.cpe.xml.Importer;
|
||||
import org.codesecure.dependencycheck.utils.DownloadFailedException;
|
||||
import org.xml.sax.SAXException;
|
||||
import org.codesecure.dependencycheck.data.lucene.FieldAnalyzer;
|
||||
import org.codesecure.dependencycheck.data.lucene.SearchFieldAnalyzer;
|
||||
|
||||
/**
|
||||
* The Index class is used to maintain and query the CPE Index.
*
|
||||
* @author Jeremy Long (jeremy.long@gmail.com)
|
||||
*/
|
||||
public class Index extends AbstractIndex implements CachedWebDataSource {
|
||||
|
||||
/**
|
||||
* The name of the properties file containing the timestamp of the last
|
||||
* update.
|
||||
*/
|
||||
private static final String UPDATE_PROPERTIES_FILE = "lastupdated.prop";
|
||||
/**
|
||||
* The properties file key for the last updated field.
|
||||
*/
|
||||
private static final String LAST_UPDATED = "lastupdated";
|
||||
public class Index extends AbstractIndex {
|
||||
|
||||
/**
|
||||
* Returns the directory that holds the CPE Index.
|
||||
@@ -114,230 +88,59 @@ public class Index extends AbstractIndex implements CachedWebDataSource {
|
||||
* @return the CPE Analyzer.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public Analyzer createAnalyzer() {
|
||||
public Analyzer createIndexingAnalyzer() {
|
||||
Map fieldAnalyzers = new HashMap();
|
||||
|
||||
fieldAnalyzers.put(Fields.VERSION, new KeywordAnalyzer());
|
||||
fieldAnalyzers.put(Fields.NAME, new KeywordAnalyzer());
|
||||
|
||||
PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(
|
||||
new StandardAnalyzer(Version.LUCENE_40), fieldAnalyzers);
|
||||
new FieldAnalyzer(Version.LUCENE_40), fieldAnalyzers);
|
||||
|
||||
return wrapper;
|
||||
}
|
||||
private SearchFieldAnalyzer productSearchFieldAnalyzer = null;
|
||||
private SearchFieldAnalyzer vendorSearchFieldAnalyzer = null;
|
||||
|
||||
/**
|
||||
* Creates an Analyzer for searching the CPE Index.
|
||||
*
|
||||
* @return the CPE Analyzer.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public Analyzer createSearchingAnalyzer() {
|
||||
Map fieldAnalyzers = new HashMap();
|
||||
|
||||
fieldAnalyzers.put(Fields.VERSION, new KeywordAnalyzer());
|
||||
fieldAnalyzers.put(Fields.NAME, new KeywordAnalyzer());
|
||||
productSearchFieldAnalyzer = new SearchFieldAnalyzer(Version.LUCENE_40);
|
||||
vendorSearchFieldAnalyzer = new SearchFieldAnalyzer(Version.LUCENE_40);
|
||||
fieldAnalyzers.put(Fields.PRODUCT, productSearchFieldAnalyzer);
|
||||
fieldAnalyzers.put(Fields.VENDOR, vendorSearchFieldAnalyzer);
|
||||
|
||||
PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(
|
||||
new FieldAnalyzer(Version.LUCENE_40), fieldAnalyzers);
|
||||
|
||||
return wrapper;
|
||||
}
|
||||
|
||||
/**
|
||||
* Downloads the latest CPE XML file from the web and imports it into the
|
||||
* current CPE Index.
|
||||
*
|
||||
* @throws UpdateException is thrown if there is a problem updating the
|
||||
* index.
|
||||
*
|
||||
* @deprecated this should no longer be used as the raw CPE hosted at NIST is not complete enough.
|
||||
* Creates the Lucene QueryParser used when querying the index
|
||||
* @return a QueryParser.
|
||||
*/
|
||||
public void update() throws UpdateException {
|
||||
try {
|
||||
long timeStamp = updateNeeded();
|
||||
if (timeStamp > 0) {
|
||||
URL url = new URL(Settings.getString(Settings.KEYS.CPE_URL));
|
||||
Logger.getLogger(Index.class.getName()).log(Level.WARNING, "Updating CPE :" + url.toString());
|
||||
File outputPath = null;
|
||||
try {
|
||||
outputPath = File.createTempFile("cpe", ".xml");
|
||||
Downloader.fetchFile(url, outputPath, true);
|
||||
Importer.importXML(outputPath.toString());
|
||||
writeLastUpdatedPropertyFile(timeStamp);
|
||||
} catch (ParserConfigurationException ex) {
|
||||
//Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
} catch (SAXException ex) {
|
||||
//Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
} catch (IOException ex) {
|
||||
//Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
} finally {
|
||||
try {
|
||||
if (outputPath != null && outputPath.exists()) {
|
||||
outputPath.delete();
|
||||
}
|
||||
} finally {
|
||||
if (outputPath != null && outputPath.exists()) {
|
||||
outputPath.deleteOnExit();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (MalformedURLException ex) {
|
||||
//Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
} catch (DownloadFailedException ex) {
|
||||
//Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
}
|
||||
public QueryParser createQueryParser() {
|
||||
return new QueryParser(Version.LUCENE_40, Fields.NAME, getSearchingAnalyzer());
|
||||
}
|
||||
|
||||
/**
|
||||
* Writes a properties file containing the last updated date to the CPE
|
||||
* directory.
|
||||
*
|
||||
* @param timeStamp the timestamp to write.
|
||||
*
|
||||
* @deprecated this should no longer be used as the raw CPE hosted at NIST is not complete enough.
|
||||
* Resets the searching analyzers
|
||||
*/
|
||||
private void writeLastUpdatedPropertyFile(long timeStamp) throws UpdateException {
|
||||
String dir;
|
||||
try {
|
||||
dir = getDataDirectory().getCanonicalPath();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException("Unable to locate the last updated properties file.", ex);
|
||||
protected void resetSearchingAnalyzer() {
|
||||
if (productSearchFieldAnalyzer != null) {
|
||||
productSearchFieldAnalyzer.clear();
|
||||
}
|
||||
File cpeProp = new File(dir + File.separatorChar + UPDATE_PROPERTIES_FILE);
|
||||
Properties prop = new Properties();
|
||||
prop.put(Index.LAST_UPDATED, String.valueOf(timeStamp));
|
||||
OutputStream os = null;
|
||||
OutputStreamWriter out = null;
|
||||
try {
|
||||
os = new FileOutputStream(cpeProp);
|
||||
out = new OutputStreamWriter(os, "UTF-8");
|
||||
prop.store(out, dir);
|
||||
} catch (FileNotFoundException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
} finally {
|
||||
if (os != null) {
|
||||
try {
|
||||
os.flush();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
try {
|
||||
os.close();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
}
|
||||
if (vendorSearchFieldAnalyzer != null) {
|
||||
vendorSearchFieldAnalyzer.clear();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines if the index needs to be updated. This is done by fetching the
|
||||
* cpe.meta data and checking the lastModifiedDate. If the CPE data needs to
|
||||
* be refreshed this method will return the timestamp of the new CPE. If an
|
||||
* update is not required this function will return 0.
|
||||
*
|
||||
* @return the timestamp of the currently published CPE.xml if the index
|
||||
* needs to be updated; otherwise returns 0.
* @throws MalformedURLException is thrown if the URL for the CPE Meta data
|
||||
* is incorrect.
|
||||
* @throws DownloadFailedException is thrown if there is an error
|
||||
* downloading the cpe.meta data file.
|
||||
* @throws UpdateException is thrown if there is an error locating the last updated
|
||||
* properties file.
|
||||
*
|
||||
* @deprecated this should no longer be used as the raw CPE hosted at NIST is not complete enough.
|
||||
*/
|
||||
public long updateNeeded() throws MalformedURLException, DownloadFailedException, UpdateException {
|
||||
long retVal = 0;
|
||||
long lastUpdated = 0;
|
||||
long currentlyPublishedDate = retrieveCurrentCPETimestampFromWeb();
|
||||
if (currentlyPublishedDate == 0) {
|
||||
throw new DownloadFailedException("Unable to retrieve valid timestamp from cpe.meta file");
|
||||
}
|
||||
|
||||
//String dir = Settings.getString(Settings.KEYS.CPE_INDEX);
|
||||
File f;
|
||||
try {
|
||||
f = getDataDirectory(); //new File(dir);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException("Unable to locate last updated properties file.", ex);
|
||||
}
|
||||
if (!f.exists()) {
|
||||
retVal = currentlyPublishedDate;
|
||||
} else {
|
||||
File cpeProp;
|
||||
try {
|
||||
cpeProp = new File(f.getCanonicalPath() + File.separatorChar + UPDATE_PROPERTIES_FILE);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException("Unable to find last updated properties file.", ex);
|
||||
}
|
||||
if (!cpeProp.exists()) {
|
||||
retVal = currentlyPublishedDate;
|
||||
} else {
|
||||
Properties prop = new Properties();
|
||||
InputStream is = null;
|
||||
try {
|
||||
is = new FileInputStream(cpeProp);
|
||||
prop.load(is);
|
||||
lastUpdated = Long.parseLong(prop.getProperty(Index.LAST_UPDATED));
|
||||
} catch (FileNotFoundException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.FINEST, null, ex);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.FINEST, null, ex);
|
||||
} catch (NumberFormatException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.FINEST, null, ex);
|
||||
} finally {
|
||||
if (is != null) {
|
||||
try {
|
||||
is.close();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (currentlyPublishedDate > lastUpdated) {
|
||||
retVal = currentlyPublishedDate;
|
||||
}
|
||||
}
|
||||
}
|
||||
return retVal;
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves the timestamp from the CPE meta data file.
|
||||
*
|
||||
* @return the timestamp from the currently published cpe.meta.
|
||||
* @throws MalformedURLException is thrown if the URL for the CPE Meta data
|
||||
* is incorrect.
|
||||
* @throws DownloadFailedException is thrown if there is an error
|
||||
* downloading the cpe.meta data file.
|
||||
*/
|
||||
private long retrieveCurrentCPETimestampFromWeb() throws MalformedURLException, DownloadFailedException {
|
||||
long timestamp = 0;
|
||||
File tmp = null;
|
||||
InputStream is = null;
|
||||
try {
|
||||
tmp = File.createTempFile("cpe", "meta");
|
||||
URL url = new URL(Settings.getString(Settings.KEYS.CPE_META_URL));
|
||||
Downloader.fetchFile(url, tmp);
|
||||
Properties prop = new Properties();
|
||||
is = new FileInputStream(tmp);
|
||||
prop.load(is);
|
||||
timestamp = Long.parseLong(prop.getProperty("lastModifiedDate"));
|
||||
} catch (IOException ex) {
|
||||
throw new DownloadFailedException("Unable to create temporary file for CPE Meta File download.", ex);
|
||||
} finally {
|
||||
try {
|
||||
if (is != null) {
|
||||
try {
|
||||
is.close();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.FINEST, null, ex);
|
||||
}
|
||||
}
|
||||
if (tmp != null && tmp.exists()) {
|
||||
tmp.delete();
|
||||
}
|
||||
} finally {
|
||||
if (tmp != null && tmp.exists()) {
|
||||
tmp.deleteOnExit();
|
||||
}
|
||||
}
|
||||
}
|
||||
return timestamp;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,88 +0,0 @@
|
||||
package org.codesecure.dependencycheck.data.cpe.xml;
|
||||
/*
|
||||
* This file is part of DependencyCheck.
|
||||
*
|
||||
* DependencyCheck is free software: you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation, either version 3 of the License, or (at your option) any
|
||||
* later version.
|
||||
*
|
||||
* DependencyCheck is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
|
||||
* details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* DependencyCheck. If not, see http://www.gnu.org/licenses/.
|
||||
*
|
||||
* Copyright (c) 2012 Jeremy Long. All Rights Reserved.
|
||||
*/
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.logging.Level;
|
||||
import java.util.logging.Logger;
|
||||
import javax.xml.parsers.ParserConfigurationException;
|
||||
import javax.xml.parsers.SAXParser;
|
||||
import javax.xml.parsers.SAXParserFactory;
|
||||
import org.apache.lucene.index.CorruptIndexException;
|
||||
import org.xml.sax.SAXException;
|
||||
|
||||
/**
|
||||
* Imports a CPE XML file into the Lucene CPE Index.
|
||||
*
|
||||
* @author Jeremy Long (jeremy.long@gmail.com)
|
||||
*/
|
||||
public class Importer {
|
||||
|
||||
/**
|
||||
* Private constructor for utility class.
|
||||
*/
|
||||
private Importer() {
|
||||
}
|
||||
|
||||
/**
|
||||
* Imports the CPE XML File into the Lucene Index.
|
||||
*
|
||||
* @param file the CPE XML file to import.
* @throws ParserConfigurationException is thrown if the parser is
|
||||
* misconfigured.
|
||||
* @throws SAXException is thrown when there is a SAXException.
|
||||
* @throws IOException is thrown when there is an IOException.
|
||||
* @throws CorruptIndexException is thrown when the Lucene index is corrupt.
|
||||
*/
|
||||
public static void importXML(File file) throws CorruptIndexException, ParserConfigurationException, IOException, SAXException {
|
||||
SAXParserFactory factory = SAXParserFactory.newInstance();
|
||||
SAXParser saxParser = factory.newSAXParser();
|
||||
CPEHandler handler = new CPEHandler();
|
||||
Indexer indexer = new Indexer();
|
||||
indexer.openIndexWriter();
|
||||
handler.registerSaveDelegate(indexer);
|
||||
try {
|
||||
saxParser.parse(file, handler);
|
||||
} catch (SAXException ex) {
|
||||
Logger.getLogger(Importer.class.getName()).log(Level.SEVERE, null, ex);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Importer.class.getName()).log(Level.SEVERE, null, ex);
|
||||
} finally {
|
||||
indexer.close();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Imports the CPE XML File into the Lucene Index.
|
||||
*
|
||||
* @param path the path to the CPE XML file.
|
||||
* @throws ParserConfigurationException is thrown if the parser is
|
||||
* misconfigured.
|
||||
* @throws SAXException is thrown when there is a SAXException.
|
||||
* @throws IOException is thrown when there is an IOException.
|
||||
*/
|
||||
public static void importXML(String path) throws ParserConfigurationException, SAXException, IOException {
|
||||
File f = new File(path);
|
||||
if (!f.exists()) {
|
||||
f.mkdirs();
|
||||
}
|
||||
Importer.importXML(f);
|
||||
}
|
||||
}
|
||||
@@ -22,12 +22,17 @@ import java.io.IOException;
|
||||
import java.util.logging.Level;
|
||||
import java.util.logging.Logger;
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.index.CorruptIndexException;
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.index.IndexWriterConfig;
|
||||
import org.apache.lucene.queryparser.classic.ParseException;
|
||||
import org.apache.lucene.queryparser.classic.QueryParser;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.TopDocs;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.LockObtainFailedException;
|
||||
import org.apache.lucene.util.Version;
|
||||
@@ -57,9 +62,17 @@ public abstract class AbstractIndex {
|
||||
*/
|
||||
private IndexSearcher indexSearcher = null;
|
||||
/**
|
||||
* The Lucene Analyzer.
|
||||
* The Lucene Analyzer used for Indexing.
|
||||
*/
|
||||
private Analyzer analyzer = null;
|
||||
private Analyzer indexingAnalyzer = null;
|
||||
/**
|
||||
* The Lucene Analyzer used for Searching
|
||||
*/
|
||||
private Analyzer searchingAnalyzer = null;
|
||||
/**
|
||||
* The Lucene QueryParser used for Searching
|
||||
*/
|
||||
private QueryParser queryParser = null;
|
||||
/**
|
||||
* Indicates whether or not the Lucene Index is open.
|
||||
*/
|
||||
@@ -72,7 +85,8 @@ public abstract class AbstractIndex {
|
||||
*/
|
||||
public void open() throws IOException {
|
||||
directory = this.getDirectory();
|
||||
analyzer = this.getAnalyzer(); //new StandardAnalyzer(Version.LUCENE_35);
|
||||
indexingAnalyzer = this.getIndexingAnalyzer();
|
||||
searchingAnalyzer = this.getSearchingAnalyzer();
|
||||
indexOpen = true;
|
||||
}
|
||||
|
||||
@@ -102,10 +116,16 @@ public abstract class AbstractIndex {
|
||||
indexSearcher = null;
|
||||
}
|
||||
|
||||
if (analyzer != null) {
|
||||
analyzer.close();
|
||||
analyzer = null;
|
||||
if (indexingAnalyzer != null) {
|
||||
indexingAnalyzer.close();
|
||||
indexingAnalyzer = null;
|
||||
}
|
||||
|
||||
if (searchingAnalyzer != null) {
|
||||
searchingAnalyzer.close();
|
||||
searchingAnalyzer = null;
|
||||
}
|
||||
|
||||
try {
|
||||
directory.close();
|
||||
} catch (IOException ex) {
|
||||
@@ -135,7 +155,7 @@ public abstract class AbstractIndex {
|
||||
if (!isOpen()) {
|
||||
open();
|
||||
}
|
||||
IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_40, analyzer);
|
||||
IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_40, indexingAnalyzer);
|
||||
indexWriter = new IndexWriter(directory, conf);
|
||||
}
|
||||
|
||||
@@ -176,7 +196,7 @@ public abstract class AbstractIndex {
|
||||
* @throws CorruptIndexException is thrown if the index is corrupt.
|
||||
* @throws IOException is thrown if there is an exception reading the index.
|
||||
*/
|
||||
public IndexSearcher getIndexSearcher() throws CorruptIndexException, IOException {
|
||||
protected IndexSearcher getIndexSearcher() throws CorruptIndexException, IOException {
|
||||
if (indexReader == null) {
|
||||
openIndexReader();
|
||||
}
|
||||
@@ -187,29 +207,116 @@ public abstract class AbstractIndex {
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an Analyzer for the Lucene Index.
|
||||
* Returns an Analyzer to be used when indexing.
|
||||
*
|
||||
* @return an Analyzer.
|
||||
*/
|
||||
public Analyzer getAnalyzer() {
|
||||
if (analyzer == null) {
|
||||
analyzer = createAnalyzer();
|
||||
public Analyzer getIndexingAnalyzer() {
|
||||
if (indexingAnalyzer == null) {
|
||||
indexingAnalyzer = createIndexingAnalyzer();
|
||||
}
|
||||
return analyzer;
|
||||
return indexingAnalyzer;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the directory that contains the Lucene Index.
|
||||
* Returns an analyzer used for searching the index
|
||||
* @return a lucene analyzer
|
||||
*/
|
||||
protected Analyzer getSearchingAnalyzer() {
|
||||
if (searchingAnalyzer == null) {
|
||||
searchingAnalyzer = createSearchingAnalyzer();
|
||||
}
|
||||
return searchingAnalyzer;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a query parser
|
||||
* @return a query parser
|
||||
*/
|
||||
protected QueryParser getQueryParser() {
|
||||
if (queryParser == null) {
|
||||
queryParser = createQueryParser();
|
||||
}
|
||||
return queryParser;
|
||||
}
|
||||
|
||||
/**
|
||||
* Searches the index using the given search string
|
||||
* @param searchString the query text
|
||||
* @param maxQueryResults the maximum number of documents to return
|
||||
* @return the TopDocs found by the search
|
||||
* @throws ParseException thrown when the searchString is invalid
|
||||
* @throws IOException is thrown if there is an issue with the underlying Index
|
||||
*/
|
||||
public TopDocs search(String searchString, int maxQueryResults) throws ParseException, IOException {
|
||||
|
||||
QueryParser parser = getQueryParser();
|
||||
|
||||
Query query = parser.parse(searchString);
|
||||
|
||||
resetSearchingAnalyzer();
|
||||
|
||||
IndexSearcher is = getIndexSearcher();
|
||||
|
||||
TopDocs docs = is.search(query, maxQueryResults);
|
||||
|
||||
return docs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Searches the index using the given query
|
||||
* @param query the query used to search the index
|
||||
* @param maxQueryResults the max number of results to return
|
||||
* @return the TopDocs found by the query
* @throws CorruptIndexException thrown if the Index is corrupt
|
||||
* @throws IOException thrown if there is an IOException
|
||||
*/
|
||||
public TopDocs search(Query query, int maxQueryResults) throws CorruptIndexException, IOException {
|
||||
IndexSearcher is = getIndexSearcher();
|
||||
return is.search(query, maxQueryResults);
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves a document from the Index
|
||||
* @param documentId the id of the document to retrieve
|
||||
* @return the Document
|
||||
* @throws IOException thrown if there is an IOException
|
||||
*/
|
||||
public Document getDocument(int documentId) throws IOException {
|
||||
IndexSearcher is = getIndexSearcher();
|
||||
return is.doc(documentId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the directory that contains the Lucene Index
|
||||
*
|
||||
* @return a Lucene Directory.
|
||||
* @throws IOException is thrown when an IOException occurs.
|
||||
* @return a Lucene Directory
|
||||
* @throws IOException is thrown when an IOException occurs
|
||||
*/
|
||||
public abstract Directory getDirectory() throws IOException;
|
||||
|
||||
/**
|
||||
* Creates the Lucene Analyzer used when indexing and searching the index.
|
||||
* Creates the Lucene Analyzer used when indexing
|
||||
*
|
||||
* @return a Lucene Analyzer.
|
||||
* @return a Lucene Analyzer
|
||||
*/
|
||||
public abstract Analyzer createAnalyzer();
|
||||
public abstract Analyzer createIndexingAnalyzer();
|
||||
|
||||
/**
|
||||
* Creates the Lucene Analyzer used when querying the index
|
||||
*
|
||||
* @return a Lucene Analyzer
|
||||
*/
|
||||
public abstract Analyzer createSearchingAnalyzer();
|
||||
|
||||
/**
|
||||
* Creates the Lucene QueryParser used when querying the index
|
||||
* @return a QueryParser
|
||||
*/
|
||||
public abstract QueryParser createQueryParser();
|
||||
|
||||
/**
|
||||
* Resets the searching analyzers
|
||||
*/
|
||||
protected abstract void resetSearchingAnalyzer();
|
||||
}
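To make the new AbstractIndex API above concrete, a minimal usage sketch of search(String, int) and getDocument(int) against the CPE Index subclass shown earlier in this diff; the query syntax and the field name passed to doc.get() are illustrative assumptions, not values taken from the commit.

import org.apache.lucene.document.Document;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.codesecure.dependencycheck.data.cpe.Index;

public class CpeSearchSketch {

    public static void main(String[] args) throws Exception {
        Index cpe = new Index();
        cpe.open();
        try {
            // in the real code, CPEAnalyzer builds this query text via appendWeightedSearch
            TopDocs docs = cpe.search("product:(spring framework) AND vendor:(springsource)", 20);
            for (ScoreDoc sd : docs.scoreDocs) {
                Document doc = cpe.getDocument(sd.doc);
                // "name" is an assumed field name; the commit uses the Fields.NAME constant
                System.out.println(sd.score + " " + doc.get("name"));
            }
        } finally {
            cpe.close();
        }
    }
}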
|
||||
|
||||
@@ -0,0 +1,81 @@
|
||||
package org.codesecure.dependencycheck.data.lucene;
|
||||
/*
|
||||
* This file is part of DependencyCheck.
|
||||
*
|
||||
* DependencyCheck is free software: you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation, either version 3 of the License, or (at your option) any
|
||||
* later version.
|
||||
*
|
||||
* DependencyCheck is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
|
||||
* details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* DependencyCheck. If not, see http://www.gnu.org/licenses/.
|
||||
*
|
||||
* Copyright (c) 2012 Jeremy Long. All Rights Reserved.
|
||||
*/
|
||||
|
||||
import java.io.Reader;
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.analysis.Tokenizer;
|
||||
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
|
||||
import org.apache.lucene.analysis.core.LowerCaseFilter;
|
||||
import org.apache.lucene.analysis.core.StopAnalyzer;
|
||||
import org.apache.lucene.analysis.core.StopFilter;
|
||||
import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
|
||||
import org.apache.lucene.util.Version;
|
||||
|
||||
/**
|
||||
* <p>A Lucene Analyzer that utilizes the WhitespaceTokenizer, WordDelimiterFilter,
|
||||
* LowerCaseFilter, and StopFilter. The intended purpose of this Analyzer is
|
||||
* to index the CPE fields vendor and product.</p>
|
||||
*
|
||||
* @author Jeremy Long (jeremy.long@gmail.com)
|
||||
*/
|
||||
public class FieldAnalyzer extends Analyzer {
|
||||
|
||||
/**
|
||||
* The Lucene Version used
|
||||
*/
|
||||
private Version version = null;
|
||||
|
||||
/**
|
||||
* Creates a new FieldAnalyzer
|
||||
* @param version the Lucene version
|
||||
*/
|
||||
public FieldAnalyzer(Version version) {
|
||||
this.version = version;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates the TokenStreamComponents
|
||||
*
|
||||
* @param fieldName the field name being analyzed
|
||||
* @param reader the reader containing the input
|
||||
* @return the TokenStreamComponents
|
||||
*/
|
||||
@Override
|
||||
protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
|
||||
Tokenizer source = new WhitespaceTokenizer(version, reader);
|
||||
|
||||
TokenStream stream = source;
|
||||
|
||||
stream = new WordDelimiterFilter(stream,
|
||||
WordDelimiterFilter.CATENATE_WORDS
|
||||
| WordDelimiterFilter.GENERATE_WORD_PARTS
|
||||
| WordDelimiterFilter.GENERATE_NUMBER_PARTS
|
||||
| WordDelimiterFilter.PRESERVE_ORIGINAL
|
||||
| WordDelimiterFilter.SPLIT_ON_CASE_CHANGE
|
||||
| WordDelimiterFilter.SPLIT_ON_NUMERICS
|
||||
| WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null);
|
||||
|
||||
stream = new LowerCaseFilter(version, stream);
|
||||
stream = new StopFilter(version, stream, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
|
||||
|
||||
return new TokenStreamComponents(source, stream);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,93 @@
|
||||
package org.codesecure.dependencycheck.data.lucene;
|
||||
/*
|
||||
* This file is part of DependencyCheck.
|
||||
*
|
||||
* DependencyCheck is free software: you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation, either version 3 of the License, or (at your option) any
|
||||
* later version.
|
||||
*
|
||||
* DependencyCheck is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
|
||||
* details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* DependencyCheck. If not, see http://www.gnu.org/licenses/.
|
||||
*
|
||||
* Copyright (c) 2012 Jeremy Long. All Rights Reserved.
|
||||
*/
|
||||
|
||||
import java.io.Reader;
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.analysis.Tokenizer;
|
||||
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
|
||||
import org.apache.lucene.analysis.core.LowerCaseFilter;
|
||||
import org.apache.lucene.analysis.core.StopAnalyzer;
|
||||
import org.apache.lucene.analysis.core.StopFilter;
|
||||
import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
|
||||
import org.apache.lucene.util.Version;
|
||||
|
||||
/**
|
||||
* A Lucene field analyzer used to analyze queries against the CPE data.
*
|
||||
* @author Jeremy Long (jeremy.long@gmail.com)
|
||||
*/
|
||||
public class SearchFieldAnalyzer extends Analyzer {
|
||||
|
||||
/**
|
||||
* The Lucene Version used
|
||||
*/
|
||||
private Version version = null;
|
||||
/**
|
||||
* A local reference to the TokenPairConcatenatingFilter so that we
|
||||
* can clear any left over state if this analyzer is re-used.
|
||||
*/
|
||||
private TokenPairConcatenatingFilter concatenatingFilter = null;
|
||||
|
||||
/**
|
||||
* Constructs a new SearchFieldAnalyzer
|
||||
* @param version the Lucene version
|
||||
*/
|
||||
public SearchFieldAnalyzer(Version version) {
|
||||
this.version = version;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates the TokenStreamComponents used to analyze the stream.
* @param fieldName the field that this lucene analyzer will process
|
||||
* @param reader a reader containing the tokens
|
||||
* @return the token stream filter chain
|
||||
*/
|
||||
@Override
|
||||
protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
|
||||
Tokenizer source = new WhitespaceTokenizer(version, reader);
|
||||
|
||||
TokenStream stream = source;
|
||||
|
||||
stream = new WordDelimiterFilter(stream,
|
||||
WordDelimiterFilter.GENERATE_WORD_PARTS
|
||||
| WordDelimiterFilter.GENERATE_NUMBER_PARTS
|
||||
| WordDelimiterFilter.PRESERVE_ORIGINAL
|
||||
| WordDelimiterFilter.SPLIT_ON_CASE_CHANGE
|
||||
| WordDelimiterFilter.SPLIT_ON_NUMERICS
|
||||
| WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null);
|
||||
|
||||
stream = new LowerCaseFilter(version, stream);
|
||||
concatenatingFilter = new TokenPairConcatenatingFilter(stream);
|
||||
stream = concatenatingFilter;
|
||||
stream = new StopFilter(version, stream, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
|
||||
|
||||
return new TokenStreamComponents(source, stream);
|
||||
}
|
||||
|
||||
/**
|
||||
* <p>Resets the analyzer and clears any internal state data that may
|
||||
* have been left-over from previous uses of the analyzer.</p>
|
||||
* <p><b>If this analyzer is re-used this method must be called between uses.</b></p>
|
||||
*/
|
||||
public void clear() {
|
||||
concatenatingFilter.clear();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,97 @@
|
||||
package org.codesecure.dependencycheck.data.lucene;
|
||||
/*
|
||||
* This file is part of DependencyCheck.
|
||||
*
|
||||
* DependencyCheck is free software: you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation, either version 3 of the License, or (at your option) any
|
||||
* later version.
|
||||
*
|
||||
* DependencyCheck is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
|
||||
* details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* DependencyCheck. If not, see http://www.gnu.org/licenses/.
|
||||
*
|
||||
* Copyright (c) 2012 Jeremy Long. All Rights Reserved.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.LinkedList;
|
||||
import org.apache.lucene.analysis.TokenFilter;
|
||||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
|
||||
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
|
||||
|
||||
/**
|
||||
* <p>Takes a TokenStream and adds additional tokens by concatenating pairs of words.</p>
|
||||
* <p><b>Example:</b> "Spring Framework Core" -> "Spring SpringFramework Framework FrameworkCore Core".</p>
|
||||
*
|
||||
* @author Jeremy Long (jeremy.long@gmail.com)
|
||||
*/
|
||||
public final class TokenPairConcatenatingFilter extends TokenFilter {
|
||||
|
||||
private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
|
||||
private final PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
|
||||
private String previousWord = null;
|
||||
private LinkedList<String> words = null;
|
||||
|
||||
/**
|
||||
* Constructs a new TokenPairConcatenatingFilter.
* @param stream the TokenStream that this filter will process
|
||||
*/
|
||||
public TokenPairConcatenatingFilter(TokenStream stream) {
|
||||
super(stream);
|
||||
words = new LinkedList<String>();
|
||||
}
|
||||
|
||||
/**
|
||||
* Increments the underlying TokenStream and sets CharTermAttributes to
* construct an expanded set of tokens by concatenating tokens with the
* previous token.
|
||||
*
|
||||
* @return whether or not we have hit the end of the TokenStream
|
||||
* @throws IOException is thrown when an IOException occurs
|
||||
*/
|
||||
@Override
|
||||
public boolean incrementToken() throws IOException {
|
||||
|
||||
//collect all the terms into the words collection
while (input.incrementToken()) {
|
||||
String word = new String(termAtt.buffer(), 0, termAtt.length());
|
||||
words.add(word);
|
||||
}
|
||||
|
||||
//if we have a previousWord - write it out as its own token concatenated
// with the current word (if one is available).
|
||||
if (previousWord != null && words.size() > 0) {
|
||||
String word = words.getFirst();
|
||||
clearAttributes();
|
||||
termAtt.append(previousWord).append(word);
|
||||
posIncAtt.setPositionIncrement(0);
|
||||
previousWord = null;
|
||||
return true;
|
||||
}
|
||||
//if there are words left, write the next one out as its own token
if (words.size() > 0) {
|
||||
String word = words.removeFirst();
|
||||
clearAttributes();
|
||||
termAtt.append(word);
|
||||
previousWord = word;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* <p>Resets the Filter and clears any internal state data that may
|
||||
* have been left-over from previous uses of the Filter.</p>
|
||||
* <p><b>If this Filter is re-used this method must be called between uses.</b></p>
|
||||
*/
|
||||
public void clear() {
|
||||
previousWord = null;
|
||||
words.clear();
|
||||
}
|
||||
}
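A short sketch of the filter in use, reproducing the "Spring Framework Core" example from the class javadoc; the tokenizer choice and the Lucene 4.0 consume loop are assumptions about typical usage rather than code from this commit.

import java.io.StringReader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;
import org.codesecure.dependencycheck.data.lucene.TokenPairConcatenatingFilter;

public class PairFilterSketch {

    public static void main(String[] args) throws Exception {
        TokenStream ts = new TokenPairConcatenatingFilter(
                new WhitespaceTokenizer(Version.LUCENE_40, new StringReader("Spring Framework Core")));
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
            System.out.println(term.toString());
        }
        ts.end();
        ts.close();
        // output, one term per line: Spring, SpringFramework, Framework, FrameworkCore, Core
    }
}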
|
||||
@@ -19,28 +19,17 @@ package org.codesecure.dependencycheck.data.nvdcve;
|
||||
*/
|
||||
|
||||
import java.io.*;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.net.URLDecoder;
|
||||
import java.util.*;
|
||||
import java.util.logging.Level;
|
||||
import java.util.logging.Logger;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.core.KeywordAnalyzer;
|
||||
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
|
||||
import org.apache.lucene.analysis.standard.StandardAnalyzer;
|
||||
import org.apache.lucene.queryparser.classic.QueryParser;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.FSDirectory;
|
||||
import org.apache.lucene.util.Version;
|
||||
import org.codesecure.dependencycheck.data.CachedWebDataSource;
|
||||
import org.codesecure.dependencycheck.data.UpdateException;
|
||||
import org.codesecure.dependencycheck.data.lucene.AbstractIndex;
|
||||
import org.codesecure.dependencycheck.data.nvdcve.xml.Importer;
|
||||
import org.codesecure.dependencycheck.utils.DownloadFailedException;
|
||||
import org.codesecure.dependencycheck.utils.Downloader;
|
||||
import org.codesecure.dependencycheck.utils.FileUtils;
|
||||
import org.codesecure.dependencycheck.utils.Settings;
|
||||
|
||||
/**
|
||||
@@ -48,28 +37,7 @@ import org.codesecure.dependencycheck.utils.Settings;
|
||||
*
|
||||
* @author Jeremy Long (jeremy.long@gmail.com)
|
||||
*/
|
||||
public class Index extends AbstractIndex implements CachedWebDataSource {
|
||||
|
||||
/**
|
||||
* The current version of the index
|
||||
*/
|
||||
public static final String INDEX_VERSION = "1.0";
|
||||
/**
|
||||
* The name of the properties file containing the timestamp of the last
|
||||
* update.
|
||||
*/
|
||||
private static final String UPDATE_PROPERTIES_FILE = "lastupdated.prop";
|
||||
/**
|
||||
* The properties file key for the last updated field - used to store the
|
||||
* last updated time of the Modified NVD CVE xml file.
|
||||
*/
|
||||
private static final String LAST_UPDATED_MODIFIED = "lastupdated.modified";
|
||||
/**
|
||||
* Stores the last updated time for each of the NVD CVE files. These
|
||||
* timestamps should be updated if we process the modified file within 7
|
||||
* days of the last update.
|
||||
*/
|
||||
private static final String LAST_UPDATED_BASE = "lastupdated.";
|
||||
public class Index extends AbstractIndex {
|
||||
|
||||
/**
|
||||
* Returns the directory that holds the NVD CVE Index. Note, this
|
||||
@@ -117,7 +85,7 @@ public class Index extends AbstractIndex implements CachedWebDataSource {
|
||||
* @return the VULNERABLE_CPE Analyzer.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public Analyzer createAnalyzer() {
|
||||
public Analyzer createIndexingAnalyzer() {
|
||||
Map fieldAnalyzers = new HashMap();
|
||||
|
||||
fieldAnalyzers.put(Fields.CVE_ID, new KeywordAnalyzer());
|
||||
@@ -130,459 +98,35 @@ public class Index extends AbstractIndex implements CachedWebDataSource {
|
||||
}
|
||||
|
||||
/**
|
||||
* <p>Downloads the latest NVD CVE XML file from the web and imports it into
|
||||
* the current CVE Index.</p>
|
||||
* Creates an Analyzer for the NVD VULNERABLE_CPE Index.
|
||||
*
|
||||
* @throws UpdateException is thrown if there is an error updating the index
|
||||
* @return the VULNERABLE_CPE Analyzer.
|
||||
*/
|
||||
public void update() throws UpdateException {
|
||||
try {
|
||||
Map<String, NvdCveUrl> update = updateNeeded();
|
||||
int maxUpdates = 0;
|
||||
for (NvdCveUrl cve : update.values()) {
|
||||
if (cve.getNeedsUpdate()) {
|
||||
maxUpdates += 1;
|
||||
}
|
||||
}
|
||||
if (maxUpdates > 3) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.WARNING, "NVD CVE requires several updates; this could take a couple of minutes.");
|
||||
}
|
||||
int count = 0;
|
||||
for (NvdCveUrl cve : update.values()) {
|
||||
if (cve.getNeedsUpdate()) {
|
||||
count += 1;
|
||||
Logger.getLogger(Index.class.getName()).log(Level.WARNING, "Updating NVD CVE (" + count + " of " + maxUpdates + ")");
|
||||
URL url = new URL(cve.getUrl());
|
||||
File outputPath = null;
|
||||
try {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.WARNING, "Downloading " + cve.getUrl());
|
||||
outputPath = File.createTempFile("cve" + cve.getId() + "_", ".xml");
|
||||
Downloader.fetchFile(url, outputPath, false);
|
||||
Logger.getLogger(Index.class.getName()).log(Level.WARNING, "Processing " + cve.getUrl());
|
||||
Importer.importXML(outputPath.toString());
|
||||
Logger.getLogger(Index.class.getName()).log(Level.WARNING, "Completed update " + count + " of " + maxUpdates);
} catch (FileNotFoundException ex) {
|
||||
//Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
} catch (IOException ex) {
|
||||
//Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
} finally {
|
||||
try {
|
||||
if (outputPath != null && outputPath.exists()) {
|
||||
outputPath.delete();
|
||||
}
|
||||
} finally {
|
||||
if (outputPath != null && outputPath.exists()) {
|
||||
outputPath.deleteOnExit();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (maxUpdates >= 1) {
|
||||
writeLastUpdatedPropertyFile(update);
|
||||
}
|
||||
} catch (MalformedURLException ex) {
|
||||
//Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
} catch (DownloadFailedException ex) {
|
||||
//Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
}
|
||||
@SuppressWarnings("unchecked")
|
||||
public Analyzer createSearchingAnalyzer() {
|
||||
Map fieldAnalyzers = new HashMap();
|
||||
|
||||
fieldAnalyzers.put(Fields.CVE_ID, new KeywordAnalyzer());
|
||||
fieldAnalyzers.put(Fields.VULNERABLE_CPE, new KeywordAnalyzer());
|
||||
|
||||
PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(
|
||||
new StandardAnalyzer(Version.LUCENE_40), fieldAnalyzers);
|
||||
|
||||
return wrapper;
|
||||
}
|
||||
|
||||
/**
|
||||
* Writes a properties file containing the last updated date to the
|
||||
* VULNERABLE_CPE directory.
|
||||
*
|
||||
* @param timeStamp the timestamp to write.
|
||||
* Creates the Lucene QueryParser used when querying the index
|
||||
* @return a QueryParser
|
||||
*/
|
||||
private void writeLastUpdatedPropertyFile(Map<String, NvdCveUrl> updated) throws UpdateException {
|
||||
String dir;
|
||||
try {
|
||||
dir = getDataDirectory().getCanonicalPath();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException("Unable to locate last updated properties file.", ex);
|
||||
}
|
||||
File cveProp = new File(dir + File.separatorChar + UPDATE_PROPERTIES_FILE);
|
||||
Properties prop = new Properties();
|
||||
prop.put("version", INDEX_VERSION);
|
||||
for (NvdCveUrl cve : updated.values()) {
|
||||
prop.put(LAST_UPDATED_BASE + cve.id, String.valueOf(cve.getTimestamp()));
|
||||
}
|
||||
|
||||
OutputStream os = null;
|
||||
try {
|
||||
os = new FileOutputStream(cveProp);
|
||||
OutputStreamWriter out = new OutputStreamWriter(os, "UTF-8");
|
||||
prop.store(out, dir);
|
||||
} catch (FileNotFoundException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException("Unable to find last updated properties file.", ex);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException("Unable to update last updated properties file.", ex);
|
||||
} finally {
|
||||
if (os != null) {
|
||||
try {
|
||||
os.flush();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
try {
|
||||
os.close();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
public QueryParser createQueryParser() {
|
||||
return new QueryParser(Version.LUCENE_40, Fields.VULNERABLE_CPE, getSearchingAnalyzer());
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines if the index needs to be updated. This is done by fetching the
|
||||
* nvd cve meta data and checking the last update date. If the data needs to
|
||||
* be refreshed this method will return the NvdCveUrl for the files that
|
||||
* need to be updated.
|
||||
*
|
||||
* @return the NvdCveUrl of the files that need to be updated.
|
||||
* @throws MalformedURLException is thrown if the URL for the NVD CVE Meta
|
||||
* data is incorrect.
|
||||
* @throws DownloadFailedException is thrown if there is an error
* downloading the nvd cve download data file.
|
||||
* @throws UpdateException is thrown if there is an issue with the last updated properties file.
* Resets the searching analyzers
|
||||
*/
|
||||
public Map<String, NvdCveUrl> updateNeeded() throws MalformedURLException, DownloadFailedException, UpdateException {
|
||||
|
||||
Map<String, NvdCveUrl> currentlyPublished;
|
||||
try {
|
||||
currentlyPublished = retrieveCurrentTimestampsFromWeb();
|
||||
} catch (InvalidDataException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new DownloadFailedException("Unable to retrieve valid timestamp from nvd cve downloads page", ex);
|
||||
}
|
||||
if (currentlyPublished == null) {
|
||||
throw new DownloadFailedException("Unable to retrieve valid timestamp from nvd cve downloads page");
|
||||
}
|
||||
String dir;
|
||||
try {
|
||||
dir = getDataDirectory().getCanonicalPath();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException("Unable to locate last updated properties file.", ex);
|
||||
}
|
||||
|
||||
File f = new File(dir);
|
||||
if (f.exists()) {
|
||||
File cveProp = new File(dir + File.separatorChar + UPDATE_PROPERTIES_FILE);
|
||||
if (cveProp.exists()) {
|
||||
Properties prop = new Properties();
|
||||
InputStream is = null;
|
||||
try {
|
||||
is = new FileInputStream(cveProp);
|
||||
prop.load(is);
|
||||
|
||||
if (prop.getProperty("version") == null) {
|
||||
is.close();
|
||||
//this is an old version of the lucene index - just delete it
|
||||
FileUtils.delete(f);
|
||||
|
||||
//this importer also updates the CPE index and it is also using an old version
|
||||
org.codesecure.dependencycheck.data.cpe.Index cpeidx = new org.codesecure.dependencycheck.data.cpe.Index();
|
||||
File cpeDir = cpeidx.getDataDirectory();
|
||||
FileUtils.delete(cpeDir);
|
||||
return currentlyPublished;
|
||||
}
|
||||
|
||||
long lastUpdated = Long.parseLong(prop.getProperty(Index.LAST_UPDATED_MODIFIED));
|
||||
Date now = new Date();
|
||||
int days = Settings.getInt(Settings.KEYS.CVE_MODIFIED_VALID_FOR_DAYS);
|
||||
int maxEntries = Settings.getInt(Settings.KEYS.CVE_URL_COUNT);
|
||||
if (lastUpdated == currentlyPublished.get("modified").timestamp) {
|
||||
currentlyPublished.clear(); //we don't need to update anything.
|
||||
} else if (withinRange(lastUpdated, now.getTime(), days)) {
|
||||
currentlyPublished.get("modified").setNeedsUpdate(true);
|
||||
for (int i = 1; i <= maxEntries; i++) {
|
||||
currentlyPublished.get(String.valueOf(i)).setNeedsUpdate(false);
|
||||
}
|
||||
} else { //we figure out which of the several XML files need to be downloaded.
|
||||
currentlyPublished.get("modified").setNeedsUpdate(false);
|
||||
for (int i = 1; i <= maxEntries; i++) {
|
||||
NvdCveUrl cve = currentlyPublished.get(String.valueOf(i));
|
||||
long currentTimestamp = 0;
|
||||
try {
|
||||
currentTimestamp = Long.parseLong(prop.getProperty(LAST_UPDATED_BASE + String.valueOf(i), "0"));
|
||||
} catch (NumberFormatException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.FINEST, "Error parsing " + LAST_UPDATED_BASE
|
||||
+ String.valueOf(i) + " from nvdcve.lastupdated", ex);
|
||||
}
|
||||
if (currentTimestamp == cve.getTimestamp()) {
|
||||
cve.setNeedsUpdate(false); //they default to true.
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (FileNotFoundException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.FINEST, null, ex);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.FINEST, null, ex);
|
||||
} catch (NumberFormatException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.FINEST, null, ex);
|
||||
} finally {
|
||||
if (is != null) {
|
||||
try {
|
||||
is.close();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return currentlyPublished;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines if the epoch date is within the specified range of the
* compareTo epoch time. This takes the (compareTo-date)/1000/60/60/24 to
* get the number of days. If the calculated days is less than the range, the
|
||||
* date is considered valid.
|
||||
*
|
||||
* @param date the date to be checked.
|
||||
* @param compareTo the date to compare to.
|
||||
* @param range the range in days to be considered valid.
|
||||
* @return whether or not the date is within the range.
|
||||
*/
|
||||
private boolean withinRange(long date, long compareTo, int range) {
|
||||
double differenceInDays = (compareTo - date) / 1000 / 60 / 60 / 24;
|
||||
return differenceInDays < range;
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves the timestamps from the NVD CVE meta data file.
|
||||
*
|
||||
* @return the timestamp from the currently published nvdcve downloads page
|
||||
* @throws MalformedURLException is thrown if the URL for the NVD CVE Meta
|
||||
* data is incorrect.
|
||||
* @throws DownloadFailedException is thrown if there is an error
|
||||
* downloading the nvd cve meta data file
|
||||
* @throws InvalidDataException is thrown if there is an exception parsing
|
||||
* the timestamps
|
||||
*/
|
||||
protected Map<String, NvdCveUrl> retrieveCurrentTimestampsFromWeb() throws MalformedURLException, DownloadFailedException, InvalidDataException {
|
||||
Map<String, NvdCveUrl> map = new HashMap<String, NvdCveUrl>();
|
||||
|
||||
File tmp = null;
|
||||
try {
|
||||
tmp = File.createTempFile("cve", "meta");
|
||||
URL url = new URL(Settings.getString(Settings.KEYS.CVE_META_URL));
|
||||
Downloader.fetchFile(url, tmp);
|
||||
String html = readFile(tmp);
|
||||
|
||||
String retrieveUrl = Settings.getString(Settings.KEYS.CVE_MODIFIED_URL);
|
||||
NvdCveUrl cve = createNvdCveUrl("modified", retrieveUrl, html);
|
||||
cve.setNeedsUpdate(false); //the others default to true; to make life easier later, this one defaults to false.
|
||||
map.put("modified", cve);
|
||||
int max = Settings.getInt(Settings.KEYS.CVE_URL_COUNT);
|
||||
for (int i = 1; i <= max; i++) {
|
||||
retrieveUrl = Settings.getString(Settings.KEYS.CVE_BASE_URL + i);
|
||||
String key = Integer.toString(i);
|
||||
cve = createNvdCveUrl(key, retrieveUrl, html);
|
||||
map.put(key, cve);
|
||||
}
|
||||
} catch (IOException ex) {
|
||||
throw new DownloadFailedException("Unable to create temporary file for NVD CVE Meta File download.", ex);
|
||||
} finally {
|
||||
try {
|
||||
if (tmp != null && tmp.exists()) {
|
||||
tmp.delete();
|
||||
}
|
||||
} finally {
|
||||
if (tmp != null && tmp.exists()) {
|
||||
tmp.deleteOnExit();
|
||||
}
|
||||
}
|
||||
}
|
||||
return map;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new NvdCveUrl object from the provided id, url, and text/html
|
||||
* from the NVD CVE downloads page.
|
||||
*
|
||||
* @param id the name of this NVD CVE Url
|
||||
* @param retrieveUrl the URL to download the file from
|
||||
* @param text a bit of HTML from the NVD CVE downloads page that contains
|
||||
* the URL and the last updated timestamp.
|
||||
* @return a shiny new NvdCveUrl object.
|
||||
* @throws InvalidDataException is thrown if the timestamp could not be
|
||||
* extracted from the provided text.
|
||||
*/
|
||||
private NvdCveUrl createNvdCveUrl(String id, String retrieveUrl, String text) throws InvalidDataException {
|
||||
Pattern pattern = Pattern.compile(Pattern.quote(retrieveUrl) + ".+?\\<br");
|
||||
Matcher m = pattern.matcher(text);
|
||||
NvdCveUrl item = new NvdCveUrl();
|
||||
item.id = id;
|
||||
item.url = retrieveUrl;
|
||||
if (m.find()) {
|
||||
String line = m.group();
|
||||
int pos = line.indexOf("Updated:");
|
||||
if (pos > 0) {
|
||||
pos += 9;
|
||||
try {
|
||||
String timestampstr = line.substring(pos, line.length() - 3).replace("at ", "");
|
||||
long timestamp = getEpochTimeFromDateTime(timestampstr);
|
||||
item.setTimestamp(timestamp);
|
||||
} catch (NumberFormatException ex) {
|
||||
throw new InvalidDataException("NVD CVE Meta file does not contain a valid timestamp for '" + retrieveUrl + "'.", ex);
|
||||
}
|
||||
} else {
|
||||
throw new InvalidDataException("NVD CVE Meta file does not contain the updated timestamp for '" + retrieveUrl + "'.");
|
||||
}
|
||||
} else {
|
||||
throw new InvalidDataException("NVD CVE Meta file does not contain the url for '" + retrieveUrl + "'.");
|
||||
}
|
||||
return item;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parses a timestamp in the format of "MM/dd/yy hh:mm" into a calendar
|
||||
* object and returns the epoch time. Note, this removes the millisecond
|
||||
* portion of the epoch time so all numbers returned should end in 000.
|
||||
*
|
||||
* @param timestamp a string in the format of "MM/dd/yy hh:mm"
|
||||
* @return the epoch time in milliseconds.
|
||||
* @throws NumberFormatException if the timestamp was parsed incorrectly.
|
||||
*/
|
||||
private long getEpochTimeFromDateTime(String timestamp) throws NumberFormatException {
|
||||
Calendar c = new GregorianCalendar();
|
||||
int month = Integer.parseInt(timestamp.substring(0, 2));
|
||||
int date = Integer.parseInt(timestamp.substring(3, 5));
|
||||
int year = 2000 + Integer.parseInt(timestamp.substring(6, 8));
|
||||
int hourOfDay = Integer.parseInt(timestamp.substring(9, 11));
|
||||
int minute = Integer.parseInt(timestamp.substring(12, 14));
|
||||
c.set(year, month, date, hourOfDay, minute, 0);
|
||||
long t = c.getTimeInMillis();
|
||||
t = (t / 1000) * 1000;
|
||||
return t;
|
||||
}
|
||||
|
||||
/**
|
||||
* Reads a file into a string.
|
||||
*
|
||||
* @param file the file to be read.
|
||||
* @return the contents of the file.
|
||||
* @throws IOException is thrown if an IOException occurs.
|
||||
*/
|
||||
private String readFile(File file) throws IOException {
|
||||
InputStreamReader stream = new InputStreamReader(new FileInputStream(file), "UTF-8");
|
||||
StringBuilder str = new StringBuilder((int) file.length());
|
||||
try {
|
||||
char[] buf = new char[8096];
|
||||
int read = stream.read(buf, 0, 8096);
|
||||
while (read > 0) {
|
||||
str.append(buf, 0, read);
|
||||
read = stream.read(buf, 0, 8096);
|
||||
}
|
||||
} finally {
|
||||
stream.close();
|
||||
}
|
||||
return str.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* A pojo that contains the Url and timestamp of the current NvdCve XML
|
||||
* files.
|
||||
*/
|
||||
protected class NvdCveUrl {
|
||||
|
||||
/**
|
||||
* an id.
|
||||
*/
|
||||
private String id;
|
||||
|
||||
/**
|
||||
* Get the value of id
|
||||
*
|
||||
* @return the value of id
|
||||
*/
|
||||
public String getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the value of id
|
||||
*
|
||||
* @param id new value of id
|
||||
*/
|
||||
public void setId(String id) {
|
||||
this.id = id;
|
||||
}
|
||||
/**
|
||||
* a url.
|
||||
*/
|
||||
private String url;
|
||||
|
||||
/**
|
||||
* Get the value of url
|
||||
*
|
||||
* @return the value of url
|
||||
*/
|
||||
public String getUrl() {
|
||||
return url;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the value of url
|
||||
*
|
||||
* @param url new value of url
|
||||
*/
|
||||
public void setUrl(String url) {
|
||||
this.url = url;
|
||||
}
|
||||
/**
|
||||
* a timestamp - epoch time.
|
||||
*/
|
||||
private long timestamp;
|
||||
|
||||
/**
|
||||
* Get the value of timestamp - epoch time
|
||||
*
|
||||
* @return the value of timestamp - epoch time
|
||||
*/
|
||||
public long getTimestamp() {
|
||||
return timestamp;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the value of timestamp - epoch time
|
||||
*
|
||||
* @param timestamp new value of timestamp - epoch time
|
||||
*/
|
||||
public void setTimestamp(long timestamp) {
|
||||
this.timestamp = timestamp;
|
||||
}
|
||||
/**
|
||||
* indicates whether or not this item should be updated.
|
||||
*/
|
||||
private boolean needsUpdate = true;
|
||||
|
||||
/**
|
||||
* Get the value of needsUpdate
|
||||
*
|
||||
* @return the value of needsUpdate
|
||||
*/
|
||||
public boolean getNeedsUpdate() {
|
||||
return needsUpdate;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the value of needsUpdate
|
||||
*
|
||||
* @param needsUpdate new value of needsUpdate
|
||||
*/
|
||||
public void setNeedsUpdate(boolean needsUpdate) {
|
||||
this.needsUpdate = needsUpdate;
|
||||
}
|
||||
protected void resetSearchingAnalyzer() {
|
||||
//do nothing
|
||||
}
|
||||
}
|
||||
|
||||
@@ -56,10 +56,6 @@ public class NvdCveAnalyzer implements org.codesecure.dependencycheck.analyzer.A
|
||||
* The CVE Index.
|
||||
*/
|
||||
protected Index cve = null;
|
||||
/**
|
||||
* The Lucene IndexSearcher.
|
||||
*/
|
||||
private IndexSearcher indexSearcher = null;
|
||||
|
||||
/**
|
||||
* Opens the data source.
|
||||
@@ -70,14 +66,12 @@ public class NvdCveAnalyzer implements org.codesecure.dependencycheck.analyzer.A
|
||||
public void open() throws IOException {
|
||||
cve = new Index();
|
||||
cve.open();
|
||||
indexSearcher = cve.getIndexSearcher();
|
||||
}
|
||||
|
||||
/**
|
||||
* Closes the data source.
|
||||
*/
|
||||
public void close() {
|
||||
indexSearcher = null;
|
||||
cve.close();
|
||||
}
|
||||
|
||||
@@ -132,9 +126,9 @@ public class NvdCveAnalyzer implements org.codesecure.dependencycheck.analyzer.A
|
||||
query.add(query1, BooleanClause.Occur.SHOULD);
|
||||
query.add(query2, BooleanClause.Occur.SHOULD);
|
||||
|
||||
TopDocs docs = indexSearcher.search(query, MAX_QUERY_RESULTS);
|
||||
TopDocs docs = cve.search(query, MAX_QUERY_RESULTS);
|
||||
for (ScoreDoc d : docs.scoreDocs) {
|
||||
Document doc = indexSearcher.doc(d.doc);
|
||||
Document doc = cve.getDocument(d.doc);
|
||||
String xml = doc.get(Fields.XML);
|
||||
Vulnerability vuln;
|
||||
try {
|
||||
|
||||
@@ -1,111 +0,0 @@
|
||||
package org.codesecure.dependencycheck.data.nvdcve.xml;
|
||||
/*
|
||||
* This file is part of DependencyCheck.
|
||||
*
|
||||
* DependencyCheck is free software: you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation, either version 3 of the License, or (at your option) any
|
||||
* later version.
|
||||
*
|
||||
* DependencyCheck is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
|
||||
* details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* DependencyCheck. If not, see http://www.gnu.org/licenses/.
|
||||
*
|
||||
* Copyright (c) 2012 Jeremy Long. All Rights Reserved.
|
||||
*/
|
||||
|
||||
import java.io.*;
|
||||
import java.util.logging.Level;
|
||||
import java.util.logging.Logger;
|
||||
import org.apache.lucene.index.CorruptIndexException;
|
||||
|
||||
/**
|
||||
* Imports a NVD CVE XML file into the Lucene NVD CVE Index.
|
||||
*
|
||||
* @author Jeremy Long (jeremy.long@gmail.com)
|
||||
*/
|
||||
public class Importer {
|
||||
|
||||
/**
|
||||
* Private constructor for utility class.
|
||||
*/
|
||||
private Importer() {
|
||||
}
|
||||
|
||||
/**
|
||||
* Imports the NVD CVE XML File into the Lucene Index.
|
||||
*
|
||||
* @param file containing the path to the NVD CVE XML file.
|
||||
*/
|
||||
public static void importXML(File file) {
|
||||
NvdCveParser indexer = null;
|
||||
org.codesecure.dependencycheck.data.cpe.xml.Indexer cpeIndexer = null;
|
||||
try {
|
||||
indexer = new NvdCveParser();
|
||||
indexer.openIndexWriter();
|
||||
|
||||
//HACK - hack to ensure all CPE data is stored in the index.
|
||||
cpeIndexer = new org.codesecure.dependencycheck.data.cpe.xml.Indexer();
|
||||
cpeIndexer.openIndexWriter();
|
||||
indexer.setCPEIndexer(cpeIndexer);
|
||||
|
||||
indexer.parse(file);
|
||||
} catch (CorruptIndexException ex) {
|
||||
Logger.getLogger(Importer.class.getName()).log(Level.SEVERE, null, ex);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Importer.class.getName()).log(Level.SEVERE, null, ex);
|
||||
} finally {
|
||||
if (indexer != null) {
|
||||
indexer.close();
|
||||
}
|
||||
if (cpeIndexer != null) {
|
||||
cpeIndexer.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
// public static void importXML(File file) throws FileNotFoundException, IOException, JAXBException,
|
||||
// ParserConfigurationException, SAXException {
|
||||
//
|
||||
// SAXParserFactory factory = SAXParserFactory.newInstance();
|
||||
// factory.setNamespaceAware(true);
|
||||
// XMLReader reader = factory.newSAXParser().getXMLReader();
|
||||
//
|
||||
// JAXBContext context = JAXBContext.newInstance("org.codesecure.dependencycheck.data.nvdcve.generated");
|
||||
// NvdCveXmlFilter filter = new NvdCveXmlFilter(context);
|
||||
//
|
||||
// Indexer indexer = new Indexer();
|
||||
// indexer.openIndexWriter();
|
||||
//
|
||||
// filter.registerSaveDelegate(indexer);
|
||||
//
|
||||
// reader.setContentHandler(filter);
|
||||
// Reader fileReader = new FileReader(file);
|
||||
// InputSource is = new InputSource(fileReader);
|
||||
// try {
|
||||
// reader.parse(is);
|
||||
// } catch (IOException ex) {
|
||||
// Logger.getLogger(Importer.class.getName()).log(Level.SEVERE, null, ex);
|
||||
// } catch (SAXException ex) {
|
||||
// Logger.getLogger(Importer.class.getName()).log(Level.SEVERE, null, ex);
|
||||
// } finally {
|
||||
// indexer.close();
|
||||
// }
|
||||
// }
|
||||
|
||||
/**
|
||||
* Imports the CPE XML File into the Lucene Index.
|
||||
*
|
||||
* @param path the path to the CPE XML file.
|
||||
*/
|
||||
public static void importXML(String path) {
|
||||
File f = new File(path);
|
||||
if (!f.exists()) {
|
||||
f.mkdirs();
|
||||
}
|
||||
Importer.importXML(f);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,627 @@
|
||||
package org.codesecure.dependencycheck.data.nvdcve.xml;
|
||||
/*
|
||||
* This file is part of DependencyCheck.
|
||||
*
|
||||
* DependencyCheck is free software: you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation, either version 3 of the License, or (at your option) any
|
||||
* later version.
|
||||
*
|
||||
* DependencyCheck is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
|
||||
* details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* DependencyCheck. If not, see http://www.gnu.org/licenses/.
|
||||
*
|
||||
* Copyright (c) 2012 Jeremy Long. All Rights Reserved.
|
||||
*/
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.InputStreamReader;
|
||||
import java.io.OutputStream;
|
||||
import java.io.OutputStreamWriter;
|
||||
import org.codesecure.dependencycheck.data.CachedWebDataSource;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.util.Calendar;
|
||||
import java.util.Date;
|
||||
import java.util.GregorianCalendar;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.logging.Level;
|
||||
import java.util.logging.Logger;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
import org.apache.lucene.index.CorruptIndexException;
|
||||
import org.codesecure.dependencycheck.data.nvdcve.Index;
|
||||
import org.codesecure.dependencycheck.data.UpdateException;
|
||||
import org.codesecure.dependencycheck.utils.DownloadFailedException;
|
||||
import org.codesecure.dependencycheck.utils.Downloader;
|
||||
import org.codesecure.dependencycheck.utils.FileUtils;
|
||||
import org.codesecure.dependencycheck.utils.Settings;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Jeremy Long (jeremy.long@gmail.com)
|
||||
*/
|
||||
public class IndexUpdater extends Index implements CachedWebDataSource {
|
||||
|
||||
/**
|
||||
* The name of the properties file containing the timestamp of the last
|
||||
* update.
|
||||
*/
|
||||
private static final String UPDATE_PROPERTIES_FILE = "lastupdated.prop";
|
||||
/**
|
||||
* The properties file key for the last updated field - used to store the
|
||||
* last updated time of the Modified NVD CVE xml file.
|
||||
*/
|
||||
private static final String LAST_UPDATED_MODIFIED = "lastupdated.modified";
|
||||
/**
|
||||
* Stores the last updated time for each of the NVD CVE files. These
|
||||
* timestamps should be updated if we process the modified file within 7
|
||||
* days of the last update.
|
||||
*/
|
||||
private static final String LAST_UPDATED_BASE = "lastupdated.";
|
||||
/**
|
||||
* The current version of the index
|
||||
*/
|
||||
public static final String INDEX_VERSION = "1.1";
|
||||
|
||||
/**
|
||||
* <p>Downloads the latest NVD CVE XML file from the web and imports it into
|
||||
* the current CVE Index.</p>
|
||||
*
|
||||
* @throws UpdateException is thrown if there is an error updating the index
|
||||
*/
|
||||
public void update() throws UpdateException {
|
||||
try {
|
||||
Map<String, NvdCveUrl> update = updateNeeded();
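//update contains one entry per NVD CVE file; only the entries flagged as needing an update are downloaded below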
|
||||
int maxUpdates = 0;
|
||||
for (NvdCveUrl cve : update.values()) {
|
||||
if (cve.getNeedsUpdate()) {
|
||||
maxUpdates += 1;
|
||||
}
|
||||
}
|
||||
if (maxUpdates > 3) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.WARNING,
|
||||
"NVD CVE requires several updates; this could take a couple of minutes.");
|
||||
}
|
||||
int count = 0;
|
||||
for (NvdCveUrl cve : update.values()) {
|
||||
if (cve.getNeedsUpdate()) {
|
||||
count += 1;
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.WARNING,
|
||||
"Updating NVD CVE ({0} of {1})", new Object[]{count, maxUpdates});
|
||||
URL url = new URL(cve.getUrl());
|
||||
File outputPath = null;
|
||||
try {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.WARNING, "Downloading {0}", cve.getUrl());
|
||||
outputPath = File.createTempFile("cve" + cve.getId() + "_", ".xml");
|
||||
Downloader.fetchFile(url, outputPath, false);
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.WARNING, "Processing {0}", cve.getUrl());
|
||||
importXML(outputPath.toString());
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.WARNING,
"Completed update {0} of {1}", new Object[]{count, maxUpdates});
|
||||
} catch (FileNotFoundException ex) {
|
||||
//Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
} catch (IOException ex) {
|
||||
//Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
} finally {
|
||||
try {
|
||||
if (outputPath != null && outputPath.exists()) {
|
||||
outputPath.delete();
|
||||
}
|
||||
} finally {
|
||||
if (outputPath != null && outputPath.exists()) {
|
||||
outputPath.deleteOnExit();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (maxUpdates >= 1) {
|
||||
writeLastUpdatedPropertyFile(update);
|
||||
}
|
||||
} catch (MalformedURLException ex) {
|
||||
//Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
} catch (DownloadFailedException ex) {
|
||||
//Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Imports the NVD CVE XML File into the Lucene Index.
|
||||
*
|
||||
* @param file containing the path to the NVD CVE XML file.
|
||||
*/
|
||||
private void importXML(File file) {
|
||||
if (!file.exists()) {
|
||||
file.mkdirs();
|
||||
}
|
||||
NvdCveParser indexer = null;
|
||||
org.codesecure.dependencycheck.data.cpe.xml.Indexer cpeIndexer = null;
|
||||
try {
|
||||
indexer = new NvdCveParser();
|
||||
indexer.openIndexWriter();
|
||||
|
||||
//HACK - hack to ensure all CPE data is stored in the index.
|
||||
cpeIndexer = new org.codesecure.dependencycheck.data.cpe.xml.Indexer();
|
||||
cpeIndexer.openIndexWriter();
|
||||
indexer.setCPEIndexer(cpeIndexer);
|
||||
|
||||
indexer.parse(file);
|
||||
} catch (CorruptIndexException ex) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
} finally {
|
||||
if (indexer != null) {
|
||||
indexer.close();
|
||||
}
|
||||
if (cpeIndexer != null) {
|
||||
cpeIndexer.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
// public static void importXML(File file) throws FileNotFoundException, IOException, JAXBException,
|
||||
// ParserConfigurationException, SAXException {
|
||||
//
|
||||
// SAXParserFactory factory = SAXParserFactory.newInstance();
|
||||
// factory.setNamespaceAware(true);
|
||||
// XMLReader reader = factory.newSAXParser().getXMLReader();
|
||||
//
|
||||
// JAXBContext context = JAXBContext.newInstance("org.codesecure.dependencycheck.data.nvdcve.generated");
|
||||
// NvdCveXmlFilter filter = new NvdCveXmlFilter(context);
|
||||
//
|
||||
// Indexer indexer = new Indexer();
|
||||
// indexer.openIndexWriter();
|
||||
//
|
||||
// filter.registerSaveDelegate(indexer);
|
||||
//
|
||||
// reader.setContentHandler(filter);
|
||||
// Reader fileReader = new FileReader(file);
|
||||
// InputSource is = new InputSource(fileReader);
|
||||
// try {
|
||||
// reader.parse(is);
|
||||
// } catch (IOException ex) {
|
||||
// Logger.getLogger(Importer.class.getName()).log(Level.SEVERE, null, ex);
|
||||
// } catch (SAXException ex) {
|
||||
// Logger.getLogger(Importer.class.getName()).log(Level.SEVERE, null, ex);
|
||||
// } finally {
|
||||
// indexer.close();
|
||||
// }
|
||||
// }
|
||||
|
||||
/**
|
||||
* Imports the NVD CVE XML file into the Lucene Index.
*
* @param path the path to the NVD CVE XML file.
|
||||
*/
|
||||
private void importXML(String path) {
|
||||
File f = new File(path);
|
||||
importXML(f);
|
||||
}
|
||||
|
||||
/**
|
||||
* Writes a properties file containing the last updated date to the
|
||||
* VULNERABLE_CPE directory.
|
||||
*
|
||||
* @param updated a map of the NvdCveUrl entries whose timestamps were updated.
* @throws UpdateException is thrown if the properties file cannot be written.
*/
|
||||
private void writeLastUpdatedPropertyFile(Map<String, NvdCveUrl> updated) throws UpdateException {
|
||||
String dir;
|
||||
try {
|
||||
dir = getDataDirectory().getCanonicalPath();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException("Unable to locate last updated properties file.", ex);
|
||||
}
|
||||
File cveProp = new File(dir + File.separatorChar + UPDATE_PROPERTIES_FILE);
|
||||
Properties prop = new Properties();
|
||||
prop.put("version", INDEX_VERSION);
|
||||
for (NvdCveUrl cve : updated.values()) {
|
||||
prop.put(LAST_UPDATED_BASE + cve.id, String.valueOf(cve.getTimestamp()));
|
||||
}
|
||||
|
||||
OutputStream os = null;
|
||||
try {
|
||||
os = new FileOutputStream(cveProp);
|
||||
OutputStreamWriter out = new OutputStreamWriter(os, "UTF-8");
|
||||
prop.store(out, dir);
|
||||
} catch (FileNotFoundException ex) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException("Unable to find last updated properties file.", ex);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException("Unable to update last updated properties file.", ex);
|
||||
} finally {
|
||||
if (os != null) {
|
||||
try {
|
||||
os.flush();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
try {
|
||||
os.close();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines if the index needs to be updated. This is done by fetching the
|
||||
* nvd cve meta data and checking the last update date. If the data needs to
|
||||
* be refreshed this method will return the NvdCveUrl for the files that
|
||||
* need to be updated.
|
||||
*
|
||||
* @return the NvdCveUrl of the files that need to be updated.
|
||||
* @throws MalformedURLException is thrown if the URL for the NVD CVE Meta
|
||||
* data is incorrect.
|
||||
* @throws DownloadFailedException is thrown if there is an error
* downloading the nvd cve download data file.
* @throws UpdateException is thrown if there is an issue with the last updated properties file.
|
||||
*/
|
||||
public Map<String, NvdCveUrl> updateNeeded() throws MalformedURLException, DownloadFailedException, UpdateException {
|
||||
|
||||
Map<String, NvdCveUrl> currentlyPublished;
|
||||
try {
|
||||
currentlyPublished = retrieveCurrentTimestampsFromWeb();
|
||||
} catch (InvalidDataException ex) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new DownloadFailedException("Unable to retrieve valid timestamp from nvd cve downloads page", ex);
|
||||
}
|
||||
if (currentlyPublished == null) {
|
||||
throw new DownloadFailedException("Unable to retrieve valid timestamp from nvd cve downloads page");
|
||||
}
|
||||
String dir;
|
||||
try {
|
||||
dir = getDataDirectory().getCanonicalPath();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException("Unable to locate last updated properties file.", ex);
|
||||
}
|
||||
|
||||
File f = new File(dir);
|
||||
if (f.exists()) {
|
||||
File cveProp = new File(dir + File.separatorChar + UPDATE_PROPERTIES_FILE);
|
||||
if (cveProp.exists()) {
|
||||
Properties prop = new Properties();
|
||||
InputStream is = null;
|
||||
try {
|
||||
is = new FileInputStream(cveProp);
|
||||
prop.load(is);
|
||||
|
||||
boolean deleteAndRecreate = false;
|
||||
float version = 0;
|
||||
|
||||
if (prop.getProperty("version") == null) {
|
||||
deleteAndRecreate = true;
|
||||
} else {
|
||||
try {
|
||||
version = Float.parseFloat(prop.getProperty("version"));
|
||||
float currentVersion = Float.parseFloat(INDEX_VERSION);
|
||||
if (currentVersion > version) {
|
||||
deleteAndRecreate = true;
|
||||
}
|
||||
} catch (NumberFormatException ex) {
|
||||
deleteAndRecreate = true;
|
||||
}
|
||||
}
|
||||
if (deleteAndRecreate) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.WARNING, "Index version is old. Rebuilding the index.");
|
||||
is.close();
|
||||
//this is an old version of the lucene index - just delete it
|
||||
FileUtils.delete(f);
|
||||
|
||||
//this importer also updates the CPE index and it is also using an old version
|
||||
org.codesecure.dependencycheck.data.cpe.Index cpeidx = new org.codesecure.dependencycheck.data.cpe.Index();
|
||||
File cpeDir = cpeidx.getDataDirectory();
|
||||
FileUtils.delete(cpeDir);
|
||||
return currentlyPublished;
|
||||
}
|
||||
|
||||
long lastUpdated = Long.parseLong(prop.getProperty(LAST_UPDATED_MODIFIED));
|
||||
Date now = new Date();
|
||||
int days = Settings.getInt(Settings.KEYS.CVE_MODIFIED_VALID_FOR_DAYS);
|
||||
int maxEntries = Settings.getInt(Settings.KEYS.CVE_URL_COUNT);
|
||||
if (lastUpdated == currentlyPublished.get("modified").timestamp) {
|
||||
currentlyPublished.clear(); //we don't need to update anything.
|
||||
} else if (withinRange(lastUpdated, now.getTime(), days)) {
|
||||
currentlyPublished.get("modified").setNeedsUpdate(true);
|
||||
for (int i = 1; i <= maxEntries; i++) {
|
||||
currentlyPublished.get(String.valueOf(i)).setNeedsUpdate(false);
|
||||
}
|
||||
} else { //we figure out which of the several XML files need to be downloaded.
|
||||
currentlyPublished.get("modified").setNeedsUpdate(false);
|
||||
for (int i = 1; i <= maxEntries; i++) {
|
||||
NvdCveUrl cve = currentlyPublished.get(String.valueOf(i));
|
||||
long currentTimestamp = 0;
|
||||
try {
|
||||
currentTimestamp = Long.parseLong(prop.getProperty(LAST_UPDATED_BASE + String.valueOf(i), "0"));
|
||||
} catch (NumberFormatException ex) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.FINEST, "Error parsing " + LAST_UPDATED_BASE
|
||||
+ String.valueOf(i) + " from nvdcve.lastupdated", ex);
|
||||
}
|
||||
if (currentTimestamp == cve.getTimestamp()) {
|
||||
cve.setNeedsUpdate(false); //they default to true.
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (FileNotFoundException ex) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.FINEST, null, ex);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.FINEST, null, ex);
|
||||
} catch (NumberFormatException ex) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.FINEST, null, ex);
|
||||
} finally {
|
||||
if (is != null) {
|
||||
try {
|
||||
is.close();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return currentlyPublished;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines if the epoch date is within the specified range of the
* compareTo epoch time. This takes the (compareTo-date)/1000/60/60/24 to
* get the number of days. If the calculated days is less than the range, the
|
||||
* date is considered valid.
|
||||
*
|
||||
* @param date the date to be checked.
|
||||
* @param compareTo the date to compare to.
|
||||
* @param range the range in days to be considered valid.
|
||||
* @return whether or not the date is within the range.
|
||||
*/
|
||||
private boolean withinRange(long date, long compareTo, int range) {
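//note: the division below is long (integer) division, so any fractional day is truncated before the result is widened to a double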
|
||||
double differenceInDays = (compareTo - date) / 1000 / 60 / 60 / 24;
|
||||
return differenceInDays < range;
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves the timestamps from the NVD CVE meta data file.
|
||||
*
|
||||
* @return the timestamp from the currently published nvdcve downloads page
|
||||
* @throws MalformedURLException is thrown if the URL for the NVD CVE Meta
|
||||
* data is incorrect.
|
||||
* @throws DownloadFailedException is thrown if there is an error
|
||||
* downloading the nvd cve meta data file
|
||||
* @throws InvalidDataException is thrown if there is an exception parsing
|
||||
* the timestamps
|
||||
*/
|
||||
protected Map<String, NvdCveUrl> retrieveCurrentTimestampsFromWeb() throws MalformedURLException, DownloadFailedException, InvalidDataException {
|
||||
Map<String, NvdCveUrl> map = new HashMap<String, NvdCveUrl>();
|
||||
|
||||
File tmp = null;
|
||||
try {
|
||||
tmp = File.createTempFile("cve", "meta");
|
||||
URL url = new URL(Settings.getString(Settings.KEYS.CVE_META_URL));
|
||||
Downloader.fetchFile(url, tmp);
|
||||
String html = readFile(tmp);
|
||||
|
||||
String retrieveUrl = Settings.getString(Settings.KEYS.CVE_MODIFIED_URL);
|
||||
NvdCveUrl cve = createNvdCveUrl("modified", retrieveUrl, html);
|
||||
cve.setNeedsUpdate(false); //the others default to true; to make life easier later, this one defaults to false.
|
||||
map.put("modified", cve);
|
||||
int max = Settings.getInt(Settings.KEYS.CVE_URL_COUNT);
|
||||
for (int i = 1; i <= max; i++) {
|
||||
retrieveUrl = Settings.getString(Settings.KEYS.CVE_BASE_URL + i);
|
||||
String key = Integer.toString(i);
|
||||
cve = createNvdCveUrl(key, retrieveUrl, html);
|
||||
map.put(key, cve);
|
||||
}
|
||||
} catch (IOException ex) {
|
||||
throw new DownloadFailedException("Unable to create temporary file for NVD CVE Meta File download.", ex);
|
||||
} finally {
|
||||
try {
|
||||
if (tmp != null && tmp.exists()) {
|
||||
tmp.delete();
|
||||
}
|
||||
} finally {
|
||||
if (tmp != null && tmp.exists()) {
|
||||
tmp.deleteOnExit();
|
||||
}
|
||||
}
|
||||
}
|
||||
return map;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new NvdCveUrl object from the provided id, url, and text/html
|
||||
* from the NVD CVE downloads page.
|
||||
*
|
||||
* @param id the name of this NVD CVE Url
|
||||
* @param retrieveUrl the URL to download the file from
|
||||
* @param text a bit of HTML from the NVD CVE downloads page that contains
|
||||
* the URL and the last updated timestamp.
|
||||
* @return a shiny new NvdCveUrl object.
|
||||
* @throws InvalidDataException is thrown if the timestamp could not be
|
||||
* extracted from the provided text.
|
||||
*/
|
||||
private NvdCveUrl createNvdCveUrl(String id, String retrieveUrl, String text) throws InvalidDataException {
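//assumption: the downloads page lists each file URL followed by an "Updated: MM/dd/yy at hh:mm" fragment before the next <br> tag; the regex below captures that fragment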
|
||||
Pattern pattern = Pattern.compile(Pattern.quote(retrieveUrl) + ".+?\\<br");
|
||||
Matcher m = pattern.matcher(text);
|
||||
NvdCveUrl item = new NvdCveUrl();
|
||||
item.id = id;
|
||||
item.url = retrieveUrl;
|
||||
if (m.find()) {
|
||||
String line = m.group();
|
||||
int pos = line.indexOf("Updated:");
|
||||
if (pos > 0) {
|
||||
pos += 9;
|
||||
try {
|
||||
String timestampstr = line.substring(pos, line.length() - 3).replace("at ", "");
|
||||
long timestamp = getEpochTimeFromDateTime(timestampstr);
|
||||
item.setTimestamp(timestamp);
|
||||
} catch (NumberFormatException ex) {
|
||||
throw new InvalidDataException("NVD CVE Meta file does not contain a valid timestamp for '" + retrieveUrl + "'.", ex);
|
||||
}
|
||||
} else {
|
||||
throw new InvalidDataException("NVD CVE Meta file does not contain the updated timestamp for '" + retrieveUrl + "'.");
|
||||
}
|
||||
} else {
|
||||
throw new InvalidDataException("NVD CVE Meta file does not contain the url for '" + retrieveUrl + "'.");
|
||||
}
|
||||
return item;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parses a timestamp in the format of "MM/dd/yy hh:mm" into a calendar
|
||||
* object and returns the epoch time. Note, this removes the millisecond
|
||||
* portion of the epoch time so all numbers returned should end in 000.
|
||||
*
|
||||
* @param timestamp a string in the format of "MM/dd/yy hh:mm"
|
||||
* @return the epoch time in milliseconds.
|
||||
* @throws NumberFormatException if the timestamp was parsed incorrectly.
|
||||
*/
|
||||
private long getEpochTimeFromDateTime(String timestamp) throws NumberFormatException {
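//note: Calendar.set expects a zero-based month (January == 0), so the one-based month parsed below pushes the result one month forward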
|
||||
Calendar c = new GregorianCalendar();
|
||||
int month = Integer.parseInt(timestamp.substring(0, 2));
|
||||
int date = Integer.parseInt(timestamp.substring(3, 5));
|
||||
int year = 2000 + Integer.parseInt(timestamp.substring(6, 8));
|
||||
int hourOfDay = Integer.parseInt(timestamp.substring(9, 11));
|
||||
int minute = Integer.parseInt(timestamp.substring(12, 14));
|
||||
c.set(year, month, date, hourOfDay, minute, 0);
|
||||
long t = c.getTimeInMillis();
|
||||
t = (t / 1000) * 1000;
|
||||
return t;
|
||||
}
|
||||
|
||||
/**
|
||||
* Reads a file into a string.
|
||||
*
|
||||
* @param file the file to be read.
|
||||
* @return the contents of the file.
|
||||
* @throws IOException is thrown if an IOException occurs.
|
||||
*/
|
||||
private String readFile(File file) throws IOException {
|
||||
InputStreamReader stream = new InputStreamReader(new FileInputStream(file), "UTF-8");
|
||||
StringBuilder str = new StringBuilder((int) file.length());
|
||||
try {
|
||||
char[] buf = new char[8096];
|
||||
int read = stream.read(buf, 0, 8096);
|
||||
while (read > 0) {
|
||||
str.append(buf, 0, read);
|
||||
read = stream.read(buf, 0, 8096);
|
||||
}
|
||||
} finally {
|
||||
stream.close();
|
||||
}
|
||||
return str.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* A pojo that contains the Url and timestamp of the current NvdCve XML
|
||||
* files.
|
||||
*/
|
||||
protected class NvdCveUrl {
|
||||
|
||||
/**
|
||||
* an id.
|
||||
*/
|
||||
private String id;
|
||||
|
||||
/**
|
||||
* Get the value of id
|
||||
*
|
||||
* @return the value of id
|
||||
*/
|
||||
public String getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the value of id
|
||||
*
|
||||
* @param id new value of id
|
||||
*/
|
||||
public void setId(String id) {
|
||||
this.id = id;
|
||||
}
|
||||
/**
|
||||
* a url.
|
||||
*/
|
||||
private String url;
|
||||
|
||||
/**
|
||||
* Get the value of url
|
||||
*
|
||||
* @return the value of url
|
||||
*/
|
||||
public String getUrl() {
|
||||
return url;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the value of url
|
||||
*
|
||||
* @param url new value of url
|
||||
*/
|
||||
public void setUrl(String url) {
|
||||
this.url = url;
|
||||
}
|
||||
/**
|
||||
* a timestamp - epoch time.
|
||||
*/
|
||||
private long timestamp;
|
||||
|
||||
/**
|
||||
* Get the value of timestamp - epoch time
|
||||
*
|
||||
* @return the value of timestamp - epoch time
|
||||
*/
|
||||
public long getTimestamp() {
|
||||
return timestamp;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the value of timestamp - epoch time
|
||||
*
|
||||
* @param timestamp new value of timestamp - epoch time
|
||||
*/
|
||||
public void setTimestamp(long timestamp) {
|
||||
this.timestamp = timestamp;
|
||||
}
|
||||
/**
|
||||
* indicates whether or not this item should be updated.
|
||||
*/
|
||||
private boolean needsUpdate = true;
|
||||
|
||||
/**
|
||||
* Get the value of needsUpdate
|
||||
*
|
||||
* @return the value of needsUpdate
|
||||
*/
|
||||
public boolean getNeedsUpdate() {
|
||||
return needsUpdate;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the value of needsUpdate
|
||||
*
|
||||
* @param needsUpdate new value of needsUpdate
|
||||
*/
|
||||
public void setNeedsUpdate(boolean needsUpdate) {
|
||||
this.needsUpdate = needsUpdate;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package org.codesecure.dependencycheck.data.nvdcve;
|
||||
package org.codesecure.dependencycheck.data.nvdcve.xml;
|
||||
/*
|
||||
* This file is part of DependencyCheck.
|
||||
*
|
||||
@@ -93,9 +93,15 @@ public class NvdCveParser extends Index {
|
||||
//facts occur more often, do them first.
|
||||
Matcher matcherFact = rxFact.matcher(str);
|
||||
if (matcherFact.matches()) {
|
||||
String cpe = matcherFact.group(1);
|
||||
String cpe = matcherFact.group(1).trim();
|
||||
if (cpe != null && cpe.startsWith("cpe:/a:")) {
|
||||
skipEntry = false;
|
||||
|
||||
//TODO deal with other possible :-: scenarios. do we need to be concerned about those?
|
||||
if (cpe.endsWith(":-")) {
|
||||
cpe = cpe.substring(0, cpe.length() - 2);
|
||||
}
|
||||
|
||||
addVulnerableCpe(cpe, doc);
|
||||
}
|
||||
continue;
|
||||
|
||||
@@ -376,15 +376,13 @@ public class Dependency {
|
||||
return false;
|
||||
}
|
||||
|
||||
String fnd = str.toLowerCase();
|
||||
|
||||
if (vendorEvidence.containsUsedString(str)) {
|
||||
return true;
|
||||
}
|
||||
if (productEvidence.containsUsedString(str)) {
|
||||
return true;
|
||||
}
|
||||
if (versionEvidence.containsUsedString(fnd)) {
|
||||
if (versionEvidence.containsUsedString(str)) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
||||
@@ -86,9 +86,6 @@ public final class CliParser {
|
||||
* SCAN or CPE command line arguments that does not exist.
|
||||
*/
|
||||
private void validateArgs() throws FileNotFoundException, ParseException {
|
||||
if (isLoadCPE()) {
|
||||
validatePathExists(getCpeFile());
|
||||
}
|
||||
if (isRunScan()) {
|
||||
validatePathExists(getScanFiles());
|
||||
if (!line.hasOption(ArgumentName.OUT)) {
|
||||
@@ -171,10 +168,6 @@ public final class CliParser {
|
||||
.withDescription("the path to scan - this option can be specified multiple times.")
|
||||
.create(ArgumentName.SCAN_SHORT);
|
||||
|
||||
Option load = OptionBuilder.withArgName("file").hasArg().withLongOpt(ArgumentName.CPE)
|
||||
.withDescription("load the CPE xml file.")
|
||||
.create(ArgumentName.CPE_SHORT);
|
||||
|
||||
Option props = OptionBuilder.withArgName("file").hasArg().withLongOpt(ArgumentName.PROP)
|
||||
.withDescription("a property file to load.")
|
||||
.create(ArgumentName.PROP_SHORT);
|
||||
@@ -187,7 +180,6 @@ public final class CliParser {
|
||||
|
||||
OptionGroup og = new OptionGroup();
|
||||
og.addOption(path);
|
||||
og.addOption(load);
|
||||
|
||||
Options opts = new Options();
|
||||
opts.addOptionGroup(og);
|
||||
@@ -219,15 +211,6 @@ public final class CliParser {
|
||||
return (line != null) ? line.hasOption(ArgumentName.HELP) : false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines if the 'cpe' command line argument was passed in.
|
||||
*
|
||||
* @return whether or not the 'cpe' command line argument was passed in
|
||||
*/
|
||||
public boolean isLoadCPE() {
|
||||
return (line != null) ? isValid && line.hasOption(ArgumentName.CPE) : false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines if the 'scan' command line argument was passed in.
|
||||
*
|
||||
@@ -267,16 +250,6 @@ public final class CliParser {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves the file command line parameter(s) specified for the 'cpe'
|
||||
* argument.
|
||||
*
|
||||
* @return the file paths specified on the command line
|
||||
*/
|
||||
public String getCpeFile() {
|
||||
return line.getOptionValue(ArgumentName.CPE);
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves the file command line parameter(s) specified for the 'scan'
|
||||
* argument.
|
||||
@@ -343,16 +316,6 @@ public final class CliParser {
|
||||
* The short CLI argument name specifying the directory/file to scan
|
||||
*/
|
||||
public static final String SCAN_SHORT = "s";
|
||||
/**
|
||||
* The long CLI argument name specifying the path to the CPE.XML file to
|
||||
* import
|
||||
*/
|
||||
public static final String CPE = "cpe";
|
||||
/**
|
||||
* The short CLI argument name specifying the path to the CPE.XML file to
|
||||
* import
|
||||
*/
|
||||
public static final String CPE_SHORT = "c";
|
||||
/**
|
||||
* The long CLI argument name specifying that the CPE/CVE/etc. data
|
||||
* should not be automatically updated.
|
||||
|
||||
@@ -36,7 +36,14 @@ public class Settings {
|
||||
/**
|
||||
* The collection of keys used within the properties file.
|
||||
*/
|
||||
public abstract class KEYS {
|
||||
public static class KEYS {
|
||||
|
||||
/**
|
||||
* Private constructor because this is a "utility" class containing constants.
|
||||
*/
|
||||
private KEYS() {
|
||||
//do nothing
|
||||
}
|
||||
|
||||
/**
|
||||
* The properties key for the path where the CPE Lucene Index will be
|
||||
|
||||
@@ -1 +1 @@
|
||||
org.codesecure.dependencycheck.data.nvdcve.Index
|
||||
org.codesecure.dependencycheck.data.nvdcve.xml.IndexUpdater
|
||||
@@ -1,4 +1,5 @@
|
||||
handlers=java.util.logging.ConsoleHandler, java.util.logging.FileHandler
|
||||
handlers=java.util.logging.ConsoleHandler
|
||||
#, java.util.logging.FileHandler
|
||||
|
||||
# logging levels
|
||||
# FINEST, FINER, FINE, CONFIG, INFO, WARNING and SEVERE.
|
||||
@@ -9,8 +10,8 @@ java.util.logging.ConsoleHandler.level=WARNING
|
||||
org.codesecure.dependencycheck.data.nvdcve.xml
|
||||
|
||||
# Configure the FileHandler.
|
||||
java.util.logging.FileHandler.formatter=java.util.logging.SimpleFormatter
|
||||
java.util.logging.FileHandler.level=FINEST
|
||||
#java.util.logging.FileHandler.formatter=java.util.logging.SimpleFormatter
|
||||
#java.util.logging.FileHandler.level=FINEST
|
||||
|
||||
# The following special tokens can be used in the pattern property
|
||||
# which specifies the location and name of the log file.
|
||||
@@ -20,4 +21,4 @@ java.util.logging.FileHandler.level=FINEST
|
||||
# %g - generation number for rotating logs
|
||||
# %u - unique number to avoid conflicts
|
||||
# FileHandler writes to %h/demo0.log by default.
|
||||
java.util.logging.FileHandler.pattern=./logs/DependencyCheck%u.log
|
||||
#java.util.logging.FileHandler.pattern=./logs/DependencyCheck%u.log
|
||||
@@ -113,40 +113,14 @@ public class CPEAnalyzerTest extends BaseIndexTestCase {
|
||||
assertTrue("Incorrect match", depends.getIdentifiers().get(0).getValue().equals(expResult));
|
||||
}
|
||||
|
||||
/**
|
||||
* Test of searchCPE method, of class CPEAnalyzer.
|
||||
* @throws Exception is thrown when an exception occurs
|
||||
*/
|
||||
@Test
|
||||
public void testSearchCPE_3args() throws Exception {
|
||||
System.out.println("searchCPE - 3 args");
|
||||
String vendor = "apache software foundation";
|
||||
String product = "struts 2 core";
|
||||
String version = "2.1.2";
|
||||
CPEAnalyzer instance = new CPEAnalyzer();
|
||||
instance.open();
|
||||
String expResult = "cpe:/a:apache:struts:2.1.2";
|
||||
List<Entry> result = instance.searchCPE(vendor, product, version);
|
||||
assertEquals(expResult, result.get(0).getName());
|
||||
|
||||
vendor = "apache software foundation";
|
||||
product = "struts 2 core";
|
||||
version = "2.3.1.2";
|
||||
|
||||
expResult = "cpe:/a:apache:struts:2.3.1.2";
|
||||
result = instance.searchCPE(vendor, product, version);
|
||||
assertEquals(expResult, result.get(0).getName());
|
||||
|
||||
instance.close();
|
||||
}
|
||||
|
||||
/**
|
||||
* Test of searchCPE method, of class CPEAnalyzer.
|
||||
* @throws Exception is thrown when an exception occurs
|
||||
*/
|
||||
@Test
|
||||
public void testSearchCPE_5args() throws Exception {
|
||||
System.out.println("searchCPE - 5 args");
|
||||
public void testSearchCPE() throws Exception {
|
||||
System.out.println("searchCPE");
|
||||
String vendor = "apache software foundation";
|
||||
String product = "struts 2 core";
|
||||
String version = "2.1.2";
|
||||
|
||||
@@ -1,47 +0,0 @@
|
||||
/*
|
||||
* To change this template, choose Tools | Templates
|
||||
* and open the template in the editor.
|
||||
*/
|
||||
package org.codesecure.dependencycheck.data.cpe.xml;
|
||||
|
||||
import java.io.File;
|
||||
import junit.framework.TestCase;
|
||||
import org.codesecure.dependencycheck.data.cpe.xml.Importer;
|
||||
import org.xml.sax.Attributes;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author jeremy
|
||||
*/
|
||||
public class ImporterTest extends TestCase {
|
||||
|
||||
public ImporterTest(String testName) {
|
||||
super(testName);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void setUp() throws Exception {
|
||||
super.setUp();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void tearDown() throws Exception {
|
||||
super.tearDown();
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Test of the Importer.importXML method.
|
||||
* @throws Exception is thrown when an exception occurs.
|
||||
*/
|
||||
public void testHandler() throws Exception {
|
||||
System.out.println("importXML");
|
||||
|
||||
File path = new File(this.getClass().getClassLoader().getResource("official-cpe-dictionary_v2.2.xml").getPath());
|
||||
|
||||
Importer.importXML(path.getCanonicalPath());
|
||||
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
@@ -0,0 +1,133 @@
|
||||
package org.codesecure.dependencycheck.data.lucene;
|
||||
/*
|
||||
* This file is part of DependencyCheck.
|
||||
*
|
||||
* DependencyCheck is free software: you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation, either version 3 of the License, or (at your option) any
|
||||
* later version.
|
||||
*
|
||||
* DependencyCheck is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
|
||||
* details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* DependencyCheck. If not, see http://www.gnu.org/licenses/.
|
||||
*
|
||||
* Copyright (c) 2012 Jeremy Long. All Rights Reserved.
|
||||
*/
|
||||
|
||||
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
|
||||
import java.util.Map;
|
||||
import java.util.HashMap;
|
||||
import org.apache.lucene.queryparser.classic.QueryParser;
|
||||
import org.apache.lucene.analysis.standard.StandardAnalyzer;
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.apache.lucene.search.TopScoreDocCollector;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.search.Query;
|
||||
import java.io.IOException;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.TextField;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.index.IndexWriterConfig;
|
||||
import org.apache.lucene.store.RAMDirectory;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.Version;
|
||||
import org.junit.After;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.Before;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Jeremy Long (jeremy.long@gmail.com)
|
||||
*/
|
||||
public class FieldAnalyzerTest {
|
||||
|
||||
public FieldAnalyzerTest() {
|
||||
}
|
||||
|
||||
@BeforeClass
|
||||
public static void setUpClass() throws Exception {
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void tearDownClass() throws Exception {
|
||||
}
|
||||
|
||||
@Before
|
||||
public void setUp() {
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() {
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAnalyzers() throws Exception {
|
||||
|
||||
Analyzer analyzer = new FieldAnalyzer(Version.LUCENE_40);
|
||||
Directory index = new RAMDirectory();
|
||||
|
||||
String field1 = "product";
|
||||
String text1 = "springframework";
|
||||
|
||||
String field2 = "vendor";
|
||||
String text2 = "springsource";
|
||||
|
||||
createIndex(analyzer, index, field1, text1, field2, text2);
|
||||
|
||||
//Analyzer searchingAnalyzer = new SearchFieldAnalyzer(Version.LUCENE_40);
|
||||
String querystr = "product:\"(Spring Framework Core)\" vendor:(SpringSource)";
|
||||
|
||||
SearchFieldAnalyzer searchAnalyzerProduct = new SearchFieldAnalyzer(Version.LUCENE_40);
|
||||
SearchFieldAnalyzer searchAnalyzerVendor = new SearchFieldAnalyzer(Version.LUCENE_40);
|
||||
HashMap<String,Analyzer> map = new HashMap<String,Analyzer>();
|
||||
map.put(field1, searchAnalyzerProduct);
|
||||
map.put(field2, searchAnalyzerVendor);
|
||||
PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new StandardAnalyzer(Version.LUCENE_40), map);
|
||||
QueryParser parser = new QueryParser(Version.LUCENE_40, field1, wrapper);
|
||||
|
||||
Query q = parser.parse(querystr);
|
||||
//System.out.println(q.toString());
|
||||
|
||||
int hitsPerPage = 10;
|
||||
|
||||
IndexReader reader = DirectoryReader.open(index);
|
||||
IndexSearcher searcher = new IndexSearcher(reader);
|
||||
TopScoreDocCollector collector = TopScoreDocCollector.create(hitsPerPage, true);
|
||||
searcher.search(q, collector);
|
||||
ScoreDoc[] hits = collector.topDocs().scoreDocs;
|
||||
|
||||
assertEquals("Did not find 1 document?", 1, hits.length);
|
||||
|
||||
searchAnalyzerProduct.clear(); //ensure we don't have anything left over from the previous search.
|
||||
searchAnalyzerVendor.clear();
|
||||
querystr = "product:(Apache Struts) vendor:(Apache)";
|
||||
Query q2 = parser.parse(querystr);
|
||||
//System.out.println(q2.toString());
|
||||
assertFalse("second parsing contains previousWord from the TokenPairConcatenatingFilter", q2.toString().contains("core"));
|
||||
}
|
||||
|
||||
private void createIndex(Analyzer analyzer, Directory index, String field1, String text1, String field2, String text2) throws IOException {
|
||||
IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_40, analyzer);
|
||||
IndexWriter w = new IndexWriter(index, config);
|
||||
addDoc(w, field1, text1, field2, text2);
|
||||
w.close();
|
||||
}
|
||||
|
||||
private static void addDoc(IndexWriter w, String field1, String text1, String field2, String text2) throws IOException {
|
||||
Document doc = new Document();
|
||||
doc.add(new TextField(field1, text1, Field.Store.YES));
|
||||
doc.add(new TextField(field2, text2, Field.Store.YES));
|
||||
w.addDocument(doc);
|
||||
}
|
||||
}
|
||||
@@ -2,19 +2,20 @@
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package org.codesecure.dependencycheck.data.nvdcve;
package org.codesecure.dependencycheck.data.nvdcve.xml;

import java.util.Map;
import org.codesecure.dependencycheck.data.nvdcve.BaseIndexTestCase;
import org.junit.*;

/**
*
*
* @author Jeremy
*/
public class IndexIntegrationTest extends BaseIndexTestCase {
public class IndexUpdaterIntegrationTest extends BaseIndexTestCase {

public IndexIntegrationTest(String testName) {
public IndexUpdaterIntegrationTest(String testName) {
super(testName);
}

@@ -40,8 +41,8 @@ public class IndexIntegrationTest extends BaseIndexTestCase {
@Test
public void testRetrieveCurrentTimestampFromWeb() throws Exception {
System.out.println("retrieveCurrentTimestampFromWeb");
Index instance = new Index();
Map<String, Index.NvdCveUrl> result = instance.retrieveCurrentTimestampsFromWeb();
IndexUpdater instance = new IndexUpdater();
Map<String, IndexUpdater.NvdCveUrl> result = instance.retrieveCurrentTimestampsFromWeb();
assertEquals(12, result.size());
}

@@ -51,7 +52,7 @@ public class IndexIntegrationTest extends BaseIndexTestCase {
@Test
public void testUpdate() throws Exception {
System.out.println("update");
Index instance = new Index();
IndexUpdater instance = new IndexUpdater();
instance.update();
}

@@ -61,7 +62,7 @@ public class IndexIntegrationTest extends BaseIndexTestCase {
@Test
public void testUpdateNeeded() throws Exception {
System.out.println("updateNeeded");
Index instance = new Index();
IndexUpdater instance = new IndexUpdater();
instance.updateNeeded();
//if an exception is thrown this test fails. However, because it depends on the
// order of the tests what this will return I am just testing for the exception.
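
The hunks above only show that the renamed IndexUpdater exposes retrieveCurrentTimestampsFromWeb(), update(), updateNeeded() and a nested NvdCveUrl type. A caller might combine them roughly as in the sketch below; treat this as an assumption, since IndexUpdater itself is not part of this excerpt and updateNeeded()'s return type is not visible here.

// Hedged sketch based solely on the test methods above.
IndexUpdater updater = new IndexUpdater();
// the integration test expects 12 NVD CVE feed entries
Map<String, IndexUpdater.NvdCveUrl> feeds = updater.retrieveCurrentTimestampsFromWeb();
if (updater.updateNeeded()) { // assumption: returns a boolean
    updater.update();         // refresh the local NVD CVE index
}
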
@@ -9,7 +9,6 @@ import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.apache.lucene.index.CorruptIndexException;
import org.codesecure.dependencycheck.data.nvdcve.InvalidDataException;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;

@@ -53,7 +53,6 @@ public class CliParserTest extends TestCase {
assertFalse(instance.isGetVersion());
assertFalse(instance.isGetHelp());
assertFalse(instance.isRunScan());
assertFalse(instance.isLoadCPE());
}

/**
@@ -73,7 +72,6 @@ public class CliParserTest extends TestCase {
assertFalse(instance.isGetVersion());
assertTrue(instance.isGetHelp());
assertFalse(instance.isRunScan());
assertFalse(instance.isLoadCPE());
}

/**
@@ -91,31 +89,6 @@ public class CliParserTest extends TestCase {
assertTrue(instance.isGetVersion());
assertFalse(instance.isGetHelp());
assertFalse(instance.isRunScan());
assertFalse(instance.isLoadCPE());

}

/**
* Test of parse method with jar and cpe args, of class CliParser.
* @throws Exception thrown when an exception occurs.
*/
@Test
public void testParse_scan_cpe() throws Exception {
System.out.println("parse -cpe file -scan file");

String[] args = {"-scan", "file", "-cpe", "file"};

CliParser instance = new CliParser();
try {
instance.parse(args);
} catch (ParseException ex) {
assertTrue(ex.getMessage().contains("an option from this group has already been selected"));
}

assertFalse(instance.isGetVersion());
assertFalse(instance.isGetHelp());
assertFalse(instance.isRunScan());
assertFalse(instance.isLoadCPE());

}

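
The "an option from this group has already been selected" message checked above is the standard Apache Commons CLI behavior when two options from the same OptionGroup are supplied. CliParser's real option definitions are not shown in this diff; the sketch below only illustrates the mechanism, and the option names are illustrative.

import org.apache.commons.cli.*;

public class OptionGroupSketch {
    public static void main(String[] ignored) throws ParseException {
        Options options = new Options();
        OptionGroup exclusive = new OptionGroup();
        exclusive.addOption(new Option("scan", true, "file or directory to scan"));
        exclusive.addOption(new Option("cpe", true, "CPE xml file to import"));
        options.addOptionGroup(exclusive);
        // Passing both options from the group makes parse() throw an
        // AlreadySelectedException (a ParseException) whose message contains
        // "an option from this group has already been selected".
        new GnuParser().parse(options, new String[]{"-scan", "file", "-cpe", "file"});
    }
}
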
@@ -146,7 +119,6 @@ public class CliParserTest extends TestCase {
assertFalse(instance.isGetVersion());
assertFalse(instance.isGetHelp());
assertFalse(instance.isRunScan());
assertFalse(instance.isLoadCPE());
}

/**
@@ -170,8 +142,6 @@ public class CliParserTest extends TestCase {
assertFalse(instance.isGetVersion());
assertFalse(instance.isGetHelp());
assertFalse(instance.isRunScan());
assertFalse(instance.isLoadCPE());

}

/**
@@ -194,7 +164,6 @@ public class CliParserTest extends TestCase {
assertFalse(instance.isGetVersion());
assertFalse(instance.isGetHelp());
assertFalse(instance.isRunScan());
assertFalse(instance.isLoadCPE());
}

/**
@@ -215,78 +184,6 @@ public class CliParserTest extends TestCase {
assertFalse(instance.isGetVersion());
assertFalse(instance.isGetHelp());
assertTrue(instance.isRunScan());
assertFalse(instance.isLoadCPE());

}

/**
* Test of parse method with cpe arg, of class CliParser.
* @throws Exception thrown when an exception occurs.
*/
@Test
public void testParse_cpe() throws Exception {
System.out.println("parse -cpe");

String[] args = {"-cpe"};

CliParser instance = new CliParser();

try {
instance.parse(args);
} catch (ParseException ex) {
assertTrue(ex.getMessage().contains("Missing argument"));
}

assertFalse(instance.isGetVersion());
assertFalse(instance.isGetHelp());
assertFalse(instance.isRunScan());
assertFalse(instance.isLoadCPE());

}

/**
* Test of parse method with a cpe arg that does not exist, of class CliParser.
* @throws Exception thrown when an exception occurs.
*/
@Test
public void testParse_cpe_unknownFile() throws Exception {
System.out.println("parse -cpe cpe.that.does.not.exist");

String[] args = {"-cpe", "cpe.that.does.not.exist"};

CliParser instance = new CliParser();
try {
instance.parse(args);
} catch (FileNotFoundException ex) {
assertTrue(ex.getMessage().contains("Invalid file argument"));
}

assertFalse(instance.isGetVersion());
assertFalse(instance.isGetHelp());
assertFalse(instance.isRunScan());
assertFalse(instance.isLoadCPE());
}

/**
* Test of parse method with a cpe arg that exists, of class CliParser.
* @throws Exception thrown when an exception occurs.
*/
@Test
public void testParse_cpe_withFileExists() throws Exception {
System.out.println("parse -cpe checkSumTest.file");
File path = new File(this.getClass().getClassLoader().getResource("checkSumTest.file").getPath());
String[] args = {"-cpe", path.getCanonicalPath()};

CliParser instance = new CliParser();
instance.parse(args);

assertEquals(path.getCanonicalPath(), instance.getCpeFile());

assertFalse(instance.isGetVersion());
assertFalse(instance.isGetHelp());
assertFalse(instance.isRunScan());
assertTrue(instance.isLoadCPE());

}

/**
@@ -342,7 +239,7 @@ public class CliParserTest extends TestCase {
String text = (new String(baos.toByteArray()));
String[] lines = text.split(System.getProperty("line.separator"));
assertTrue(lines[0].startsWith("usage: "));
assertTrue((lines.length>2));
assertTrue((lines.length > 2));
} catch (IOException ex) {
System.setOut(out);
fail("CliParser.printVersionInfo did not write anything to system.out.");

File diff suppressed because it is too large
BIN src/test/resources/struts.jar (new file; binary file not shown)