mirror of
https://github.com/ysoftdevs/DependencyCheck.git
synced 2026-01-14 07:43:40 +01:00
Compare commits
10 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
38c1e31476 | ||
|
|
9e489c0c55 | ||
|
|
76899996c2 | ||
|
|
283dcae297 | ||
|
|
566f36e577 | ||
|
|
6e23fd7251 | ||
|
|
a16bcfbc10 | ||
|
|
2fcc325af7 | ||
|
|
5334cf7def | ||
|
|
a8a85a5947 |
@@ -7,10 +7,10 @@ If found, it will generate a report linking to the associated CVE entries.
|
||||
Usage:
|
||||
$ mvn package
|
||||
$ cd target
|
||||
$ java -jar DependencyCheck-0.2.3.jar -h
|
||||
$ java -jar DependencyCheck-0.2.3.jar -a Testing -out . -scan ./test-classes/org.mortbay.jetty.jar -scan ./test-classes/struts2-core-2.1.2.jar -scan ./lib
|
||||
$ java -jar DependencyCheck-0.2.4.0.jar -h
|
||||
$ java -jar DependencyCheck-0.2.4.0.jar -a Testing -out . -scan ./test-classes/org.mortbay.jetty.jar -scan ./test-classes/struts2-core-2.1.2.jar -scan ./lib
|
||||
|
||||
Then load the resulting 'Testing.html' into your favorite browser.
|
||||
Then load the resulting 'DependencyCheck-Report.html' into your favorite browser.
|
||||
|
||||
Author: Jeremy Long (jeremy.long@gmail.com)
|
||||
|
||||
|
||||
39
pom.xml
39
pom.xml
@@ -23,7 +23,7 @@ along with DependencyCheck. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
<groupId>org.codesecure</groupId>
|
||||
<artifactId>DependencyCheck</artifactId>
|
||||
<version>0.2.3</version>
|
||||
<version>0.2.4.0</version>
|
||||
<packaging>jar</packaging>
|
||||
|
||||
<name>DependencyCheck</name>
|
||||
@@ -100,7 +100,7 @@ along with DependencyCheck. If not, see <http://www.gnu.org/licenses/>.
|
||||
<artifactId>maven-javadoc-plugin</artifactId>
|
||||
<version>2.8.1</version>
|
||||
<configuration>
|
||||
<bottom>Copyright © 2012 Jeremy Long. All Rights Reserved.</bottom>
|
||||
<bottom>Copyright© 2012 Jeremy Long. All Rights Reserved.</bottom>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
@@ -243,6 +243,9 @@ along with DependencyCheck. If not, see <http://www.gnu.org/licenses/>.
|
||||
<value>target/data/cpe</value>
|
||||
</property>
|
||||
</systemProperties>
|
||||
<includes>
|
||||
<include>**/*IntegrationTest.java</include>
|
||||
</includes>
|
||||
</configuration>
|
||||
<executions>
|
||||
<execution>
|
||||
@@ -328,6 +331,11 @@ along with DependencyCheck. If not, see <http://www.gnu.org/licenses/>.
|
||||
</excludes>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.codehaus.mojo</groupId>
|
||||
<artifactId>taglist-maven-plugin</artifactId>
|
||||
<version>2.4</version>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-javadoc-plugin</artifactId>
|
||||
@@ -364,6 +372,11 @@ along with DependencyCheck. If not, see <http://www.gnu.org/licenses/>.
|
||||
</reportSet>
|
||||
</reportSets>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.codehaus.mojo</groupId>
|
||||
<artifactId>findbugs-maven-plugin</artifactId>
|
||||
<version>2.5.2</version>
|
||||
</plugin>
|
||||
</reportPlugins>
|
||||
</configuration>
|
||||
</plugin>
|
||||
@@ -386,7 +399,18 @@ along with DependencyCheck. If not, see <http://www.gnu.org/licenses/>.
|
||||
<dependency>
|
||||
<groupId>org.apache.lucene</groupId>
|
||||
<artifactId>lucene-core</artifactId>
|
||||
<version>3.5.0</version>
|
||||
<version>4.0.0</version>
|
||||
<!--<version>3.5.0</version>-->
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.lucene</groupId>
|
||||
<artifactId>lucene-analyzers-common</artifactId>
|
||||
<version>4.0.0</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.lucene</groupId>
|
||||
<artifactId>lucene-queryparser</artifactId>
|
||||
<version>4.0.0</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.commons</groupId>
|
||||
@@ -448,5 +472,14 @@ along with DependencyCheck. If not, see <http://www.gnu.org/licenses/>.
|
||||
<artifactId>hawtdb</artifactId>
|
||||
<version>1.6</version>
|
||||
</dependency>-->
|
||||
|
||||
<!-- The following dependencies are only scanned during integration testing -->
|
||||
<!--<dependency>
|
||||
<groupId>org.springframework</groupId>
|
||||
<artifactId>spring-beans</artifactId>
|
||||
<version>2.5.5</version>
|
||||
<scope>test</scope>
|
||||
</dependency>-->
|
||||
|
||||
</dependencies>
|
||||
</project>
|
||||
|
||||
@@ -18,7 +18,6 @@ package org.codesecure.dependencycheck;
|
||||
* Copyright (c) 2012 Jeremy Long. All Rights Reserved.
|
||||
*/
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
@@ -26,13 +25,10 @@ import java.util.List;
|
||||
import java.util.logging.Level;
|
||||
import java.util.logging.LogManager;
|
||||
import java.util.logging.Logger;
|
||||
import javax.xml.parsers.ParserConfigurationException;
|
||||
import org.apache.commons.cli.ParseException;
|
||||
import org.codesecure.dependencycheck.data.cpe.xml.Importer;
|
||||
import org.codesecure.dependencycheck.reporting.ReportGenerator;
|
||||
import org.codesecure.dependencycheck.dependency.Dependency;
|
||||
import org.codesecure.dependencycheck.utils.CliParser;
|
||||
import org.xml.sax.SAXException;
|
||||
|
||||
/*
|
||||
* This file is part of App.
|
||||
@@ -73,11 +69,12 @@ public class App {
|
||||
//while java doc for JUL says to use preferences api - it throws an exception...
|
||||
//Preferences.systemRoot().put("java.util.logging.config.file", "log.properties");
|
||||
//System.getProperties().put("java.util.logging.config.file", "configuration/log.properties");
|
||||
File dir = new File("logs");
|
||||
|
||||
if (!dir.exists()) {
|
||||
dir.mkdir();
|
||||
}
|
||||
//removed the file handler. since this is a console app - just write to console.
|
||||
// File dir = new File("logs");
|
||||
// if (!dir.exists()) {
|
||||
// dir.mkdir();
|
||||
// }
|
||||
try {
|
||||
InputStream in = App.class.getClassLoader().getResourceAsStream(LOG_PROPERTIES_FILE);
|
||||
LogManager.getLogManager().reset();
|
||||
@@ -114,8 +111,6 @@ public class App {
|
||||
|
||||
if (cli.isGetVersion()) {
|
||||
cli.printVersionInfo();
|
||||
} else if (cli.isLoadCPE()) {
|
||||
loadCPE(cli.getCpeFile());
|
||||
} else if (cli.isRunScan()) {
|
||||
runScan(cli.getReportDirectory(), cli.getApplicationName(), cli.getScanFiles(), cli.isAutoUpdate());
|
||||
} else {
|
||||
@@ -124,23 +119,6 @@ public class App {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads the specified CPE.XML file into Lucene Index.
|
||||
*
|
||||
* @param cpePath
|
||||
*/
|
||||
private void loadCPE(String cpePath) {
|
||||
try {
|
||||
Importer.importXML(cpePath);
|
||||
} catch (ParserConfigurationException ex) {
|
||||
Logger.getLogger(App.class.getName()).log(Level.SEVERE, null, ex);
|
||||
} catch (SAXException ex) {
|
||||
Logger.getLogger(App.class.getName()).log(Level.SEVERE, null, ex);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(App.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Scans the specified directories and writes the dependency reports to the
|
||||
* reportDirectory.
|
||||
@@ -158,9 +136,9 @@ public class App {
|
||||
scanner.analyzeDependencies();
|
||||
List<Dependency> dependencies = scanner.getDependencies();
|
||||
|
||||
ReportGenerator report = new ReportGenerator();
|
||||
ReportGenerator report = new ReportGenerator(applicationName, dependencies, scanner.getAnalyzers());
|
||||
try {
|
||||
report.generateReports(reportDirectory, applicationName, dependencies);
|
||||
report.generateReports(reportDirectory);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(App.class.getName()).log(Level.SEVERE, null, ex);
|
||||
} catch (Exception ex) {
|
||||
|
||||
@@ -195,7 +195,8 @@ public class Engine {
|
||||
try {
|
||||
a.initialize();
|
||||
} catch (Exception ex) {
|
||||
Logger.getLogger(Engine.class.getName()).log(Level.SEVERE, "Exception occured initializing " + a.getName() + ".", ex);
|
||||
Logger.getLogger(Engine.class.getName()).log(Level.SEVERE,
|
||||
"Exception occured initializing " + a.getName() + ".", ex);
|
||||
try {
|
||||
a.close();
|
||||
} catch (Exception ex1) {
|
||||
@@ -254,8 +255,23 @@ public class Engine {
|
||||
try {
|
||||
source.update();
|
||||
} catch (UpdateException ex) {
|
||||
Logger.getLogger(Engine.class.getName()).log(Level.SEVERE, "Unable to update " + source.getClass().getName(), ex);
|
||||
Logger.getLogger(Engine.class.getName()).log(Level.SEVERE,
|
||||
"Unable to update " + source.getClass().getName(), ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a full list of all of the analyzers. This is useful
|
||||
* for reporting which analyzers where used.
|
||||
* @return a list of Analyzers
|
||||
*/
|
||||
public List<Analyzer> getAnalyzers() {
|
||||
List<Analyzer> ret = new ArrayList<Analyzer>();
|
||||
for (AnalysisPhase phase : AnalysisPhase.values()) {
|
||||
List<Analyzer> analyzerList = analyzers.get(phase);
|
||||
ret.addAll(analyzerList);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,7 +21,6 @@ package org.codesecure.dependencycheck.analyzer;
|
||||
import org.codesecure.dependencycheck.dependency.Dependency;
|
||||
import org.codesecure.dependencycheck.dependency.Evidence;
|
||||
import java.util.Set;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
/**
|
||||
*
|
||||
@@ -83,38 +82,7 @@ public class FileNameAnalyzer implements Analyzer {
|
||||
}
|
||||
|
||||
/**
|
||||
* An enumeration to keep track of the characters in a string as it is being
|
||||
* read in one character at a time.
|
||||
*/
|
||||
private enum STRING_STATE {
|
||||
|
||||
ALPHA,
|
||||
NUMBER,
|
||||
PERIOD,
|
||||
OTHER
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines type of the character passed in.
|
||||
*
|
||||
* @param c a character
|
||||
* @return a STRING_STATE representing whether the character is number,
|
||||
* alpha, or other.
|
||||
*/
|
||||
private STRING_STATE determineState(char c) {
|
||||
if (c >= '0' && c <= '9') {
|
||||
return STRING_STATE.NUMBER;
|
||||
} else if (c == '.') {
|
||||
return STRING_STATE.PERIOD;
|
||||
} else if (c >= 'a' && c <= 'z') {
|
||||
return STRING_STATE.ALPHA;
|
||||
} else {
|
||||
return STRING_STATE.OTHER;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Collects information about the file such as hashsums.
|
||||
* Collects information about the file name.
|
||||
*
|
||||
* @param dependency the dependency to analyze.
|
||||
* @throws AnalysisException is thrown if there is an error reading the JAR
|
||||
@@ -122,48 +90,21 @@ public class FileNameAnalyzer implements Analyzer {
|
||||
*/
|
||||
public void analyze(Dependency dependency) throws AnalysisException {
|
||||
|
||||
analyzeFileName(dependency);
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Analyzes the filename of the dependency and adds it to the evidence
|
||||
* collections.
|
||||
*
|
||||
* @param dependency the dependency to analyze.
|
||||
*/
|
||||
private void analyzeFileName(Dependency dependency) {
|
||||
String fileName = dependency.getFileName();
|
||||
//slightly process the filename to chunk it into distinct words, numbers.
|
||||
// Yes, the lucene analyzer might do this, but I want a little better control
|
||||
// over the process.
|
||||
String fileNameEvidence = fileName.substring(0, fileName.length() - 4).toLowerCase().replace('-', ' ').replace('_', ' ');
|
||||
StringBuilder sb = new StringBuilder(fileNameEvidence.length());
|
||||
STRING_STATE state = determineState(fileNameEvidence.charAt(0));
|
||||
|
||||
for (int i = 0; i < fileNameEvidence.length(); i++) {
|
||||
char c = fileNameEvidence.charAt(i);
|
||||
STRING_STATE newState = determineState(c);
|
||||
if (newState != state) {
|
||||
if ((state != STRING_STATE.NUMBER && newState == STRING_STATE.PERIOD)
|
||||
|| (state == STRING_STATE.PERIOD && newState != STRING_STATE.NUMBER)
|
||||
|| (state == STRING_STATE.ALPHA || newState == STRING_STATE.ALPHA)
|
||||
|| ((state == STRING_STATE.OTHER || newState == STRING_STATE.OTHER) && c != ' ')) {
|
||||
sb.append(' ');
|
||||
}
|
||||
}
|
||||
state = newState;
|
||||
sb.append(c);
|
||||
int pos = fileName.lastIndexOf(".");
|
||||
if (pos > 0) {
|
||||
fileName = fileName.substring(0, pos - 1);
|
||||
}
|
||||
Pattern rx = Pattern.compile("\\s\\s+");
|
||||
fileNameEvidence = rx.matcher(sb.toString()).replaceAll(" ");
|
||||
|
||||
dependency.getProductEvidence().addEvidence("file", "name",
|
||||
fileNameEvidence, Evidence.Confidence.HIGH);
|
||||
fileName, Evidence.Confidence.HIGH);
|
||||
|
||||
dependency.getVendorEvidence().addEvidence("file", "name",
|
||||
fileNameEvidence, Evidence.Confidence.HIGH);
|
||||
if (fileNameEvidence.matches(".*\\d.*")) {
|
||||
fileName, Evidence.Confidence.HIGH);
|
||||
|
||||
if (fileName.matches(".*\\d.*")) {
|
||||
dependency.getVersionEvidence().addEvidence("file", "name",
|
||||
fileNameEvidence, Evidence.Confidence.HIGH);
|
||||
fileName, Evidence.Confidence.HIGH);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -186,6 +186,7 @@ public class JarAnalyzer extends AbstractAnalyzer {
|
||||
parseManifest(dependency);
|
||||
analyzePackageNames(dependency);
|
||||
analyzePOM(dependency);
|
||||
addPredefinedData(dependency);
|
||||
} catch (IOException ex) {
|
||||
throw new AnalysisException("Exception occured reading the JAR file.", ex);
|
||||
} catch (JAXBException ex) {
|
||||
@@ -228,7 +229,7 @@ public class JarAnalyzer extends AbstractAnalyzer {
|
||||
}
|
||||
} else if (!entry.isDirectory() && "pom.properties".equals(entryName)) {
|
||||
if (pomProperties == null) {
|
||||
Reader reader = new InputStreamReader(zin);
|
||||
Reader reader = new InputStreamReader(zin, "UTF-8");
|
||||
pomProperties = new Properties();
|
||||
pomProperties.load(reader);
|
||||
zin.closeEntry();
|
||||
@@ -481,6 +482,12 @@ public class JarAnalyzer extends AbstractAnalyzer {
|
||||
protected void parseManifest(Dependency dependency) throws IOException {
|
||||
JarFile jar = new JarFile(dependency.getActualFilePath());
|
||||
Manifest manifest = jar.getManifest();
|
||||
if (manifest == null) {
|
||||
Logger.getLogger(JarAnalyzer.class.getName()).log(Level.SEVERE,
|
||||
"Jar file '{0}' does not contain a manifest.",
|
||||
dependency.getFileName());
|
||||
return;
|
||||
}
|
||||
Attributes atts = manifest.getMainAttributes();
|
||||
|
||||
EvidenceCollection vendorEvidence = dependency.getVendorEvidence();
|
||||
@@ -615,4 +622,15 @@ public class JarAnalyzer extends AbstractAnalyzer {
|
||||
sb.append(text.substring(end + 1));
|
||||
return interpolateString(sb.toString(), properties); //yes yes, this should be a loop...
|
||||
}
|
||||
|
||||
private void addPredefinedData(Dependency dependency) {
|
||||
Evidence spring = new Evidence("Manifest",
|
||||
"Implementation-Title",
|
||||
"Spring Framework",
|
||||
Evidence.Confidence.HIGH);
|
||||
|
||||
if (dependency.getProductEvidence().getEvidence().contains(spring)) {
|
||||
dependency.getVendorEvidence().addEvidence("a priori", "vendor", "SpringSource", Evidence.Confidence.HIGH);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -24,16 +24,11 @@ import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.StringTokenizer;
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.index.CorruptIndexException;
|
||||
import org.apache.lucene.queryParser.ParseException;
|
||||
import org.apache.lucene.queryParser.QueryParser;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.queryparser.classic.ParseException;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.apache.lucene.search.TopDocs;
|
||||
import org.apache.lucene.util.Version;
|
||||
import org.codesecure.dependencycheck.analyzer.AnalysisException;
|
||||
import org.codesecure.dependencycheck.analyzer.AnalysisPhase;
|
||||
import org.codesecure.dependencycheck.data.lucene.LuceneUtils;
|
||||
@@ -78,14 +73,6 @@ public class CPEAnalyzer implements org.codesecure.dependencycheck.analyzer.Anal
|
||||
* The CPE Index.
|
||||
*/
|
||||
protected Index cpe = null;
|
||||
/**
|
||||
* The Lucene IndexSearcher.
|
||||
*/
|
||||
private IndexSearcher indexSearcher = null;
|
||||
/**
|
||||
* The Lucene QueryParser.
|
||||
*/
|
||||
private QueryParser queryParser = null;
|
||||
|
||||
/**
|
||||
* Opens the data source.
|
||||
@@ -96,18 +83,12 @@ public class CPEAnalyzer implements org.codesecure.dependencycheck.analyzer.Anal
|
||||
public void open() throws IOException {
|
||||
cpe = new Index();
|
||||
cpe.open();
|
||||
indexSearcher = cpe.getIndexSearcher();
|
||||
Analyzer analyzer = cpe.getAnalyzer();
|
||||
//TITLE is the default field because it contains venddor, product, and version all in one.
|
||||
queryParser = new QueryParser(Version.LUCENE_35, Fields.TITLE, analyzer);
|
||||
}
|
||||
|
||||
/**
|
||||
* Closes the data source.
|
||||
*/
|
||||
public void close() {
|
||||
queryParser = null;
|
||||
indexSearcher = null;
|
||||
cpe.close();
|
||||
}
|
||||
|
||||
@@ -149,23 +130,11 @@ public class CPEAnalyzer implements org.codesecure.dependencycheck.analyzer.Anal
|
||||
Confidence versionConf = Confidence.HIGH;
|
||||
|
||||
String vendors = addEvidenceWithoutDuplicateTerms("", dependency.getVendorEvidence(), vendorConf);
|
||||
//dependency.getVendorEvidence().toString(vendorConf);
|
||||
// if ("".equals(vendors)) {
|
||||
// vendors = STRING_THAT_WILL_NEVER_BE_IN_THE_INDEX;
|
||||
// }
|
||||
String products = addEvidenceWithoutDuplicateTerms("", dependency.getProductEvidence(), productConf);
|
||||
///dependency.getProductEvidence().toString(productConf);
|
||||
// if ("".equals(products)) {
|
||||
// products = STRING_THAT_WILL_NEVER_BE_IN_THE_INDEX;
|
||||
// }
|
||||
String versions = addEvidenceWithoutDuplicateTerms("", dependency.getVersionEvidence(), versionConf);
|
||||
//dependency.getVersionEvidence().toString(versionConf);
|
||||
// if ("".equals(versions)) {
|
||||
// versions = STRING_THAT_WILL_NEVER_BE_IN_THE_INDEX;
|
||||
// }
|
||||
|
||||
boolean found = false;
|
||||
int cnt = 0;
|
||||
int ctr = 0;
|
||||
do {
|
||||
List<Entry> entries = searchCPE(vendors, products, versions, dependency.getProductEvidence().getWeighting(),
|
||||
dependency.getVendorEvidence().getWeighting());
|
||||
@@ -178,22 +147,20 @@ public class CPEAnalyzer implements org.codesecure.dependencycheck.analyzer.Anal
|
||||
dependency.addIdentifier(
|
||||
"cpe",
|
||||
e.getName(),
|
||||
e.getTitle(),
|
||||
"http://web.nvd.nist.gov/view/vuln/search?cpe="
|
||||
+ URLEncoder.encode(e.getName(), "UTF-8"));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if (!found) {
|
||||
int round = cnt % 3;
|
||||
int round = ctr % 3;
|
||||
if (round == 0) {
|
||||
vendorConf = reduceConfidence(vendorConf);
|
||||
if (dependency.getVendorEvidence().contains(vendorConf)) {
|
||||
//vendors += " " + dependency.getVendorEvidence().toString(vendorConf);
|
||||
vendors = addEvidenceWithoutDuplicateTerms(vendors, dependency.getVendorEvidence(), vendorConf);
|
||||
} else {
|
||||
cnt += 1;
|
||||
ctr += 1;
|
||||
round += 1;
|
||||
}
|
||||
}
|
||||
@@ -203,7 +170,7 @@ public class CPEAnalyzer implements org.codesecure.dependencycheck.analyzer.Anal
|
||||
//products += " " + dependency.getProductEvidence().toString(productConf);
|
||||
products = addEvidenceWithoutDuplicateTerms(products, dependency.getProductEvidence(), productConf);
|
||||
} else {
|
||||
cnt += 1;
|
||||
ctr += 1;
|
||||
round += 1;
|
||||
}
|
||||
}
|
||||
@@ -215,7 +182,7 @@ public class CPEAnalyzer implements org.codesecure.dependencycheck.analyzer.Anal
|
||||
}
|
||||
}
|
||||
}
|
||||
} while (!found && (++cnt) < 9);
|
||||
} while (!found && (++ctr) < 9);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -245,11 +212,7 @@ public class CPEAnalyzer implements org.codesecure.dependencycheck.analyzer.Anal
|
||||
value = value.substring(8).replaceAll("\\.", " ");
|
||||
}
|
||||
if (sb.indexOf(value) < 0) {
|
||||
// if (value.length() > 200) {
|
||||
// sb.append(value.substring(0, 200)).append(' ');
|
||||
// } else {
|
||||
sb.append(value).append(' ');
|
||||
// }
|
||||
}
|
||||
}
|
||||
return sb.toString();
|
||||
@@ -270,23 +233,6 @@ public class CPEAnalyzer implements org.codesecure.dependencycheck.analyzer.Anal
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Searches the Lucene CPE index to identify possible CPE entries associated
|
||||
* with the supplied vendor, product, and version.
|
||||
*
|
||||
* @param vendor the text used to search the vendor field.
|
||||
* @param product the text used to search the product field.
|
||||
* @param version the text used to search the version field.
|
||||
* @return a list of possible CPE values.
|
||||
* @throws CorruptIndexException when the Lucene index is corrupt.
|
||||
* @throws IOException when the Lucene index is not found.
|
||||
* @throws ParseException when the generated query is not valid.
|
||||
*/
|
||||
protected List<Entry> searchCPE(String vendor, String product, String version)
|
||||
throws CorruptIndexException, IOException, ParseException {
|
||||
return searchCPE(vendor, product, version, null, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* <p>Searches the Lucene CPE index to identify possible CPE entries
|
||||
* associated with the supplied vendor, product, and version.</p>
|
||||
@@ -315,10 +261,9 @@ public class CPEAnalyzer implements org.codesecure.dependencycheck.analyzer.Anal
|
||||
if (searchString == null) {
|
||||
return ret;
|
||||
}
|
||||
Query query = queryParser.parse(searchString);
|
||||
TopDocs docs = indexSearcher.search(query, MAX_QUERY_RESULTS);
|
||||
TopDocs docs = cpe.search(searchString, MAX_QUERY_RESULTS);
|
||||
for (ScoreDoc d : docs.scoreDocs) {
|
||||
Document doc = indexSearcher.doc(d.doc);
|
||||
Document doc = cpe.getDocument(d.doc);
|
||||
Entry entry = Entry.parse(doc);
|
||||
entry.setSearchScore(d.score);
|
||||
if (!ret.contains(entry)) {
|
||||
@@ -356,11 +301,11 @@ public class CPEAnalyzer implements org.codesecure.dependencycheck.analyzer.Anal
|
||||
return null;
|
||||
}
|
||||
|
||||
if (!appendWeightedSearch(sb, Fields.PRODUCT, product.toLowerCase(), produdctWeightings)) {
|
||||
if (!appendWeightedSearch(sb, Fields.PRODUCT, product, produdctWeightings)) {
|
||||
return null;
|
||||
}
|
||||
sb.append(" AND ");
|
||||
if (!appendWeightedSearch(sb, Fields.VENDOR, vendor.toLowerCase(), vendorWeighting)) {
|
||||
if (!appendWeightedSearch(sb, Fields.VENDOR, vendor, vendorWeighting)) {
|
||||
return null;
|
||||
}
|
||||
sb.append(" AND ");
|
||||
|
||||
@@ -45,21 +45,16 @@ public class Entry {
|
||||
public static Entry parse(Document doc) {
|
||||
Entry entry = new Entry();
|
||||
try {
|
||||
entry.setName(doc.get(Fields.NAME));
|
||||
entry.setTitle(doc.get(Fields.TITLE));
|
||||
entry.parseName(doc.get(Fields.NAME));
|
||||
} catch (UnsupportedEncodingException ex) {
|
||||
Logger.getLogger(Entry.class.getName()).log(Level.SEVERE, null, ex);
|
||||
entry.name = doc.get(Fields.NAME);
|
||||
}
|
||||
// entry.vendor = doc.get(Fields.VENDOR);
|
||||
// entry.version = doc.get(Fields.VERSION);
|
||||
// //entry.revision = doc.get(Fields.REVISION);
|
||||
// entry.product = doc.get(Fields.TITLE);
|
||||
// entry.nvdId = doc.get(Fields.NVDID);
|
||||
return entry;
|
||||
}
|
||||
/**
|
||||
* The title of the CPE
|
||||
* @deprecated This field is no longer used
|
||||
*/
|
||||
protected String title;
|
||||
|
||||
@@ -67,6 +62,7 @@ public class Entry {
|
||||
* Get the value of title
|
||||
*
|
||||
* @return the value of title
|
||||
* @deprecated This field is no longer used
|
||||
*/
|
||||
public String getTitle() {
|
||||
return title;
|
||||
@@ -76,6 +72,7 @@ public class Entry {
|
||||
* Set the value of title
|
||||
*
|
||||
* @param title new value of title
|
||||
* @deprecated This field is no longer used
|
||||
*/
|
||||
public void setTitle(String title) {
|
||||
this.title = title;
|
||||
@@ -95,18 +92,16 @@ public class Entry {
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the value of name and calls parseName to obtain the
|
||||
* vendor:product:version:revision
|
||||
* Set the value of name
|
||||
*
|
||||
* @param name new value of name
|
||||
* @throws UnsupportedEncodingException should never be thrown...
|
||||
*/
|
||||
public void setName(String name) throws UnsupportedEncodingException {
|
||||
public void setName(String name) {
|
||||
this.name = name;
|
||||
parseName();
|
||||
}
|
||||
/**
|
||||
* The status of the CPE Entry.
|
||||
* @deprecated This field is no longer used
|
||||
*/
|
||||
protected String status;
|
||||
|
||||
@@ -114,6 +109,7 @@ public class Entry {
|
||||
* Get the value of status
|
||||
*
|
||||
* @return the value of status
|
||||
* @deprecated This field is no longer used
|
||||
*/
|
||||
public String getStatus() {
|
||||
return status;
|
||||
@@ -123,31 +119,35 @@ public class Entry {
|
||||
* Set the value of status
|
||||
*
|
||||
* @param status new value of status
|
||||
* @deprecated This field is no longer used
|
||||
*/
|
||||
public void setStatus(String status) {
|
||||
this.status = status;
|
||||
}
|
||||
/**
|
||||
* The modification date of the CPE Entry.
|
||||
* @deprecated This field is no longer used
|
||||
*/
|
||||
protected Date modificationDate;
|
||||
private Date modificationDate;
|
||||
|
||||
/**
|
||||
* Get the value of modificationDate
|
||||
*
|
||||
* @return the value of modificationDate
|
||||
* @deprecated This field is no longer used
|
||||
*/
|
||||
public Date getModificationDate() {
|
||||
return modificationDate;
|
||||
return (Date) modificationDate.clone();
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the value of modificationDate
|
||||
*
|
||||
* @param modificationDate new value of modificationDate
|
||||
* @deprecated This field is no longer used
|
||||
*/
|
||||
public void setModificationDate(Date modificationDate) {
|
||||
this.modificationDate = modificationDate;
|
||||
this.modificationDate = (Date) modificationDate.clone();
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -157,6 +157,7 @@ public class Entry {
|
||||
*
|
||||
* @param modificationDate new value of modificationDate
|
||||
* @throws ParseException is thrown when a parse exception occurs.
|
||||
* @deprecated This field is no longer used
|
||||
*/
|
||||
public void setModificationDate(String modificationDate) throws ParseException {
|
||||
|
||||
@@ -170,6 +171,7 @@ public class Entry {
|
||||
}
|
||||
/**
|
||||
* The nvdId.
|
||||
* @deprecated This field is no longer used
|
||||
*/
|
||||
protected String nvdId;
|
||||
|
||||
@@ -177,6 +179,7 @@ public class Entry {
|
||||
* Get the value of nvdId
|
||||
*
|
||||
* @return the value of nvdId
|
||||
* @deprecated This field is no longer used
|
||||
*/
|
||||
public String getNvdId() {
|
||||
return nvdId;
|
||||
@@ -186,6 +189,7 @@ public class Entry {
|
||||
* Set the value of nvdId
|
||||
*
|
||||
* @param nvdId new value of nvdId
|
||||
* @deprecated This field is no longer used
|
||||
*/
|
||||
public void setNvdId(String nvdId) {
|
||||
this.nvdId = nvdId;
|
||||
@@ -310,15 +314,17 @@ public class Entry {
|
||||
* <p>Results in:</p> <ul> <li>Vendor: apache</li> <li>Product: struts</li>
|
||||
* <li>Version: 1.1</li> <li>Revision: rc2</li> </ul>
|
||||
*
|
||||
* @param cpeName the cpe name
|
||||
* @throws UnsupportedEncodingException should never be thrown...
|
||||
*/
|
||||
private void parseName() throws UnsupportedEncodingException {
|
||||
if (name != null && name.length() > 7) {
|
||||
String[] data = name.substring(7).split(":");
|
||||
public void parseName(String cpeName) throws UnsupportedEncodingException {
|
||||
this.name = cpeName;
|
||||
if (cpeName != null && cpeName.length() > 7) {
|
||||
String[] data = cpeName.substring(7).split(":");
|
||||
if (data.length >= 1) {
|
||||
vendor = URLDecoder.decode(data[0], "UTF-8");
|
||||
vendor = URLDecoder.decode(data[0], "UTF-8").replaceAll("[_-]", " ");
|
||||
if (data.length >= 2) {
|
||||
product = URLDecoder.decode(data[1], "UTF-8");
|
||||
product = URLDecoder.decode(data[1], "UTF-8").replaceAll("[_-]", " ");
|
||||
if (data.length >= 3) {
|
||||
version = URLDecoder.decode(data[2], "UTF-8");
|
||||
if (data.length >= 4) {
|
||||
|
||||
@@ -34,22 +34,13 @@ public abstract class Fields {
|
||||
* The key for the vendor field.
|
||||
*/
|
||||
public static final String VENDOR = "vendor";
|
||||
/**
|
||||
* The key for the version field.
|
||||
*/
|
||||
public static final String VERSION = "version";
|
||||
//public static final String REVISION = "revision";
|
||||
/**
|
||||
* The key for the product field.
|
||||
*/
|
||||
public static final String PRODUCT = "product";
|
||||
/**
|
||||
* The key for the title field. This is a field combining vendor, product,
|
||||
* and version.
|
||||
* The key for the version field.
|
||||
*/
|
||||
public static final String TITLE = "title";
|
||||
/**
|
||||
* The key for the nvdId field.
|
||||
*/
|
||||
public static final String NVDID = "nvdid";
|
||||
public static final String VERSION = "version";
|
||||
//public static final String REVISION = "revision";
|
||||
}
|
||||
|
||||
@@ -19,54 +19,28 @@ package org.codesecure.dependencycheck.data.cpe;
|
||||
*/
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
import java.io.OutputStreamWriter;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.net.URLDecoder;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.logging.Level;
|
||||
import java.util.logging.Logger;
|
||||
import javax.xml.parsers.ParserConfigurationException;
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.KeywordAnalyzer;
|
||||
import org.apache.lucene.analysis.PerFieldAnalyzerWrapper;
|
||||
import org.apache.lucene.analysis.standard.StandardAnalyzer;
|
||||
import org.apache.lucene.analysis.core.KeywordAnalyzer;
|
||||
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
|
||||
import org.apache.lucene.queryparser.classic.QueryParser;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.FSDirectory;
|
||||
import org.apache.lucene.util.Version;
|
||||
import org.codesecure.dependencycheck.data.lucene.AbstractIndex;
|
||||
import org.codesecure.dependencycheck.data.CachedWebDataSource;
|
||||
import org.codesecure.dependencycheck.data.UpdateException;
|
||||
import org.codesecure.dependencycheck.utils.Downloader;
|
||||
import org.codesecure.dependencycheck.utils.Settings;
|
||||
import org.codesecure.dependencycheck.data.cpe.xml.Importer;
|
||||
import org.codesecure.dependencycheck.utils.DownloadFailedException;
|
||||
import org.xml.sax.SAXException;
|
||||
import org.codesecure.dependencycheck.data.lucene.FieldAnalyzer;
|
||||
import org.codesecure.dependencycheck.data.lucene.SearchFieldAnalyzer;
|
||||
|
||||
/**
|
||||
* The Index class is used to utilize and maintain the CPE Index.
|
||||
*
|
||||
* @author Jeremy Long (jeremy.long@gmail.com)
|
||||
*/
|
||||
public class Index extends AbstractIndex implements CachedWebDataSource {
|
||||
|
||||
/**
|
||||
* The name of the properties file containing the timestamp of the last
|
||||
* update.
|
||||
*/
|
||||
private static final String UPDATE_PROPERTIES_FILE = "lastupdated.prop";
|
||||
/**
|
||||
* The properties file key for the last updated field.
|
||||
*/
|
||||
private static final String LAST_UPDATED = "lastupdated";
|
||||
public class Index extends AbstractIndex {
|
||||
|
||||
/**
|
||||
* Returns the directory that holds the CPE Index.
|
||||
@@ -88,7 +62,7 @@ public class Index extends AbstractIndex implements CachedWebDataSource {
|
||||
* @return the data directory for this index.
|
||||
* @throws IOException is thrown if an IOException occurs of course...
|
||||
*/
|
||||
protected File getDataDirectory() throws IOException {
|
||||
public File getDataDirectory() throws IOException {
|
||||
String fileName = Settings.getString(Settings.KEYS.CPE_INDEX);
|
||||
String filePath = Index.class.getProtectionDomain().getCodeSource().getLocation().getPath();
|
||||
String decodedPath = URLDecoder.decode(filePath, "UTF-8");
|
||||
@@ -100,6 +74,11 @@ public class Index extends AbstractIndex implements CachedWebDataSource {
|
||||
}
|
||||
File path = new File(exePath.getCanonicalFile() + File.separator + fileName);
|
||||
path = new File(path.getCanonicalPath());
|
||||
if (!path.exists()) {
|
||||
if (!path.mkdirs()) {
|
||||
throw new IOException("Unable to create CPE Data directory");
|
||||
}
|
||||
}
|
||||
return path;
|
||||
}
|
||||
|
||||
@@ -109,212 +88,59 @@ public class Index extends AbstractIndex implements CachedWebDataSource {
|
||||
* @return the CPE Analyzer.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public Analyzer createAnalyzer() {
|
||||
public Analyzer createIndexingAnalyzer() {
|
||||
Map fieldAnalyzers = new HashMap();
|
||||
|
||||
fieldAnalyzers.put(Fields.VERSION, new KeywordAnalyzer());
|
||||
fieldAnalyzers.put(Fields.NAME, new KeywordAnalyzer());
|
||||
|
||||
PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(
|
||||
new StandardAnalyzer(Version.LUCENE_35), fieldAnalyzers);
|
||||
new FieldAnalyzer(Version.LUCENE_40), fieldAnalyzers);
|
||||
|
||||
return wrapper;
|
||||
}
|
||||
private SearchFieldAnalyzer productSearchFieldAnalyzer = null;
|
||||
private SearchFieldAnalyzer vendorSearchFieldAnalyzer = null;
|
||||
|
||||
/**
|
||||
* Creates an Analyzer for searching the CPE Index.
|
||||
*
|
||||
* @return the CPE Analyzer.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public Analyzer createSearchingAnalyzer() {
|
||||
Map fieldAnalyzers = new HashMap();
|
||||
|
||||
fieldAnalyzers.put(Fields.VERSION, new KeywordAnalyzer());
|
||||
fieldAnalyzers.put(Fields.NAME, new KeywordAnalyzer());
|
||||
productSearchFieldAnalyzer = new SearchFieldAnalyzer(Version.LUCENE_40);
|
||||
vendorSearchFieldAnalyzer = new SearchFieldAnalyzer(Version.LUCENE_40);
|
||||
fieldAnalyzers.put(Fields.PRODUCT, productSearchFieldAnalyzer);
|
||||
fieldAnalyzers.put(Fields.VENDOR, vendorSearchFieldAnalyzer);
|
||||
|
||||
PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(
|
||||
new FieldAnalyzer(Version.LUCENE_40), fieldAnalyzers);
|
||||
|
||||
return wrapper;
|
||||
}
|
||||
|
||||
/**
|
||||
* Downloads the latest CPE XML file from the web and imports it into the
|
||||
* current CPE Index.
|
||||
*
|
||||
* @throws UpdateException is thrown if there is a problem updating the
|
||||
* index.
|
||||
* Creates the Lucene QueryParser used when querying the index
|
||||
* @return a QueryParser.
|
||||
*/
|
||||
public void update() throws UpdateException {
|
||||
try {
|
||||
long timeStamp = updateNeeded();
|
||||
if (timeStamp > 0) {
|
||||
URL url = new URL(Settings.getString(Settings.KEYS.CPE_URL));
|
||||
Logger.getLogger(Index.class.getName()).log(Level.WARNING, "Updating CPE :" + url.toString());
|
||||
File outputPath = null;
|
||||
try {
|
||||
outputPath = File.createTempFile("cpe", ".xml");
|
||||
Downloader.fetchFile(url, outputPath, true);
|
||||
Importer.importXML(outputPath.toString());
|
||||
writeLastUpdatedPropertyFile(timeStamp);
|
||||
} catch (ParserConfigurationException ex) {
|
||||
//Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
} catch (SAXException ex) {
|
||||
//Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
} catch (IOException ex) {
|
||||
//Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
} finally {
|
||||
try {
|
||||
if (outputPath != null && outputPath.exists()) {
|
||||
outputPath.delete();
|
||||
}
|
||||
} finally {
|
||||
if (outputPath != null && outputPath.exists()) {
|
||||
outputPath.deleteOnExit();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (MalformedURLException ex) {
|
||||
//Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
} catch (DownloadFailedException ex) {
|
||||
//Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
}
|
||||
public QueryParser createQueryParser() {
|
||||
return new QueryParser(Version.LUCENE_40, Fields.NAME, getSearchingAnalyzer());
|
||||
}
|
||||
|
||||
/**
|
||||
* Writes a properties file containing the last updated date to the CPE
|
||||
* directory.
|
||||
*
|
||||
* @param timeStamp the timestamp to write.
|
||||
* Resets the searching analyzers
|
||||
*/
|
||||
private void writeLastUpdatedPropertyFile(long timeStamp) throws UpdateException {
|
||||
String dir;
|
||||
try {
|
||||
dir = getDataDirectory().getCanonicalPath();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException("Unable to locate the last updated properties file.", ex);
|
||||
protected void resetSearchingAnalyzer() {
|
||||
if (productSearchFieldAnalyzer != null) {
|
||||
productSearchFieldAnalyzer.clear();
|
||||
}
|
||||
File cpeProp = new File(dir + File.separatorChar + UPDATE_PROPERTIES_FILE);
|
||||
Properties prop = new Properties();
|
||||
prop.put(Index.LAST_UPDATED, String.valueOf(timeStamp));
|
||||
OutputStream os = null;
|
||||
try {
|
||||
os = new FileOutputStream(cpeProp);
|
||||
OutputStreamWriter out = new OutputStreamWriter(os);
|
||||
prop.store(out, dir);
|
||||
} catch (FileNotFoundException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
} finally {
|
||||
try {
|
||||
os.flush();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
try {
|
||||
os.close();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
if (vendorSearchFieldAnalyzer != null) {
|
||||
vendorSearchFieldAnalyzer.clear();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines if the index needs to be updated. This is done by fetching the
|
||||
* cpe.meta data and checking the lastModifiedDate. If the CPE data needs to
|
||||
* be refreshed this method will return the timestamp of the new CPE. If an
|
||||
* update is not required this function will return 0.
|
||||
*
|
||||
* @return the timestamp of the currently published CPE.xml if the index
|
||||
* needs to be updated, otherwise returns 0..
|
||||
* @throws MalformedURLException is thrown if the URL for the CPE Meta data
|
||||
* is incorrect.
|
||||
* @throws DownloadFailedException is thrown if there is an error
|
||||
* downloading the cpe.meta data file.
|
||||
* @throws UpdateException is thrown if there is an error locating the last updated
|
||||
* properties file.
|
||||
*/
|
||||
public long updateNeeded() throws MalformedURLException, DownloadFailedException, UpdateException {
|
||||
long retVal = 0;
|
||||
long lastUpdated = 0;
|
||||
long currentlyPublishedDate = retrieveCurrentCPETimestampFromWeb();
|
||||
if (currentlyPublishedDate == 0) {
|
||||
throw new DownloadFailedException("Unable to retrieve valid timestamp from cpe.meta file");
|
||||
}
|
||||
|
||||
//String dir = Settings.getString(Settings.KEYS.CPE_INDEX);
|
||||
File f;
|
||||
try {
|
||||
f = getDataDirectory(); //new File(dir);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException("Unable to locate last updated properties file.", ex);
|
||||
}
|
||||
if (!f.exists()) {
|
||||
retVal = currentlyPublishedDate;
|
||||
} else {
|
||||
File cpeProp;
|
||||
try {
|
||||
cpeProp = new File(f.getCanonicalPath() + File.separatorChar + UPDATE_PROPERTIES_FILE);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException("Unable to find last updated properties file.", ex);
|
||||
}
|
||||
if (!cpeProp.exists()) {
|
||||
retVal = currentlyPublishedDate;
|
||||
} else {
|
||||
Properties prop = new Properties();
|
||||
InputStream is = null;
|
||||
try {
|
||||
is = new FileInputStream(cpeProp);
|
||||
prop.load(is);
|
||||
lastUpdated = Long.parseLong(prop.getProperty(Index.LAST_UPDATED));
|
||||
} catch (FileNotFoundException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.FINEST, null, ex);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.FINEST, null, ex);
|
||||
} catch (NumberFormatException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.FINEST, null, ex);
|
||||
}
|
||||
if (currentlyPublishedDate > lastUpdated) {
|
||||
retVal = currentlyPublishedDate;
|
||||
}
|
||||
}
|
||||
}
|
||||
return retVal;
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves the timestamp from the CPE meta data file.
|
||||
*
|
||||
* @return the timestamp from the currently published cpe.meta.
|
||||
* @throws MalformedURLException is thrown if the URL for the CPE Meta data
|
||||
* is incorrect.
|
||||
* @throws DownloadFailedException is thrown if there is an error
|
||||
* downloading the cpe.meta data file.
|
||||
*/
|
||||
private long retrieveCurrentCPETimestampFromWeb() throws MalformedURLException, DownloadFailedException {
|
||||
long timestamp = 0;
|
||||
File tmp = null;
|
||||
InputStream is = null;
|
||||
try {
|
||||
tmp = File.createTempFile("cpe", "meta");
|
||||
URL url = new URL(Settings.getString(Settings.KEYS.CPE_META_URL));
|
||||
Downloader.fetchFile(url, tmp);
|
||||
Properties prop = new Properties();
|
||||
is = new FileInputStream(tmp);
|
||||
prop.load(is);
|
||||
timestamp = Long.parseLong(prop.getProperty("lastModifiedDate"));
|
||||
} catch (IOException ex) {
|
||||
throw new DownloadFailedException("Unable to create temporary file for CPE Meta File download.", ex);
|
||||
} finally {
|
||||
try {
|
||||
if (is != null) {
|
||||
try {
|
||||
is.close();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.FINEST, null, ex);
|
||||
}
|
||||
}
|
||||
if (tmp != null && tmp.exists()) {
|
||||
tmp.delete();
|
||||
}
|
||||
} finally {
|
||||
if (tmp != null && tmp.exists()) {
|
||||
tmp.deleteOnExit();
|
||||
}
|
||||
}
|
||||
}
|
||||
return timestamp;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -66,7 +66,7 @@ public class CPEHandler extends DefaultHandler {
|
||||
skip = (temp != null && temp.equals("true"));
|
||||
try {
|
||||
if (!skip && name.startsWith("cpe:/a:")) {
|
||||
entry.setName(name);
|
||||
entry.parseName(name);
|
||||
} else {
|
||||
skip = true;
|
||||
}
|
||||
|
||||
@@ -1,88 +0,0 @@
|
||||
package org.codesecure.dependencycheck.data.cpe.xml;
|
||||
/*
|
||||
* This file is part of DependencyCheck.
|
||||
*
|
||||
* DependencyCheck is free software: you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation, either version 3 of the License, or (at your option) any
|
||||
* later version.
|
||||
*
|
||||
* DependencyCheck is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
|
||||
* details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* DependencyCheck. If not, see http://www.gnu.org/licenses/.
|
||||
*
|
||||
* Copyright (c) 2012 Jeremy Long. All Rights Reserved.
|
||||
*/
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.logging.Level;
|
||||
import java.util.logging.Logger;
|
||||
import javax.xml.parsers.ParserConfigurationException;
|
||||
import javax.xml.parsers.SAXParser;
|
||||
import javax.xml.parsers.SAXParserFactory;
|
||||
import org.apache.lucene.index.CorruptIndexException;
|
||||
import org.xml.sax.SAXException;
|
||||
|
||||
/**
|
||||
* Imports a CPE XML file into the Lucene CPE Index.
|
||||
*
|
||||
* @author Jeremy Long (jeremy.long@gmail.com)
|
||||
*/
|
||||
public class Importer {
|
||||
|
||||
/**
|
||||
* Private constructor for utility class.
|
||||
*/
|
||||
private Importer() {
|
||||
}
|
||||
|
||||
/**
|
||||
* Imports the CPE XML File into the Lucene Index.
|
||||
*
|
||||
* @param file containing the path to the CPE XML file.
|
||||
* @throws ParserConfigurationException is thrown if the parser is
|
||||
* misconfigured.
|
||||
* @throws SAXException is thrown when there is a SAXException.
|
||||
* @throws IOException is thrown when there is an IOException.
|
||||
* @throws CorruptIndexException is thrown when the Lucene index is corrupt.
|
||||
*/
|
||||
public static void importXML(File file) throws CorruptIndexException, ParserConfigurationException, IOException, SAXException {
|
||||
SAXParserFactory factory = SAXParserFactory.newInstance();
|
||||
SAXParser saxParser = factory.newSAXParser();
|
||||
CPEHandler handler = new CPEHandler();
|
||||
Indexer indexer = new Indexer();
|
||||
indexer.openIndexWriter();
|
||||
handler.registerSaveDelegate(indexer);
|
||||
try {
|
||||
saxParser.parse(file, handler);
|
||||
} catch (SAXException ex) {
|
||||
Logger.getLogger(Importer.class.getName()).log(Level.SEVERE, null, ex);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Importer.class.getName()).log(Level.SEVERE, null, ex);
|
||||
} finally {
|
||||
indexer.close();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Imports the CPE XML File into the Lucene Index.
|
||||
*
|
||||
* @param path the path to the CPE XML file.
|
||||
* @throws ParserConfigurationException is thrown if the parser is
|
||||
* misconfigured.
|
||||
* @throws SAXException is thrown when there is a SAXException.
|
||||
* @throws IOException is thrown when there is an IOException.
|
||||
*/
|
||||
public static void importXML(String path) throws ParserConfigurationException, SAXException, IOException {
|
||||
File f = new File(path);
|
||||
if (!f.exists()) {
|
||||
f.mkdirs();
|
||||
}
|
||||
Importer.importXML(f);
|
||||
}
|
||||
}
|
||||
@@ -21,13 +21,13 @@ package org.codesecure.dependencycheck.data.cpe.xml;
|
||||
import java.io.IOException;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.StoredField;
|
||||
import org.apache.lucene.document.TextField;
|
||||
import org.apache.lucene.index.CorruptIndexException;
|
||||
import org.apache.lucene.index.FieldInfo.IndexOptions;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.codesecure.dependencycheck.data.cpe.Entry;
|
||||
import org.codesecure.dependencycheck.data.cpe.Fields;
|
||||
import org.codesecure.dependencycheck.data.cpe.Index;
|
||||
import org.codesecure.dependencycheck.data.lucene.LuceneUtils;
|
||||
|
||||
/**
|
||||
* The Indexer is used to convert a CPE Entry, retrieved from the CPE XML file,
|
||||
@@ -46,7 +46,8 @@ public class Indexer extends Index implements EntrySaveDelegate {
|
||||
*/
|
||||
public void saveEntry(Entry entry) throws CorruptIndexException, IOException {
|
||||
Document doc = convertEntryToDoc(entry);
|
||||
Term term = new Term(Fields.NVDID, LuceneUtils.escapeLuceneQuery(entry.getNvdId()));
|
||||
//Term term = new Term(Fields.NVDID, LuceneUtils.escapeLuceneQuery(entry.getNvdId()));
|
||||
Term term = new Term(Fields.NAME, entry.getName());
|
||||
indexWriter.updateDocument(term, doc);
|
||||
}
|
||||
|
||||
@@ -59,44 +60,30 @@ public class Indexer extends Index implements EntrySaveDelegate {
|
||||
protected Document convertEntryToDoc(Entry entry) {
|
||||
Document doc = new Document();
|
||||
|
||||
Field name = new Field(Fields.NAME, entry.getName(), Field.Store.YES, Field.Index.ANALYZED);
|
||||
name.setIndexOptions(IndexOptions.DOCS_ONLY);
|
||||
Field name = new StoredField(Fields.NAME, entry.getName());
|
||||
doc.add(name);
|
||||
|
||||
Field nvdId = new Field(Fields.NVDID, entry.getNvdId(), Field.Store.NO, Field.Index.ANALYZED);
|
||||
nvdId.setIndexOptions(IndexOptions.DOCS_ONLY);
|
||||
doc.add(nvdId);
|
||||
|
||||
Field vendor = new Field(Fields.VENDOR, entry.getVendor(), Field.Store.NO, Field.Index.ANALYZED);
|
||||
vendor.setIndexOptions(IndexOptions.DOCS_ONLY);
|
||||
Field vendor = new TextField(Fields.VENDOR, entry.getVendor(), Field.Store.NO);
|
||||
vendor.setBoost(5.0F);
|
||||
doc.add(vendor);
|
||||
|
||||
Field product = new Field(Fields.PRODUCT, entry.getProduct(), Field.Store.NO, Field.Index.ANALYZED);
|
||||
product.setIndexOptions(IndexOptions.DOCS_ONLY);
|
||||
Field product = new TextField(Fields.PRODUCT, entry.getProduct(), Field.Store.NO);
|
||||
product.setBoost(5.0F);
|
||||
doc.add(product);
|
||||
|
||||
Field title = new Field(Fields.TITLE, entry.getTitle(), Field.Store.YES, Field.Index.ANALYZED);
|
||||
title.setIndexOptions(IndexOptions.DOCS_ONLY);
|
||||
//title.setBoost(1.0F);
|
||||
doc.add(title);
|
||||
|
||||
//TODO revision should likely be its own field
|
||||
if (entry.getVersion() != null) {
|
||||
Field version = null;
|
||||
if (entry.getRevision() != null) {
|
||||
version = new Field(Fields.VERSION, entry.getVersion() + " "
|
||||
+ entry.getRevision(), Field.Store.NO, Field.Index.ANALYZED);
|
||||
version = new TextField(Fields.VERSION, entry.getVersion() + " "
|
||||
+ entry.getRevision(), Field.Store.NO);
|
||||
} else {
|
||||
version = new Field(Fields.VERSION, entry.getVersion(),
|
||||
Field.Store.NO, Field.Index.ANALYZED);
|
||||
version = new TextField(Fields.VERSION, entry.getVersion(),
|
||||
Field.Store.NO);
|
||||
}
|
||||
version.setIndexOptions(IndexOptions.DOCS_ONLY);
|
||||
version.setBoost(0.8F);
|
||||
doc.add(version);
|
||||
}
|
||||
|
||||
return doc;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,11 +22,17 @@ import java.io.IOException;
|
||||
import java.util.logging.Level;
|
||||
import java.util.logging.Logger;
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.index.CorruptIndexException;
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.index.IndexWriterConfig;
|
||||
import org.apache.lucene.queryparser.classic.ParseException;
|
||||
import org.apache.lucene.queryparser.classic.QueryParser;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.TopDocs;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.LockObtainFailedException;
|
||||
import org.apache.lucene.util.Version;
|
||||
@@ -56,9 +62,17 @@ public abstract class AbstractIndex {
|
||||
*/
|
||||
private IndexSearcher indexSearcher = null;
|
||||
/**
|
||||
* The Lucene Analyzer.
|
||||
* The Lucene Analyzer used for Indexing.
|
||||
*/
|
||||
private Analyzer analyzer = null;
|
||||
private Analyzer indexingAnalyzer = null;
|
||||
/**
|
||||
* The Lucene Analyzer used for Searching
|
||||
*/
|
||||
private Analyzer searchingAnalyzer = null;
|
||||
/**
|
||||
* The Lucene QueryParser used for Searching
|
||||
*/
|
||||
private QueryParser queryParser = null;
|
||||
/**
|
||||
* Indicates whether or not the Lucene Index is open.
|
||||
*/
|
||||
@@ -71,7 +85,8 @@ public abstract class AbstractIndex {
|
||||
*/
|
||||
public void open() throws IOException {
|
||||
directory = this.getDirectory();
|
||||
analyzer = this.getAnalyzer(); //new StandardAnalyzer(Version.LUCENE_35);
|
||||
indexingAnalyzer = this.getIndexingAnalyzer();
|
||||
searchingAnalyzer = this.getSearchingAnalyzer();
|
||||
indexOpen = true;
|
||||
}
|
||||
|
||||
@@ -98,19 +113,19 @@ public abstract class AbstractIndex {
|
||||
}
|
||||
}
|
||||
if (indexSearcher != null) {
|
||||
try {
|
||||
indexSearcher.close();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(AbstractIndex.class.getName()).log(Level.SEVERE, null, ex);
|
||||
} finally {
|
||||
indexSearcher = null;
|
||||
}
|
||||
indexSearcher = null;
|
||||
}
|
||||
|
||||
if (analyzer != null) {
|
||||
analyzer.close();
|
||||
analyzer = null;
|
||||
if (indexingAnalyzer != null) {
|
||||
indexingAnalyzer.close();
|
||||
indexingAnalyzer = null;
|
||||
}
|
||||
|
||||
if (searchingAnalyzer != null) {
|
||||
searchingAnalyzer.close();
|
||||
searchingAnalyzer = null;
|
||||
}
|
||||
|
||||
try {
|
||||
directory.close();
|
||||
} catch (IOException ex) {
|
||||
@@ -140,7 +155,7 @@ public abstract class AbstractIndex {
|
||||
if (!isOpen()) {
|
||||
open();
|
||||
}
|
||||
IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_35, analyzer);
|
||||
IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_40, indexingAnalyzer);
|
||||
indexWriter = new IndexWriter(directory, conf);
|
||||
}
|
||||
|
||||
@@ -170,7 +185,8 @@ public abstract class AbstractIndex {
|
||||
if (!isOpen()) {
|
||||
open();
|
||||
}
|
||||
indexReader = IndexReader.open(directory, true);
|
||||
//indexReader = IndexReader.open(directory, true);
|
||||
indexReader = DirectoryReader.open(directory);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -180,7 +196,7 @@ public abstract class AbstractIndex {
|
||||
* @throws CorruptIndexException is thrown if the index is corrupt.
|
||||
* @throws IOException is thrown if there is an exception reading the index.
|
||||
*/
|
||||
public IndexSearcher getIndexSearcher() throws CorruptIndexException, IOException {
|
||||
protected IndexSearcher getIndexSearcher() throws CorruptIndexException, IOException {
|
||||
if (indexReader == null) {
|
||||
openIndexReader();
|
||||
}
|
||||
@@ -191,29 +207,116 @@ public abstract class AbstractIndex {
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an Analyzer for the Lucene Index.
|
||||
* Returns an Analyzer to be used when indexing.
|
||||
*
|
||||
* @return an Analyzer.
|
||||
*/
|
||||
public Analyzer getAnalyzer() {
|
||||
if (analyzer == null) {
|
||||
analyzer = createAnalyzer();
|
||||
public Analyzer getIndexingAnalyzer() {
|
||||
if (indexingAnalyzer == null) {
|
||||
indexingAnalyzer = createIndexingAnalyzer();
|
||||
}
|
||||
return analyzer;
|
||||
return indexingAnalyzer;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the directory that contains the Lucene Index.
|
||||
* Returns an analyzer used for searching the index
|
||||
* @return a lucene analyzer
|
||||
*/
|
||||
protected Analyzer getSearchingAnalyzer() {
|
||||
if (searchingAnalyzer == null) {
|
||||
searchingAnalyzer = createSearchingAnalyzer();
|
||||
}
|
||||
return searchingAnalyzer;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a query parser
|
||||
* @return a query parser
|
||||
*/
|
||||
protected QueryParser getQueryParser() {
|
||||
if (queryParser == null) {
|
||||
queryParser = createQueryParser();
|
||||
}
|
||||
return queryParser;
|
||||
}
|
||||
|
||||
/**
|
||||
* Searches the index using the given search string
|
||||
* @param searchString the query text
|
||||
* @param maxQueryResults the maximum number of documents to return
|
||||
* @return the TopDocs found by the search
|
||||
* @throws ParseException thrown when the searchString is invalid
|
||||
* @throws IOException is thrown if there is an issue with the underlying Index
|
||||
*/
|
||||
public TopDocs search(String searchString, int maxQueryResults) throws ParseException, IOException {
|
||||
|
||||
QueryParser parser = getQueryParser();
|
||||
|
||||
Query query = parser.parse(searchString);
|
||||
|
||||
resetSearchingAnalyzer();
|
||||
|
||||
IndexSearcher is = getIndexSearcher();
|
||||
|
||||
TopDocs docs = is.search(query, maxQueryResults);
|
||||
|
||||
return docs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Searches the index using the given query
|
||||
* @param query the query used to search the index
|
||||
* @param maxQueryResults the max number of results to return
|
||||
* @return the TopDocs found be the query
|
||||
* @throws CorruptIndexException thrown if the Index is corrupt
|
||||
* @throws IOException thrown if there is an IOException
|
||||
*/
|
||||
public TopDocs search(Query query, int maxQueryResults) throws CorruptIndexException, IOException {
|
||||
IndexSearcher is = getIndexSearcher();
|
||||
return is.search(query, maxQueryResults);
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves a document from the Index
|
||||
* @param documentId the id of the document to retrieve
|
||||
* @return the Document
|
||||
* @throws IOException thrown if there is an IOException
|
||||
*/
|
||||
public Document getDocument(int documentId) throws IOException {
|
||||
IndexSearcher is = getIndexSearcher();
|
||||
return is.doc(documentId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the directory that contains the Lucene Index
|
||||
*
|
||||
* @return a Lucene Directory.
|
||||
* @throws IOException is thrown when an IOException occurs.
|
||||
* @return a Lucene Directory
|
||||
* @throws IOException is thrown when an IOException occurs
|
||||
*/
|
||||
public abstract Directory getDirectory() throws IOException;
|
||||
|
||||
/**
|
||||
* Creates the Lucene Analyzer used when indexing and searching the index.
|
||||
* Creates the Lucene Analyzer used when indexing
|
||||
*
|
||||
* @return a Lucene Analyzer.
|
||||
* @return a Lucene Analyzer
|
||||
*/
|
||||
public abstract Analyzer createAnalyzer();
|
||||
public abstract Analyzer createIndexingAnalyzer();
|
||||
|
||||
/**
|
||||
* Creates the Lucene Analyzer used when querying the index
|
||||
*
|
||||
* @return a Lucene Analyzer
|
||||
*/
|
||||
public abstract Analyzer createSearchingAnalyzer();
|
||||
|
||||
/**
|
||||
* Creates the Lucene QueryParser used when querying the index
|
||||
* @return a QueryParser
|
||||
*/
|
||||
public abstract QueryParser createQueryParser();
|
||||
|
||||
/**
|
||||
* Resets the searching analyzers
|
||||
*/
|
||||
protected abstract void resetSearchingAnalyzer();
|
||||
}
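To show how the abstract hooks above are intended to fit together, here is a hypothetical in-memory subclass; it assumes the abstract methods shown in this hunk are the complete set, that it lives in the same package as AbstractIndex, and it uses RAMDirectory, StandardAnalyzer, and a made-up default field purely for illustration:

    import java.io.IOException;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.queryparser.classic.QueryParser;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.Version;

    public class InMemoryIndex extends AbstractIndex {

        private final Directory directory = new RAMDirectory();

        @Override
        public Directory getDirectory() throws IOException {
            return directory;
        }

        @Override
        public Analyzer createIndexingAnalyzer() {
            return new StandardAnalyzer(Version.LUCENE_40);
        }

        @Override
        public Analyzer createSearchingAnalyzer() {
            return new StandardAnalyzer(Version.LUCENE_40);
        }

        @Override
        public QueryParser createQueryParser() {
            // "contents" is a hypothetical default field for this sketch
            return new QueryParser(Version.LUCENE_40, "contents", getSearchingAnalyzer());
        }

        @Override
        protected void resetSearchingAnalyzer() {
            // a plain StandardAnalyzer keeps no per-query state, so nothing to reset
        }
    }

A caller would then rely on the base class: search("struts 2.1.2", 10) parses the string with the query parser, resets the searching analyzer, and runs the query through the shared IndexSearcher.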
|
||||
|
||||
@@ -18,7 +18,7 @@ package org.codesecure.dependencycheck.data.lucene;
|
||||
* Copyright (c) 2012 Jeremy Long. All Rights Reserved.
|
||||
*/
|
||||
|
||||
import org.apache.lucene.search.DefaultSimilarity;
|
||||
import org.apache.lucene.search.similarities.DefaultSimilarity;
|
||||
|
||||
/**
|
||||
*
|
||||
@@ -41,7 +41,7 @@ public class DependencySimilarity extends DefaultSimilarity {
|
||||
* @return 1
|
||||
*/
|
||||
@Override
|
||||
public float idf(int docFreq, int numDocs) {
|
||||
public float idf(long docFreq, long numDocs) {
|
||||
return 1;
|
||||
}
|
||||
}
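The new signature tracks Lucene 4.0, where the idf hook on DefaultSimilarity takes long arguments. For the flattened idf to affect scoring, the similarity still has to be attached to the searcher; a minimal sketch of that wiring (the helper class is invented):

    import org.apache.lucene.search.IndexSearcher;

    public final class SimilarityWiring {

        private SimilarityWiring() {
        }

        /** Applies the flat-idf similarity to a searcher before it is used for queries. */
        public static IndexSearcher withFlatIdf(IndexSearcher searcher) {
            searcher.setSimilarity(new DependencySimilarity()); // every term now contributes idf == 1
            return searcher;
        }
    }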
|
||||
|
||||
@@ -0,0 +1,81 @@
|
||||
package org.codesecure.dependencycheck.data.lucene;
|
||||
/*
|
||||
* This file is part of DependencyCheck.
|
||||
*
|
||||
* DependencyCheck is free software: you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation, either version 3 of the License, or (at your option) any
|
||||
* later version.
|
||||
*
|
||||
* DependencyCheck is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
|
||||
* details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* DependencyCheck. If not, see http://www.gnu.org/licenses/.
|
||||
*
|
||||
* Copyright (c) 2012 Jeremy Long. All Rights Reserved.
|
||||
*/
|
||||
|
||||
import java.io.Reader;
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.analysis.Tokenizer;
|
||||
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
|
||||
import org.apache.lucene.analysis.core.LowerCaseFilter;
|
||||
import org.apache.lucene.analysis.core.StopAnalyzer;
|
||||
import org.apache.lucene.analysis.core.StopFilter;
|
||||
import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
|
||||
import org.apache.lucene.util.Version;
|
||||
|
||||
/**
|
||||
* <p>A Lucene Analyzer that utilizes the WhitespaceTokenizer, WordDelimiterFilter,
|
||||
* LowerCaseFilter, and StopFilter. The intended purpose of this Analyzer is
|
||||
* to index the CPE fields vendor and product.</p>
|
||||
*
|
||||
* @author Jeremy Long (jeremy.long@gmail.com)
|
||||
*/
|
||||
public class FieldAnalyzer extends Analyzer {
|
||||
|
||||
/**
|
||||
* The Lucene Version used
|
||||
*/
|
||||
private Version version = null;
|
||||
|
||||
/**
|
||||
* Creates a new FieldAnalyzer
|
||||
* @param version the Lucene version
|
||||
*/
|
||||
public FieldAnalyzer(Version version) {
|
||||
this.version = version;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates the TokenStreamComponents
|
||||
*
|
||||
* @param fieldName the field name being analyzed
|
||||
* @param reader the reader containing the input
|
||||
* @return the TokenStreamComponents
|
||||
*/
|
||||
@Override
|
||||
protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
|
||||
Tokenizer source = new WhitespaceTokenizer(version, reader);
|
||||
|
||||
TokenStream stream = source;
|
||||
|
||||
stream = new WordDelimiterFilter(stream,
|
||||
WordDelimiterFilter.CATENATE_WORDS
|
||||
| WordDelimiterFilter.GENERATE_WORD_PARTS
|
||||
| WordDelimiterFilter.GENERATE_NUMBER_PARTS
|
||||
| WordDelimiterFilter.PRESERVE_ORIGINAL
|
||||
| WordDelimiterFilter.SPLIT_ON_CASE_CHANGE
|
||||
| WordDelimiterFilter.SPLIT_ON_NUMERICS
|
||||
| WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null);
|
||||
|
||||
stream = new LowerCaseFilter(version, stream);
|
||||
stream = new StopFilter(version, stream, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
|
||||
|
||||
return new TokenStreamComponents(source, stream);
|
||||
}
|
||||
}
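To make the chain above concrete, here is a hedged sketch of dumping the tokens the analyzer produces for a sample product string; the field name and input are invented, and the exact terms depend on the WordDelimiterFilter flags listed above:

    import java.io.IOException;
    import java.io.StringReader;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.util.Version;

    public class FieldAnalyzerDemo {
        public static void main(String[] args) throws IOException {
            FieldAnalyzer analyzer = new FieldAnalyzer(Version.LUCENE_40);
            TokenStream ts = analyzer.tokenStream("product", new StringReader("Struts2-Core"));
            CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
            ts.reset();                              // required before incrementToken() in Lucene 4.0
            while (ts.incrementToken()) {
                System.out.println(term.toString()); // roughly: struts2-core, struts, 2, core, strutscore, ...
            }
            ts.end();
            ts.close();
        }
    }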
|
||||
@@ -0,0 +1,93 @@
|
||||
package org.codesecure.dependencycheck.data.lucene;
|
||||
/*
|
||||
* This file is part of DependencyCheck.
|
||||
*
|
||||
* DependencyCheck is free software: you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation, either version 3 of the License, or (at your option) any
|
||||
* later version.
|
||||
*
|
||||
* DependencyCheck is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
|
||||
* details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* DependencyCheck. If not, see http://www.gnu.org/licenses/.
|
||||
*
|
||||
* Copyright (c) 2012 Jeremy Long. All Rights Reserved.
|
||||
*/
|
||||
|
||||
import java.io.Reader;
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.analysis.Tokenizer;
|
||||
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
|
||||
import org.apache.lucene.analysis.core.LowerCaseFilter;
|
||||
import org.apache.lucene.analysis.core.StopAnalyzer;
|
||||
import org.apache.lucene.analysis.core.StopFilter;
|
||||
import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
|
||||
import org.apache.lucene.util.Version;
|
||||
|
||||
/**
|
||||
* A Lucene field analyzer used to analyze queries against the CPE data.
|
||||
*
|
||||
* @author Jeremy Long (jeremy.long@gmail.com)
|
||||
*/
|
||||
public class SearchFieldAnalyzer extends Analyzer {
|
||||
|
||||
/**
|
||||
* The Lucene Version used
|
||||
*/
|
||||
private Version version = null;
|
||||
/**
|
||||
* A local reference to the TokenPairConcatenatingFilter so that we
|
||||
* can clear any left over state if this analyzer is re-used.
|
||||
*/
|
||||
private TokenPairConcatenatingFilter concatenatingFilter = null;
|
||||
|
||||
/**
|
||||
* Constructs a new SearchFieldAnalyzer
|
||||
* @param version the Lucene version
|
||||
*/
|
||||
public SearchFieldAnalyzer(Version version) {
|
||||
this.version = version;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates the TokenStreamComponents used to analyze the stream.
|
||||
* @param fieldName the field that this lucene analyzer will process
|
||||
* @param reader a reader containing the tokens
|
||||
* @return the token stream filter chain
|
||||
*/
|
||||
@Override
|
||||
protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
|
||||
Tokenizer source = new WhitespaceTokenizer(version, reader);
|
||||
|
||||
TokenStream stream = source;
|
||||
|
||||
stream = new WordDelimiterFilter(stream,
|
||||
WordDelimiterFilter.GENERATE_WORD_PARTS
|
||||
| WordDelimiterFilter.GENERATE_NUMBER_PARTS
|
||||
| WordDelimiterFilter.PRESERVE_ORIGINAL
|
||||
| WordDelimiterFilter.SPLIT_ON_CASE_CHANGE
|
||||
| WordDelimiterFilter.SPLIT_ON_NUMERICS
|
||||
| WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null);
|
||||
|
||||
stream = new LowerCaseFilter(version, stream);
|
||||
concatenatingFilter = new TokenPairConcatenatingFilter(stream);
|
||||
stream = concatenatingFilter;
|
||||
stream = new StopFilter(version, stream, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
|
||||
|
||||
return new TokenStreamComponents(source, stream);
|
||||
}
|
||||
|
||||
/**
|
||||
* <p>Resets the analyzer and clears any internal state data that may
|
||||
* have been left-over from previous uses of the analyzer.</p>
|
||||
* <p><b>If this analyzer is re-used this method must be called between uses.</b></p>
|
||||
*/
|
||||
public void clear() {
|
||||
concatenatingFilter.clear();
|
||||
}
|
||||
}
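Because the TokenPairConcatenatingFilter inside this analyzer remembers the previous token, a SearchFieldAnalyzer that is reused across queries has to be cleared between them; that is exactly the job of the resetSearchingAnalyzer() hook on AbstractIndex. A hedged sketch of how a subclass might wire this up, with invented field names, inside a hypothetical AbstractIndex subclass:

    private SearchFieldAnalyzer productFieldAnalyzer;
    private SearchFieldAnalyzer vendorFieldAnalyzer;

    @Override
    protected void resetSearchingAnalyzer() {
        // drop any token state left over from the previous query
        if (productFieldAnalyzer != null) {
            productFieldAnalyzer.clear();
        }
        if (vendorFieldAnalyzer != null) {
            vendorFieldAnalyzer.clear();
        }
    }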
|
||||
@@ -0,0 +1,97 @@
|
||||
package org.codesecure.dependencycheck.data.lucene;
|
||||
/*
|
||||
* This file is part of DependencyCheck.
|
||||
*
|
||||
* DependencyCheck is free software: you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation, either version 3 of the License, or (at your option) any
|
||||
* later version.
|
||||
*
|
||||
* DependencyCheck is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
|
||||
* details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* DependencyCheck. If not, see http://www.gnu.org/licenses/.
|
||||
*
|
||||
* Copyright (c) 2012 Jeremy Long. All Rights Reserved.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.LinkedList;
|
||||
import org.apache.lucene.analysis.TokenFilter;
|
||||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
|
||||
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
|
||||
|
||||
/**
|
||||
* <p>Takes a TokenStream and adds additional tokens by concatenating pairs of words.</p>
|
||||
* <p><b>Example:</b> "Spring Framework Core" -> "Spring SpringFramework Framework FrameworkCore Core".</p>
|
||||
*
|
||||
* @author Jeremy Long (jeremy.long@gmail.com)
|
||||
*/
|
||||
public final class TokenPairConcatenatingFilter extends TokenFilter {
|
||||
|
||||
private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
|
||||
private final PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
|
||||
private String previousWord = null;
|
||||
private LinkedList<String> words = null;
|
||||
|
||||
/**
|
||||
* Constructs a new TokenPairConcatenatingFilter
|
||||
* @param stream the TokenStream that this filter will process
|
||||
*/
|
||||
public TokenPairConcatenatingFilter(TokenStream stream) {
|
||||
super(stream);
|
||||
words = new LinkedList<String>();
|
||||
}
|
||||
|
||||
/**
|
||||
* Increments the underlying TokenStream and sets the CharTermAttribute to
|
||||
* construct an expanded set of tokens by concatenating each token with the
|
||||
* previous token.
|
||||
*
|
||||
* @return true if a token was added to the stream, otherwise false
|
||||
* @throws IOException is thrown when an IOException occurs
|
||||
*/
|
||||
@Override
|
||||
public boolean incrementToken() throws IOException {
|
||||
|
||||
//collect all the terms into the words collection
|
||||
while (input.incrementToken()) {
|
||||
String word = new String(termAtt.buffer(), 0, termAtt.length());
|
||||
words.add(word);
|
||||
}
|
||||
|
||||
//if we have a previousWord - write it out as its own token concatenated
|
||||
// with the current word (if one is available).
|
||||
if (previousWord != null && words.size() > 0) {
|
||||
String word = words.getFirst();
|
||||
clearAttributes();
|
||||
termAtt.append(previousWord).append(word);
|
||||
posIncAtt.setPositionIncrement(0);
|
||||
previousWord = null;
|
||||
return true;
|
||||
}
|
||||
//if there are words remaining, write the first one out as its own token
|
||||
if (words.size() > 0) {
|
||||
String word = words.removeFirst();
|
||||
clearAttributes();
|
||||
termAtt.append(word);
|
||||
previousWord = word;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* <p>Resets the Filter and clears any internal state data that may
|
||||
* have been left-over from previous uses of the Filter.</p>
|
||||
* <p><b>If this Filter is re-used this method must be called between uses.</b></p>
|
||||
*/
|
||||
public void clear() {
|
||||
previousWord = null;
|
||||
words.clear();
|
||||
}
|
||||
}
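A small, hedged demonstration of the pairing behaviour described in the class javadoc; the WhitespaceTokenizer is only an assumption that mirrors the analyzers above:

    import java.io.IOException;
    import java.io.StringReader;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.util.Version;

    public class PairFilterDemo {
        public static void main(String[] args) throws IOException {
            TokenStream ts = new TokenPairConcatenatingFilter(
                    new WhitespaceTokenizer(Version.LUCENE_40, new StringReader("Spring Framework Core")));
            CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
            ts.reset();
            while (ts.incrementToken()) {
                System.out.print(term.toString() + " ");
            }
            // expected along the lines of: Spring SpringFramework Framework FrameworkCore Core
            ts.end();
            ts.close();
        }
    }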
|
||||
@@ -19,27 +19,17 @@ package org.codesecure.dependencycheck.data.nvdcve;
|
||||
*/
|
||||
|
||||
import java.io.*;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.net.URLDecoder;
|
||||
import java.util.*;
|
||||
import java.util.logging.Level;
|
||||
import java.util.logging.Logger;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.KeywordAnalyzer;
|
||||
import org.apache.lucene.analysis.PerFieldAnalyzerWrapper;
|
||||
import org.apache.lucene.analysis.core.KeywordAnalyzer;
|
||||
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
|
||||
import org.apache.lucene.analysis.standard.StandardAnalyzer;
|
||||
import org.apache.lucene.queryparser.classic.QueryParser;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.FSDirectory;
|
||||
import org.apache.lucene.util.Version;
|
||||
import org.codesecure.dependencycheck.data.CachedWebDataSource;
|
||||
import org.codesecure.dependencycheck.data.UpdateException;
|
||||
import org.codesecure.dependencycheck.data.lucene.AbstractIndex;
|
||||
import org.codesecure.dependencycheck.data.nvdcve.xml.Importer;
|
||||
import org.codesecure.dependencycheck.utils.DownloadFailedException;
|
||||
import org.codesecure.dependencycheck.utils.Downloader;
|
||||
import org.codesecure.dependencycheck.utils.Settings;
|
||||
|
||||
/**
|
||||
@@ -47,24 +37,7 @@ import org.codesecure.dependencycheck.utils.Settings;
|
||||
*
|
||||
* @author Jeremy Long (jeremy.long@gmail.com)
|
||||
*/
|
||||
public class Index extends AbstractIndex implements CachedWebDataSource {
|
||||
|
||||
/**
|
||||
* The name of the properties file containing the timestamp of the last
|
||||
* update.
|
||||
*/
|
||||
private static final String UPDATE_PROPERTIES_FILE = "lastupdated.prop";
|
||||
/**
|
||||
* The properties file key for the last updated field - used to store the
|
||||
* last updated time of the Modified NVD CVE xml file.
|
||||
*/
|
||||
private static final String LAST_UPDATED_MODIFIED = "lastupdated.modified";
|
||||
/**
|
||||
* Stores the last updated time for each of the NVD CVE files. These
|
||||
* timestamps should be updated if we process the modified file within 7
|
||||
* days of the last update.
|
||||
*/
|
||||
private static final String LAST_UPDATED_BASE = "lastupdated.";
|
||||
public class Index extends AbstractIndex {
|
||||
|
||||
/**
|
||||
* Returns the directory that holds the NVD CVE Index. Note, this
|
||||
@@ -98,6 +71,11 @@ public class Index extends AbstractIndex implements CachedWebDataSource {
|
||||
}
|
||||
File path = new File(exePath.getCanonicalFile() + File.separator + fileName);
|
||||
path = new File(path.getCanonicalPath());
|
||||
if (!path.exists()) {
|
||||
if (!path.mkdirs()) {
|
||||
throw new IOException("Unable to create NVD CVE Data directory");
|
||||
}
|
||||
}
|
||||
return path;
|
||||
}
|
||||
|
||||
@@ -107,449 +85,48 @@ public class Index extends AbstractIndex implements CachedWebDataSource {
|
||||
* @return the VULNERABLE_CPE Analyzer.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public Analyzer createAnalyzer() {
|
||||
public Analyzer createIndexingAnalyzer() {
|
||||
Map fieldAnalyzers = new HashMap();
|
||||
|
||||
fieldAnalyzers.put(Fields.CVE_ID, new KeywordAnalyzer());
|
||||
fieldAnalyzers.put(Fields.VULNERABLE_CPE, new KeywordAnalyzer());
|
||||
|
||||
PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(
|
||||
new StandardAnalyzer(Version.LUCENE_35), fieldAnalyzers);
|
||||
new StandardAnalyzer(Version.LUCENE_40), fieldAnalyzers);
|
||||
|
||||
return wrapper;
|
||||
}
|
||||
|
||||
/**
|
||||
* <p>Downloads the latest NVD CVE XML file from the web and imports it into
|
||||
* the current CVE Index.</p>
|
||||
* Creates an Analyzer for the NVD VULNERABLE_CPE Index.
|
||||
*
|
||||
* @throws UpdateException is thrown if there is an error updating the index
|
||||
* @return the VULNERABLE_CPE Analyzer.
|
||||
*/
|
||||
public void update() throws UpdateException {
|
||||
try {
|
||||
Map<String, NvdCveUrl> update = updateNeeded();
|
||||
int maxUpdates = 0;
|
||||
for (NvdCveUrl cve : update.values()) {
|
||||
if (cve.getNeedsUpdate()) {
|
||||
maxUpdates += 1;
|
||||
}
|
||||
}
|
||||
if (maxUpdates > 3) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.WARNING, "NVD CVE requires several updates; this could take a couple of minutes.");
|
||||
}
|
||||
int count = 0;
|
||||
for (NvdCveUrl cve : update.values()) {
|
||||
if (cve.getNeedsUpdate()) {
|
||||
count += 1;
|
||||
Logger.getLogger(Index.class.getName()).log(Level.WARNING, "Updating NVD CVE (" + count + " of " + maxUpdates + ")");
|
||||
URL url = new URL(cve.getUrl());
|
||||
File outputPath = null;
|
||||
try {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.WARNING, "Downloading " + cve.getUrl());
|
||||
outputPath = File.createTempFile("cve" + cve.getId() + "_", ".xml");
|
||||
Downloader.fetchFile(url, outputPath, false);
|
||||
Logger.getLogger(Index.class.getName()).log(Level.WARNING, "Processing " + cve.getUrl());
|
||||
Importer.importXML(outputPath.toString());
|
||||
Logger.getLogger(Index.class.getName()).log(Level.WARNING, "Completed updated " + count + " of " + maxUpdates);
|
||||
} catch (FileNotFoundException ex) {
|
||||
//Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
} catch (IOException ex) {
|
||||
//Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
} finally {
|
||||
try {
|
||||
if (outputPath != null && outputPath.exists()) {
|
||||
outputPath.delete();
|
||||
}
|
||||
} finally {
|
||||
if (outputPath != null && outputPath.exists()) {
|
||||
outputPath.deleteOnExit();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (maxUpdates >= 1) {
|
||||
writeLastUpdatedPropertyFile(update);
|
||||
}
|
||||
} catch (MalformedURLException ex) {
|
||||
//Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
} catch (DownloadFailedException ex) {
|
||||
//Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
}
|
||||
@SuppressWarnings("unchecked")
|
||||
public Analyzer createSearchingAnalyzer() {
|
||||
Map fieldAnalyzers = new HashMap();
|
||||
|
||||
fieldAnalyzers.put(Fields.CVE_ID, new KeywordAnalyzer());
|
||||
fieldAnalyzers.put(Fields.VULNERABLE_CPE, new KeywordAnalyzer());
|
||||
|
||||
PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(
|
||||
new StandardAnalyzer(Version.LUCENE_40), fieldAnalyzers);
|
||||
|
||||
return wrapper;
|
||||
}
|
||||
|
||||
/**
|
||||
* Writes a properties file containing the last updated date to the
|
||||
* VULNERABLE_CPE directory.
|
||||
*
|
||||
* @param timeStamp the timestamp to write.
|
||||
* Creates the Lucene QueryParser used when querying the index
|
||||
* @return a QueryParser
|
||||
*/
|
||||
private void writeLastUpdatedPropertyFile(Map<String, NvdCveUrl> updated) throws UpdateException {
|
||||
String dir;
|
||||
try {
|
||||
dir = getDataDirectory().getCanonicalPath();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException("Unable to locate last updated properties file.", ex);
|
||||
}
|
||||
File cveProp = new File(dir + File.separatorChar + UPDATE_PROPERTIES_FILE);
|
||||
Properties prop = new Properties();
|
||||
|
||||
for (NvdCveUrl cve : updated.values()) {
|
||||
prop.put(LAST_UPDATED_BASE + cve.id, String.valueOf(cve.getTimestamp()));
|
||||
}
|
||||
|
||||
OutputStream os = null;
|
||||
try {
|
||||
os = new FileOutputStream(cveProp);
|
||||
OutputStreamWriter out = new OutputStreamWriter(os);
|
||||
prop.store(out, dir);
|
||||
} catch (FileNotFoundException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException("Unable to find last updated properties file.", ex);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException("Unable to update last updated properties file.", ex);
|
||||
} finally {
|
||||
try {
|
||||
os.flush();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
try {
|
||||
os.close();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
}
|
||||
public QueryParser createQueryParser() {
|
||||
return new QueryParser(Version.LUCENE_40, Fields.VULNERABLE_CPE, getSearchingAnalyzer());
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines if the index needs to be updated. This is done by fetching the
|
||||
* nvd cve meta data and checking the last update date. If the data needs to
|
||||
* be refreshed this method will return the NvdCveUrl for the files that
|
||||
* need to be updated.
|
||||
*
|
||||
* @return the NvdCveUrl of the files that need to be updated.
|
||||
* @throws MalformedURLException is thrown if the URL for the NVD CVE Meta
|
||||
* data is incorrect.
|
||||
* @throws DownloadFailedException is thrown if there is an error.
|
||||
* downloading the nvd cve download data file.
|
||||
* @throws UpdateException Is thrown if there is an issue with the last updated properties file.
|
||||
* Resets the searching analyzers
|
||||
*/
|
||||
public Map<String, NvdCveUrl> updateNeeded() throws MalformedURLException, DownloadFailedException, UpdateException {
|
||||
|
||||
Map<String, NvdCveUrl> currentlyPublished;
|
||||
try {
|
||||
currentlyPublished = retrieveCurrentTimestampsFromWeb();
|
||||
} catch (InvalidDataException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new DownloadFailedException("Unable to retrieve valid timestamp from nvd cve downloads page", ex);
|
||||
}
|
||||
if (currentlyPublished == null) {
|
||||
throw new DownloadFailedException("Unable to retrieve valid timestamp from nvd cve downloads page");
|
||||
}
|
||||
String dir;
|
||||
try {
|
||||
dir = getDataDirectory().getCanonicalPath();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException("Unable to locate last updated properties file.", ex);
|
||||
}
|
||||
|
||||
File f = new File(dir);
|
||||
if (f.exists()) {
|
||||
File cveProp = new File(dir + File.separatorChar + UPDATE_PROPERTIES_FILE);
|
||||
if (cveProp.exists()) {
|
||||
Properties prop = new Properties();
|
||||
InputStream is;
|
||||
try {
|
||||
is = new FileInputStream(cveProp);
|
||||
prop.load(is);
|
||||
long lastUpdated = Long.parseLong(prop.getProperty(Index.LAST_UPDATED_MODIFIED));
|
||||
Date now = new Date();
|
||||
int days = Settings.getInt(Settings.KEYS.CVE_MODIFIED_VALID_FOR_DAYS);
|
||||
int maxEntries = Settings.getInt(Settings.KEYS.CVE_URL_COUNT);
|
||||
if (lastUpdated == currentlyPublished.get("modified").timestamp) {
|
||||
currentlyPublished.clear(); //we don't need to update anything.
|
||||
} else if (withinRange(lastUpdated, now.getTime(), days)) {
|
||||
currentlyPublished.get("modified").setNeedsUpdate(true);
|
||||
for (int i = 1; i <= maxEntries; i++) {
|
||||
currentlyPublished.get(String.valueOf(i)).setNeedsUpdate(false);
|
||||
}
|
||||
} else { //we figure out which of the several XML files need to be downloaded.
|
||||
currentlyPublished.get("modified").setNeedsUpdate(false);
|
||||
for (int i = 1; i <= maxEntries; i++) {
|
||||
NvdCveUrl cve = currentlyPublished.get(String.valueOf(i));
|
||||
long currentTimestamp = 0;
|
||||
try {
|
||||
currentTimestamp = Long.parseLong(prop.getProperty(LAST_UPDATED_BASE + String.valueOf(i), "0"));
|
||||
} catch (NumberFormatException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.FINEST, "Error parsing " + LAST_UPDATED_BASE
|
||||
+ String.valueOf(i) + " from nvdcve.lastupdated", ex);
|
||||
}
|
||||
if (currentTimestamp == cve.getTimestamp()) {
|
||||
cve.setNeedsUpdate(false); //they default to true.
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (FileNotFoundException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.FINEST, null, ex);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.FINEST, null, ex);
|
||||
} catch (NumberFormatException ex) {
|
||||
Logger.getLogger(Index.class.getName()).log(Level.FINEST, null, ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
return currentlyPublished;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines if the epoch date is within the range specified of the
|
||||
* compareTo epoch time. This takes the (compareTo-date)/1000/60/60/24 to
|
||||
* get the number of days. If the calculated days is less than the range the
|
||||
* date is considered valid.
|
||||
*
|
||||
* @param date the date to be checked.
|
||||
* @param compareTo the date to compare to.
|
||||
* @param range the range in days to be considered valid.
|
||||
* @return whether or not the date is within the range.
|
||||
*/
|
||||
private boolean withinRange(long date, long compareTo, int range) {
|
||||
double differenceInDays = (compareTo - date) / 1000 / 60 / 60 / 24;
|
||||
return differenceInDays < range;
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves the timestamps from the NVD CVE meta data file.
|
||||
*
|
||||
* @return the timestamp from the currently published nvdcve downloads page
|
||||
* @throws MalformedURLException is thrown if the URL for the NVD CVE Meta
|
||||
* data is incorrect.
|
||||
* @throws DownloadFailedException is thrown if there is an error
|
||||
* downloading the nvd cve meta data file
|
||||
* @throws InvalidDataException is thrown if there is an exception parsing
|
||||
* the timestamps
|
||||
*/
|
||||
protected Map<String, NvdCveUrl> retrieveCurrentTimestampsFromWeb() throws MalformedURLException, DownloadFailedException, InvalidDataException {
|
||||
Map<String, NvdCveUrl> map = new HashMap<String, NvdCveUrl>();
|
||||
|
||||
File tmp = null;
|
||||
try {
|
||||
tmp = File.createTempFile("cve", "meta");
|
||||
URL url = new URL(Settings.getString(Settings.KEYS.CVE_META_URL));
|
||||
Downloader.fetchFile(url, tmp);
|
||||
String html = readFile(tmp);
|
||||
|
||||
String retrieveUrl = Settings.getString(Settings.KEYS.CVE_MODIFIED_URL);
|
||||
NvdCveUrl cve = createNvdCveUrl("modified", retrieveUrl, html);
|
||||
cve.setNeedsUpdate(false); //the others default to true, to make life easier later this should default to false.
|
||||
map.put("modified", cve);
|
||||
int max = Settings.getInt(Settings.KEYS.CVE_URL_COUNT);
|
||||
for (int i = 1; i <= max; i++) {
|
||||
retrieveUrl = Settings.getString(Settings.KEYS.CVE_BASE_URL + i);
|
||||
String key = Integer.toString(i);
|
||||
cve = createNvdCveUrl(key, retrieveUrl, html);
|
||||
map.put(key, cve);
|
||||
}
|
||||
} catch (IOException ex) {
|
||||
throw new DownloadFailedException("Unable to create temporary file for NVD CVE Meta File download.", ex);
|
||||
} finally {
|
||||
try {
|
||||
if (tmp != null && tmp.exists()) {
|
||||
tmp.delete();
|
||||
}
|
||||
} finally {
|
||||
if (tmp != null && tmp.exists()) {
|
||||
tmp.deleteOnExit();
|
||||
}
|
||||
}
|
||||
}
|
||||
return map;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new NvdCveUrl object from the provided id, url, and text/html
|
||||
* from the NVD CVE downloads page.
|
||||
*
|
||||
* @param id the name of this NVD CVE Url
|
||||
* @param retrieveUrl the URL to download the file from
|
||||
* @param text a bit of HTML from the NVD CVE downloads page that contains
|
||||
* the URL and the last updated timestamp.
|
||||
* @return a shiny new NvdCveUrl object.
|
||||
* @throws InvalidDataException is thrown if the timestamp could not be
|
||||
* extracted from the provided text.
|
||||
*/
|
||||
private NvdCveUrl createNvdCveUrl(String id, String retrieveUrl, String text) throws InvalidDataException {
|
||||
Pattern pattern = Pattern.compile(Pattern.quote(retrieveUrl) + ".+?\\<br");
|
||||
Matcher m = pattern.matcher(text);
|
||||
NvdCveUrl item = new NvdCveUrl();
|
||||
item.id = id;
|
||||
item.url = retrieveUrl;
|
||||
if (m.find()) {
|
||||
String line = m.group();
|
||||
int pos = line.indexOf("Updated:");
|
||||
if (pos > 0) {
|
||||
pos += 9;
|
||||
try {
|
||||
String timestampstr = line.substring(pos, line.length() - 3).replace("at ", "");
|
||||
long timestamp = getEpochTimeFromDateTime(timestampstr);
|
||||
item.setTimestamp(timestamp);
|
||||
} catch (NumberFormatException ex) {
|
||||
throw new InvalidDataException("NVD CVE Meta file does not contain a valid timestamp for '" + retrieveUrl + "'.", ex);
|
||||
}
|
||||
} else {
|
||||
throw new InvalidDataException("NVD CVE Meta file does not contain the updated timestamp for '" + retrieveUrl + "'.");
|
||||
}
|
||||
} else {
|
||||
throw new InvalidDataException("NVD CVE Meta file does not contain the url for '" + retrieveUrl + "'.");
|
||||
}
|
||||
return item;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parses a timestamp in the format of "MM/dd/yy hh:mm" into a calendar
|
||||
* object and returns the epoch time. Note, this removes the millisecond
|
||||
* portion of the epoch time so all numbers returned should end in 000.
|
||||
*
|
||||
* @param timestamp a string in the format of "MM/dd/yy hh:mm"
|
||||
* @return the epoch time in milliseconds.
|
||||
* @throws NumberFormatException if the timestamp was parsed incorrectly.
|
||||
*/
|
||||
private long getEpochTimeFromDateTime(String timestamp) throws NumberFormatException {
|
||||
Calendar c = new GregorianCalendar();
|
||||
int month = Integer.parseInt(timestamp.substring(0, 2));
|
||||
int date = Integer.parseInt(timestamp.substring(3, 5));
|
||||
int year = 2000 + Integer.parseInt(timestamp.substring(6, 8));
|
||||
int hourOfDay = Integer.parseInt(timestamp.substring(9, 11));
|
||||
int minute = Integer.parseInt(timestamp.substring(12, 14));
|
||||
c.set(year, month, date, hourOfDay, minute, 0);
|
||||
long t = c.getTimeInMillis();
|
||||
t = (t / 1000) * 1000;
|
||||
return t;
|
||||
}
|
||||
|
||||
/**
|
||||
* Reads a file into a string.
|
||||
*
|
||||
* @param file the file to be read.
|
||||
* @return the contents of the file.
|
||||
* @throws IOException is thrown if an IOException occurs.
|
||||
*/
|
||||
private String readFile(File file) throws IOException {
|
||||
FileReader stream = new FileReader(file);
|
||||
StringBuilder str = new StringBuilder((int) file.length());
|
||||
try {
|
||||
char[] buf = new char[8096];
|
||||
int read = stream.read(buf, 0, 8096);
|
||||
while (read > 0) {
|
||||
str.append(buf, 0, read);
|
||||
read = stream.read(buf, 0, 8096);
|
||||
}
|
||||
} finally {
|
||||
stream.close();
|
||||
}
|
||||
return str.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* A pojo that contains the Url and timestamp of the current NvdCve XML
|
||||
* files.
|
||||
*/
|
||||
protected class NvdCveUrl {
|
||||
|
||||
/**
|
||||
* an id.
|
||||
*/
|
||||
private String id;
|
||||
|
||||
/**
|
||||
* Get the value of id
|
||||
*
|
||||
* @return the value of id
|
||||
*/
|
||||
public String getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the value of id
|
||||
*
|
||||
* @param id new value of id
|
||||
*/
|
||||
public void setId(String id) {
|
||||
this.id = id;
|
||||
}
|
||||
/**
|
||||
* a url.
|
||||
*/
|
||||
private String url;
|
||||
|
||||
/**
|
||||
* Get the value of url
|
||||
*
|
||||
* @return the value of url
|
||||
*/
|
||||
public String getUrl() {
|
||||
return url;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the value of url
|
||||
*
|
||||
* @param url new value of url
|
||||
*/
|
||||
public void setUrl(String url) {
|
||||
this.url = url;
|
||||
}
|
||||
/**
|
||||
* a timestamp - epoch time.
|
||||
*/
|
||||
private long timestamp;
|
||||
|
||||
/**
|
||||
* Get the value of timestamp - epoch time
|
||||
*
|
||||
* @return the value of timestamp - epoch time
|
||||
*/
|
||||
public long getTimestamp() {
|
||||
return timestamp;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the value of timestamp - epoch time
|
||||
*
|
||||
* @param timestamp new value of timestamp - epoch time
|
||||
*/
|
||||
public void setTimestamp(long timestamp) {
|
||||
this.timestamp = timestamp;
|
||||
}
|
||||
/**
|
||||
* indicates whether or not this item should be updated.
|
||||
*/
|
||||
private boolean needsUpdate = true;
|
||||
|
||||
/**
|
||||
* Get the value of needsUpdate
|
||||
*
|
||||
* @return the value of needsUpdate
|
||||
*/
|
||||
public boolean getNeedsUpdate() {
|
||||
return needsUpdate;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the value of needsUpdate
|
||||
*
|
||||
* @param needsUpdate new value of needsUpdate
|
||||
*/
|
||||
public void setNeedsUpdate(boolean needsUpdate) {
|
||||
this.needsUpdate = needsUpdate;
|
||||
}
|
||||
protected void resetSearchingAnalyzer() {
|
||||
//do nothing
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ package org.codesecure.dependencycheck.data.nvdcve;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.UnsupportedEncodingException;
|
||||
import java.util.Set;
|
||||
import java.util.logging.Level;
|
||||
import java.util.logging.Logger;
|
||||
@@ -55,10 +56,6 @@ public class NvdCveAnalyzer implements org.codesecure.dependencycheck.analyzer.A
|
||||
* The CVE Index.
|
||||
*/
|
||||
protected Index cve = null;
|
||||
/**
|
||||
* The Lucene IndexSearcher.
|
||||
*/
|
||||
private IndexSearcher indexSearcher = null;
|
||||
|
||||
/**
|
||||
* Opens the data source.
|
||||
@@ -69,14 +66,12 @@ public class NvdCveAnalyzer implements org.codesecure.dependencycheck.analyzer.A
|
||||
public void open() throws IOException {
|
||||
cve = new Index();
|
||||
cve.open();
|
||||
indexSearcher = cve.getIndexSearcher();
|
||||
}
|
||||
|
||||
/**
|
||||
* Closes the data source.
|
||||
*/
|
||||
public void close() {
|
||||
indexSearcher = null;
|
||||
cve.close();
|
||||
}
|
||||
|
||||
@@ -131,9 +126,9 @@ public class NvdCveAnalyzer implements org.codesecure.dependencycheck.analyzer.A
|
||||
query.add(query1, BooleanClause.Occur.SHOULD);
|
||||
query.add(query2, BooleanClause.Occur.SHOULD);
|
||||
|
||||
TopDocs docs = indexSearcher.search(query, MAX_QUERY_RESULTS);
|
||||
TopDocs docs = cve.search(query, MAX_QUERY_RESULTS);
|
||||
for (ScoreDoc d : docs.scoreDocs) {
|
||||
Document doc = indexSearcher.doc(d.doc);
|
||||
Document doc = cve.getDocument(d.doc);
|
||||
String xml = doc.get(Fields.XML);
|
||||
Vulnerability vuln;
|
||||
try {
|
||||
@@ -142,6 +137,9 @@ public class NvdCveAnalyzer implements org.codesecure.dependencycheck.analyzer.A
|
||||
} catch (JAXBException ex) {
|
||||
Logger.getLogger(NvdCveAnalyzer.class.getName()).log(Level.SEVERE, null, ex);
|
||||
dependency.addAnalysisException(new AnalysisException("Unable to retrieve vulnerability data", ex));
|
||||
} catch (UnsupportedEncodingException ex) {
|
||||
Logger.getLogger(NvdCveAnalyzer.class.getName()).log(Level.SEVERE, null, ex);
|
||||
dependency.addAnalysisException(new AnalysisException("Unable to retrieve vulnerability data - utf-8", ex));
|
||||
}
|
||||
}
|
||||
} catch (IOException ex) {
|
||||
@@ -198,11 +196,11 @@ public class NvdCveAnalyzer implements org.codesecure.dependencycheck.analyzer.A
|
||||
this.open();
|
||||
}
|
||||
|
||||
private Vulnerability parseVulnerability(String xml) throws JAXBException {
|
||||
private Vulnerability parseVulnerability(String xml) throws JAXBException, UnsupportedEncodingException {
|
||||
|
||||
JAXBContext jaxbContext = JAXBContext.newInstance(VulnerabilityType.class);
|
||||
Unmarshaller unmarshaller = jaxbContext.createUnmarshaller();
|
||||
ByteArrayInputStream input = new ByteArrayInputStream(xml.getBytes());
|
||||
ByteArrayInputStream input = new ByteArrayInputStream(xml.getBytes("UTF-8"));
|
||||
VulnerabilityType cvedata = (VulnerabilityType) unmarshaller.unmarshal(input);
|
||||
if (cvedata == null) {
|
||||
return null;
|
||||
|
||||
@@ -1,106 +0,0 @@
|
||||
package org.codesecure.dependencycheck.data.nvdcve.xml;
|
||||
/*
|
||||
* This file is part of DependencyCheck.
|
||||
*
|
||||
* DependencyCheck is free software: you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation, either version 3 of the License, or (at your option) any
|
||||
* later version.
|
||||
*
|
||||
* DependencyCheck is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
|
||||
* details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* DependencyCheck. If not, see http://www.gnu.org/licenses/.
|
||||
*
|
||||
* Copyright (c) 2012 Jeremy Long. All Rights Reserved.
|
||||
*/
|
||||
|
||||
import java.io.*;
|
||||
import java.util.logging.Level;
|
||||
import java.util.logging.Logger;
|
||||
import org.apache.lucene.index.CorruptIndexException;
|
||||
|
||||
/**
|
||||
* Imports an NVD CVE XML file into the Lucene NVD CVE Index.
|
||||
*
|
||||
* @author Jeremy Long (jeremy.long@gmail.com)
|
||||
*/
|
||||
public class Importer {
|
||||
|
||||
/**
|
||||
* Private constructor for utility class.
|
||||
*/
|
||||
private Importer() {
|
||||
}
|
||||
|
||||
/**
|
||||
* Imports the NVD CVE XML File into the Lucene Index.
|
||||
*
|
||||
* @param file containing the path to the NVD CVE XML file.
|
||||
*/
|
||||
public static void importXML(File file) {
|
||||
NvdCveParser indexer = null;
|
||||
try {
|
||||
|
||||
indexer = new NvdCveParser();
|
||||
|
||||
indexer.openIndexWriter();
|
||||
|
||||
|
||||
indexer.parse(file);
|
||||
|
||||
} catch (CorruptIndexException ex) {
|
||||
Logger.getLogger(Importer.class.getName()).log(Level.SEVERE, null, ex);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(Importer.class.getName()).log(Level.SEVERE, null, ex);
|
||||
} finally {
|
||||
if (indexer != null) {
|
||||
indexer.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
// public static void importXML(File file) throws FileNotFoundException, IOException, JAXBException,
|
||||
// ParserConfigurationException, SAXException {
|
||||
//
|
||||
// SAXParserFactory factory = SAXParserFactory.newInstance();
|
||||
// factory.setNamespaceAware(true);
|
||||
// XMLReader reader = factory.newSAXParser().getXMLReader();
|
||||
//
|
||||
// JAXBContext context = JAXBContext.newInstance("org.codesecure.dependencycheck.data.nvdcve.generated");
|
||||
// NvdCveXmlFilter filter = new NvdCveXmlFilter(context);
|
||||
//
|
||||
// Indexer indexer = new Indexer();
|
||||
// indexer.openIndexWriter();
|
||||
//
|
||||
// filter.registerSaveDelegate(indexer);
|
||||
//
|
||||
// reader.setContentHandler(filter);
|
||||
// Reader fileReader = new FileReader(file);
|
||||
// InputSource is = new InputSource(fileReader);
|
||||
// try {
|
||||
// reader.parse(is);
|
||||
// } catch (IOException ex) {
|
||||
// Logger.getLogger(Importer.class.getName()).log(Level.SEVERE, null, ex);
|
||||
// } catch (SAXException ex) {
|
||||
// Logger.getLogger(Importer.class.getName()).log(Level.SEVERE, null, ex);
|
||||
// } finally {
|
||||
// indexer.close();
|
||||
// }
|
||||
// }
|
||||
|
||||
/**
|
||||
* Imports the NVD CVE XML file into the Lucene Index.
|
||||
*
|
||||
* @param path the path to the NVD CVE XML file.
|
||||
*/
|
||||
public static void importXML(String path) {
|
||||
File f = new File(path);
|
||||
if (!f.exists()) {
|
||||
f.mkdirs();
|
||||
}
|
||||
Importer.importXML(f);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,627 @@
|
||||
package org.codesecure.dependencycheck.data.nvdcve.xml;
|
||||
/*
|
||||
* This file is part of DependencyCheck.
|
||||
*
|
||||
* DependencyCheck is free software: you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation, either version 3 of the License, or (at your option) any
|
||||
* later version.
|
||||
*
|
||||
* DependencyCheck is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
|
||||
* details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* DependencyCheck. If not, see http://www.gnu.org/licenses/.
|
||||
*
|
||||
* Copyright (c) 2012 Jeremy Long. All Rights Reserved.
|
||||
*/
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.InputStreamReader;
|
||||
import java.io.OutputStream;
|
||||
import java.io.OutputStreamWriter;
|
||||
import org.codesecure.dependencycheck.data.CachedWebDataSource;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.util.Calendar;
|
||||
import java.util.Date;
|
||||
import java.util.GregorianCalendar;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.logging.Level;
|
||||
import java.util.logging.Logger;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
import org.apache.lucene.index.CorruptIndexException;
|
||||
import org.codesecure.dependencycheck.data.nvdcve.Index;
|
||||
import org.codesecure.dependencycheck.data.UpdateException;
|
||||
import org.codesecure.dependencycheck.utils.DownloadFailedException;
|
||||
import org.codesecure.dependencycheck.utils.Downloader;
|
||||
import org.codesecure.dependencycheck.utils.FileUtils;
|
||||
import org.codesecure.dependencycheck.utils.Settings;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Jeremy Long (jeremy.long@gmail.com)
|
||||
*/
|
||||
public class IndexUpdater extends Index implements CachedWebDataSource {
|
||||
|
||||
/**
|
||||
* The name of the properties file containing the timestamp of the last
|
||||
* update.
|
||||
*/
|
||||
private static final String UPDATE_PROPERTIES_FILE = "lastupdated.prop";
|
||||
/**
|
||||
* The properties file key for the last updated field - used to store the
|
||||
* last updated time of the Modified NVD CVE xml file.
|
||||
*/
|
||||
private static final String LAST_UPDATED_MODIFIED = "lastupdated.modified";
|
||||
/**
|
||||
* Stores the last updated time for each of the NVD CVE files. These
|
||||
* timestamps should be updated if we process the modified file within 7
|
||||
* days of the last update.
|
||||
*/
|
||||
private static final String LAST_UPDATED_BASE = "lastupdated.";
|
||||
/**
|
||||
* The current version of the index
|
||||
*/
|
||||
public static final String INDEX_VERSION = "1.1";
|
||||
|
||||
/**
|
||||
* <p>Downloads the latest NVD CVE XML file from the web and imports it into
|
||||
* the current CVE Index.</p>
|
||||
*
|
||||
* @throws UpdateException is thrown if there is an error updating the index
|
||||
*/
|
||||
public void update() throws UpdateException {
|
||||
try {
|
||||
Map<String, NvdCveUrl> update = updateNeeded();
|
||||
int maxUpdates = 0;
|
||||
for (NvdCveUrl cve : update.values()) {
|
||||
if (cve.getNeedsUpdate()) {
|
||||
maxUpdates += 1;
|
||||
}
|
||||
}
|
||||
if (maxUpdates > 3) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.WARNING,
|
||||
"NVD CVE requires several updates; this could take a couple of minutes.");
|
||||
}
|
||||
int count = 0;
|
||||
for (NvdCveUrl cve : update.values()) {
|
||||
if (cve.getNeedsUpdate()) {
|
||||
count += 1;
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.WARNING,
|
||||
"Updating NVD CVE ({0} of {1})", new Object[]{count, maxUpdates});
|
||||
URL url = new URL(cve.getUrl());
|
||||
File outputPath = null;
|
||||
try {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.WARNING, "Downloading {0}", cve.getUrl());
|
||||
outputPath = File.createTempFile("cve" + cve.getId() + "_", ".xml");
|
||||
Downloader.fetchFile(url, outputPath, false);
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.WARNING, "Processing {0}", cve.getUrl());
|
||||
importXML(outputPath.toString());
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.WARNING,
|
||||
"Completed updated {0} of {1}", new Object[]{count, maxUpdates});
|
||||
} catch (FileNotFoundException ex) {
|
||||
//Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
} catch (IOException ex) {
|
||||
//Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
} finally {
|
||||
try {
|
||||
if (outputPath != null && outputPath.exists()) {
|
||||
outputPath.delete();
|
||||
}
|
||||
} finally {
|
||||
if (outputPath != null && outputPath.exists()) {
|
||||
outputPath.deleteOnExit();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (maxUpdates >= 1) {
|
||||
writeLastUpdatedPropertyFile(update);
|
||||
}
|
||||
} catch (MalformedURLException ex) {
|
||||
//Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
} catch (DownloadFailedException ex) {
|
||||
//Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException(ex);
|
||||
}
|
||||
}
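Since IndexUpdater implements CachedWebDataSource, the whole download-and-import cycle above hangs off a single call. A hedged usage sketch (the driver class is invented):

    import org.codesecure.dependencycheck.data.UpdateException;
    import org.codesecure.dependencycheck.data.nvdcve.xml.IndexUpdater;

    public class UpdateDriver {
        public static void main(String[] args) {
            IndexUpdater updater = new IndexUpdater();
            try {
                updater.update();   // downloads and imports only the feeds flagged by updateNeeded()
            } catch (UpdateException ex) {
                // the existing index, if any, is left untouched when the update fails
                System.err.println("NVD CVE update failed: " + ex.getMessage());
            }
        }
    }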
|
||||
|
||||
/**
|
||||
* Imports the NVD CVE XML File into the Lucene Index.
|
||||
*
|
||||
* @param file containing the path to the NVD CVE XML file.
|
||||
*/
|
||||
private void importXML(File file) {
|
||||
if (!file.exists()) {
|
||||
file.mkdirs();
|
||||
}
|
||||
NvdCveParser indexer = null;
|
||||
org.codesecure.dependencycheck.data.cpe.xml.Indexer cpeIndexer = null;
|
||||
try {
|
||||
indexer = new NvdCveParser();
|
||||
indexer.openIndexWriter();
|
||||
|
||||
//HACK - hack to ensure all CPE data is stored in the index.
|
||||
cpeIndexer = new org.codesecure.dependencycheck.data.cpe.xml.Indexer();
|
||||
cpeIndexer.openIndexWriter();
|
||||
indexer.setCPEIndexer(cpeIndexer);
|
||||
|
||||
indexer.parse(file);
|
||||
} catch (CorruptIndexException ex) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
} finally {
|
||||
if (indexer != null) {
|
||||
indexer.close();
|
||||
}
|
||||
if (cpeIndexer != null) {
|
||||
cpeIndexer.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
// public static void importXML(File file) throws FileNotFoundException, IOException, JAXBException,
|
||||
// ParserConfigurationException, SAXException {
|
||||
//
|
||||
// SAXParserFactory factory = SAXParserFactory.newInstance();
|
||||
// factory.setNamespaceAware(true);
|
||||
// XMLReader reader = factory.newSAXParser().getXMLReader();
|
||||
//
|
||||
// JAXBContext context = JAXBContext.newInstance("org.codesecure.dependencycheck.data.nvdcve.generated");
|
||||
// NvdCveXmlFilter filter = new NvdCveXmlFilter(context);
|
||||
//
|
||||
// Indexer indexer = new Indexer();
|
||||
// indexer.openIndexWriter();
|
||||
//
|
||||
// filter.registerSaveDelegate(indexer);
|
||||
//
|
||||
// reader.setContentHandler(filter);
|
||||
// Reader fileReader = new FileReader(file);
|
||||
// InputSource is = new InputSource(fileReader);
|
||||
// try {
|
||||
// reader.parse(is);
|
||||
// } catch (IOException ex) {
|
||||
// Logger.getLogger(Importer.class.getName()).log(Level.SEVERE, null, ex);
|
||||
// } catch (SAXException ex) {
|
||||
// Logger.getLogger(Importer.class.getName()).log(Level.SEVERE, null, ex);
|
||||
// } finally {
|
||||
// indexer.close();
|
||||
// }
|
||||
// }
|
||||
|
||||
/**
|
||||
* Imports the NVD CVE XML file into the Lucene Index.
|
||||
*
|
||||
* @param path the path to the NVD CVE XML file.
|
||||
*/
|
||||
private void importXML(String path) {
|
||||
File f = new File(path);
|
||||
importXML(f);
|
||||
}
|
||||
|
||||
/**
|
||||
* Writes a properties file containing the last updated date to the
|
||||
* VULNERABLE_CPE directory.
|
||||
*
|
||||
* @param timeStamp the timestamp to write.
|
||||
*/
|
||||
private void writeLastUpdatedPropertyFile(Map<String, NvdCveUrl> updated) throws UpdateException {
|
||||
String dir;
|
||||
try {
|
||||
dir = getDataDirectory().getCanonicalPath();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException("Unable to locate last updated properties file.", ex);
|
||||
}
|
||||
File cveProp = new File(dir + File.separatorChar + UPDATE_PROPERTIES_FILE);
|
||||
Properties prop = new Properties();
|
||||
prop.put("version", INDEX_VERSION);
|
||||
for (NvdCveUrl cve : updated.values()) {
|
||||
prop.put(LAST_UPDATED_BASE + cve.id, String.valueOf(cve.getTimestamp()));
|
||||
}
|
||||
|
||||
OutputStream os = null;
|
||||
try {
|
||||
os = new FileOutputStream(cveProp);
|
||||
OutputStreamWriter out = new OutputStreamWriter(os, "UTF-8");
|
||||
prop.store(out, dir);
|
||||
} catch (FileNotFoundException ex) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException("Unable to find last updated properties file.", ex);
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new UpdateException("Unable to update last updated properties file.", ex);
|
||||
} finally {
|
||||
if (os != null) {
|
||||
try {
|
||||
os.flush();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
try {
|
||||
os.close();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
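For reference, the lastupdated.prop file written above is a plain java.util.Properties file keyed by feed id, roughly along these lines (the timestamp values here are made up):

    version=1.1
    lastupdated.modified=1354320000000
    lastupdated.1=1354233600000
    lastupdated.2=1354233600000
    ...

updateNeeded() below reads the same keys back to decide which feeds, if any, need to be downloaded again.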
|
||||
|
||||
/**
|
||||
* Determines if the index needs to be updated. This is done by fetching the
|
||||
* nvd cve meta data and checking the last update date. If the data needs to
|
||||
* be refreshed this method will return the NvdCveUrl for the files that
|
||||
* need to be updated.
|
||||
*
|
||||
* @return the NvdCveUrl of the files that need to be updated.
|
||||
* @throws MalformedURLException is thrown if the URL for the NVD CVE Meta
|
||||
* data is incorrect.
|
||||
* @throws DownloadFailedException is thrown if there is an error.
|
||||
* downloading the nvd cve download data file.
|
||||
* @throws UpdateException Is thrown if there is an issue with the last updated properties file.
|
||||
*/
|
||||
public Map<String, NvdCveUrl> updateNeeded() throws MalformedURLException, DownloadFailedException, UpdateException {
|
||||
|
||||
Map<String, NvdCveUrl> currentlyPublished;
|
||||
try {
|
||||
currentlyPublished = retrieveCurrentTimestampsFromWeb();
|
||||
} catch (InvalidDataException ex) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
throw new DownloadFailedException("Unable to retrieve valid timestamp from nvd cve downloads page", ex);
|
||||
}
|
||||
if (currentlyPublished == null) {
|
||||
throw new DownloadFailedException("Unable to retrieve valid timestamp from nvd cve downloads page");
|
||||
}
|
||||
String dir;
|
||||
try {
|
||||
dir = getDataDirectory().getCanonicalPath();
|
||||
} catch (IOException ex) {
|
||||
Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
|
||||
            throw new UpdateException("Unable to locate last updated properties file.", ex);
        }

        File f = new File(dir);
        if (f.exists()) {
            File cveProp = new File(dir + File.separatorChar + UPDATE_PROPERTIES_FILE);
            if (cveProp.exists()) {
                Properties prop = new Properties();
                InputStream is = null;
                try {
                    is = new FileInputStream(cveProp);
                    prop.load(is);

                    boolean deleteAndRecreate = false;
                    float version = 0;

                    if (prop.getProperty("version") == null) {
                        deleteAndRecreate = true;
                    } else {
                        try {
                            version = Float.parseFloat(prop.getProperty("version"));
                            float currentVersion = Float.parseFloat(INDEX_VERSION);
                            if (currentVersion > version) {
                                deleteAndRecreate = true;
                            }
                        } catch (NumberFormatException ex) {
                            deleteAndRecreate = true;
                        }
                    }
                    if (deleteAndRecreate) {
                        Logger.getLogger(IndexUpdater.class.getName()).log(Level.WARNING, "Index version is old. Rebuilding the index.");
                        is.close();
                        //this is an old version of the lucene index - just delete it
                        FileUtils.delete(f);

                        //this importer also updates the CPE index and it is also using an old version
                        org.codesecure.dependencycheck.data.cpe.Index cpeidx = new org.codesecure.dependencycheck.data.cpe.Index();
                        File cpeDir = cpeidx.getDataDirectory();
                        FileUtils.delete(cpeDir);
                        return currentlyPublished;
                    }

                    long lastUpdated = Long.parseLong(prop.getProperty(LAST_UPDATED_MODIFIED));
                    Date now = new Date();
                    int days = Settings.getInt(Settings.KEYS.CVE_MODIFIED_VALID_FOR_DAYS);
                    int maxEntries = Settings.getInt(Settings.KEYS.CVE_URL_COUNT);
                    if (lastUpdated == currentlyPublished.get("modified").timestamp) {
                        currentlyPublished.clear(); //we don't need to update anything.
                    } else if (withinRange(lastUpdated, now.getTime(), days)) {
                        currentlyPublished.get("modified").setNeedsUpdate(true);
                        for (int i = 1; i <= maxEntries; i++) {
                            currentlyPublished.get(String.valueOf(i)).setNeedsUpdate(false);
                        }
                    } else { //we figure out which of the several XML files need to be downloaded.
                        currentlyPublished.get("modified").setNeedsUpdate(false);
                        for (int i = 1; i <= maxEntries; i++) {
                            NvdCveUrl cve = currentlyPublished.get(String.valueOf(i));
                            long currentTimestamp = 0;
                            try {
                                currentTimestamp = Long.parseLong(prop.getProperty(LAST_UPDATED_BASE + String.valueOf(i), "0"));
                            } catch (NumberFormatException ex) {
                                Logger.getLogger(IndexUpdater.class.getName()).log(Level.FINEST, "Error parsing " + LAST_UPDATED_BASE
                                        + String.valueOf(i) + " from nvdcve.lastupdated", ex);
                            }
                            if (currentTimestamp == cve.getTimestamp()) {
                                cve.setNeedsUpdate(false); //they default to true.
                            }
                        }
                    }
                } catch (FileNotFoundException ex) {
                    Logger.getLogger(IndexUpdater.class.getName()).log(Level.FINEST, null, ex);
                } catch (IOException ex) {
                    Logger.getLogger(IndexUpdater.class.getName()).log(Level.FINEST, null, ex);
                } catch (NumberFormatException ex) {
                    Logger.getLogger(IndexUpdater.class.getName()).log(Level.FINEST, null, ex);
                } finally {
                    if (is != null) {
                        try {
                            is.close();
                        } catch (IOException ex) {
                            Logger.getLogger(IndexUpdater.class.getName()).log(Level.SEVERE, null, ex);
                        }
                    }
                }
            }
        }
        return currentlyPublished;
    }

    /**
     * Determines if the epoch date is within the specified range of the
     * compareTo epoch time. This takes (compareTo-date)/1000/60/60/24 to
     * get the number of days. If the calculated number of days is less than
     * the range, the date is considered valid.
     *
     * @param date the date to be checked.
     * @param compareTo the date to compare to.
     * @param range the range in days to be considered valid.
     * @return whether or not the date is within the range.
     */
    private boolean withinRange(long date, long compareTo, int range) {
        double differenceInDays = (compareTo - date) / 1000 / 60 / 60 / 24;
        return differenceInDays < range;
    }
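
    For illustration only (not part of this commit): the comparison works on epoch
    milliseconds, so a "modified" feed pulled five days ago still falls inside a
    seven-day window. A minimal sketch, assuming withinRange is visible to the caller:

        // illustrative sketch; the values are arbitrary
        long now = System.currentTimeMillis();
        long fiveDaysAgo = now - (5L * 24 * 60 * 60 * 1000);
        // (now - fiveDaysAgo) / 1000 / 60 / 60 / 24 == 5, and 5 < 7
        boolean stillCurrent = withinRange(fiveDaysAgo, now, 7); // true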

    /**
     * Retrieves the timestamps from the NVD CVE meta data file.
     *
     * @return the timestamp from the currently published nvdcve downloads page
     * @throws MalformedURLException is thrown if the URL for the NVD CVE meta
     * data is incorrect.
     * @throws DownloadFailedException is thrown if there is an error
     * downloading the nvd cve meta data file
     * @throws InvalidDataException is thrown if there is an exception parsing
     * the timestamps
     */
    protected Map<String, NvdCveUrl> retrieveCurrentTimestampsFromWeb() throws MalformedURLException, DownloadFailedException, InvalidDataException {
        Map<String, NvdCveUrl> map = new HashMap<String, NvdCveUrl>();

        File tmp = null;
        try {
            tmp = File.createTempFile("cve", "meta");
            URL url = new URL(Settings.getString(Settings.KEYS.CVE_META_URL));
            Downloader.fetchFile(url, tmp);
            String html = readFile(tmp);

            String retrieveUrl = Settings.getString(Settings.KEYS.CVE_MODIFIED_URL);
            NvdCveUrl cve = createNvdCveUrl("modified", retrieveUrl, html);
            cve.setNeedsUpdate(false); //the others default to true, to make life easier later this should default to false.
            map.put("modified", cve);
            int max = Settings.getInt(Settings.KEYS.CVE_URL_COUNT);
            for (int i = 1; i <= max; i++) {
                retrieveUrl = Settings.getString(Settings.KEYS.CVE_BASE_URL + i);
                String key = Integer.toString(i);
                cve = createNvdCveUrl(key, retrieveUrl, html);
                map.put(key, cve);
            }
        } catch (IOException ex) {
            throw new DownloadFailedException("Unable to create temporary file for NVD CVE Meta File download.", ex);
        } finally {
            try {
                if (tmp != null && tmp.exists()) {
                    tmp.delete();
                }
            } finally {
                if (tmp != null && tmp.exists()) {
                    tmp.deleteOnExit();
                }
            }
        }
        return map;
    }
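
    For illustration only: a sketch of how the returned map is typically read. The keys
    are "modified" plus "1" through CVE_URL_COUNT, and each value records whether its
    file needs to be re-downloaded. Exception handling is omitted in the sketch.

        // illustrative sketch; runs inside IndexUpdater since the method is protected
        Map<String, NvdCveUrl> current = retrieveCurrentTimestampsFromWeb();
        for (NvdCveUrl item : current.values()) {
            if (item.getNeedsUpdate()) {
                System.out.println(item.getId() + " -> " + item.getUrl());
            }
        }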

    /**
     * Creates a new NvdCveUrl object from the provided id, url, and text/html
     * from the NVD CVE downloads page.
     *
     * @param id the name of this NVD CVE Url
     * @param retrieveUrl the URL to download the file from
     * @param text a bit of HTML from the NVD CVE downloads page that contains
     * the URL and the last updated timestamp.
     * @return a shiny new NvdCveUrl object.
     * @throws InvalidDataException is thrown if the timestamp could not be
     * extracted from the provided text.
     */
    private NvdCveUrl createNvdCveUrl(String id, String retrieveUrl, String text) throws InvalidDataException {
        Pattern pattern = Pattern.compile(Pattern.quote(retrieveUrl) + ".+?\\<br");
        Matcher m = pattern.matcher(text);
        NvdCveUrl item = new NvdCveUrl();
        item.id = id;
        item.url = retrieveUrl;
        if (m.find()) {
            String line = m.group();
            int pos = line.indexOf("Updated:");
            if (pos > 0) {
                pos += 9;
                try {
                    String timestampstr = line.substring(pos, line.length() - 3).replace("at ", "");
                    long timestamp = getEpochTimeFromDateTime(timestampstr);
                    item.setTimestamp(timestamp);
                } catch (NumberFormatException ex) {
                    throw new InvalidDataException("NVD CVE Meta file does not contain a valid timestamp for '" + retrieveUrl + "'.", ex);
                }
            } else {
                throw new InvalidDataException("NVD CVE Meta file does not contain the updated timestamp for '" + retrieveUrl + "'.");
            }
        } else {
            throw new InvalidDataException("NVD CVE Meta file does not contain the url for '" + retrieveUrl + "'.");
        }
        return item;
    }
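
    Purely as a hypothetical illustration (the actual NVD downloads-page markup is not
    part of this change), a matched fragment shaped as follows would be handled the way
    the comments describe:

        // hypothetical matched fragment:  <retrieveUrl> ... Updated: 11/20/12 at 09:30 AM<br
        // m.group() ends at "<br"; pos is advanced past "Updated: " (8 chars plus a space),
        // substring(pos, line.length() - 3) drops the trailing "<br", "at " is removed,
        // leaving "11/20/12 09:30 AM" for getEpochTimeFromDateTime below.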

    /**
     * Parses a timestamp in the format of "MM/dd/yy hh:mm" into a calendar
     * object and returns the epoch time. Note, this removes the millisecond
     * portion of the epoch time so all numbers returned should end in 000.
     *
     * @param timestamp a string in the format of "MM/dd/yy hh:mm"
     * @return the epoch time in milliseconds, truncated to whole seconds.
     * @throws NumberFormatException if the timestamp was parsed incorrectly.
     */
    private long getEpochTimeFromDateTime(String timestamp) throws NumberFormatException {
        Calendar c = new GregorianCalendar();
        int month = Integer.parseInt(timestamp.substring(0, 2));
        int date = Integer.parseInt(timestamp.substring(3, 5));
        int year = 2000 + Integer.parseInt(timestamp.substring(6, 8));
        int hourOfDay = Integer.parseInt(timestamp.substring(9, 11));
        int minute = Integer.parseInt(timestamp.substring(12, 14));
        c.set(year, month, date, hourOfDay, minute, 0);
        long t = c.getTimeInMillis();
        t = (t / 1000) * 1000;
        return t;
    }
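
    Purely as an illustration of the fixed-position parsing above (the input value is
    hypothetical):

        // "11/20/12 09:30"
        //  MM/dd/yy hh:mm   ->  month=11, date=20, year=2000+12, hourOfDay=9, minute=30
        // the result is then truncated to whole seconds, so it always ends in 000 milliseconds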

    /**
     * Reads a file into a string.
     *
     * @param file the file to be read.
     * @return the contents of the file.
     * @throws IOException is thrown if an IOException occurs.
     */
    private String readFile(File file) throws IOException {
        InputStreamReader stream = new InputStreamReader(new FileInputStream(file), "UTF-8");
        StringBuilder str = new StringBuilder((int) file.length());
        try {
            char[] buf = new char[8096];
            int read = stream.read(buf, 0, 8096);
            while (read > 0) {
                str.append(buf, 0, read);
                read = stream.read(buf, 0, 8096);
            }
        } finally {
            stream.close();
        }
        return str.toString();
    }

    /**
     * A pojo that contains the Url and timestamp of the current NvdCve XML
     * files.
     */
    protected class NvdCveUrl {

        /**
         * an id.
         */
        private String id;

        /**
         * Get the value of id
         *
         * @return the value of id
         */
        public String getId() {
            return id;
        }

        /**
         * Set the value of id
         *
         * @param id new value of id
         */
        public void setId(String id) {
            this.id = id;
        }
        /**
         * a url.
         */
        private String url;

        /**
         * Get the value of url
         *
         * @return the value of url
         */
        public String getUrl() {
            return url;
        }

        /**
         * Set the value of url
         *
         * @param url new value of url
         */
        public void setUrl(String url) {
            this.url = url;
        }
        /**
         * a timestamp - epoch time.
         */
        private long timestamp;

        /**
         * Get the value of timestamp - epoch time
         *
         * @return the value of timestamp - epoch time
         */
        public long getTimestamp() {
            return timestamp;
        }

        /**
         * Set the value of timestamp - epoch time
         *
         * @param timestamp new value of timestamp - epoch time
         */
        public void setTimestamp(long timestamp) {
            this.timestamp = timestamp;
        }
        /**
         * indicates whether or not this item should be updated.
         */
        private boolean needsUpdate = true;

        /**
         * Get the value of needsUpdate
         *
         * @return the value of needsUpdate
         */
        public boolean getNeedsUpdate() {
            return needsUpdate;
        }

        /**
         * Set the value of needsUpdate
         *
         * @param needsUpdate new value of needsUpdate
         */
        public void setNeedsUpdate(boolean needsUpdate) {
            this.needsUpdate = needsUpdate;
        }
    }
}
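
For illustration only: how the updater fills in one of these entries; the timestamp
literal is arbitrary.

    NvdCveUrl modified = new NvdCveUrl();
    modified.setId("modified");
    modified.setUrl(Settings.getString(Settings.KEYS.CVE_MODIFIED_URL));
    modified.setTimestamp(1353403800000L); // epoch millis, always ending in 000
    modified.setNeedsUpdate(false);        // every other entry defaults to true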
@@ -20,6 +20,7 @@ package org.codesecure.dependencycheck.data.nvdcve.xml;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.xml.bind.JAXBContext;
@@ -27,8 +28,9 @@ import javax.xml.bind.JAXBException;
import javax.xml.bind.Marshaller;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.index.Term;
import org.codesecure.dependencycheck.data.lucene.LuceneUtils;
import org.codesecure.dependencycheck.data.nvdcve.generated.VulnerabilityType;
@@ -55,7 +57,12 @@ public class Indexer extends Index implements EntrySaveDelegate {
     */
    public void saveEntry(VulnerabilityType vulnerability) throws CorruptIndexException, IOException {
        try {
        Document doc = convertEntryToDoc(vulnerability);
        Document doc = null;
        try {
            doc = convertEntryToDoc(vulnerability);
        } catch (UnsupportedEncodingException ex) {
            Logger.getLogger(Indexer.class.getName()).log(Level.SEVERE, null, ex);
        }

        if (doc == null) {
            return;
@@ -74,8 +81,9 @@ public class Indexer extends Index implements EntrySaveDelegate {
     * @param vulnerability a VULNERABLE_CPE Entry.
     * @return a Lucene Document containing a VULNERABLE_CPE Entry.
     * @throws JAXBException is thrown when there is a JAXBException.
     * @throws UnsupportedEncodingException if the system doesn't support utf-8
     */
    protected Document convertEntryToDoc(VulnerabilityType vulnerability) throws JAXBException {
    protected Document convertEntryToDoc(VulnerabilityType vulnerability) throws JAXBException, UnsupportedEncodingException {
        boolean hasApplication = false;
        Document doc = new Document();

@@ -101,14 +109,11 @@ public class Indexer extends Index implements EntrySaveDelegate {
            return null;
        }

        Field name = new Field(Fields.CVE_ID, vulnerability.getId(), Field.Store.NO, Field.Index.ANALYZED);
        name.setIndexOptions(IndexOptions.DOCS_ONLY);
        Field name = new StringField(Fields.CVE_ID, vulnerability.getId(), Field.Store.NO);
        doc.add(name);

        Field description = new Field(Fields.DESCRIPTION, vulnerability.getSummary(), Field.Store.NO, Field.Index.ANALYZED);
        description.setIndexOptions(IndexOptions.DOCS_ONLY);
        doc.add(description);

//        Field description = new Field(Fields.DESCRIPTION, vulnerability.getSummary(), Field.Store.NO, Field.Index.ANALYZED);
//        doc.add(description);

        JAXBContext context = JAXBContext.newInstance("org.codesecure.dependencycheck.data.nvdcve.generated");

@@ -119,7 +124,7 @@ public class Indexer extends Index implements EntrySaveDelegate {

        m.marshal(vulnerability, out);

        Field xml = new Field(Fields.XML, out.toString(), Field.Store.YES, Field.Index.NO);
        Field xml = new StoredField(Fields.XML, out.toString("UTF-8"));
        doc.add(xml);

        return doc;
@@ -141,8 +146,7 @@ public class Indexer extends Index implements EntrySaveDelegate {
    }

    private void addVulnerableCpe(String cpe, Document doc) {
        Field vulnerable = new Field(Fields.VULNERABLE_CPE, cpe, Field.Store.NO, Field.Index.ANALYZED);
        vulnerable.setIndexOptions(IndexOptions.DOCS_ONLY);
        Field vulnerable = new StringField(Fields.VULNERABLE_CPE, cpe, Field.Store.NO);
        doc.add(vulnerable);
    }
}
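
A condensed sketch of the field-construction change applied throughout this hunk
(variables such as vulnerability and out come from the surrounding methods); this is
for illustration only and mirrors the lines shown above:

    // Lucene 3.x style (removed):
    Field cveId = new Field(Fields.CVE_ID, vulnerability.getId(), Field.Store.NO, Field.Index.ANALYZED);
    cveId.setIndexOptions(IndexOptions.DOCS_ONLY);
    // Lucene 4.0 style (added): StringField is indexed but not tokenized,
    // StoredField is stored only and never indexed.
    Field cveId40 = new StringField(Fields.CVE_ID, vulnerability.getId(), Field.Store.NO);
    Field xml40 = new StoredField(Fields.XML, out.toString("UTF-8"));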

@@ -1,4 +1,4 @@
package org.codesecure.dependencycheck.data.nvdcve;
package org.codesecure.dependencycheck.data.nvdcve.xml;
/*
 * This file is part of DependencyCheck.
 *
@@ -20,17 +20,22 @@ package org.codesecure.dependencycheck.data.nvdcve.xml;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.UnsupportedEncodingException;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.Term;
import org.codesecure.dependencycheck.data.cpe.Entry;
import org.codesecure.dependencycheck.data.nvdcve.Fields;
import org.codesecure.dependencycheck.data.nvdcve.Index;

@@ -40,6 +45,19 @@ import org.codesecure.dependencycheck.data.nvdcve.Index;
 */
public class NvdCveParser extends Index {

    //HACK - this has initially been placed here as a hack because not all
    // of the CPEs listed in the NVD CVE are actually in the CPE xml file
    // hosted by NIST.
    private org.codesecure.dependencycheck.data.cpe.xml.Indexer cpeIndexer = null;

    /**
     * Adds the CPE Index to add additional CPEs found by parsing the NVD CVE.
     * @param indexer the CPE Indexer to write new CPEs into.
     */
    public void setCPEIndexer(org.codesecure.dependencycheck.data.cpe.xml.Indexer indexer) {
        this.cpeIndexer = indexer;
    }

    /**
     * Parses an NVD CVE xml file using a buffered reader. This
     * method may be more fragile than using a partial-unmarshalling SAX
@@ -49,14 +67,15 @@ public class NvdCveParser extends Index {
     * @param file the reference to the NVD CVE file
     */
    public void parse(File file) {
        FileReader fr = null;
        InputStreamReader fr = null;
        BufferedReader br = null;
        Pattern rxEntry = Pattern.compile("^\\s*<entry\\s*id\\=\\\"([^\\\"]+)\\\".*$");
        Pattern rxEntryEnd = Pattern.compile("^\\s*</entry>.*$");
        Pattern rxFact = Pattern.compile("^\\s*<cpe\\-lang\\:fact\\-ref name=\\\"([^\\\"]+).*$");
        Pattern rxSummary = Pattern.compile("^\\s*<vuln:summary>([^\\<]+).*$");
        //Pattern rxSummary = Pattern.compile("^\\s*<vuln:summary>([^\\<]+).*$");
        try {
            fr = new FileReader(file);

            fr = new InputStreamReader(new FileInputStream(file), "UTF-8");
            br = new BufferedReader(fr);
            StringBuilder sb = new StringBuilder(7000);
            String str = null;
@@ -74,9 +93,15 @@ public class NvdCveParser extends Index {
                //facts occur more often, do them first.
                Matcher matcherFact = rxFact.matcher(str);
                if (matcherFact.matches()) {
                    String cpe = matcherFact.group(1);
                    String cpe = matcherFact.group(1).trim();
                    if (cpe != null && cpe.startsWith("cpe:/a:")) {
                        skipEntry = false;

                        //TODO deal with other possible :-: scenarios. do we need to be concerned about those?
                        if (cpe.endsWith(":-")) {
                            cpe = cpe.substring(0, cpe.length() - 2);
                        }

                        addVulnerableCpe(cpe, doc);
                    }
                    continue;
@@ -105,23 +130,21 @@ public class NvdCveParser extends Index {
                    sb.append("id=\"").append(id).append("\">");
                    //sb.append(str); //need to do the above to get the correct schema generated from files.

                    Field name = new Field(Fields.CVE_ID, id, Field.Store.NO, Field.Index.ANALYZED);
                    name.setIndexOptions(IndexOptions.DOCS_ONLY);
                    Field name = new StringField(Fields.CVE_ID, id, Field.Store.NO);
                    doc.add(name);
                    continue;
                }
                Matcher matcherSummary = rxSummary.matcher(str);
                if (matcherSummary.matches()) {
                    String summary = matcherSummary.group(1);
                    Field description = new Field(Fields.DESCRIPTION, summary, Field.Store.NO, Field.Index.ANALYZED);
                    description.setIndexOptions(IndexOptions.DOCS_ONLY);
                    doc.add(description);
                    continue;
                }
//                Matcher matcherSummary = rxSummary.matcher(str);
//                if (matcherSummary.matches()) {
//                    String summary = matcherSummary.group(1);
//                    Field description = new Field(Fields.DESCRIPTION, summary, Field.Store.NO);
//                    doc.add(description);
//                    continue;
//                }

                if (matcherEntryEnd.matches()) {
                    sb.append("</vulnerabilityType>");
                    Field xml = new Field(Fields.XML, sb.toString(), Field.Store.YES, Field.Index.NO);
                    Field xml = new StoredField(Fields.XML, sb.toString());
                    doc.add(xml);

                    if (!skipEntry) {
@@ -146,7 +169,9 @@ public class NvdCveParser extends Index {
            Logger.getLogger(NvdCveParser.class.getName()).log(Level.SEVERE, null, ex);
        } finally {
            try {
                fr.close();
                if (fr != null) {
                    fr.close();
                }
            } catch (IOException ex) {
                Logger.getLogger(NvdCveParser.class.getName()).log(Level.SEVERE, null, ex);
            }
@@ -164,10 +189,24 @@ public class NvdCveParser extends Index {
     * Adds a CPE to the Lucene Document
     * @param cpe a string representing a CPE
     * @param doc a lucene document
     * @throws CorruptIndexException is thrown if the CPE Index is corrupt
     * @throws IOException is thrown if there is an IO Exception while writing to the CPE Index
     */
    private void addVulnerableCpe(String cpe, Document doc) {
        Field vulnerable = new Field(Fields.VULNERABLE_CPE, cpe, Field.Store.NO, Field.Index.ANALYZED);
        vulnerable.setIndexOptions(IndexOptions.DOCS_ONLY);
    private void addVulnerableCpe(String cpe, Document doc) throws CorruptIndexException, IOException {
        Field vulnerable = new StringField(Fields.VULNERABLE_CPE, cpe, Field.Store.NO);
        doc.add(vulnerable);

        //HACK - this has initially been placed here as a hack because not all
        // of the CPEs listed in the NVD CVE are actually in the CPE xml file
        // hosted by NIST.
        Entry cpeEntry = new Entry();
        try {
            cpeEntry.parseName(cpe);
        } catch (UnsupportedEncodingException ex) {
            Logger.getLogger(NvdCveParser.class.getName()).log(Level.SEVERE, null, ex);
        }
        if (cpeIndexer != null) {
            cpeIndexer.saveEntry(cpeEntry);
        }
    }
}
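
For illustration only: the wiring that the new setCPEIndexer hook enables, assuming an
already-opened org.codesecure.dependencycheck.data.cpe.xml.Indexer named cpeIndexer and
a hypothetical file name.

    NvdCveParser parser = new NvdCveParser();
    parser.setCPEIndexer(cpeIndexer); // CPEs seen only in the NVD CVE feed get added to the CPE index too
    parser.parse(new File("nvdcve-2.0-2012.xml"));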

@@ -19,7 +19,7 @@ package org.codesecure.dependencycheck.dependency;
 */

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.List;
@@ -238,11 +238,10 @@ public class Dependency {
     *
     * @param type the type of identifier (such as CPE).
     * @param value the value of the identifier.
     * @param title the title of the identifier.
     * @param url the URL of the identifier.
     */
    public void addIdentifier(String type, String value, String title, String url) {
        Identifier i = new Identifier(type, value, title, url);
    public void addIdentifier(String type, String value, String url) {
        Identifier i = new Identifier(type, value, url);
        this.identifiers.add(i);
    }

@@ -377,15 +376,13 @@ public class Dependency {
            return false;
        }

        String fnd = str.toLowerCase();

        if (vendorEvidence.containsUsedString(str)) {
            return true;
        }
        if (productEvidence.containsUsedString(str)) {
            return true;
        }
        if (versionEvidence.containsUsedString(fnd)) {
        if (versionEvidence.containsUsedString(str)) {
            return true;
        }
        return false;
@@ -419,7 +416,7 @@ public class Dependency {
        try {
            md5 = Checksum.getMD5Checksum(file);
            sha1 = Checksum.getSHA1Checksum(file);
        } catch (FileNotFoundException ex) {
        } catch (IOException ex) {
            Logger.getLogger(Dependency.class.getName()).log(Level.SEVERE, null, ex);
        } catch (NoSuchAlgorithmException ex) {
            Logger.getLogger(Dependency.class.getName()).log(Level.SEVERE, null, ex);

@@ -184,7 +184,7 @@ public class EvidenceCollection implements Iterable<Evidence> {
        text = text.toLowerCase();

        for (Evidence e : this.list) {
            if (e.used && e.value.contains(text)) {
            if (e.used && e.value.toLowerCase().contains(text)) {
                return true;
            }
        }

@@ -29,13 +29,11 @@ public class Identifier {
     *
     * @param type the identifier type.
     * @param value the identifier value.
     * @param title the identifier title.
     * @param url the identifier url.
     */
    Identifier(String type, String value, String title, String url) {
    Identifier(String type, String value, String url) {
        this.type = type;
        this.value = value;
        this.title = title;
        this.url = url;
    }

@@ -44,12 +42,11 @@ public class Identifier {
     *
     * @param type the identifier type.
     * @param value the identifier value.
     * @param title the identifier title.
     * @param url the identifier url.
     * @param description the description of the identifier.
     */
    Identifier(String type, String value, String title, String url, String description) {
        this(type, value, title, url);
    Identifier(String type, String value, String url, String description) {
        this(type, value, url);
        this.description = description;
    }
    /**
@@ -74,28 +71,7 @@ public class Identifier {
    public void setValue(String value) {
        this.value = value;
    }
    /**
     * The title of the identifier
     */
    protected String title;

    /**
     * Get the value of title
     *
     * @return the value of title
     */
    public String getTitle() {
        return title;
    }

    /**
     * Set the value of title
     *
     * @param title new value of title
     */
    public void setTitle(String title) {
        this.title = title;
    }
    /**
     * The url for the identifier
     */

@@ -18,15 +18,16 @@ package org.codesecure.dependencycheck.reporting;
 * Copyright (c) 2012 Jeremy Long. All Rights Reserved.
 */

import java.io.BufferedWriter;
import java.io.FileInputStream;
import java.io.File;
import java.io.FileWriter;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.HashMap;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.util.List;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.apache.velocity.app.VelocityEngine;
@@ -35,94 +36,139 @@ import org.apache.velocity.runtime.RuntimeConstants;
import org.apache.velocity.runtime.resource.loader.ClasspathResourceLoader;
import org.apache.velocity.tools.ToolManager;
import org.apache.velocity.tools.config.EasyFactoryConfiguration;
import org.codesecure.dependencycheck.analyzer.Analyzer;
import org.codesecure.dependencycheck.dependency.Dependency;

/**
 * The ReportGenerator is used to, as the name implies, generate reports. Internally
 * the generator uses the Velocity Templating Engine. The ReportGenerator exposes
 * a list of Dependencies to the template when generating the report.
 *
 * @author Jeremy Long (jeremy.long@gmail.com)
 */
public class ReportGenerator {

    /**
     * The Velocity Engine.
     */
    private VelocityEngine engine = null;
    /**
     * The Velocity Engine Context.
     */
    private Context context = null;

    /**
     * Constructs a new ReportGenerator.
     *
     * @param applicationName the application name being analyzed
     * @param dependencies the list of dependencies
     * @param analyzers the list of analyzers used.
     */
    public ReportGenerator(String applicationName, List<Dependency> dependencies, List<Analyzer> analyzers) {
        engine = createVelocityEngine();
        context = createContext();

        engine.init();

        context.put("applicationName", applicationName);
        context.put("dependencies", dependencies);
        context.put("analyzers", analyzers);
    }

    /**
     * Creates a new Velocity Engine.
     * @return a velocity engine.
     */
    private VelocityEngine createVelocityEngine() {
        VelocityEngine ve = new VelocityEngine();
        ve.setProperty(RuntimeConstants.RESOURCE_LOADER, "classpath");
        ve.setProperty("classpath.resource.loader.class", ClasspathResourceLoader.class.getName());
        return ve;
    }

    /**
     * Creates a new Velocity Context initialized with escape and date tools.
     * @return a Velocity Context.
     */
    private Context createContext() {
        ToolManager manager = new ToolManager();
        Context c = manager.createContext();
        EasyFactoryConfiguration config = new EasyFactoryConfiguration();
        config.addDefaultTools();
        config.toolbox("application").tool("esc", "org.apache.velocity.tools.generic.EscapeTool").tool("org.apache.velocity.tools.generic.DateTool");
        manager.configure(config);
        return c;
    }

    /**
     * Generates the Dependency Reports for the identified dependencies.
     *
     * @param outputDir the path where the reports should be written.
     * @param applicationName the name of the application that was scanned.
     * @param dependencies a list of dependencies to include in the report.
     * @throws IOException is thrown when the template file does not exist.
     * @throws Exception is thrown if there is an error writing out the
     * reports.
     */
    public void generateReports(String outputDir, String applicationName, List<Dependency> dependencies) throws IOException, Exception {

        Map<String, Object> properties = new HashMap<String, Object>();
        properties.put("dependencies", dependencies);
        properties.put("applicationName", applicationName);

        String reportName = applicationName.replaceAll("[^a-zA-Z0-9-_ \\.]+", "");
        String filename = outputDir + File.separatorChar + reportName;
        generateReport("HtmlReport", filename + ".html", properties);
        //generateReport("XmlReport",filename + ".xml",properties);

    public void generateReports(String outputDir) throws IOException, Exception {
        generateReport("HtmlReport", outputDir + File.separator + "DependencyCheck-Report.html");
        //generateReport("XmlReport", outputDir + File.separator + "DependencyCheck-Report.xml");
    }

    /**
     * much of this code is from
     * http://stackoverflow.com/questions/2931516/loading-velocity-template-inside-a-jar-file
     * Generates a report from a given Velocity Template. The template name
     * provided can be the name of a template contained in the jar file, such as
     * 'XmlReport' or 'HtmlReport', or the template name can be the path to a template file.
     *
     * @param templateName the name of the template to load.
     * @param outFileName The filename and path to write the report to.
     * @param properties a map of properties to load into the velocity context.
     * @param outFileName the filename and path to write the report to.
     * @throws IOException is thrown when the template file does not exist.
     * @throws Exception is thrown when an exception occurs.
     */
    protected void generateReport(String templateName, String outFileName,
            Map<String, Object> properties) throws IOException, Exception {

        VelocityEngine ve = new VelocityEngine();
        ve.setProperty(RuntimeConstants.RESOURCE_LOADER, "classpath");
        ve.setProperty("classpath.resource.loader.class", ClasspathResourceLoader.class.getName());

        ToolManager manager = new ToolManager();
        Context context = manager.createContext();
        EasyFactoryConfiguration config = new EasyFactoryConfiguration();
        config.addDefaultTools();
        config.toolbox("application").tool("esc", "org.apache.velocity.tools.generic.EscapeTool").tool("org.apache.velocity.tools.generic.DateTool");

        manager.configure(config);

        ve.init();

        final String templatePath = "templates/" + templateName + ".vsl";
        InputStream input = this.getClass().getClassLoader().getResourceAsStream(templatePath);
    public void generateReport(String templateName, String outFileName) throws IOException, Exception {
        InputStream input = null;
        String templatePath = null;
        File f = new File(templateName);
        if (f.exists() && f.isFile()) {
            try {
                templatePath = templateName;
                input = new FileInputStream(f);
            } catch (FileNotFoundException ex) {
                Logger.getLogger(ReportGenerator.class.getName()).log(Level.SEVERE, null, ex);
            }
        } else {
            templatePath = "templates/" + templateName + ".vsl";
            input = this.getClass().getClassLoader().getResourceAsStream(templatePath);
        }
        if (input == null) {
            throw new IOException("Template file doesn't exist");
        }

        InputStreamReader reader = new InputStreamReader(input);
        BufferedWriter writer = null;

        //VelocityContext context = new VelocityContext();

        //load the data into the context
        if (properties != null) {
            for (Map.Entry<String, Object> property : properties.entrySet()) {
                context.put(property.getKey(), property.getValue());
            }
        }
        InputStreamReader reader = new InputStreamReader(input, "UTF-8");
        OutputStreamWriter writer = null;
        OutputStream outputStream = null;

        try {
            writer = new BufferedWriter(new FileWriter(new File(outFileName)));
            outputStream = new FileOutputStream(outFileName);
            writer = new OutputStreamWriter(outputStream, "UTF-8");
            //writer = new BufferedWriter(oswriter);

            if (!ve.evaluate(context, writer, templatePath, reader)) {
            if (!engine.evaluate(context, writer, templatePath, reader)) {
                throw new Exception("Failed to convert the template into html.");
            }
            writer.flush();
        } finally {
            try {
                writer.close();
            } catch (Exception ex) {
                Logger.getLogger(ReportGenerator.class.getName()).log(Level.FINEST, null, ex);
            if (writer != null) {
                try {
                    writer.close();
                } catch (Exception ex) {
                    Logger.getLogger(ReportGenerator.class.getName()).log(Level.FINEST, null, ex);
                }
            }
            if (outputStream != null) {
                try {
                    outputStream.close();
                } catch (Exception ex) {
                    Logger.getLogger(ReportGenerator.class.getName()).log(Level.FINEST, null, ex);
                }
            }
            try {
                reader.close();

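For illustration only: a usage sketch of the refactored API, mirroring the updated
EngineIntegrationTest further below; engine stands in for an analyzed Engine instance.

    ReportGenerator rg = new ReportGenerator("Testing", engine.getDependencies(), engine.getAnalyzers());
    rg.generateReports("./target/"); // writes ./target/DependencyCheck-Report.html
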
@@ -2,7 +2,6 @@ package org.codesecure.dependencycheck.utils;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.security.MessageDigest;
@@ -30,29 +29,30 @@ public class Checksum {
     * @param algorithm the algorithm to use to calculate the checksum
     * @param file the file to calculate the checksum for
     * @return the checksum
     * @throws FileNotFoundException when the file does not exist
     * @throws IOException when the file does not exist
     * @throws NoSuchAlgorithmException when an algorithm is specified that does
     * not exist
     */
    public static byte[] getChecksum(String algorithm, File file) throws FileNotFoundException, NoSuchAlgorithmException {
        InputStream fis = new FileInputStream(file);
    public static byte[] getChecksum(String algorithm, File file) throws NoSuchAlgorithmException, IOException {
        InputStream fis = null;
        byte[] buffer = new byte[1024];
        MessageDigest complete = MessageDigest.getInstance(algorithm);
        int numRead;
        try {
            fis = new FileInputStream(file);
            do {
                numRead = fis.read(buffer);
                if (numRead > 0) {
                    complete.update(buffer, 0, numRead);
                }
            } while (numRead != -1);
        } catch (IOException ex) {
            Logger.getLogger(Checksum.class.getName()).log(Level.SEVERE, null, ex);
        } finally {
            try {
                fis.close();
            } catch (IOException ex) {
                Logger.getLogger(Checksum.class.getName()).log(Level.SEVERE, null, ex);
            if (fis != null) {
                try {
                    fis.close();
                } catch (IOException ex) {
                    Logger.getLogger(Checksum.class.getName()).log(Level.SEVERE, null, ex);
                }
            }
        }
        return complete.digest();
@@ -63,10 +63,10 @@ public class Checksum {
     *
     * @param file the file to generate the MD5 checksum
     * @return the hex representation of the MD5 hash
     * @throws FileNotFoundException when the file passed in does not exist
     * @throws IOException when the file passed in does not exist
     * @throws NoSuchAlgorithmException when the MD5 algorithm is not available
     */
    public static String getMD5Checksum(File file) throws FileNotFoundException, NoSuchAlgorithmException {
    public static String getMD5Checksum(File file) throws IOException, NoSuchAlgorithmException {
        byte[] b = getChecksum("MD5", file);
        return getHex(b);
    }
@@ -76,10 +76,10 @@ public class Checksum {
     *
     * @param file the file to generate the MD5 checksum
     * @return the hex representation of the SHA1 hash
     * @throws FileNotFoundException when the file passed in does not exist
     * @throws IOException when the file passed in does not exist
     * @throws NoSuchAlgorithmException when the SHA1 algorithm is not available
     */
    public static String getSHA1Checksum(File file) throws FileNotFoundException, NoSuchAlgorithmException {
    public static String getSHA1Checksum(File file) throws IOException, NoSuchAlgorithmException {
        byte[] b = getChecksum("SHA1", file);
        return getHex(b);
    }
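
For illustration only (exception handling omitted; the path is hypothetical): both
helpers now surface IOException rather than FileNotFoundException.

    File jar = new File("./lib/some-library.jar");
    String md5 = Checksum.getMD5Checksum(jar);
    String sha1 = Checksum.getSHA1Checksum(jar);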

@@ -86,9 +86,6 @@ public final class CliParser {
     * SCAN or CPE command line arguments that does not exist.
     */
    private void validateArgs() throws FileNotFoundException, ParseException {
        if (isLoadCPE()) {
            validatePathExists(getCpeFile());
        }
        if (isRunScan()) {
            validatePathExists(getScanFiles());
            if (!line.hasOption(ArgumentName.OUT)) {
@@ -171,10 +168,6 @@ public final class CliParser {
                .withDescription("the path to scan - this option can be specified multiple times.")
                .create(ArgumentName.SCAN_SHORT);

        Option load = OptionBuilder.withArgName("file").hasArg().withLongOpt(ArgumentName.CPE)
                .withDescription("load the CPE xml file.")
                .create(ArgumentName.CPE_SHORT);

        Option props = OptionBuilder.withArgName("file").hasArg().withLongOpt(ArgumentName.PROP)
                .withDescription("a property file to load.")
                .create(ArgumentName.PROP_SHORT);
@@ -187,7 +180,6 @@ public final class CliParser {

        OptionGroup og = new OptionGroup();
        og.addOption(path);
        og.addOption(load);

        Options opts = new Options();
        opts.addOptionGroup(og);
@@ -219,15 +211,6 @@ public final class CliParser {
        return (line != null) ? line.hasOption(ArgumentName.HELP) : false;
    }

    /**
     * Determines if the 'cpe' command line argument was passed in.
     *
     * @return whether or not the 'cpe' command line argument was passed in
     */
    public boolean isLoadCPE() {
        return (line != null) ? isValid && line.hasOption(ArgumentName.CPE) : false;
    }

    /**
     * Determines if the 'scan' command line argument was passed in.
     *
@@ -267,16 +250,6 @@ public final class CliParser {
        }
    }

    /**
     * Retrieves the file command line parameter(s) specified for the 'cpe'
     * argument.
     *
     * @return the file paths specified on the command line
     */
    public String getCpeFile() {
        return line.getOptionValue(ArgumentName.CPE);
    }

    /**
     * Retrieves the file command line parameter(s) specified for the 'scan'
     * argument.
@@ -326,7 +299,7 @@ public final class CliParser {
     * @return if auto-update is allowed.
     */
    public boolean isAutoUpdate() {
        return (line != null) ? !line.hasOption(ArgumentName.DISABLE_AUTO_UPDATE) : false;
        return (line != null) ? !line.hasOption(ArgumentName.DISABLE_AUTO_UPDATE) : true;
    }

    /**
@@ -343,16 +316,6 @@ public final class CliParser {
     * The short CLI argument name specifying the directory/file to scan
     */
    public static final String SCAN_SHORT = "s";
    /**
     * The long CLI argument name specifying the path to the CPE.XML file to
     * import
     */
    public static final String CPE = "cpe";
    /**
     * The short CLI argument name specifying the path to the CPE.XML file to
     * import
     */
    public static final String CPE_SHORT = "c";
    /**
     * The long CLI argument name specifying that the CPE/CVE/etc. data
     * should not be automatically updated.

@@ -128,8 +128,8 @@ public class Downloader {
        String encoding = conn.getContentEncoding();

        BufferedOutputStream writer = null;
        InputStream reader = null;
        try {
            InputStream reader;
            if (unzip || (encoding != null && "gzip".equalsIgnoreCase(encoding))) {
                reader = new GZIPInputStream(conn.getInputStream());
            } else if (encoding != null && "deflate".equalsIgnoreCase(encoding)) {
@@ -147,6 +147,7 @@ public class Downloader {
        } catch (Exception ex) {
            throw new DownloadFailedException("Error saving downloaded file.", ex);
        } finally {
            if (writer != null) {
                try {
                    writer.close();
                    writer = null;
@@ -154,6 +155,17 @@ public class Downloader {
                    Logger.getLogger(Downloader.class.getName()).log(Level.FINEST,
                            "Error closing the writter in Downloader.", ex);
                }
            }
            if (reader != null) {
                try {
                    reader.close();
                    reader = null;
                } catch (Exception ex) {

                    Logger.getLogger(Downloader.class.getName()).log(Level.FINEST,
                            "Error closing the reader in Downloader.", ex);
                }
            }
            try {
                conn.disconnect();
            } finally {

@@ -18,6 +18,10 @@ package org.codesecure.dependencycheck.utils;
 * Copyright (c) 2012 Jeremy Long. All Rights Reserved.
 */

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;

/**
 * A collection of utilities for processing information about files.
 *
@@ -45,4 +49,22 @@ public class FileUtils {
        }
        return ret;
    }

    /**
     * Deletes a file. If the File is a directory it will recursively delete
     * the contents.
     *
     * @param file the File to delete
     * @throws IOException is thrown if the file could not be deleted
     */
    public static void delete(File file) throws IOException {
        if (file.isDirectory()) {
            for (File c : file.listFiles()) {
                delete(c);
            }
        }
        if (!file.delete()) {
            throw new FileNotFoundException("Failed to delete file: " + file);
        }
    }
}
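
For illustration only (the path is hypothetical): the recursive delete as used by
IndexUpdater earlier in this change set.

    File oldIndex = new File("target/data/cpe");
    FileUtils.delete(oldIndex); // deletes children first; throws an IOException if anything cannot be removed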

@@ -36,7 +36,7 @@ public class Settings {
    /**
     * The collection of keys used within the properties file.
     */
    public abstract class KEYS {
    public static class KEYS {

        /**
         * private constructor because this is a "utility" class containing constants
         */
        private KEYS() {
            //do nothing
        }

        /**
         * The properties key for the path where the CPE Lucene Index will be

@@ -1,2 +1 @@
org.codesecure.dependencycheck.data.nvdcve.Index
org.codesecure.dependencycheck.data.cpe.Index
org.codesecure.dependencycheck.data.nvdcve.xml.IndexUpdater
@@ -1,4 +1,5 @@
handlers=java.util.logging.ConsoleHandler, java.util.logging.FileHandler
handlers=java.util.logging.ConsoleHandler
#, java.util.logging.FileHandler

# logging levels
# FINEST, FINER, FINE, CONFIG, INFO, WARNING and SEVERE.
@@ -9,8 +10,8 @@ java.util.logging.ConsoleHandler.level=WARNING
org.codesecure.dependencycheck.data.nvdcve.xml

# Configure the FileHandler.
java.util.logging.FileHandler.formatter=java.util.logging.SimpleFormatter
java.util.logging.FileHandler.level=FINEST
#java.util.logging.FileHandler.formatter=java.util.logging.SimpleFormatter
#java.util.logging.FileHandler.level=FINEST

# The following special tokens can be used in the pattern property
# which specifies the location and name of the log file.
@@ -20,4 +21,4 @@ java.util.logging.FileHandler.level=FINEST
# %g - generation number for rotating logs
# %u - unique number to avoid conflicts
# FileHandler writes to %h/demo0.log by default.
java.util.logging.FileHandler.pattern=./logs/DependencyCheck%u.log
#java.util.logging.FileHandler.pattern=./logs/DependencyCheck%u.log
@@ -34,7 +34,7 @@ Copyright (c) 2012 Jeremy Long. All Rights Reserved.
    </script>
    <script type="text/javascript">
        $(document).ready(function() {
            $(".expandablesubsection").click(function (e) {
            $(".expandable").click(function (e) {
                e = e || window.event;
                var h = e.target || e.srcElement;
                var content = "#content" + h.id.substr(6);
@@ -64,7 +64,7 @@ Copyright (c) 2012 Jeremy Long. All Rights Reserved.
        .hidden {
            display: none;
        }

        .exandable {}
        .expandablesubsection {
            cursor: pointer;
            /*background-image: url(img/plus.gif);*/
@@ -285,8 +285,12 @@ Copyright (c) 2012 Jeremy Long. All Rights Reserved.
    <div class="sectioncontent">Report Generated On: $date<br/><br/>
        Dependencies Scanned: $dependencies.size()<br/><br/>
        <div class="indent">
            #foreach($dependency in $dependencies)
                <a href="#$esc.html($esc.url($dependency.FilePath))">$esc.html($dependency.FileName)</a><br/>
            #foreach($dependency in $dependencies)
                #if($dependency.getVulnerabilities().size()>0)
                    <a href="#$esc.html($esc.url($dependency.FilePath))">$esc.html($dependency.FileName)</a> <b style="color:#ff0000;">&#8226;</b><br/>
                #else
                    <a href="#$esc.html($esc.url($dependency.FilePath))">$esc.html($dependency.FileName)</a><br/>
                #end
            #end
        </div>
        <h2>Dependencies</h2>
@@ -308,7 +312,7 @@ Copyright (c) 2012 Jeremy Long. All Rights Reserved.

        #if ( $dependency.analysisExceptions.size() != 0 )
            #set($cnt=$cnt+1)
            <h4 id="header$cnt" class="subsectionheader expandablesubsection red">Exceptions Occurred During Analysis</h4>
            <h4 id="header$cnt" class="subsectionheader expandable expandablesubsection red">Exceptions Occurred During Analysis</h4>
            <div id="content$cnt" class="subsectioncontent standardsubsection hidden">
                <ul>
                    #foreach($ex in $dependency.analysisExceptions)
@@ -336,7 +340,7 @@ Copyright (c) 2012 Jeremy Long. All Rights Reserved.
            </div>
        #end
        #set($cnt=$cnt+1)
        <h4 id="header$cnt" class="subsectionheader expandablesubsection white">Evidence</h4>
        <h4 id="header$cnt" class="subsectionheader expandable expandablesubsection white">Evidence</h4>
        <div id="content$cnt" class="subsectioncontent standardsubsection hidden">
            <table class="lined fullwidth" border="0">
                <tr><th class="left" style="width:10%;">Source</th><th class="left" style="width:20%;">Name</th><th class="left" style="width:70%;">Value</th></tr>
@@ -366,7 +370,7 @@ Copyright (c) 2012 Jeremy Long. All Rights Reserved.
            <ul>
                #foreach($id in $dependency.getIdentifiers())
                    ##yes, we are HTML Encoding the href. this is okay. We can't URL encode as we have to trust the analyzer here...
                    <li><b>$esc.html($id.type):</b> $esc.html($id.title) : <a href="$esc.html($id.url)" target="_blank">$esc.html($id.value)</a>
                    <li><b>$esc.html($id.type):</b> <a href="$esc.html($id.url)" target="_blank">$esc.html($id.value)</a>
                    #if( $id.descrription )
                        <br/>$esc.html($id.description)
                    #end
@@ -377,7 +381,7 @@ Copyright (c) 2012 Jeremy Long. All Rights Reserved.
        </div>
        #if($dependency.getVulnerabilities().size()>0)
            #set($cnt=$cnt+1)
            <h4 id="header$cnt" class="subsectionheader white">Published Vulnerabilities</h4>
            <h4 id="header$cnt" class="subsectionheader expandable collaspablesubsection white">Published Vulnerabilities</h4>
            <div id="content$cnt" class="subsectioncontent standardsubsection">
                #foreach($vuln in $dependency.getVulnerabilities())
                    <p><b><a target="_blank" href="http://web.nvd.nist.gov/view/vuln/detail?vulnId=$esc.url($vuln.name)">$esc.html($vuln.name)</a></b></p>

@@ -6,4 +6,4 @@
    <body>
        <menu ref="reports" />
    </body>
</project>
</project>
@@ -51,7 +51,8 @@ public class EngineIntegrationTest {
        instance.scan(path);
        assertTrue(instance.getDependencies().size() > 0);
        instance.analyzeDependencies();
        ReportGenerator rg = new ReportGenerator();
        rg.generateReports("./target/", "DependencyCheck", instance.getDependencies());
        ReportGenerator rg = new ReportGenerator("DependencyCheck",
                instance.getDependencies(), instance.getAnalyzers());
        rg.generateReports("./target/");
    }
}

@@ -63,7 +63,8 @@ public class JarAnalyzerTest {
        instance.analyze(result);
        boolean found = false;
        for (Evidence e : result.getProductEvidence()) {
            if (e.getName().equals("package-title") && e.getValue().equals("org.mortbay.http")) {
            if (e.getName().equalsIgnoreCase("package-title")
                    && e.getValue().equalsIgnoreCase("org.mortbay.http")) {
                found = true;
                break;
            }
@@ -72,7 +73,8 @@ public class JarAnalyzerTest {

        found = false;
        for (Evidence e : result.getVendorEvidence()) {
            if (e.getName().equals("implementation-url") && e.getValue().equals("http://jetty.mortbay.org")) {
            if (e.getName().equalsIgnoreCase("implementation-url")
                    && e.getValue().equalsIgnoreCase("http://jetty.mortbay.org")) {
                found = true;
                break;
            }
@@ -81,7 +83,8 @@ public class JarAnalyzerTest {

        found = false;
        for (Evidence e : result.getVersionEvidence()) {
            if (e.getName().equals("Implementation-Version") && e.getValue().equals("4.2.27")) {
            if (e.getName().equalsIgnoreCase("Implementation-Version")
                    && e.getValue().equalsIgnoreCase("4.2.27")) {
                found = true;
                break;
            }

@@ -10,7 +10,7 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryparser.classic.ParseException;
import org.codesecure.dependencycheck.dependency.Dependency;
import org.codesecure.dependencycheck.analyzer.JarAnalyzer;
import org.junit.Test;
@@ -98,50 +98,29 @@ public class CPEAnalyzerTest extends BaseIndexTestCase {
        JarAnalyzer jarAnalyzer = new JarAnalyzer();
        Dependency depends = new Dependency(file);
        jarAnalyzer.analyze(depends);


        File fileSpring = new File(this.getClass().getClassLoader().getResource("spring-core-2.5.5.jar").getPath());
        Dependency spring = new Dependency(fileSpring);
        jarAnalyzer.analyze(spring);

        CPEAnalyzer instance = new CPEAnalyzer();
        instance.open();
        String expResult = "cpe:/a:apache:struts:2.1.2";
        instance.determineCPE(depends);
        instance.determineCPE(spring);
        instance.close();
        assertTrue("Incorrect match", depends.getIdentifiers().size() == 1);
        assertTrue("Incorrect match", depends.getIdentifiers().get(0).getValue().equals(expResult));
    }

    /**
     * Test of searchCPE method, of class CPEAnalyzer.
     * @throws Exception is thrown when an exception occurs
     */
    @Test
    public void testSearchCPE_3args() throws Exception {
        System.out.println("searchCPE - 3 args");
        String vendor = "apache software foundation";
        String product = "struts 2 core";
        String version = "2.1.2";
        CPEAnalyzer instance = new CPEAnalyzer();
        instance.open();
        String expResult = "cpe:/a:apache:struts:2.1.2";
        List<Entry> result = instance.searchCPE(vendor, product, version);
        assertEquals(expResult, result.get(0).getName());

        vendor = "apache software foundation";
        product = "struts 2 core";
        version = "2.3.1.2";

        expResult = "cpe:/a:apache:struts:2.3.1.2";
        result = instance.searchCPE(vendor, product, version);
        assertEquals(expResult, result.get(0).getName());

        instance.close();
    }

    /**
     * Test of searchCPE method, of class CPEAnalyzer.
     * @throws Exception is thrown when an exception occurs
     */
    @Test
    public void testSearchCPE_5args() throws Exception {
        System.out.println("searchCPE - 5 args");
    public void testSearchCPE() throws Exception {
        System.out.println("searchCPE");
        String vendor = "apache software foundation";
        String product = "struts 2 core";
        String version = "2.1.2";

@@ -35,7 +35,7 @@ public class EntryTest extends TestCase {
        String name = "cpe:/a:apache:struts:1.1:rc2";

        Entry instance = new Entry();
        instance.setName(name);
        instance.parseName(name);

        assertEquals(name,instance.getName());
        assertEquals("apache", instance.getVendor());

@@ -46,8 +46,9 @@ public class IndexIntegrationTest extends BaseIndexTestCase {
    @Test
    public void testUpdate() throws Exception {
        System.out.println("update");
        Index instance = new Index();
        instance.update();
        //deprecated
        //Index instance = new Index();
        //instance.update();
    }

    /**
@@ -56,8 +57,9 @@ public class IndexIntegrationTest extends BaseIndexTestCase {
    @Test
    public void testUpdateNeeded() throws Exception {
        System.out.println("updateNeeded");
        Index instance = new Index();
        instance.updateNeeded();
        //deprecated
        //Index instance = new Index();
        //instance.updateNeeded();
        //if an exception is thrown this test fails. However, because it depends on the
        // order of the tests what this will return I am just testing for the exception.
        //assertTrue(expResult < result);

@@ -1,47 +0,0 @@
/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package org.codesecure.dependencycheck.data.cpe.xml;

import java.io.File;
import junit.framework.TestCase;
import org.codesecure.dependencycheck.data.cpe.xml.Importer;
import org.xml.sax.Attributes;

/**
 *
 * @author jeremy
 */
public class ImporterTest extends TestCase {

    public ImporterTest(String testName) {
        super(testName);
    }

    @Override
    protected void setUp() throws Exception {
        super.setUp();
    }

    @Override
    protected void tearDown() throws Exception {
        super.tearDown();
    }


    /**
     * Test of all methods within class CPEHandler.
     * @throws Exception is thrown when an exception occurs.
     */
    public void testHandler() throws Exception {
        System.out.println("importXML");

        File path = new File(this.getClass().getClassLoader().getResource("official-cpe-dictionary_v2.2.xml").getPath());

        Importer.importXML(path.getCanonicalPath());

    }


}
@@ -0,0 +1,133 @@
|
||||
package org.codesecure.dependencycheck.data.lucene;
|
||||
/*
|
||||
* This file is part of DependencyCheck.
|
||||
*
|
||||
* DependencyCheck is free software: you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation, either version 3 of the License, or (at your option) any
|
||||
* later version.
|
||||
*
|
||||
* DependencyCheck is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
|
||||
* details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* DependencyCheck. If not, see http://www.gnu.org/licenses/.
|
||||
*
|
||||
* Copyright (c) 2012 Jeremy Long. All Rights Reserved.
|
||||
*/
|
||||
|
||||
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
|
||||
import java.util.Map;
|
||||
import java.util.HashMap;
|
||||
import org.apache.lucene.queryparser.classic.QueryParser;
|
||||
import org.apache.lucene.analysis.standard.StandardAnalyzer;
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.apache.lucene.search.TopScoreDocCollector;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.search.Query;
|
||||
import java.io.IOException;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.TextField;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.index.IndexWriterConfig;
|
||||
import org.apache.lucene.store.RAMDirectory;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.Version;
|
||||
import org.junit.After;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.Before;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
/**
 *
 * @author Jeremy Long (jeremy.long@gmail.com)
 */
public class FieldAnalyzerTest {

    public FieldAnalyzerTest() {
    }

    @BeforeClass
    public static void setUpClass() throws Exception {
    }

    @AfterClass
    public static void tearDownClass() throws Exception {
    }

    @Before
    public void setUp() {
    }

    @After
    public void tearDown() {
    }

    @Test
    public void testAnalyzers() throws Exception {

        Analyzer analyzer = new FieldAnalyzer(Version.LUCENE_40);
        Directory index = new RAMDirectory();

        String field1 = "product";
        String text1 = "springframework";

        String field2 = "vendor";
        String text2 = "springsource";

        createIndex(analyzer, index, field1, text1, field2, text2);

        //Analyzer searchingAnalyzer = new SearchFieldAnalyzer(Version.LUCENE_40);
        String querystr = "product:\"(Spring Framework Core)\" vendor:(SpringSource)";

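        // Query-time analysis: each field gets its own SearchFieldAnalyzer instance, wrapped in a
        // PerFieldAnalyzerWrapper (StandardAnalyzer is the fallback for unmapped fields) so the
        // QueryParser analyzes the product and vendor terms independently.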
        SearchFieldAnalyzer searchAnalyzerProduct = new SearchFieldAnalyzer(Version.LUCENE_40);
        SearchFieldAnalyzer searchAnalyzerVendor = new SearchFieldAnalyzer(Version.LUCENE_40);
        HashMap<String, Analyzer> map = new HashMap<String, Analyzer>();
        map.put(field1, searchAnalyzerProduct);
        map.put(field2, searchAnalyzerVendor);
        PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new StandardAnalyzer(Version.LUCENE_40), map);
        QueryParser parser = new QueryParser(Version.LUCENE_40, field1, wrapper);

        Query q = parser.parse(querystr);
        //System.out.println(q.toString());

        int hitsPerPage = 10;

        IndexReader reader = DirectoryReader.open(index);
        IndexSearcher searcher = new IndexSearcher(reader);
        TopScoreDocCollector collector = TopScoreDocCollector.create(hitsPerPage, true);
        searcher.search(q, collector);
        ScoreDoc[] hits = collector.topDocs().scoreDocs;

        assertEquals("Did not find 1 document?", 1, hits.length);

        searchAnalyzerProduct.clear(); //ensure we don't have anything left over from the previous search.
        searchAnalyzerVendor.clear();
        querystr = "product:(Apache Struts) vendor:(Apache)";
        Query q2 = parser.parse(querystr);
        //System.out.println(q2.toString());
        assertFalse("second parsing contains previousWord from the TokenPairConcatenatingFilter", q2.toString().contains("core"));
    }

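    // Helpers: build a single-document index in the supplied Directory using the index-time analyzer.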
    private void createIndex(Analyzer analyzer, Directory index, String field1, String text1, String field2, String text2) throws IOException {
        IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_40, analyzer);
        IndexWriter w = new IndexWriter(index, config);
        addDoc(w, field1, text1, field2, text2);
        w.close();
    }

    private static void addDoc(IndexWriter w, String field1, String text1, String field2, String text2) throws IOException {
        Document doc = new Document();
        doc.add(new TextField(field1, text1, Field.Store.YES));
        doc.add(new TextField(field2, text2, Field.Store.YES));
        w.addDocument(doc);
    }
}
@@ -2,19 +2,20 @@
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package org.codesecure.dependencycheck.data.nvdcve;
package org.codesecure.dependencycheck.data.nvdcve.xml;


import java.util.Map;
import org.codesecure.dependencycheck.data.nvdcve.BaseIndexTestCase;
import org.junit.*;

/**
 *
 *
 * @author Jeremy
 */
public class IndexIntegrationTest extends BaseIndexTestCase {
public class IndexUpdaterIntegrationTest extends BaseIndexTestCase {

    public IndexIntegrationTest(String testName) {
    public IndexUpdaterIntegrationTest(String testName) {
        super(testName);
    }

@@ -40,8 +41,8 @@ public class IndexIntegrationTest extends BaseIndexTestCase {
    @Test
    public void testRetrieveCurrentTimestampFromWeb() throws Exception {
        System.out.println("retrieveCurrentTimestampFromWeb");
        Index instance = new Index();
        Map<String, Index.NvdCveUrl> result = instance.retrieveCurrentTimestampsFromWeb();
        IndexUpdater instance = new IndexUpdater();
        Map<String, IndexUpdater.NvdCveUrl> result = instance.retrieveCurrentTimestampsFromWeb();
        assertEquals(12, result.size());
    }

@@ -51,7 +52,7 @@ public class IndexIntegrationTest extends BaseIndexTestCase {
    @Test
    public void testUpdate() throws Exception {
        System.out.println("update");
        Index instance = new Index();
        IndexUpdater instance = new IndexUpdater();
        instance.update();
    }

@@ -61,7 +62,7 @@ public class IndexIntegrationTest extends BaseIndexTestCase {
    @Test
    public void testUpdateNeeded() throws Exception {
        System.out.println("updateNeeded");
        Index instance = new Index();
        IndexUpdater instance = new IndexUpdater();
        instance.updateNeeded();
        //if an exception is thrown this test fails. However, because what this returns depends
        // on the order of the tests, I am just testing for the exception.
@@ -9,7 +9,6 @@ import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.apache.lucene.index.CorruptIndexException;
import org.codesecure.dependencycheck.data.nvdcve.InvalidDataException;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;

@@ -249,15 +249,13 @@ public class DependencyTest {
        System.out.println("addIdentifier");
        String type = "cpe";
        String value = "cpe:/a:apache:struts:2.1.2";
        String title = "Apache Struts 2.1.2";
        String url = "http://somewhere";
        Dependency instance = new Dependency();
        instance.addIdentifier(type, value, title, url);
        instance.addIdentifier(type, value, url);
        assertEquals(1, instance.getIdentifiers().size());
        Identifier i = instance.getIdentifiers().get(0);
        assertEquals(type, i.getType());
        assertEquals(value, i.getValue());
        assertEquals(title, i.getTitle());
        assertEquals(url, i.getUrl());
    }

@@ -5,7 +5,7 @@
package org.codesecure.dependencycheck.utils;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.security.NoSuchAlgorithmException;
import junit.framework.TestCase;
import org.junit.Test;
@@ -66,7 +66,7 @@ public class ChecksumTest extends TestCase {
        boolean exceptionThrown = false;
        try {
            byte[] result = Checksum.getChecksum(algorithm, file);
        } catch (FileNotFoundException ex) {
        } catch (IOException ex) {
            exceptionThrown = true;
        }
        assertTrue(exceptionThrown);

@@ -53,7 +53,6 @@ public class CliParserTest extends TestCase {
        assertFalse(instance.isGetVersion());
        assertFalse(instance.isGetHelp());
        assertFalse(instance.isRunScan());
        assertFalse(instance.isLoadCPE());
    }

    /**
@@ -73,7 +72,6 @@ public class CliParserTest extends TestCase {
        assertFalse(instance.isGetVersion());
        assertTrue(instance.isGetHelp());
        assertFalse(instance.isRunScan());
        assertFalse(instance.isLoadCPE());
    }

    /**
@@ -91,31 +89,6 @@ public class CliParserTest extends TestCase {
        assertTrue(instance.isGetVersion());
        assertFalse(instance.isGetHelp());
        assertFalse(instance.isRunScan());
        assertFalse(instance.isLoadCPE());

    }

    /**
     * Test of parse method with jar and cpe args, of class CliParser.
     * @throws Exception thrown when an exception occurs.
     */
    @Test
    public void testParse_scan_cpe() throws Exception {
        System.out.println("parse -cpe file -scan file");

        String[] args = {"-scan", "file", "-cpe", "file"};

        CliParser instance = new CliParser();
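        // -scan and -cpe appear to live in the same mutually exclusive option group, so parsing
        // both should raise the "already been selected" ParseException asserted below.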
        try {
            instance.parse(args);
        } catch (ParseException ex) {
            assertTrue(ex.getMessage().contains("an option from this group has already been selected"));
        }

        assertFalse(instance.isGetVersion());
        assertFalse(instance.isGetHelp());
        assertFalse(instance.isRunScan());
        assertFalse(instance.isLoadCPE());

    }

@@ -146,7 +119,6 @@ public class CliParserTest extends TestCase {
        assertFalse(instance.isGetVersion());
        assertFalse(instance.isGetHelp());
        assertFalse(instance.isRunScan());
        assertFalse(instance.isLoadCPE());
    }

    /**
@@ -170,8 +142,6 @@ public class CliParserTest extends TestCase {
        assertFalse(instance.isGetVersion());
        assertFalse(instance.isGetHelp());
        assertFalse(instance.isRunScan());
        assertFalse(instance.isLoadCPE());

    }

    /**
@@ -194,7 +164,6 @@ public class CliParserTest extends TestCase {
        assertFalse(instance.isGetVersion());
        assertFalse(instance.isGetHelp());
        assertFalse(instance.isRunScan());
        assertFalse(instance.isLoadCPE());
    }

    /**
@@ -215,78 +184,6 @@ public class CliParserTest extends TestCase {
        assertFalse(instance.isGetVersion());
        assertFalse(instance.isGetHelp());
        assertTrue(instance.isRunScan());
        assertFalse(instance.isLoadCPE());

    }

    /**
     * Test of parse method with cpe arg, of class CliParser.
     * @throws Exception thrown when an exception occurs.
     */
    @Test
    public void testParse_cpe() throws Exception {
        System.out.println("parse -cpe");

        String[] args = {"-cpe"};

        CliParser instance = new CliParser();

        try {
            instance.parse(args);
        } catch (ParseException ex) {
            assertTrue(ex.getMessage().contains("Missing argument"));
        }

        assertFalse(instance.isGetVersion());
        assertFalse(instance.isGetHelp());
        assertFalse(instance.isRunScan());
        assertFalse(instance.isLoadCPE());

    }

    /**
     * Test of parse method with jar arg, of class CliParser.
     * @throws Exception thrown when an exception occurs.
     */
    @Test
    public void testParse_cpe_unknownFile() throws Exception {
        System.out.println("parse -cpe cpe.that.does.not.exist");

        String[] args = {"-cpe", "cpe.that.does.not.exist"};

        CliParser instance = new CliParser();
        try {
            instance.parse(args);
        } catch (FileNotFoundException ex) {
            assertTrue(ex.getMessage().contains("Invalid file argument"));
        }

        assertFalse(instance.isGetVersion());
        assertFalse(instance.isGetHelp());
        assertFalse(instance.isRunScan());
        assertFalse(instance.isLoadCPE());
    }

    /**
     * Test of parse method with jar arg, of class CliParser.
     * @throws Exception thrown when an exception occurs.
     */
    @Test
    public void testParse_cpe_withFileExists() throws Exception {
        System.out.println("parse -cpe checkSumTest.file");
        File path = new File(this.getClass().getClassLoader().getResource("checkSumTest.file").getPath());
        String[] args = {"-cpe", path.getCanonicalPath()};

        CliParser instance = new CliParser();
        instance.parse(args);

        assertEquals(path.getCanonicalPath(), instance.getCpeFile());

        assertFalse(instance.isGetVersion());
        assertFalse(instance.isGetHelp());
        assertFalse(instance.isRunScan());
        assertTrue(instance.isLoadCPE());

    }

    /**
@@ -342,7 +239,7 @@ public class CliParserTest extends TestCase {
            String text = (new String(baos.toByteArray()));
            String[] lines = text.split(System.getProperty("line.separator"));
            assertTrue(lines[0].startsWith("usage: "));
            assertTrue((lines.length>2));
            assertTrue((lines.length > 2));
        } catch (IOException ex) {
            System.setOut(out);
            fail("CliParser.printVersionInfo did not write anything to system.out.");

File diff suppressed because it is too large.
BIN  src/test/resources/spring-core-2.5.5.jar  (Normal file; binary file not shown)
BIN  src/test/resources/struts.jar  (Normal file; binary file not shown)