- * If either the vendorWeightings or productWeightings lists have been populated this data is used to add weighting
- * factors to the search.
+ * If either the vendorWeightings or productWeightings lists have been populated this data is used to add weighting factors to
+ * the search.
*
* @param vendor the text used to search the vendor field
* @param product the text used to search the product field
* @param vendorWeightings a list of strings to use to add weighting factors to the vendor field
* @param productWeightings Adds a list of strings that will be used to add weighting factors to the product search
* @return a list of possible CPE values
- * @throws CorruptIndexException when the Lucene index is corrupt
- * @throws IOException when the Lucene index is not found
- * @throws ParseException when the generated query is not valid
*/
protected List searchCPE(String vendor, String product,
- Set vendorWeightings, Set productWeightings)
- throws CorruptIndexException, IOException, ParseException {
- final ArrayList ret = new ArrayList(MAX_QUERY_RESULTS);
+ Set vendorWeightings, Set productWeightings) {
+
+ final List ret = new ArrayList(MAX_QUERY_RESULTS);
final String searchString = buildSearch(vendor, product, vendorWeightings, productWeightings);
if (searchString == null) {
return ret;
}
-
- final TopDocs docs = cpe.search(searchString, MAX_QUERY_RESULTS);
- for (ScoreDoc d : docs.scoreDocs) {
- if (d.score >= 0.08) {
- final Document doc = cpe.getDocument(d.doc);
- final IndexEntry entry = new IndexEntry();
- entry.setVendor(doc.get(Fields.VENDOR));
- entry.setProduct(doc.get(Fields.PRODUCT));
-// if (d.score < 0.08) {
-// System.out.print(entry.getVendor());
-// System.out.print(":");
-// System.out.print(entry.getProduct());
-// System.out.print(":");
-// System.out.println(d.score);
-// }
- entry.setSearchScore(d.score);
- if (!ret.contains(entry)) {
- ret.add(entry);
+ try {
+ final TopDocs docs = cpe.search(searchString, MAX_QUERY_RESULTS);
+ for (ScoreDoc d : docs.scoreDocs) {
+ if (d.score >= 0.08) {
+ final Document doc = cpe.getDocument(d.doc);
+ final IndexEntry entry = new IndexEntry();
+ entry.setVendor(doc.get(Fields.VENDOR));
+ entry.setProduct(doc.get(Fields.PRODUCT));
+ entry.setSearchScore(d.score);
+ if (!ret.contains(entry)) {
+ ret.add(entry);
+ }
}
}
+ return ret;
+ } catch (ParseException ex) {
+ final String msg = String.format("Unable to parse: %s", searchString);
+ LOGGER.log(Level.WARNING, "An error occured querying the CPE data. See the log for more details.");
+ LOGGER.log(Level.INFO, msg, ex);
+ } catch (IOException ex) {
+ final String msg = String.format("IO Error with search string: %s", searchString);
+ LOGGER.log(Level.WARNING, "An error occured reading CPE data. See the log for more details.");
+ LOGGER.log(Level.INFO, msg, ex);
}
- return ret;
+ return null;
}
/**
@@ -292,8 +293,8 @@ public class CPEAnalyzer implements Analyzer {
* Builds a Lucene search string by properly escaping data and constructing a valid search query.
*
*
- * If either the possibleVendor or possibleProducts lists have been populated this data is used to add weighting
- * factors to the search string generated.
+ * If either the possibleVendor or possibleProducts lists have been populated this data is used to add weighting factors to
+ * the search string generated.
*
* @param vendor text to search the vendor field
* @param product text to search the product field
@@ -319,9 +320,8 @@ public class CPEAnalyzer implements Analyzer {
}
/**
- * This method constructs a Lucene query for a given field. The searchText is split into separate words and if the
- * word is within the list of weighted words then an additional weighting is applied to the term as it is appended
- * into the query.
+ * This method constructs a Lucene query for a given field. The searchText is split into separate words and if the word is
+ * within the list of weighted words then an additional weighting is applied to the term as it is appended into the query.
*
* @param sb a StringBuilder that the query text will be appended to.
* @param field the field within the Lucene index that the query is searching.
@@ -392,8 +392,8 @@ public class CPEAnalyzer implements Analyzer {
}
/**
- * Ensures that the CPE Identified matches the dependency. This validates that the product, vendor, and version
- * information for the CPE are contained within the dependencies evidence.
+ * Ensures that the CPE Identified matches the dependency. This validates that the product, vendor, and version information
+ * for the CPE are contained within the dependencies evidence.
*
* @param entry a CPE entry.
* @param dependency the dependency that the CPE entries could be for.
@@ -482,17 +482,19 @@ public class CPEAnalyzer implements Analyzer {
}
/**
- * Retrieves a list of CPE values from the CveDB based on the vendor and product passed in. The list is then
- * validated to find only CPEs that are valid for the given dependency. It is possible that the CPE identified is a
- * best effort "guess" based on the vendor, product, and version information.
+ * Retrieves a list of CPE values from the CveDB based on the vendor and product passed in. The list is then validated to find
+ * only CPEs that are valid for the given dependency. It is possible that the CPE identified is a best effort "guess" based on
+ * the vendor, product, and version information.
*
* @param dependency the Dependency being analyzed
* @param vendor the vendor for the CPE being analyzed
* @param product the product for the CPE being analyzed
+ * @param currentConfidence the current confidence being used during analysis
* @return true if an identifier was added to the dependency; otherwise false
* @throws UnsupportedEncodingException is thrown if UTF-8 is not supported
*/
- private boolean determineIdentifiers(Dependency dependency, String vendor, String product, Confidence currentConfidence) throws UnsupportedEncodingException {
+ protected boolean determineIdentifiers(Dependency dependency, String vendor, String product,
+ Confidence currentConfidence) throws UnsupportedEncodingException {
final Set cpes = cve.getCPEs(vendor, product);
DependencyVersion bestGuess = new DependencyVersion("-");
Confidence bestGuessConf = null;
@@ -590,8 +592,8 @@ public class CPEAnalyzer implements Analyzer {
*/
BEST_GUESS,
/**
- * The entire vendor/product group must be added (without a guess at version) because there is a CVE with a VS
- * that only specifies vendor/product.
+ * The entire vendor/product group must be added (without a guess at version) because there is a CVE with a VS that only
+ * specifies vendor/product.
*/
BROAD_MATCH
}
@@ -739,8 +741,7 @@ public class CPEAnalyzer implements Analyzer {
//
/**
- * Standard implementation of compareTo that compares identifier confidence, evidence confidence, and then the
- * identifier.
+ * Standard implementation of compareTo that compares identifier confidence, evidence confidence, and then the identifier.
*
* @param o the IdentifierMatch to compare to
* @return the natural ordering of IdentifierMatch
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/CentralAnalyzer.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/CentralAnalyzer.java
new file mode 100644
index 000000000..244358fa1
--- /dev/null
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/CentralAnalyzer.java
@@ -0,0 +1,243 @@
+/*
+ * This file is part of dependency-check-core.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright (c) 2014 Jeremy Long. All Rights Reserved.
+ */
+package org.owasp.dependencycheck.analyzer;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URL;
+import java.util.List;
+import java.util.Set;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import org.apache.commons.io.FileUtils;
+import org.owasp.dependencycheck.Engine;
+import org.owasp.dependencycheck.analyzer.exception.AnalysisException;
+import org.owasp.dependencycheck.data.central.CentralSearch;
+import org.owasp.dependencycheck.data.nexus.MavenArtifact;
+import org.owasp.dependencycheck.dependency.Confidence;
+import org.owasp.dependencycheck.dependency.Dependency;
+import org.owasp.dependencycheck.dependency.Evidence;
+import org.owasp.dependencycheck.jaxb.pom.PomUtils;
+import org.owasp.dependencycheck.utils.DownloadFailedException;
+import org.owasp.dependencycheck.utils.Downloader;
+import org.owasp.dependencycheck.utils.InvalidSettingException;
+import org.owasp.dependencycheck.utils.Settings;
+
+/**
+ * Analyzer which will attempt to locate a dependency, and the GAV information, by querying Central for the dependency's SHA-1
+ * digest.
+ *
+ * @author colezlaw
+ */
+public class CentralAnalyzer extends AbstractFileTypeAnalyzer {
+
+ /**
+ * The logger.
+ */
+ private static final Logger LOGGER = Logger.getLogger(CentralAnalyzer.class.getName());
+
+ /**
+ * The name of the analyzer.
+ */
+ private static final String ANALYZER_NAME = "Central Analyzer";
+
+ /**
+ * The phase in which this analyzer runs.
+ */
+ private static final AnalysisPhase ANALYSIS_PHASE = AnalysisPhase.INFORMATION_COLLECTION;
+
+ /**
+ * The types of files on which this will work.
+ */
+ private static final Set SUPPORTED_EXTENSIONS = newHashSet("jar");
+
+ /**
+ * The analyzer should be disabled if there are errors, so this is a flag to determine if such an error has occurred.
+ */
+ private boolean errorFlag = false;
+
+ /**
+ * The searcher itself.
+ */
+ private CentralSearch searcher;
+ /**
+ * Utility to read POM files.
+ */
+ private PomUtils pomUtil = new PomUtils();
+ /**
+ * Field indicating if the analyzer is enabled.
+ */
+ private final boolean enabled = checkEnabled();
+
+ /**
+ * Determine whether to enable this analyzer or not.
+ *
+ * @return whether the analyzer should be enabled
+ */
+ @Override
+ public boolean isEnabled() {
+ return enabled;
+ }
+
+ /**
+ * Determines if this analyzer is enabled.
+ *
+ * @return true if the analyzer is enabled; otherwise false
+ */
+ private boolean checkEnabled() {
+ boolean retval = false;
+
+ try {
+ if (Settings.getBoolean(Settings.KEYS.ANALYZER_CENTRAL_ENABLED)) {
+ if (!Settings.getBoolean(Settings.KEYS.ANALYZER_NEXUS_ENABLED)
+ || NexusAnalyzer.DEFAULT_URL.equals(Settings.getString(Settings.KEYS.ANALYZER_NEXUS_URL))) {
+ LOGGER.fine("Enabling the Central analyzer");
+ retval = true;
+ } else {
+ LOGGER.info("Nexus analyzer is enabled, disabling the Central Analyzer");
+ }
+ } else {
+ LOGGER.info("Central analyzer disabled");
+ }
+ } catch (InvalidSettingException ise) {
+ LOGGER.warning("Invalid setting. Disabling the Central analyzer");
+ }
+ return retval;
+ }
+
+ /**
+ * Initializes the analyzer once before any analysis is performed.
+ *
+ * @throws Exception if there's an error during initialization
+ */
+ @Override
+ public void initializeFileTypeAnalyzer() throws Exception {
+ LOGGER.fine("Initializing Central analyzer");
+ LOGGER.fine(String.format("Central analyzer enabled: %s", isEnabled()));
+ if (isEnabled()) {
+ final String searchUrl = Settings.getString(Settings.KEYS.ANALYZER_CENTRAL_URL);
+ LOGGER.fine(String.format("Central Analyzer URL: %s", searchUrl));
+ searcher = new CentralSearch(new URL(searchUrl));
+ }
+ }
+
+ /**
+ * Returns the analyzer's name.
+ *
+ * @return the name of the analyzer
+ */
+ @Override
+ public String getName() {
+ return ANALYZER_NAME;
+ }
+
+ /**
+     * Returns the key used in the properties file to reference the analyzer's enabled property.
+ *
+ * @return the analyzer's enabled property setting key.
+ */
+ @Override
+ protected String getAnalyzerEnabledSettingKey() {
+ return Settings.KEYS.ANALYZER_CENTRAL_ENABLED;
+ }
+
+ /**
+ * Returns the analysis phase under which the analyzer runs.
+ *
+ * @return the phase under which the analyzer runs
+ */
+ @Override
+ public AnalysisPhase getAnalysisPhase() {
+ return ANALYSIS_PHASE;
+ }
+
+ /**
+ * Returns the extensions for which this Analyzer runs.
+ *
+ * @return the extensions for which this Analyzer runs
+ */
+ @Override
+ public Set getSupportedExtensions() {
+ return SUPPORTED_EXTENSIONS;
+ }
+
+ /**
+ * Performs the analysis.
+ *
+ * @param dependency the dependency to analyze
+ * @param engine the engine
+ * @throws AnalysisException when there's an exception during analysis
+ */
+ @Override
+ public void analyzeFileType(Dependency dependency, Engine engine) throws AnalysisException {
+ if (errorFlag || !isEnabled()) {
+ return;
+ }
+
+ try {
+ final List mas = searcher.searchSha1(dependency.getSha1sum());
+ final Confidence confidence = mas.size() > 1 ? Confidence.HIGH : Confidence.HIGHEST;
+ for (MavenArtifact ma : mas) {
+ LOGGER.fine(String.format("Central analyzer found artifact (%s) for dependency (%s)", ma.toString(), dependency.getFileName()));
+ dependency.addAsEvidence("central", ma, confidence);
+ boolean pomAnalyzed = false;
+ for (Evidence e : dependency.getVendorEvidence()) {
+ if ("pom".equals(e.getSource())) {
+ pomAnalyzed = true;
+ break;
+ }
+ }
+ if (!pomAnalyzed && ma.getPomUrl() != null) {
+ File pomFile = null;
+ try {
+ final File baseDir = Settings.getTempDirectory();
+ pomFile = File.createTempFile("pom", ".xml", baseDir);
+ if (!pomFile.delete()) {
+ final String msg = String.format("Unable to fetch pom.xml for %s from Central; "
+ + "this could result in undetected CPE/CVEs.", dependency.getFileName());
+ LOGGER.warning(msg);
+ LOGGER.fine("Unable to delete temp file");
+ }
+ LOGGER.fine(String.format("Downloading %s", ma.getPomUrl()));
+ Downloader.fetchFile(new URL(ma.getPomUrl()), pomFile);
+ pomUtil.analyzePOM(dependency, pomFile);
+
+ } catch (DownloadFailedException ex) {
+ final String msg = String.format("Unable to download pom.xml for %s from Central; "
+ + "this could result in undetected CPE/CVEs.", dependency.getFileName());
+ LOGGER.warning(msg);
+ } finally {
+ if (pomFile != null && !FileUtils.deleteQuietly(pomFile)) {
+ pomFile.deleteOnExit();
+ }
+ }
+ }
+
+ }
+ } catch (IllegalArgumentException iae) {
+ LOGGER.info(String.format("invalid sha1-hash on %s", dependency.getFileName()));
+ } catch (FileNotFoundException fnfe) {
+ LOGGER.fine(String.format("Artifact not found in repository: '%s", dependency.getFileName()));
+ } catch (IOException ioe) {
+ LOGGER.log(Level.FINE, "Could not connect to Central search", ioe);
+ errorFlag = true;
+ }
+ }
+
+}
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/DependencyBundlingAnalyzer.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/DependencyBundlingAnalyzer.java
index 3c8a67471..fe1c653e4 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/DependencyBundlingAnalyzer.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/DependencyBundlingAnalyzer.java
@@ -36,9 +36,9 @@ import org.owasp.dependencycheck.utils.LogUtils;
/**
*
- * This analyzer ensures dependencies that should be grouped together, to remove excess noise from the report, are
- * grouped. An example would be Spring, Spring Beans, Spring MVC, etc. If they are all for the same version and have the
- * same relative path then these should be grouped into a single dependency under the core/main library.
+ * This analyzer ensures dependencies that should be grouped together, to remove excess noise from the report, are grouped. An
+ * example would be Spring, Spring Beans, Spring MVC, etc. If they are all for the same version and have the same relative path
+ * then these should be grouped into a single dependency under the core/main library.
*
* Note, this grouping only works on dependencies with identified CVE entries
*
@@ -55,7 +55,7 @@ public class DependencyBundlingAnalyzer extends AbstractAnalyzer implements Anal
/**
* A pattern for obtaining the first part of a filename.
*/
- private static final Pattern STARTING_TEXT_PATTERN = Pattern.compile("^[a-zA-Z]*");
+ private static final Pattern STARTING_TEXT_PATTERN = Pattern.compile("^[a-zA-Z0-9]*");
/**
* a flag indicating if this analyzer has run. This analyzer only runs once.
*/
@@ -91,8 +91,8 @@ public class DependencyBundlingAnalyzer extends AbstractAnalyzer implements Anal
//
/**
- * Analyzes a set of dependencies. If they have been found to have the same base path and the same set of
- * identifiers they are likely related. The related dependencies are bundled into a single reportable item.
+ * Analyzes a set of dependencies. If they have been found to have the same base path and the same set of identifiers they are
+ * likely related. The related dependencies are bundled into a single reportable item.
*
* @param ignore this analyzer ignores the dependency being analyzed
* @param engine the engine that is scanning the dependencies
@@ -107,30 +107,34 @@ public class DependencyBundlingAnalyzer extends AbstractAnalyzer implements Anal
//for (Dependency nextDependency : engine.getDependencies()) {
while (mainIterator.hasNext()) {
final Dependency dependency = mainIterator.next();
- if (mainIterator.hasNext()) {
+ if (mainIterator.hasNext() && !dependenciesToRemove.contains(dependency)) {
final ListIterator subIterator = engine.getDependencies().listIterator(mainIterator.nextIndex());
while (subIterator.hasNext()) {
final Dependency nextDependency = subIterator.next();
if (hashesMatch(dependency, nextDependency)) {
- if (isCore(dependency, nextDependency)) {
+ if (firstPathIsShortest(dependency.getFilePath(), nextDependency.getFilePath())) {
mergeDependencies(dependency, nextDependency, dependenciesToRemove);
} else {
mergeDependencies(nextDependency, dependency, dependenciesToRemove);
+ break; //since we merged into the next dependency - skip forward to the next in mainIterator
}
} else if (isShadedJar(dependency, nextDependency)) {
if (dependency.getFileName().toLowerCase().endsWith("pom.xml")) {
- dependenciesToRemove.add(dependency);
+ mergeDependencies(nextDependency, dependency, dependenciesToRemove);
+ nextDependency.getRelatedDependencies().remove(dependency);
+ break;
} else {
- dependenciesToRemove.add(nextDependency);
+ mergeDependencies(dependency, nextDependency, dependenciesToRemove);
+ nextDependency.getRelatedDependencies().remove(nextDependency);
}
} else if (cpeIdentifiersMatch(dependency, nextDependency)
&& hasSameBasePath(dependency, nextDependency)
&& fileNameMatch(dependency, nextDependency)) {
-
if (isCore(dependency, nextDependency)) {
mergeDependencies(dependency, nextDependency, dependenciesToRemove);
} else {
mergeDependencies(nextDependency, dependency, dependenciesToRemove);
+ break; //since we merged into the next dependency - skip forward to the next in mainIterator
}
}
}
@@ -138,9 +142,7 @@ public class DependencyBundlingAnalyzer extends AbstractAnalyzer implements Anal
}
//removing dependencies here as ensuring correctness and avoiding ConcurrentUpdateExceptions
// was difficult because of the inner iterator.
- for (Dependency d : dependenciesToRemove) {
- engine.getDependencies().remove(d);
- }
+ engine.getDependencies().removeAll(dependenciesToRemove);
}
}
@@ -148,10 +150,10 @@ public class DependencyBundlingAnalyzer extends AbstractAnalyzer implements Anal
* Adds the relatedDependency to the dependency's related dependencies.
*
* @param dependency the main dependency
- * @param relatedDependency a collection of dependencies to be removed from the main analysis loop, this is the
- * source of dependencies to remove
- * @param dependenciesToRemove a collection of dependencies that will be removed from the main analysis loop, this
- * function adds to this collection
+     * @param relatedDependency the dependency to be merged into the main dependency and removed from the main analysis
+     * loop
+ * @param dependenciesToRemove a collection of dependencies that will be removed from the main analysis loop, this function
+ * adds to this collection
*/
private void mergeDependencies(final Dependency dependency, final Dependency relatedDependency, final Set dependenciesToRemove) {
dependency.addRelatedDependency(relatedDependency);
@@ -160,12 +162,14 @@ public class DependencyBundlingAnalyzer extends AbstractAnalyzer implements Anal
dependency.addRelatedDependency(i.next());
i.remove();
}
+ if (dependency.getSha1sum().equals(relatedDependency.getSha1sum())) {
+ dependency.addAllProjectReferences(relatedDependency.getProjectReferences());
+ }
dependenciesToRemove.add(relatedDependency);
}
/**
- * Attempts to trim a maven repo to a common base path. This is typically
- * [drive]\[repo_location]\repository\[path1]\[path2].
+ * Attempts to trim a maven repo to a common base path. This is typically [drive]\[repo_location]\repository\[path1]\[path2].
*
* @param path the path to trim
* @return a string representing the base path.
@@ -201,25 +205,8 @@ public class DependencyBundlingAnalyzer extends AbstractAnalyzer implements Anal
|| dependency2 == null || dependency2.getFileName() == null) {
return false;
}
- String fileName1 = dependency1.getFileName();
- String fileName2 = dependency2.getFileName();
-
- //update to deal with archive analyzer, the starting name maybe the same
- // as this is incorrectly looking at the starting path
- final File one = new File(fileName1);
- final File two = new File(fileName2);
- final String oneParent = one.getParent();
- final String twoParent = two.getParent();
- if (oneParent != null) {
- if (oneParent.equals(twoParent)) {
- fileName1 = one.getName();
- fileName2 = two.getName();
- } else {
- return false;
- }
- } else if (twoParent != null) {
- return false;
- }
+ final String fileName1 = dependency1.getActualFile().getName();
+ final String fileName2 = dependency2.getActualFile().getName();
//version check
final DependencyVersion version1 = DependencyVersionUtil.parseVersion(fileName1);
@@ -267,9 +254,11 @@ public class DependencyBundlingAnalyzer extends AbstractAnalyzer implements Anal
}
if (cpeCount1 > 0 && cpeCount1 == cpeCount2) {
for (Identifier i : dependency1.getIdentifiers()) {
- matches |= dependency2.getIdentifiers().contains(i);
- if (!matches) {
- break;
+ if ("cpe".equals(i.getType())) {
+ matches |= dependency2.getIdentifiers().contains(i);
+ if (!matches) {
+ break;
+ }
}
}
}
@@ -318,8 +307,8 @@ public class DependencyBundlingAnalyzer extends AbstractAnalyzer implements Anal
}
/**
- * This is likely a very broken attempt at determining if the 'left' dependency is the 'core' library in comparison
- * to the 'right' library.
+ * This is likely a very broken attempt at determining if the 'left' dependency is the 'core' library in comparison to the
+ * 'right' library.
*
* @param left the dependency to test
* @param right the dependency to test against
@@ -338,6 +327,10 @@ public class DependencyBundlingAnalyzer extends AbstractAnalyzer implements Anal
|| !rightName.contains("core") && leftName.contains("core")
|| !rightName.contains("kernel") && leftName.contains("kernel")) {
returnVal = true;
+// } else if (leftName.matches(".*struts2\\-core.*") && rightName.matches(".*xwork\\-core.*")) {
+// returnVal = true;
+// } else if (rightName.matches(".*struts2\\-core.*") && leftName.matches(".*xwork\\-core.*")) {
+// returnVal = false;
} else {
/*
* considered splitting the names up and comparing the components,
@@ -372,13 +365,12 @@ public class DependencyBundlingAnalyzer extends AbstractAnalyzer implements Anal
}
/**
- * Determines if the jar is shaded and the created pom.xml identified the same CPE as the jar - if so, the pom.xml
- * dependency should be removed.
+ * Determines if the jar is shaded and the created pom.xml identified the same CPE as the jar - if so, the pom.xml dependency
+ * should be removed.
*
* @param dependency a dependency to check
* @param nextDependency another dependency to check
- * @return true if on of the dependencies is a pom.xml and the identifiers between the two collections match;
- * otherwise false
+     * @return true if one of the dependencies is a pom.xml and the identifiers between the two collections match; otherwise false
*/
private boolean isShadedJar(Dependency dependency, Dependency nextDependency) {
final String mainName = dependency.getFileName().toLowerCase();
@@ -390,4 +382,43 @@ public class DependencyBundlingAnalyzer extends AbstractAnalyzer implements Anal
}
return false;
}
+
+ /**
+     * Determines which path is shortest; if path lengths are equal then we use the String compareTo method to determine if the
+ * first path is smaller.
+ *
+ * @param left the first path to compare
+ * @param right the second path to compare
+ * @return true if the leftPath is the shortest; otherwise false
+ */
+ protected boolean firstPathIsShortest(String left, String right) {
+ final String leftPath = left.replace('\\', '/');
+ final String rightPath = right.replace('\\', '/');
+
+ final int leftCount = countChar(leftPath, '/');
+ final int rightCount = countChar(rightPath, '/');
+ if (leftCount == rightCount) {
+ return leftPath.compareTo(rightPath) <= 0;
+ } else {
+ return leftCount < rightCount;
+ }
+ }
+
+ /**
+ * Counts the number of times the character is present in the string.
+ *
+ * @param string the string to count the characters in
+ * @param c the character to count
+ * @return the number of times the character is present in the string
+ */
+ private int countChar(String string, char c) {
+ int count = 0;
+ final int max = string.length();
+ for (int i = 0; i < max; i++) {
+ if (c == string.charAt(i)) {
+ count++;
+ }
+ }
+ return count;
+ }
}
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/FalsePositiveAnalyzer.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/FalsePositiveAnalyzer.java
index 3eb5d46c3..725e32ede 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/FalsePositiveAnalyzer.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/FalsePositiveAnalyzer.java
@@ -93,12 +93,17 @@ public class FalsePositiveAnalyzer extends AbstractAnalyzer {
addFalseNegativeCPEs(dependency);
}
+ /**
+ * Removes inaccurate matches on springframework CPEs.
+ *
+ * @param dependency the dependency to test for and remove known inaccurate CPE matches
+ */
private void removeBadSpringMatches(Dependency dependency) {
String mustContain = null;
for (Identifier i : dependency.getIdentifiers()) {
if ("maven".contains(i.getType())) {
if (i.getValue() != null && i.getValue().startsWith("org.springframework.")) {
- int endPoint = i.getValue().indexOf(":", 19);
+ final int endPoint = i.getValue().indexOf(":", 19);
if (endPoint >= 0) {
mustContain = i.getValue().substring(19, endPoint).toLowerCase();
break;
@@ -107,9 +112,9 @@ public class FalsePositiveAnalyzer extends AbstractAnalyzer {
}
}
if (mustContain != null) {
- Iterator itr = dependency.getIdentifiers().iterator();
+ final Iterator itr = dependency.getIdentifiers().iterator();
while (itr.hasNext()) {
- Identifier i = itr.next();
+ final Identifier i = itr.next();
if ("cpe".contains(i.getType())
&& i.getValue() != null
&& i.getValue().startsWith("cpe:/a:springsource:")
@@ -117,7 +122,6 @@ public class FalsePositiveAnalyzer extends AbstractAnalyzer {
itr.remove();
//dependency.getIdentifiers().remove(i);
}
-
}
}
}
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/FileNameAnalyzer.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/FileNameAnalyzer.java
index e4bc0a6b1..8910f704d 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/FileNameAnalyzer.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/FileNameAnalyzer.java
@@ -73,7 +73,7 @@ public class FileNameAnalyzer extends AbstractAnalyzer implements Analyzer {
public void analyze(Dependency dependency, Engine engine) throws AnalysisException {
//strip any path information that may get added by ArchiveAnalyzer, etc.
- final File f = new File(dependency.getFileName());
+ final File f = dependency.getActualFile();
String fileName = f.getName();
//remove file extension
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/FileTypeAnalyzer.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/FileTypeAnalyzer.java
index d22aad210..55f3c2f7b 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/FileTypeAnalyzer.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/FileTypeAnalyzer.java
@@ -31,4 +31,9 @@ public interface FileTypeAnalyzer extends Analyzer {
* @return whether or not the specified file extension is supported by this analyzer.
*/
boolean supportsExtension(String extension);
+
+ /**
+ * Resets the analyzers state.
+ */
+ void reset();
}
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/HintAnalyzer.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/HintAnalyzer.java
index 123f51f83..8d6f866e4 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/HintAnalyzer.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/HintAnalyzer.java
@@ -19,6 +19,7 @@ package org.owasp.dependencycheck.analyzer;
import java.util.ArrayList;
import java.util.Iterator;
+import java.util.List;
import java.util.Set;
import org.owasp.dependencycheck.Engine;
import org.owasp.dependencycheck.analyzer.exception.AnalysisException;
@@ -64,8 +65,8 @@ public class HintAnalyzer extends AbstractAnalyzer implements Analyzer {
//
/**
- * The HintAnalyzer uses knowledge about a dependency to add additional information to help in identification of
- * identifiers or vulnerabilities.
+ * The HintAnalyzer uses knowledge about a dependency to add additional information to help in identification of identifiers
+ * or vulnerabilities.
*
* @param dependency The dependency being analyzed
* @param engine The scanning engine
@@ -84,24 +85,39 @@ public class HintAnalyzer extends AbstractAnalyzer implements Analyzer {
Confidence.HIGH);
final Evidence springTest3 = new Evidence("Manifest",
+ "Implementation-Title",
+ "spring-core",
+ Confidence.HIGH);
+
+ final Evidence springTest4 = new Evidence("Manifest",
"Bundle-Vendor",
"SpringSource",
Confidence.HIGH);
- Set evidence = dependency.getProductEvidence().getEvidence();
- if (evidence.contains(springTest1) || evidence.contains(springTest2)) {
- dependency.getProductEvidence().addEvidence("hint analyzer", "product", "springsource_spring_framework", Confidence.HIGH);
+ final Evidence springTest5 = new Evidence("jar",
+ "package name",
+ "springframework",
+ Confidence.LOW);
+
+ //springsource/vware problem
+ final Set product = dependency.getProductEvidence().getEvidence();
+ final Set vendor = dependency.getVendorEvidence().getEvidence();
+
+ if (product.contains(springTest1) || product.contains(springTest2) || product.contains(springTest3)
+ || (dependency.getFileName().contains("spring") && (product.contains(springTest5) || vendor.contains(springTest5)))) {
+ dependency.getProductEvidence().addEvidence("hint analyzer", "product", "springsource spring framework", Confidence.HIGH);
dependency.getVendorEvidence().addEvidence("hint analyzer", "vendor", "SpringSource", Confidence.HIGH);
dependency.getVendorEvidence().addEvidence("hint analyzer", "vendor", "vmware", Confidence.HIGH);
}
- evidence = dependency.getVendorEvidence().getEvidence();
- if (evidence.contains(springTest3)) {
+ if (vendor.contains(springTest4)) {
dependency.getProductEvidence().addEvidence("hint analyzer", "product", "springsource_spring_framework", Confidence.HIGH);
dependency.getVendorEvidence().addEvidence("hint analyzer", "vendor", "vmware", Confidence.HIGH);
}
+
+ //sun/oracle problem
final Iterator itr = dependency.getVendorEvidence().iterator();
- final ArrayList newEntries = new ArrayList();
+ final List newEntries = new ArrayList();
while (itr.hasNext()) {
final Evidence e = itr.next();
if ("sun".equalsIgnoreCase(e.getValue(false))) {
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/JarAnalyzer.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/JarAnalyzer.java
index 97e934840..5adf7968f 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/JarAnalyzer.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/JarAnalyzer.java
@@ -46,13 +46,6 @@ import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.regex.Pattern;
import java.util.zip.ZipEntry;
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBElement;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Unmarshaller;
-import javax.xml.parsers.ParserConfigurationException;
-import javax.xml.parsers.SAXParser;
-import javax.xml.parsers.SAXParserFactory;
import javax.xml.transform.sax.SAXSource;
import org.jsoup.Jsoup;
import org.owasp.dependencycheck.Engine;
@@ -60,7 +53,7 @@ import org.owasp.dependencycheck.analyzer.exception.AnalysisException;
import org.owasp.dependencycheck.dependency.Confidence;
import org.owasp.dependencycheck.dependency.Dependency;
import org.owasp.dependencycheck.dependency.EvidenceCollection;
-import org.owasp.dependencycheck.jaxb.pom.MavenNamespaceFilter;
+import org.owasp.dependencycheck.jaxb.pom.PomUtils;
import org.owasp.dependencycheck.jaxb.pom.generated.License;
import org.owasp.dependencycheck.jaxb.pom.generated.Model;
import org.owasp.dependencycheck.jaxb.pom.generated.Organization;
@@ -68,9 +61,6 @@ import org.owasp.dependencycheck.utils.FileUtils;
import org.owasp.dependencycheck.utils.NonClosingStream;
import org.owasp.dependencycheck.utils.Settings;
import org.xml.sax.InputSource;
-import org.xml.sax.SAXException;
-import org.xml.sax.XMLFilter;
-import org.xml.sax.XMLReader;
/**
* Used to load a JAR file and collect information that can be used to determine the associated CPE.
@@ -158,24 +148,18 @@ public class JarAnalyzer extends AbstractFileTypeAnalyzer {
* A pattern to detect HTML within text.
*/
private static final Pattern HTML_DETECTION_PATTERN = Pattern.compile("\\<[a-z]+.*/?\\>", Pattern.CASE_INSENSITIVE);
+
/**
- * The unmarshaller used to parse the pom.xml from a JAR file.
+ * The POM Utility for parsing POM files.
*/
- private Unmarshaller pomUnmarshaller;
+ private PomUtils pomUtils = null;
//
/**
* Constructs a new JarAnalyzer.
*/
public JarAnalyzer() {
- try {
- //final JAXBContext jaxbContext = JAXBContext.newInstance("org.owasp.dependencycheck.jaxb.pom.generated");
- final JAXBContext jaxbContext = JAXBContext.newInstance(Model.class);
- pomUnmarshaller = jaxbContext.createUnmarshaller();
- } catch (JAXBException ex) { //guess we will just have a null pointer exception later...
- LOGGER.log(Level.SEVERE, "Unable to load parser. See the log for more details.");
- LOGGER.log(Level.FINE, null, ex);
- }
+ pomUtils = new PomUtils();
}
//
@@ -243,7 +227,7 @@ public class JarAnalyzer extends AbstractFileTypeAnalyzer {
@Override
public void analyzeFileType(Dependency dependency, Engine engine) throws AnalysisException {
try {
- final ArrayList classNames = collectClassNames(dependency);
+ final List classNames = collectClassNames(dependency);
final String fileName = dependency.getFileName().toLowerCase();
if (classNames.isEmpty()
&& (fileName.endsWith("-sources.jar")
@@ -262,8 +246,8 @@ public class JarAnalyzer extends AbstractFileTypeAnalyzer {
}
/**
- * Attempts to find a pom.xml within the JAR file. If found it extracts information and adds it to the evidence.
- * This will attempt to interpolate the strings contained within the pom.properties if one exists.
+ * Attempts to find a pom.xml within the JAR file. If found it extracts information and adds it to the evidence. This will
+ * attempt to interpolate the strings contained within the pom.properties if one exists.
*
* @param dependency the dependency being analyzed
* @param classes a collection of class name information
@@ -271,7 +255,7 @@ public class JarAnalyzer extends AbstractFileTypeAnalyzer {
* @throws AnalysisException is thrown if there is an exception parsing the pom
* @return whether or not evidence was added to the dependency
*/
- protected boolean analyzePOM(Dependency dependency, ArrayList classes, Engine engine) throws AnalysisException {
+ protected boolean analyzePOM(Dependency dependency, List classes, Engine engine) throws AnalysisException {
boolean foundSomething = false;
final JarFile jar;
try {
@@ -293,13 +277,23 @@ public class JarAnalyzer extends AbstractFileTypeAnalyzer {
LOGGER.log(Level.FINE, msg, ex);
return false;
}
+ File externalPom = null;
if (pomEntries.isEmpty()) {
- return false;
+ String pomPath = dependency.getActualFilePath();
+ pomPath = pomPath.substring(0, pomPath.lastIndexOf('.')) + ".pom";
+ externalPom = new File(pomPath);
+ if (externalPom.isFile()) {
+ pomEntries.add(pomPath);
+ } else {
+ return false;
+ }
}
for (String path : pomEntries) {
Properties pomProperties = null;
try {
- pomProperties = retrievePomProperties(path, jar);
+ if (externalPom == null) {
+ pomProperties = retrievePomProperties(path, jar);
+ }
} catch (IOException ex) {
LOGGER.log(Level.FINEST, "ignore this, failed reading a non-existent pom.properties", ex);
}
@@ -313,11 +307,11 @@ public class JarAnalyzer extends AbstractFileTypeAnalyzer {
final String displayPath = String.format("%s%s%s",
dependency.getFilePath(),
File.separator,
- path); //.replaceAll("[\\/]", File.separator));
+ path);
final String displayName = String.format("%s%s%s",
dependency.getFileName(),
File.separator,
- path); //.replaceAll("[\\/]", File.separator));
+ path);
newDependency.setFileName(displayName);
newDependency.setFilePath(displayPath);
@@ -325,7 +319,11 @@ public class JarAnalyzer extends AbstractFileTypeAnalyzer {
engine.getDependencies().add(newDependency);
Collections.sort(engine.getDependencies());
} else {
- pom = retrievePom(path, jar);
+ if (externalPom == null) {
+ pom = retrievePom(path, jar);
+ } else {
+ pom = pomUtils.readPom(externalPom);
+ }
foundSomething |= setPomEvidence(dependency, pom, pomProperties, classes);
}
} catch (AnalysisException ex) {
@@ -410,7 +408,7 @@ public class JarAnalyzer extends AbstractFileTypeAnalyzer {
fos = new FileOutputStream(file);
bos = new BufferedOutputStream(fos, BUFFER_SIZE);
int count;
- final byte data[] = new byte[BUFFER_SIZE];
+ final byte[] data = new byte[BUFFER_SIZE];
while ((count = input.read(data, 0, BUFFER_SIZE)) != -1) {
bos.write(data, 0, count);
}
@@ -432,7 +430,7 @@ public class JarAnalyzer extends AbstractFileTypeAnalyzer {
final InputStreamReader reader = new InputStreamReader(fis, "UTF-8");
final InputSource xml = new InputSource(reader);
final SAXSource source = new SAXSource(xml);
- model = readPom(source);
+ model = pomUtils.readPom(source);
} catch (FileNotFoundException ex) {
final String msg = String.format("Unable to parse pom '%s' in jar '%s' (File Not Found)", path, jar.getName());
LOGGER.log(Level.WARNING, msg);
@@ -502,7 +500,7 @@ public class JarAnalyzer extends AbstractFileTypeAnalyzer {
final InputStreamReader reader = new InputStreamReader(stream, "UTF-8");
final InputSource xml = new InputSource(reader);
final SAXSource source = new SAXSource(xml);
- model = readPom(source);
+ model = pomUtils.readPom(source);
} catch (SecurityException ex) {
final String msg = String.format("Unable to parse pom '%s' in jar '%s'; invalid signature", path, jar.getName());
LOGGER.log(Level.WARNING, msg);
@@ -523,49 +521,17 @@ public class JarAnalyzer extends AbstractFileTypeAnalyzer {
return model;
}
- /**
- * Retrieves the specified POM from a jar file and converts it to a Model.
- *
- * @param source the SAXSource input stream to read the POM from
- * @return returns the POM object
- * @throws AnalysisException is thrown if there is an exception extracting or parsing the POM
- * {@link org.owasp.dependencycheck.jaxb.pom.generated.Model} object
- */
- private Model readPom(SAXSource source) throws AnalysisException {
- Model model = null;
- try {
- final XMLFilter filter = new MavenNamespaceFilter();
- final SAXParserFactory spf = SAXParserFactory.newInstance();
- final SAXParser sp = spf.newSAXParser();
- final XMLReader xr = sp.getXMLReader();
- filter.setParent(xr);
- final JAXBElement el = pomUnmarshaller.unmarshal(source, Model.class);
- model = el.getValue();
- } catch (SecurityException ex) {
- throw new AnalysisException(ex);
- } catch (ParserConfigurationException ex) {
- throw new AnalysisException(ex);
- } catch (SAXException ex) {
- throw new AnalysisException(ex);
- } catch (JAXBException ex) {
- throw new AnalysisException(ex);
- } catch (Throwable ex) {
- throw new AnalysisException(ex);
- }
- return model;
- }
-
/**
* Sets evidence from the pom on the supplied dependency.
*
* @param dependency the dependency to set data on
* @param pom the information from the pom
* @param pomProperties the pom properties file (null if none exists)
- * @param classes a collection of ClassNameInformation - containing data about the fully qualified class names
- * within the JAR file being analyzed
+ * @param classes a collection of ClassNameInformation - containing data about the fully qualified class names within the JAR
+ * file being analyzed
* @return true if there was evidence within the pom that we could use; otherwise false
*/
- private boolean setPomEvidence(Dependency dependency, Model pom, Properties pomProperties, ArrayList classes) {
+ private boolean setPomEvidence(Dependency dependency, Model pom, Properties pomProperties, List classes) {
boolean foundSomething = false;
boolean addAsIdentifier = true;
if (pom == null) {
@@ -583,9 +549,6 @@ public class JarAnalyzer extends AbstractFileTypeAnalyzer {
final String originalGroupID = groupid;
if (groupid != null && !groupid.isEmpty()) {
- if (groupid.startsWith("org.") || groupid.startsWith("com.")) {
- groupid = groupid.substring(4);
- }
foundSomething = true;
dependency.getVendorEvidence().addEvidence("pom", "groupid", groupid, Confidence.HIGHEST);
dependency.getProductEvidence().addEvidence("pom", "groupid", groupid, Confidence.LOW);
@@ -689,17 +652,17 @@ public class JarAnalyzer extends AbstractFileTypeAnalyzer {
}
/**
- * Analyzes the path information of the classes contained within the JarAnalyzer to try and determine possible
- * vendor or product names. If any are found they are stored in the packageVendor and packageProduct hashSets.
+ * Analyzes the path information of the classes contained within the JarAnalyzer to try and determine possible vendor or
+ * product names. If any are found they are stored in the packageVendor and packageProduct hashSets.
*
* @param classNames a list of class names
* @param dependency a dependency to analyze
* @param addPackagesAsEvidence a flag indicating whether or not package names should be added as evidence.
*/
- protected void analyzePackageNames(ArrayList classNames,
+ protected void analyzePackageNames(List classNames,
Dependency dependency, boolean addPackagesAsEvidence) {
- final HashMap vendorIdentifiers = new HashMap();
- final HashMap productIdentifiers = new HashMap();
+ final Map vendorIdentifiers = new HashMap();
+ final Map productIdentifiers = new HashMap();
analyzeFullyQualifiedClassNames(classNames, vendorIdentifiers, productIdentifiers);
final int classCount = classNames.size();
@@ -741,7 +704,7 @@ public class JarAnalyzer extends AbstractFileTypeAnalyzer {
* @return whether evidence was identified parsing the manifest
* @throws IOException if there is an issue reading the JAR file
*/
- protected boolean parseManifest(Dependency dependency, ArrayList classInformation) throws IOException {
+ protected boolean parseManifest(Dependency dependency, List classInformation) throws IOException {
boolean foundSomething = false;
JarFile jar = null;
try {
@@ -898,18 +861,17 @@ public class JarAnalyzer extends AbstractFileTypeAnalyzer {
}
/**
- * Adds a description to the given dependency. If the description contains one of the following strings beyond 100
- * characters, then the description used will be trimmed to that position:
+ * Adds a description to the given dependency. If the description contains one of the following strings beyond 100 characters,
+ * then the description used will be trimmed to that position:
*
"such as"
"like "
"will use "
"* uses "
*
* @param dependency a dependency
* @param description the description
* @param source the source of the evidence
* @param key the "name" of the evidence
- * @return if the description is trimmed, the trimmed version is returned; otherwise the original description is
- * returned
+ * @return if the description is trimmed, the trimmed version is returned; otherwise the original description is returned
*/
- private String addDescription(Dependency dependency, String description, String source, String key) {
+ public static String addDescription(Dependency dependency, String description, String source, String key) {
if (dependency.getDescription() == null) {
dependency.setDescription(description);
}
@@ -1014,12 +976,11 @@ public class JarAnalyzer extends AbstractFileTypeAnalyzer {
/**
*
- * A utility function that will interpolate strings based on values given in the properties file. It will also
- * interpolate the strings contained within the properties file so that properties can reference other
- * properties.
+ * A utility function that will interpolate strings based on values given in the properties file. It will also interpolate the
+ * strings contained within the properties file so that properties can reference other properties.
*
- * Note: if there is no property found the reference will be removed. In other words, if the interpolated
- * string will be replaced with an empty string.
+ * Note: if there is no property found the reference will be removed. In other words, the interpolated string will
+ * be replaced with an empty string.
*
*
* Example:
@@ -1039,13 +1000,13 @@ public class JarAnalyzer extends AbstractFileTypeAnalyzer {
* @param properties a collection of properties that may be referenced within the text.
* @return the interpolated text.
*/
- protected String interpolateString(String text, Properties properties) {
- Properties props = properties;
+ public static String interpolateString(String text, Properties properties) {
+ final Properties props = properties;
if (text == null) {
return text;
}
if (props == null) {
- props = new Properties();
+ return text;
}
final int pos = text.indexOf("${");
@@ -1083,14 +1044,14 @@ public class JarAnalyzer extends AbstractFileTypeAnalyzer {
}
/**
- * Cycles through an enumeration of JarEntries, contained within the dependency, and returns a list of the class
- * names. This does not include core Java package names (i.e. java.* or javax.*).
+ * Cycles through an enumeration of JarEntries, contained within the dependency, and returns a list of the class names. This
+ * does not include core Java package names (i.e. java.* or javax.*).
*
* @param dependency the dependency being analyzed
* @return an list of fully qualified class names
*/
- private ArrayList collectClassNames(Dependency dependency) {
- final ArrayList classNames = new ArrayList();
+ private List collectClassNames(Dependency dependency) {
+ final List classNames = new ArrayList();
JarFile jar = null;
try {
jar = new JarFile(dependency.getActualFilePath());
@@ -1121,17 +1082,17 @@ public class JarAnalyzer extends AbstractFileTypeAnalyzer {
}
/**
- * Cycles through the list of class names and places the package levels 0-3 into the provided maps for vendor and
- * product. This is helpful when analyzing vendor/product as many times this is included in the package name.
+ * Cycles through the list of class names and places the package levels 0-3 into the provided maps for vendor and product.
+ * This is helpful when analyzing vendor/product as many times this is included in the package name.
*
* @param classNames a list of class names
* @param vendor HashMap of possible vendor names from package names (e.g. owasp)
* @param product HashMap of possible product names from package names (e.g. dependencycheck)
*/
- private void analyzeFullyQualifiedClassNames(ArrayList classNames,
- HashMap vendor, HashMap product) {
+ private void analyzeFullyQualifiedClassNames(List classNames,
+ Map vendor, Map product) {
for (ClassNameInformation entry : classNames) {
- final ArrayList list = entry.getPackageStructure();
+ final List list = entry.getPackageStructure();
addEntry(vendor, list.get(0));
if (list.size() == 2) {
@@ -1153,13 +1114,13 @@ public class JarAnalyzer extends AbstractFileTypeAnalyzer {
}
/**
- * Adds an entry to the specified collection and sets the Integer (e.g. the count) to 1. If the entry already exists
- * in the collection then the Integer is incremented by 1.
+ * Adds an entry to the specified collection and sets the Integer (e.g. the count) to 1. If the entry already exists in the
+ * collection then the Integer is incremented by 1.
*
* @param collection a collection of strings and their occurrence count
* @param key the key to add to the collection
*/
- private void addEntry(HashMap collection, String key) {
+ private void addEntry(Map collection, String key) {
if (collection.containsKey(key)) {
collection.put(key, collection.get(key) + 1);
} else {
@@ -1168,15 +1129,15 @@ public class JarAnalyzer extends AbstractFileTypeAnalyzer {
}
/**
- * Cycles through the collection of class name information to see if parts of the package names are contained in the
- * provided value. If found, it will be added as the HIGHEST confidence evidence because we have more then one
- * source corroborating the value.
+ * Cycles through the collection of class name information to see if parts of the package names are contained in the provided
+ * value. If found, it will be added as the HIGHEST confidence evidence because we have more than one source corroborating the
+ * value.
*
* @param classes a collection of class name information
* @param value the value to check to see if it contains a package name
* @param evidence the evidence collection to add new entries too
*/
- private void addMatchingValues(ArrayList classes, String value, EvidenceCollection evidence) {
+ private void addMatchingValues(List classes, String value, EvidenceCollection evidence) {
if (value == null || value.isEmpty() || classes == null || classes.isEmpty()) {
return;
}
@@ -1211,7 +1172,7 @@ public class JarAnalyzer extends AbstractFileTypeAnalyzer {
* @param pomProperties the properties, used for string interpolation
* @param dependency the dependency to add license information too
*/
- private void extractLicense(Model pom, Properties pomProperties, Dependency dependency) {
+ public static void extractLicense(Model pom, Properties pomProperties, Dependency dependency) {
//license
if (pom.getLicenses() != null) {
String license = null;
@@ -1252,9 +1213,9 @@ public class JarAnalyzer extends AbstractFileTypeAnalyzer {
/**
*
- * Stores information about a given class name. This class will keep the fully qualified class name and a list
- * of the important parts of the package structure. Up to the first four levels of the package structure are
- * stored, excluding a leading "org" or "com". Example:
+ * Stores information about a given class name. This class will keep the fully qualified class name and a list of the
+ * important parts of the package structure. Up to the first four levels of the package structure are stored, excluding a
+ * leading "org" or "com". Example:
* ClassNameInformation obj = new ClassNameInformation("org.owasp.dependencycheck.analyzer.JarAnalyzer");
* System.out.println(obj.getName());
* for (String p : obj.getPackageStructure())
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/NexusAnalyzer.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/NexusAnalyzer.java
index dbee6a5d7..7d5650db6 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/NexusAnalyzer.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/analyzer/NexusAnalyzer.java
@@ -17,6 +17,7 @@
*/
package org.owasp.dependencycheck.analyzer;
+import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.MalformedURLException;
@@ -24,13 +25,18 @@ import java.net.URL;
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;
+import org.apache.commons.io.FileUtils;
import org.owasp.dependencycheck.Engine;
import org.owasp.dependencycheck.analyzer.exception.AnalysisException;
import org.owasp.dependencycheck.data.nexus.MavenArtifact;
import org.owasp.dependencycheck.data.nexus.NexusSearch;
import org.owasp.dependencycheck.dependency.Confidence;
import org.owasp.dependencycheck.dependency.Dependency;
-import org.owasp.dependencycheck.dependency.Identifier;
+import org.owasp.dependencycheck.dependency.Evidence;
+import org.owasp.dependencycheck.jaxb.pom.PomUtils;
+import org.owasp.dependencycheck.utils.InvalidSettingException;
+import org.owasp.dependencycheck.utils.DownloadFailedException;
+import org.owasp.dependencycheck.utils.Downloader;
import org.owasp.dependencycheck.utils.Settings;
/**
@@ -49,6 +55,11 @@ import org.owasp.dependencycheck.utils.Settings;
*/
public class NexusAnalyzer extends AbstractFileTypeAnalyzer {
+ /**
+ * The default URL - this will be used by the CentralAnalyzer to determine whether to enable this.
+ */
+ public static final String DEFAULT_URL = "https://repository.sonatype.org/service/local/";
+
/**
* The logger.
*/
@@ -74,6 +85,51 @@ public class NexusAnalyzer extends AbstractFileTypeAnalyzer {
*/
private NexusSearch searcher;
+ /**
+ * Field indicating if the analyzer is enabled.
+ */
+ private final boolean enabled = checkEnabled();
+ /**
+ * Field for doing POM work
+ */
+ private final PomUtils pomUtil = new PomUtils();
+
+ /**
+ * Determines if this analyzer is enabled
+ *
+ * @return true if the analyzer is enabled; otherwise false
+ */
+ private boolean checkEnabled() {
+ /* Enable this analyzer ONLY if the Nexus URL has been set to something
+ other than the default one (if it's the default one, we'll use the
+ central one) and it's enabled by the user.
+ */
+ boolean retval = false;
+ try {
+ if ((!DEFAULT_URL.equals(Settings.getString(Settings.KEYS.ANALYZER_NEXUS_URL)))
+ && Settings.getBoolean(Settings.KEYS.ANALYZER_NEXUS_ENABLED)) {
+ LOGGER.info("Enabling Nexus analyzer");
+ retval = true;
+ } else {
+ LOGGER.fine("Nexus analyzer disabled, using Central instead");
+ }
+ } catch (InvalidSettingException ise) {
+ LOGGER.warning("Invalid setting. Disabling Nexus analyzer");
+ }
+
+ return retval;
+ }
+
+ /**
+ * Determine whether to enable this analyzer or not.
+ *
+ * @return whether the analyzer should be enabled
+ */
+ @Override
+ public boolean isEnabled() {
+ return enabled;
+ }
+
/**
* Initializes the analyzer once before any analysis is performed.
*
@@ -150,29 +206,42 @@ public class NexusAnalyzer extends AbstractFileTypeAnalyzer {
*/
@Override
public void analyzeFileType(Dependency dependency, Engine engine) throws AnalysisException {
+ if (!isEnabled()) {
+ return;
+ }
try {
final MavenArtifact ma = searcher.searchSha1(dependency.getSha1sum());
- if (ma.getGroupId() != null && !"".equals(ma.getGroupId())) {
- dependency.getVendorEvidence().addEvidence("nexus", "groupid", ma.getGroupId(), Confidence.HIGH);
- }
- if (ma.getArtifactId() != null && !"".equals(ma.getArtifactId())) {
- dependency.getProductEvidence().addEvidence("nexus", "artifactid", ma.getArtifactId(), Confidence.HIGH);
- }
- if (ma.getVersion() != null && !"".equals(ma.getVersion())) {
- dependency.getVersionEvidence().addEvidence("nexus", "version", ma.getVersion(), Confidence.HIGH);
- }
- if (ma.getArtifactUrl() != null && !"".equals(ma.getArtifactUrl())) {
- boolean found = false;
- for (Identifier i : dependency.getIdentifiers()) {
- if ("maven".equals(i.getType()) && i.getValue().equals(ma.toString())) {
- found = true;
- i.setConfidence(Confidence.HIGHEST);
- i.setUrl(ma.getArtifactUrl());
- break;
- }
+ dependency.addAsEvidence("nexus", ma, Confidence.HIGH);
+ boolean pomAnalyzed = false;
+ LOGGER.fine("POM URL " + ma.getPomUrl());
+ for (Evidence e : dependency.getVendorEvidence()) {
+ if ("pom".equals(e.getSource())) {
+ pomAnalyzed = true;
+ break;
}
- if (!found) {
- dependency.addIdentifier("maven", ma.toString(), ma.getArtifactUrl(), Confidence.HIGHEST);
+ }
+ if (!pomAnalyzed && ma.getPomUrl() != null) {
+ File pomFile = null;
+ try {
+ final File baseDir = Settings.getTempDirectory();
+ pomFile = File.createTempFile("pom", ".xml", baseDir);
+ if (!pomFile.delete()) {
+ final String msg = String.format("Unable to fetch pom.xml for %s from Nexus repository; "
+ + "this could result in undetected CPE/CVEs.", dependency.getFileName());
+ LOGGER.warning(msg);
+ LOGGER.fine("Unable to delete temp file");
+ }
+ LOGGER.fine(String.format("Downloading %s", ma.getPomUrl()));
+ Downloader.fetchFile(new URL(ma.getPomUrl()), pomFile);
+ pomUtil.analyzePOM(dependency, pomFile);
+ } catch (DownloadFailedException ex) {
+ final String msg = String.format("Unable to download pom.xml for %s from Nexus repository; "
+ + "this could result in undetected CPE/CVEs.", dependency.getFileName());
+ LOGGER.warning(msg);
+ } finally {
+ if (pomFile != null && !FileUtils.deleteQuietly(pomFile)) {
+ pomFile.deleteOnExit();
+ }
}
}
} catch (IllegalArgumentException iae) {
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/central/CentralSearch.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/central/CentralSearch.java
new file mode 100644
index 000000000..5699cbeb2
--- /dev/null
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/central/CentralSearch.java
@@ -0,0 +1,161 @@
+/*
+ * This file is part of dependency-check-core.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright (c) 2014 Jeremy Long. All Rights Reserved.
+ */
+package org.owasp.dependencycheck.data.central;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.logging.Logger;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.xpath.XPath;
+import javax.xml.xpath.XPathConstants;
+import javax.xml.xpath.XPathFactory;
+import org.owasp.dependencycheck.data.nexus.MavenArtifact;
+import org.owasp.dependencycheck.utils.Settings;
+import org.owasp.dependencycheck.utils.URLConnectionFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.NodeList;
+
+/**
+ * Class of methods to search Maven Central via Central.
+ *
+ * @author colezlaw
+ */
+public class CentralSearch {
+
+ /**
+ * The URL for the Central service
+ */
+ private final URL rootURL;
+
+ /**
+ * Whether to use the Proxy when making requests
+ */
+ private boolean useProxy;
+
+ /**
+ * Used for logging.
+ */
+ private static final Logger LOGGER = Logger.getLogger(CentralSearch.class.getName());
+
+ /**
+ * Creates a CentralSearch for the given repository URL.
+ *
+ * @param rootURL the URL of the repository on which searches should execute. Only parameters are added to this (so it should
+ * end in /select)
+ */
+ public CentralSearch(URL rootURL) {
+ this.rootURL = rootURL;
+ if (null != Settings.getString(Settings.KEYS.PROXY_SERVER)) {
+ useProxy = true;
+ LOGGER.fine("Using proxy");
+ } else {
+ useProxy = false;
+ LOGGER.fine("Not using proxy");
+ }
+ }
+
+ /**
+ * Searches the configured Central URL for the given SHA-1 hash. If artifacts are found, a list of MavenArtifacts is
+ * populated with the GAV coordinates.
+ *
+ * @param sha1 the SHA-1 hash string for which to search
+ * @return the populated Maven GAV.
+ * @throws IOException if it's unable to connect to the specified repository or if the specified artifact is not found.
+ */
+ public List searchSha1(String sha1) throws IOException {
+ if (null == sha1 || !sha1.matches("^[0-9A-Fa-f]{40}$")) {
+ throw new IllegalArgumentException("Invalid SHA1 format");
+ }
+
+ final URL url = new URL(rootURL + String.format("?q=1:\"%s\"&wt=xml", sha1));
+
+ LOGGER.fine(String.format("Searching Central url %s", url.toString()));
+
+ // Determine if we need to use a proxy. The rules:
+ // 1) If the proxy is set, AND the setting is set to true, use the proxy
+ // 2) Otherwise, don't use the proxy (either the proxy isn't configured,
+ // or proxy is specifically set to false)
+ final HttpURLConnection conn = URLConnectionFactory.createHttpURLConnection(url, useProxy);
+
+ conn.setDoOutput(true);
+
+ // JSON would be more elegant, but there's not currently a dependency
+ // on JSON, so don't want to add one just for this
+ conn.addRequestProperty("Accept", "application/xml");
+ conn.connect();
+
+ if (conn.getResponseCode() == 200) {
+ boolean missing = false;
+ try {
+ final DocumentBuilder builder = DocumentBuilderFactory
+ .newInstance().newDocumentBuilder();
+ final Document doc = builder.parse(conn.getInputStream());
+ final XPath xpath = XPathFactory.newInstance().newXPath();
+ final String numFound = xpath.evaluate("/response/result/@numFound", doc);
+ if ("0".equals(numFound)) {
+ missing = true;
+ } else {
+ final ArrayList result = new ArrayList();
+ final NodeList docs = (NodeList) xpath.evaluate("/response/result/doc", doc, XPathConstants.NODESET);
+ for (int i = 0; i < docs.getLength(); i++) {
+ final String g = xpath.evaluate("./str[@name='g']", docs.item(i));
+ LOGGER.finest(String.format("GroupId: %s", g));
+ final String a = xpath.evaluate("./str[@name='a']", docs.item(i));
+ LOGGER.finest(String.format("ArtifactId: %s", a));
+ final String v = xpath.evaluate("./str[@name='v']", docs.item(i));
+ final NodeList atts = (NodeList) xpath.evaluate("./arr[@name='ec']/str", docs.item(i), XPathConstants.NODESET);
+ boolean pomAvailable = false;
+ boolean jarAvailable = false;
+ for (int x = 0; x < atts.getLength(); x++) {
+ final String tmp = xpath.evaluate(".", atts.item(x));
+ if (".pom".equals(tmp)) {
+ pomAvailable = true;
+ } else if (".jar".equals(tmp)) {
+ jarAvailable = true;
+ }
+ }
+ LOGGER.finest(String.format("Version: %s", v));
+ result.add(new MavenArtifact(g, a, v, jarAvailable, pomAvailable));
+ }
+
+ return result;
+ }
+ } catch (Throwable e) {
+ // Anything else indicates malformed XML that we cannot
+ // reasonably recover from
+ throw new IOException(e.getMessage(), e);
+ }
+
+ if (missing) {
+ throw new FileNotFoundException("Artifact not found in Central");
+ }
+ } else {
+ final String msg = String.format("Could not connect to Central received response code: %d %s",
+ conn.getResponseCode(), conn.getResponseMessage());
+ LOGGER.fine(msg);
+ throw new IOException(msg);
+ }
+
+ return null;
+ }
+}
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/central/package-info.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/central/package-info.java
new file mode 100644
index 000000000..9b51647d6
--- /dev/null
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/central/package-info.java
@@ -0,0 +1,14 @@
+/**
+ *
+ *
+ * org.owasp.dependencycheck.data.central
+ *
+ *
+ *
+ * Contains classes related to searching Maven Central.
+ *
+ * These are used to abstract Maven Central searching away from OWASP Dependency Check so they can be reused elsewhere.
+ *
+ *
+ */
+package org.owasp.dependencycheck.data.central;
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/cpe/CpeMemoryIndex.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/cpe/CpeMemoryIndex.java
index fa35d5eb3..355d82506 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/cpe/CpeMemoryIndex.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/cpe/CpeMemoryIndex.java
@@ -48,12 +48,13 @@ import org.owasp.dependencycheck.data.nvdcve.DatabaseException;
import org.owasp.dependencycheck.utils.Pair;
/**
- * An in memory lucene index that contains the vendor/product combinations from the CPE (application) identifiers within
- * the NVD CVE data.
+ * An in memory lucene index that contains the vendor/product combinations from the CPE (application) identifiers within the NVD
+ * CVE data.
*
* @author Jeremy Long
*/
public final class CpeMemoryIndex {
+
/**
* The logger.
*/
@@ -61,7 +62,7 @@ public final class CpeMemoryIndex {
/**
* singleton instance.
*/
- private static CpeMemoryIndex instance = new CpeMemoryIndex();
+ private static final CpeMemoryIndex INSTANCE = new CpeMemoryIndex();
/**
* private constructor for singleton.
@@ -75,7 +76,7 @@ public final class CpeMemoryIndex {
* @return the instance of the CpeMemoryIndex
*/
public static CpeMemoryIndex getInstance() {
- return instance;
+ return INSTANCE;
}
/**
* The in memory Lucene index.
@@ -113,18 +114,20 @@ public final class CpeMemoryIndex {
* @throws IndexException thrown if there is an error creating the index
*/
public void open(CveDB cve) throws IndexException {
- if (!openState) {
- index = new RAMDirectory();
- buildIndex(cve);
- try {
- indexReader = DirectoryReader.open(index);
- } catch (IOException ex) {
- throw new IndexException(ex);
+ synchronized (INSTANCE) {
+ if (!openState) {
+ index = new RAMDirectory();
+ buildIndex(cve);
+ try {
+ indexReader = DirectoryReader.open(index);
+ } catch (IOException ex) {
+ throw new IndexException(ex);
+ }
+ indexSearcher = new IndexSearcher(indexReader);
+ searchingAnalyzer = createSearchingAnalyzer();
+ queryParser = new QueryParser(LuceneUtils.CURRENT_VERSION, Fields.DOCUMENT_KEY, searchingAnalyzer);
+ openState = true;
}
- indexSearcher = new IndexSearcher(indexReader);
- searchingAnalyzer = createSearchingAnalyzer();
- queryParser = new QueryParser(LuceneUtils.CURRENT_VERSION, Fields.DOCUMENT_KEY, searchingAnalyzer);
- openState = true;
}
}
/**
@@ -160,7 +163,7 @@ public final class CpeMemoryIndex {
*/
@SuppressWarnings("unchecked")
private Analyzer createSearchingAnalyzer() {
- final Map fieldAnalyzers = new HashMap();
+ final Map fieldAnalyzers = new HashMap();
fieldAnalyzers.put(Fields.DOCUMENT_KEY, new KeywordAnalyzer());
productSearchFieldAnalyzer = new SearchFieldAnalyzer(LuceneUtils.CURRENT_VERSION);
vendorSearchFieldAnalyzer = new SearchFieldAnalyzer(LuceneUtils.CURRENT_VERSION);
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/cwe/CweDB.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/cwe/CweDB.java
index ce2410ec5..ca67107f3 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/cwe/CweDB.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/cwe/CweDB.java
@@ -29,10 +29,12 @@ import java.util.logging.Logger;
* @author Jeremy Long
*/
public final class CweDB {
+
/**
* The Logger.
*/
private static final Logger LOGGER = Logger.getLogger(CweDB.class.getName());
+
/**
* Empty private constructor as this is a utility class.
*/
@@ -55,7 +57,9 @@ public final class CweDB {
final String filePath = "data/cwe.hashmap.serialized";
final InputStream input = CweDB.class.getClassLoader().getResourceAsStream(filePath);
oin = new ObjectInputStream(input);
- return (HashMap) oin.readObject();
+ @SuppressWarnings("unchecked")
+ final HashMap ret = (HashMap) oin.readObject();
+ return ret;
} catch (ClassNotFoundException ex) {
LOGGER.log(Level.WARNING, "Unable to load CWE data. This should not be an issue.");
LOGGER.log(Level.FINE, null, ex);
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/AbstractTokenizingFilter.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/AbstractTokenizingFilter.java
index a45b653fe..6d06d74c6 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/AbstractTokenizingFilter.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/AbstractTokenizingFilter.java
@@ -72,7 +72,7 @@ public abstract class AbstractTokenizingFilter extends TokenFilter {
* @return whether or not a new term was added
*/
protected boolean addTerm() {
- final boolean termAdded = tokens.size() > 0;
+ final boolean termAdded = !tokens.isEmpty();
if (termAdded) {
final String term = tokens.pop();
clearAttributes();
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/FieldAnalyzer.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/FieldAnalyzer.java
index e1810e38f..700034c9d 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/FieldAnalyzer.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/FieldAnalyzer.java
@@ -29,8 +29,8 @@ import org.apache.lucene.util.Version;
/**
*
- * A Lucene Analyzer that utilizes the WhitespaceTokenizer, WordDelimiterFilter, LowerCaseFilter, and StopFilter. The
- * intended purpose of this Analyzer is to index the CPE fields vendor and product.
+ * A Lucene Analyzer that utilizes the WhitespaceTokenizer, WordDelimiterFilter, LowerCaseFilter, and StopFilter. The intended
+ * purpose of this Analyzer is to index the CPE fields vendor and product.
*
* @author Jeremy Long
*/
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/LuceneUtils.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/LuceneUtils.java
index 1b59283d8..cbe6f1d32 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/LuceneUtils.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/LuceneUtils.java
@@ -17,6 +17,7 @@
*/
package org.owasp.dependencycheck.data.lucene;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.lucene.util.Version;
/**
@@ -28,10 +29,10 @@ import org.apache.lucene.util.Version;
public final class LuceneUtils {
/**
- * The current version of Lucene being used. Declaring this one place so an upgrade doesn't require hunting through
- * the code base.
+ * The current version of Lucene being used. Declaring this one place so an upgrade doesn't require hunting through the code
+ * base.
*/
- public static final Version CURRENT_VERSION = Version.LUCENE_45;
+ public static final Version CURRENT_VERSION = Version.LUCENE_47;
/**
* Private constructor as this is a utility class.
@@ -46,7 +47,7 @@ public final class LuceneUtils {
* @param text the data to be escaped
*/
@SuppressWarnings("fallthrough")
- @edu.umd.cs.findbugs.annotations.SuppressWarnings(
+ @SuppressFBWarnings(
value = "SF_SWITCH_NO_DEFAULT",
justification = "The switch below does have a default.")
public static void appendEscapedLuceneQuery(StringBuilder buf,
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/SearchFieldAnalyzer.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/SearchFieldAnalyzer.java
index 7b40e08ed..634287f5f 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/SearchFieldAnalyzer.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/SearchFieldAnalyzer.java
@@ -39,8 +39,7 @@ public class SearchFieldAnalyzer extends Analyzer {
*/
private final Version version;
/**
- * A local reference to the TokenPairConcatenatingFilter so that we can clear any left over state if this analyzer
- * is re-used.
+ * A local reference to the TokenPairConcatenatingFilter so that we can clear any left over state if this analyzer is re-used.
*/
private TokenPairConcatenatingFilter concatenatingFilter;
@@ -85,8 +84,7 @@ public class SearchFieldAnalyzer extends Analyzer {
/**
*
- * Resets the analyzer and clears any internal state data that may have been left-over from previous uses of the
- * analyzer.
+ * Resets the analyzer and clears any internal state data that may have been left-over from previous uses of the analyzer.
*
* If this analyzer is re-used this method must be called between uses.
*/
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/SearchVersionAnalyzer.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/SearchVersionAnalyzer.java
deleted file mode 100644
index 995a6d751..000000000
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/SearchVersionAnalyzer.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * This file is part of dependency-check-core.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- * Copyright (c) 2012 Jeremy Long. All Rights Reserved.
- */
-package org.owasp.dependencycheck.data.lucene;
-
-import java.io.Reader;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.core.LowerCaseFilter;
-import org.apache.lucene.analysis.core.WhitespaceTokenizer;
-import org.apache.lucene.util.Version;
-
-/**
- * SearchVersionAnalyzer is a Lucene Analyzer used to analyze version information.
- *
- * @author Jeremy Long
- * @deprecated version information is no longer stored in lucene
- */
-@Deprecated
-public class SearchVersionAnalyzer extends Analyzer {
- //TODO consider implementing payloads/custom attributes...
- // use custom attributes for major, minor, x, x, x, rcx
- // these can then be used to weight the score for searches on the version.
- // see http://lucene.apache.org/core/3_6_1/api/core/org/apache/lucene/analysis/package-summary.html#package_description
- // look at this article to implement
- // http://www.codewrecks.com/blog/index.php/2012/08/25/index-your-blog-using-tags-and-lucene-net/
-
- /**
- * The Lucene Version used.
- */
- private final Version version;
-
- /**
- * Creates a new SearchVersionAnalyzer.
- *
- * @param version the Lucene version
- */
- public SearchVersionAnalyzer(Version version) {
- this.version = version;
- }
-
- /**
- * Creates the TokenStreamComponents
- *
- * @param fieldName the field name being analyzed
- * @param reader the reader containing the input
- * @return the TokenStreamComponents
- */
- @Override
- protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
- final Tokenizer source = new WhitespaceTokenizer(version, reader);
- TokenStream stream = source;
- stream = new LowerCaseFilter(version, stream);
- stream = new VersionTokenizingFilter(stream);
- return new TokenStreamComponents(source, stream);
- }
-}
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/TokenPairConcatenatingFilter.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/TokenPairConcatenatingFilter.java
index 3a5c52a8a..69c9c0769 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/TokenPairConcatenatingFilter.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/TokenPairConcatenatingFilter.java
@@ -92,7 +92,7 @@ public final class TokenPairConcatenatingFilter extends TokenFilter {
//if we have a previousTerm - write it out as its own token concatenated
// with the current word (if one is available).
- if (previousWord != null && words.size() > 0) {
+ if (previousWord != null && !words.isEmpty()) {
final String word = words.getFirst();
clearAttributes();
termAtt.append(previousWord).append(word);
@@ -100,7 +100,7 @@ public final class TokenPairConcatenatingFilter extends TokenFilter {
return true;
}
//if we have words, write it out as a single token
- if (words.size() > 0) {
+ if (!words.isEmpty()) {
final String word = words.removeFirst();
clearAttributes();
termAtt.append(word);
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/UrlTokenizingFilter.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/UrlTokenizingFilter.java
index e5f47221a..a02253123 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/UrlTokenizingFilter.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/UrlTokenizingFilter.java
@@ -60,7 +60,7 @@ public final class UrlTokenizingFilter extends AbstractTokenizingFilter {
public boolean incrementToken() throws IOException {
final LinkedList tokens = getTokens();
final CharTermAttribute termAtt = getTermAtt();
- if (tokens.size() == 0 && input.incrementToken()) {
+ if (tokens.isEmpty() && input.incrementToken()) {
final String text = new String(termAtt.buffer(), 0, termAtt.length());
if (UrlStringUtils.containsUrl(text)) {
final String[] parts = text.split("\\s");
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/VersionAnalyzer.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/VersionAnalyzer.java
deleted file mode 100644
index b5510f480..000000000
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/VersionAnalyzer.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * This file is part of dependency-check-core.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- * Copyright (c) 2012 Jeremy Long. All Rights Reserved.
- */
-package org.owasp.dependencycheck.data.lucene;
-
-import java.io.Reader;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.core.LowerCaseFilter;
-import org.apache.lucene.analysis.core.WhitespaceTokenizer;
-import org.apache.lucene.util.Version;
-
-/**
- * VersionAnalyzer is a Lucene Analyzer used to analyze version information.
- *
- * @author Jeremy Long
- * @deprecated version information is no longer stored in lucene
- */
-@Deprecated
-public class VersionAnalyzer extends Analyzer {
- //TODO consider implementing payloads/custom attributes...
- // use custom attributes for major, minor, x, x, x, rcx
- // these can then be used to weight the score for searches on the version.
- // see http://lucene.apache.org/core/3_6_1/api/core/org/apache/lucene/analysis/package-summary.html#package_description
- // look at this article to implement
- // http://www.codewrecks.com/blog/index.php/2012/08/25/index-your-blog-using-tags-and-lucene-net/
-
- /**
- * The Lucene Version used.
- */
- private final Version version;
-
- /**
- * Creates a new VersionAnalyzer.
- *
- * @param version the Lucene version
- */
- public VersionAnalyzer(Version version) {
- this.version = version;
- }
-
- /**
- * Creates the TokenStreamComponents
- *
- * @param fieldName the field name being analyzed
- * @param reader the reader containing the input
- * @return the TokenStreamComponents
- */
- @Override
- protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
- final Tokenizer source = new WhitespaceTokenizer(version, reader);
- TokenStream stream = source;
- stream = new LowerCaseFilter(version, stream);
- return new TokenStreamComponents(source, stream);
- }
-}
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/VersionTokenizingFilter.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/VersionTokenizingFilter.java
deleted file mode 100644
index 3b4d8bf48..000000000
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/lucene/VersionTokenizingFilter.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * This file is part of dependency-check-core.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- * Copyright (c) 2012 Jeremy Long. All Rights Reserved.
- */
-package org.owasp.dependencycheck.data.lucene;
-
-import java.io.IOException;
-import java.util.LinkedList;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-
-/**
- *
- * Takes a TokenStream and splits or adds tokens to correctly index version numbers.
- *
- * @author Jeremy Long
- * @deprecated version information is no longer stored in lucene
- */
-@Deprecated
-public final class VersionTokenizingFilter extends AbstractTokenizingFilter {
-
- /**
- * Constructs a new VersionTokenizingFilter.
- *
- * @param stream the TokenStream that this filter will process
- */
- public VersionTokenizingFilter(TokenStream stream) {
- super(stream);
- }
-
- /**
- * Increments the underlying TokenStream and sets CharTermAttributes to construct an expanded set of tokens by
- * concatenating tokens with the previous token.
- *
- * @return whether or not we have hit the end of the TokenStream
- * @throws IOException is thrown when an IOException occurs
- */
- @Override
- public boolean incrementToken() throws IOException {
- final LinkedList tokens = getTokens();
- final CharTermAttribute termAtt = getTermAtt();
- if (tokens.size() == 0 && input.incrementToken()) {
- final String version = new String(termAtt.buffer(), 0, termAtt.length());
- final String[] toAnalyze = version.split("[_-]");
- //ensure we analyze the whole string as one too
- analyzeVersion(version);
- for (String str : toAnalyze) {
- analyzeVersion(str);
- }
- }
- return addTerm();
- }
-
- /**
- *
- * Analyzes the version and adds several copies of the version as different tokens. For example, the version 1.2.7
- * would create the tokens 1 1.2 1.2.7. This is useful in discovering the correct version - sometimes a maintenance
- * or build number will throw off the version identification.
- *
- * @param version the version to analyze
- */
- private void analyzeVersion(String version) {
- //todo should we also be splitting on dash or underscore? we would need
- // to incorporate the dash or underscore back in...
- final LinkedList tokens = getTokens();
- final String[] versionParts = version.split("\\.");
- String dottedVersion = null;
- for (String current : versionParts) {
- if (!current.matches("^/d+$")) {
- tokens.add(current);
- }
- if (dottedVersion == null) {
- dottedVersion = current;
- } else {
- dottedVersion = dottedVersion + "." + current;
- }
- tokens.add(dottedVersion);
- }
- }
-}
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/nexus/MavenArtifact.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/nexus/MavenArtifact.java
index 559f4f8bf..ad020c1f3 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/nexus/MavenArtifact.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/nexus/MavenArtifact.java
@@ -24,6 +24,11 @@ package org.owasp.dependencycheck.data.nexus;
*/
public class MavenArtifact {
+ /**
+ * The base URL for downloading artifacts from Central.
+ */
+ private static final String CENTRAL_CONTENT_URL = "http://search.maven.org/remotecontent?filepath=";
+
/**
* The groupId
*/
@@ -43,6 +48,10 @@ public class MavenArtifact {
* The artifact url. This may change depending on which Nexus server the search took place.
*/
private String artifactUrl;
+ /**
+ * The url to download the POM from.
+ */
+ private String pomUrl;
/**
* Creates an empty MavenArtifact.
@@ -58,9 +67,34 @@ public class MavenArtifact {
* @param version the version
*/
public MavenArtifact(String groupId, String artifactId, String version) {
- setGroupId(groupId);
- setArtifactId(artifactId);
- setVersion(version);
+ this.groupId = groupId;
+ this.artifactId = artifactId;
+ this.version = version;
+ }
+
+ /**
+ * Creates a MavenArtifact with the given attributes.
+ *
+ * @param groupId the groupId
+ * @param artifactId the artifactId
+ * @param version the version
+ * @param jarAvailable if the jar file is available from central
+ * @param pomAvailable if the pom file is available from central
+ */
+ public MavenArtifact(String groupId, String artifactId, String version, boolean jarAvailable, boolean pomAvailable) {
+ this.groupId = groupId;
+ this.artifactId = artifactId;
+ this.version = version;
+ if (jarAvailable) {
+ //org/springframework/spring-core/3.2.0.RELEASE/spring-core-3.2.0.RELEASE.pom
+ this.artifactUrl = this.CENTRAL_CONTENT_URL + groupId.replace('.', '/') + "/" + artifactId.replace('.', '/') + "/"
+ + version + "/" + artifactId + "-" + version + ".jar";
+ }
+ if (pomAvailable) {
+ //org/springframework/spring-core/3.2.0.RELEASE/spring-core-3.2.0.RELEASE.pom
+ this.pomUrl = this.CENTRAL_CONTENT_URL + groupId.replace('.', '/') + "/" + artifactId.replace('.', '/') + "/"
+ + version + "/" + artifactId + "-" + version + ".pom";
+ }
}
/**
@@ -72,10 +106,10 @@ public class MavenArtifact {
* @param url the artifactLink url
*/
public MavenArtifact(String groupId, String artifactId, String version, String url) {
- setGroupId(groupId);
- setArtifactId(artifactId);
- setVersion(version);
- setArtifactUrl(url);
+ this.groupId = groupId;
+ this.artifactId = artifactId;
+ this.version = version;
+ this.artifactUrl = url;
}
/**
@@ -159,6 +193,25 @@ public class MavenArtifact {
public String getArtifactUrl() {
return artifactUrl;
}
+
+ /**
+ * Get the value of pomUrl.
+ *
+ * @return the value of pomUrl
+ */
+ public String getPomUrl() {
+ return pomUrl;
+ }
+
+ /**
+ * Set the value of pomUrl.
+ *
+ * @param pomUrl new value of pomUrl
+ */
+ public void setPomUrl(String pomUrl) {
+ this.pomUrl = pomUrl;
+ }
+
}
// vim: cc=120:sw=4:ts=4:sts=4
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/nexus/NexusSearch.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/nexus/NexusSearch.java
index ec406a916..a1a66ab25 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/nexus/NexusSearch.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/nexus/NexusSearch.java
@@ -40,26 +40,32 @@ import org.w3c.dom.Document;
public class NexusSearch {
/**
- * The root URL for the Nexus repository service
+ * The root URL for the Nexus repository service.
*/
private final URL rootURL;
/**
- * Whether to use the Proxy when making requests
+ * Whether to use the Proxy when making requests.
*/
private boolean useProxy;
-
+ /**
+ * The username to use if the Nexus requires authentication.
+ */
+ private String userName = null;
+ /**
+ * The password to use if the Nexus requires authentication.
+ */
+ private char[] password;
/**
* Used for logging.
*/
- private static final Logger LOGGER = Logger.getLogger(NexusSearch.class
- .getName());
+ private static final Logger LOGGER = Logger.getLogger(NexusSearch.class.getName());
/**
* Creates a NexusSearch for the given repository URL.
*
- * @param rootURL the root URL of the repository on which searches should execute. full URL's are calculated
- * relative to this URL, so it should end with a /
+ * @param rootURL the root URL of the repository on which searches should execute. Full URLs are calculated relative to this
+ * URL, so it should end with a /
*/
public NexusSearch(URL rootURL) {
this.rootURL = rootURL;
@@ -78,13 +84,12 @@ public class NexusSearch {
}
/**
- * Searches the configured Nexus repository for the given sha1 hash. If the artifact is found, a
- * MavenArtifact is populated with the coordinate information.
+ * Searches the configured Nexus repository for the given sha1 hash. If the artifact is found, a MavenArtifact is
+ * populated with the coordinate information.
*
* @param sha1 The SHA-1 hash string for which to search
* @return the populated Maven coordinates
- * @throws IOException if it's unable to connect to the specified repository or if the specified artifact is not
- * found.
+ * @throws IOException if it's unable to connect to the specified repository or if the specified artifact is not found.
*/
public MavenArtifact searchSha1(String sha1) throws IOException {
if (null == sha1 || !sha1.matches("^[0-9A-Fa-f]{40}$")) {
@@ -99,10 +104,9 @@ public class NexusSearch {
// Determine if we need to use a proxy. The rules:
// 1) If the proxy is set, AND the setting is set to true, use the proxy
// 2) Otherwise, don't use the proxy (either the proxy isn't configured,
- // or proxy is specifically
- // set to false
- final HttpURLConnection conn = URLConnectionFactory.createHttpURLConnection(url, useProxy);
-
+ // or proxy is specifically set to false
+ HttpURLConnection conn;
+ conn = URLConnectionFactory.createHttpURLConnection(url, useProxy);
conn.setDoOutput(true);
// JSON would be more elegant, but there's not currently a dependency
@@ -131,7 +135,18 @@ public class NexusSearch {
.evaluate(
"/org.sonatype.nexus.rest.model.NexusArtifact/artifactLink",
doc);
- return new MavenArtifact(groupId, artifactId, version, link);
+ final String pomLink = xpath
+ .evaluate(
+ "/org.sonatype.nexus.rest.model.NexusArtifact/pomLink",
+ doc);
+ final MavenArtifact ma = new MavenArtifact(groupId, artifactId, version);
+ if (link != null && !"".equals(link)) {
+ ma.setArtifactUrl(link);
+ }
+ if (pomLink != null && !"".equals(pomLink)) {
+ ma.setPomUrl(pomLink);
+ }
+ return ma;
} catch (Throwable e) {
// Anything else is jacked-up XML stuff that we really can't recover
// from well
@@ -153,8 +168,10 @@ public class NexusSearch {
* @return whether the repository is listening and returns the /status URL correctly
*/
public boolean preflightRequest() {
+ HttpURLConnection conn;
try {
- final HttpURLConnection conn = URLConnectionFactory.createHttpURLConnection(new URL(rootURL, "status"), useProxy);
+ URL url = new URL(rootURL, "status");
+ conn = URLConnectionFactory.createHttpURLConnection(url, useProxy);
conn.addRequestProperty("Accept", "application/xml");
conn.connect();
if (conn.getResponseCode() != 200) {
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/nvdcve/CveDB.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/nvdcve/CveDB.java
index da6b2ae58..44998262f 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/nvdcve/CveDB.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/nvdcve/CveDB.java
@@ -17,6 +17,7 @@
*/
package org.owasp.dependencycheck.data.nvdcve;
+import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.sql.Connection;
import java.sql.PreparedStatement;
@@ -24,8 +25,10 @@ import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
+import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
@@ -39,6 +42,7 @@ import org.owasp.dependencycheck.utils.DBUtils;
import org.owasp.dependencycheck.utils.DependencyVersion;
import org.owasp.dependencycheck.utils.DependencyVersionUtil;
import org.owasp.dependencycheck.utils.Pair;
+import org.owasp.dependencycheck.utils.Settings;
/**
* The database holding information about the NVD CVE data.
@@ -57,8 +61,8 @@ public class CveDB {
private Connection conn;
/**
- * Creates a new CveDB object and opens the database connection. Note, the connection must be closed by the caller
- * by calling the close method.
+ * Creates a new CveDB object and opens the database connection. Note, the connection must be closed by the caller by calling
+ * the close method.
*
* @throws DatabaseException thrown if there is an exception opening the database.
*/
@@ -87,7 +91,9 @@ public class CveDB {
* @throws DatabaseException thrown if there is an error opening the database connection
*/
public final void open() throws DatabaseException {
- conn = ConnectionFactory.getConnection();
+ if (!isOpen()) {
+ conn = ConnectionFactory.getConnection();
+ }
}
/**
@@ -170,8 +176,8 @@ public class CveDB {
*/
private static final String DELETE_VULNERABILITY = "DELETE FROM vulnerability WHERE id = ?";
/**
- * SQL Statement to cleanup orphan entries. Yes, the db schema could be a little tighter, but what we have works
- * well to keep the data file size down a bit.
+ * SQL Statement to cleanup orphan entries. Yes, the db schema could be a little tighter, but what we have works well to keep
+ * the data file size down a bit.
*/
private static final String CLEANUP_ORPHANS = "DELETE FROM CpeEntry WHERE id not in (SELECT CPEEntryId FROM Software); ";
/**
@@ -208,7 +214,8 @@ public class CveDB {
private static final String SELECT_CVE_FROM_SOFTWARE = "SELECT cve, cpe, previousVersion "
+ "FROM software INNER JOIN vulnerability ON vulnerability.id = software.cveId "
+ "INNER JOIN cpeEntry ON cpeEntry.id = software.cpeEntryId "
- + "WHERE vendor = ? AND product = ?";
+ + "WHERE vendor = ? AND product = ? "
+ + "ORDER BY cve, cpe"; //, previousVersion
//unfortunately, the version info is too complicated to do in a select. Need to filter this afterwards
// + " AND (version = '-' OR previousVersion IS NOT NULL OR version=?)";
//
@@ -266,8 +273,8 @@ public class CveDB {
//
/**
- * Searches the CPE entries in the database and retrieves all entries for a given vendor and product combination.
- * The returned list will include all versions of the product that are registered in the NVD CVE data.
+ * Searches the CPE entries in the database and retrieves all entries for a given vendor and product combination. The returned
+ * list will include all versions of the product that are registered in the NVD CVE data.
*
* @param vendor the identified vendor name of the dependency being analyzed
* @param product the identified name of the product of the dependency being analyzed
@@ -306,14 +313,14 @@ public class CveDB {
* @throws DatabaseException thrown when there is an error retrieving the data from the DB
*/
public Set> getVendorProductList() throws DatabaseException {
- final HashSet data = new HashSet>();
+ final Set> data = new HashSet>();
ResultSet rs = null;
PreparedStatement ps = null;
try {
ps = getConnection().prepareStatement(SELECT_VENDOR_PRODUCT_LIST);
rs = ps.executeQuery();
while (rs.next()) {
- data.add(new Pair(rs.getString(1), rs.getString(2)));
+ data.add(new Pair(rs.getString(1), rs.getString(2)));
}
} catch (SQLException ex) {
final String msg = "An unexpected SQL Exception occurred; please see the verbose log for more details.";
@@ -452,30 +459,41 @@ public class CveDB {
final List vulnerabilities = new ArrayList();
PreparedStatement ps;
- final HashSet cveEntries = new HashSet();
try {
ps = getConnection().prepareStatement(SELECT_CVE_FROM_SOFTWARE);
ps.setString(1, cpe.getVendor());
ps.setString(2, cpe.getProduct());
rs = ps.executeQuery();
+ String currentCVE = "";
+
+ final Map vulnSoftware = new HashMap();
while (rs.next()) {
final String cveId = rs.getString(1);
+ if (!currentCVE.equals(cveId)) { //check for match and add
+ final Entry matchedCPE = getMatchingSoftware(vulnSoftware, cpe.getVendor(), cpe.getProduct(), detectedVersion);
+ if (matchedCPE != null) {
+ final Vulnerability v = getVulnerability(currentCVE);
+ v.setMatchedCPE(matchedCPE.getKey(), matchedCPE.getValue() ? "Y" : null);
+ vulnerabilities.add(v);
+ }
+ vulnSoftware.clear();
+ currentCVE = cveId;
+ }
+
final String cpeId = rs.getString(2);
final String previous = rs.getString(3);
- if (!cveEntries.contains(cveId) && isAffected(cpe.getVendor(), cpe.getProduct(), detectedVersion, cpeId, previous)) {
- cveEntries.add(cveId);
- final Vulnerability v = getVulnerability(cveId);
- v.setMatchedCPE(cpeId, previous);
- vulnerabilities.add(v);
- }
+                final boolean p = previous != null && !previous.isEmpty();
+ vulnSoftware.put(cpeId, p);
+ }
+ //remember to process the last set of CVE/CPE entries
+ final Entry matchedCPE = getMatchingSoftware(vulnSoftware, cpe.getVendor(), cpe.getProduct(), detectedVersion);
+ if (matchedCPE != null) {
+ final Vulnerability v = getVulnerability(currentCVE);
+ v.setMatchedCPE(matchedCPE.getKey(), matchedCPE.getValue() ? "Y" : null);
+ vulnerabilities.add(v);
}
DBUtils.closeResultSet(rs);
DBUtils.closeStatement(ps);
-// for (String cve : cveEntries) {
-// final Vulnerability v = getVulnerability(cve);
-// vulnerabilities.add(v);
-// }
-
} catch (SQLException ex) {
throw new DatabaseException("Exception retrieving vulnerability for " + cpeStr, ex);
} finally {
@@ -701,8 +719,45 @@ public class CveDB {
}
/**
- * It is possible that orphaned rows may be generated during database updates. This should be called after all
- * updates have been completed to ensure orphan entries are removed.
+ * Checks to see if data exists so that analysis can be performed.
+ *
+ * @return true if data exists; otherwise false
+ */
+ public boolean dataExists() {
+ Statement cs = null;
+ ResultSet rs = null;
+ try {
+ cs = conn.createStatement();
+ rs = cs.executeQuery("SELECT COUNT(*) records FROM cpeEntry");
+ if (rs.next()) {
+ if (rs.getInt(1) > 0) {
+ return true;
+ }
+ }
+ } catch (SQLException ex) {
+ String dd;
+ try {
+ dd = Settings.getDataDirectory().getAbsolutePath();
+ } catch (IOException ex1) {
+ dd = Settings.getString(Settings.KEYS.DATA_DIRECTORY);
+ }
+ final String msg = String.format("Unable to access the local database.%n%nEnsure that '%s' is a writable directory. "
+                    + "If the problem persists, try deleting the files in '%s' and running %s again. If the problem continues, please "
+ + "create a log file (see documentation at http://jeremylong.github.io/DependencyCheck/) and open a ticket at "
+ + "https://github.com/jeremylong/DependencyCheck/issues and include the log file.%n%n",
+                    dd, dd, Settings.getString(Settings.KEYS.APPLICATION_NAME));
+ LOGGER.log(Level.SEVERE, msg);
+ LOGGER.log(Level.FINE, "", ex);
+ } finally {
+ DBUtils.closeResultSet(rs);
+ DBUtils.closeStatement(cs);
+ }
+ return false;
+ }
+
+ /**
+ * It is possible that orphaned rows may be generated during database updates. This should be called after all updates have
+ * been completed to ensure orphan entries are removed.
*/
public void cleanupDatabase() {
PreparedStatement ps = null;
@@ -721,46 +776,80 @@ public class CveDB {
}
/**
- * Determines if the given identifiedVersion is affected by the given cpeId and previous version flag. A non-null,
- * non-empty string passed to the previous version argument indicates that all previous versions are affected.
+ * Determines if the given identifiedVersion is affected by the given cpeId and previous version flag. A non-null, non-empty
+ * string passed to the previous version argument indicates that all previous versions are affected.
*
* @param vendor the vendor of the dependency being analyzed
* @param product the product name of the dependency being analyzed
+ * @param vulnerableSoftware a map of the vulnerable software with a boolean indicating if all previous versions are affected
* @param identifiedVersion the identified version of the dependency being analyzed
- * @param cpeId the cpe identifier of software that has a known vulnerability
- * @param previous a flag indicating if previous versions of the product are vulnerable
* @return true if the identified version is affected, otherwise false
*/
- private boolean isAffected(String vendor, String product, DependencyVersion identifiedVersion, String cpeId, String previous) {
- boolean affected = false;
- final boolean isStruts = "apache".equals(vendor) && "struts".equals(product);
- final DependencyVersion v = parseDependencyVersion(cpeId);
- final boolean prevAffected = previous != null && !previous.isEmpty();
- if (v == null || "-".equals(v.toString())) { //all versions
- affected = true;
- } else if (identifiedVersion == null || "-".equals(identifiedVersion.toString())) {
- if (prevAffected) {
- affected = true;
+ Entry getMatchingSoftware(Map vulnerableSoftware, String vendor, String product,
+ DependencyVersion identifiedVersion) {
+
+ final boolean isVersionTwoADifferentProduct = "apache".equals(vendor) && "struts".equals(product);
+
+ final Set majorVersionsAffectingAllPrevious = new HashSet();
+ final boolean matchesAnyPrevious = identifiedVersion == null || "-".equals(identifiedVersion.toString());
+ String majorVersionMatch = null;
+ for (Entry entry : vulnerableSoftware.entrySet()) {
+ final DependencyVersion v = parseDependencyVersion(entry.getKey());
+ if (v == null || "-".equals(v.toString())) { //all versions
+ return entry;
}
- } else if (identifiedVersion.equals(v) || (prevAffected && identifiedVersion.compareTo(v) < 0)) {
- if (isStruts) { //struts 2 vulns don't affect struts 1
- if (identifiedVersion.getVersionParts().get(0).equals(v.getVersionParts().get(0))) {
- affected = true;
+ if (entry.getValue()) {
+ if (matchesAnyPrevious) {
+ return entry;
}
- } else {
- affected = true;
+ if (identifiedVersion != null && identifiedVersion.getVersionParts().get(0).equals(v.getVersionParts().get(0))) {
+ majorVersionMatch = v.getVersionParts().get(0);
+ }
+ majorVersionsAffectingAllPrevious.add(v.getVersionParts().get(0));
}
}
- /*
- * TODO consider utilizing the matchThreeVersion method to get additional results. However, this
- * might also introduce false positives.
- */
- return affected;
+ if (matchesAnyPrevious) {
+ return null;
+ }
+
+ final boolean canSkipVersions = majorVersionMatch != null && majorVersionsAffectingAllPrevious.size() > 1;
+        //yes, we are iterating over this twice. The first pass skips the entries that affect all previous versions;
+        //the second pass then processes those entries that affect all previous versions. This could be done with sorting...
+ for (Entry entry : vulnerableSoftware.entrySet()) {
+ if (!entry.getValue()) {
+ final DependencyVersion v = parseDependencyVersion(entry.getKey());
+ //this can't dereference a null 'majorVersionMatch' as canSkipVersions accounts for this.
+ if (canSkipVersions && !majorVersionMatch.equals(v.getVersionParts().get(0))) {
+ continue;
+ }
+ //this can't dereference a null 'identifiedVersion' because if it was null we would have exited
+ //in the above loop or just after loop (if matchesAnyPrevious return null).
+ if (identifiedVersion.equals(v)) {
+ return entry;
+ }
+ }
+ }
+ for (Entry entry : vulnerableSoftware.entrySet()) {
+ if (entry.getValue()) {
+ final DependencyVersion v = parseDependencyVersion(entry.getKey());
+ //this can't dereference a null 'majorVersionMatch' as canSkipVersions accounts for this.
+ if (canSkipVersions && !majorVersionMatch.equals(v.getVersionParts().get(0))) {
+ continue;
+ }
+ //this can't dereference a null 'identifiedVersion' because if it was null we would have exited
+ //in the above loop or just after loop (if matchesAnyPrevious return null).
+                if (identifiedVersion.compareTo(v) <= 0) { //entry.getValue() is already guaranteed true by the enclosing if
+ if (!(isVersionTwoADifferentProduct && !identifiedVersion.getVersionParts().get(0).equals(v.getVersionParts().get(0)))) {
+ return entry;
+ }
+ }
+ }
+ }
+ return null;
}
/**
- * Parses the version (including revision) from a CPE identifier. If no version is identified then a '-' is
- * returned.
+ * Parses the version (including revision) from a CPE identifier. If no version is identified then a '-' is returned.
*
* @param cpeStr a cpe identifier
* @return a dependency version
@@ -784,9 +873,9 @@ public class CveDB {
*/
private DependencyVersion parseDependencyVersion(VulnerableSoftware cpe) {
DependencyVersion cpeVersion;
- if (cpe.getVersion() != null && cpe.getVersion().length() > 0) {
+ if (cpe.getVersion() != null && !cpe.getVersion().isEmpty()) {
String versionText;
- if (cpe.getRevision() != null && cpe.getRevision().length() > 0) {
+ if (cpe.getRevision() != null && !cpe.getRevision().isEmpty()) {
versionText = String.format("%s.%s", cpe.getVersion(), cpe.getRevision());
} else {
versionText = cpe.getVersion();
diff --git a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/nvdcve/DatabaseProperties.java b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/nvdcve/DatabaseProperties.java
index cadcb2ae6..8b90dd0fa 100644
--- a/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/nvdcve/DatabaseProperties.java
+++ b/dependency-check-core/src/main/java/org/owasp/dependencycheck/data/nvdcve/DatabaseProperties.java
@@ -91,7 +91,7 @@ public class DatabaseProperties {
}
/**
- * Writes a properties file containing the last updated date to the VULNERABLE_CPE directory.
+ * Saves the last updated information to the properties file.
*
* @param updatedValue the updated NVD CVE entry
* @throws UpdateException is thrown if there is an update exception
@@ -100,8 +100,19 @@ public class DatabaseProperties {
if (updatedValue == null) {
return;
}
- properties.put(LAST_UPDATED_BASE + updatedValue.getId(), String.valueOf(updatedValue.getTimestamp()));
- cveDB.saveProperty(LAST_UPDATED_BASE + updatedValue.getId(), String.valueOf(updatedValue.getTimestamp()));
+ save(LAST_UPDATED_BASE + updatedValue.getId(), String.valueOf(updatedValue.getTimestamp()));
+ }
+
+ /**
+ * Saves the key value pair to the properties store.
+ *
+ * @param key the property key
+ * @param value the property value
+ * @throws UpdateException is thrown if there is an update exception
+ */
+ public void save(String key, String value) throws UpdateException {
+ properties.put(key, value);
+ cveDB.saveProperty(key, value);
}
/**
@@ -142,8 +153,8 @@ public class DatabaseProperties {
*
* @return a map of the database meta data
*/
- public Map getMetaData() {
- final TreeMap map = new TreeMap();
+ public Map getMetaData() {
+ final Map map = new TreeMap();
for (Entry