[rhq] Branch 'jsanda/drift' - 2 commits - modules/core
by Jay Shaughnessy
modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftDetector.java | 246 ++++----
modules/core/plugin-container/src/test/java/org/rhq/core/pc/drift/DriftDetectorTest.java | 286 ++++++----
2 files changed, 312 insertions(+), 220 deletions(-)
New commits:
commit e60b0694356357d6c73b04128606afe6d75e3041
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Wed Dec 14 16:00:07 2011 -0500
[Bug 756100 - RFE: use timestamp and file size during drift detection scans]
Incorporated review feedback.
- consolidated removed and cantRead entry logic
- fixed up some comments
- clear collections after they are used to possibly help out gc
Decided to still keep around the "bucket" collections and assemble the
snapshot and delta entry lists using the buckets. It just seemed more
clear to me.
diff --git a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftDetector.java b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftDetector.java
index 869e376..76d7afb 100644
--- a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftDetector.java
+++ b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftDetector.java
@@ -16,7 +16,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-
package org.rhq.core.pc.drift;
import static org.rhq.common.drift.FileEntry.addedFileEntry;
@@ -30,6 +29,7 @@ import static org.rhq.core.util.file.FileUtil.forEachFile;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
+import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
@@ -49,6 +49,11 @@ import org.rhq.core.domain.drift.Filter;
import org.rhq.core.util.MessageDigestGenerator;
import org.rhq.core.util.file.FileVisitor;
+/**
+ * Mechanism to detect and report Drift for active Drift Definitions.
+ *
+ * @author John Sanda
+ */
public class DriftDetector implements Runnable {
private Log log = LogFactory.getLog(DriftDetector.class);
@@ -220,119 +225,142 @@ public class DriftDetector implements Runnable {
}
final List<FileEntry> unchangedEntries = new LinkedList<FileEntry>();
- final List<FileEntry> removedEntries = new LinkedList<FileEntry>();
- final List<FileEntry> cantReadEntries = new LinkedList<FileEntry>();
final List<FileEntry> changedEntries = new LinkedList<FileEntry>();
+ final List<FileEntry> removedEntries = new LinkedList<FileEntry>();
+ final List<FileEntry> addedEntries = new LinkedList<FileEntry>();
+ // for pinned snapshots, keep track of the original FileEntry objects for changed entries. These
+ // are used if we re-write the pinned snapshot file.
final List<FileEntry> changedPinnedEntries = isPinned ? new LinkedList<FileEntry>() : null;
- ChangeSetReader snapshotReader = null;
- int newVersion;
- boolean updateSnapshot = false;
try {
- snapshotReader = changeSetMgr.getChangeSetReader(snapshotFile);
-
- if (!basedir.exists()) {
- log.warn("The base directory [" + basedir.getAbsolutePath() + "] for " + schedule + " does not exist.");
- }
+ ChangeSetReader snapshotReader = null;
+ int newVersion;
+ boolean updateSnapshot = false;
+ try {
+ snapshotReader = changeSetMgr.getChangeSetReader(snapshotFile);
- if (isPinned) {
- // If pinned we compare against the pinned snapshot but we need to know the current snapshot version,
- // get it from the current full snapshot.
- ChangeSetReader currentFullSnapshotReader = null;
- try {
- currentFullSnapshotReader = changeSetMgr.getChangeSetReader(currentFullSnapshot);
- newVersion = currentFullSnapshotReader.getHeaders().getVersion() + 1;
- } finally {
- currentFullSnapshotReader.close();
+ if (!basedir.exists()) {
+ log.warn("The base directory [" + basedir.getAbsolutePath() + "] for " + schedule
+ + " does not exist.");
}
- } else {
- newVersion = snapshotReader.getHeaders().getVersion() + 1;
- }
-
- // First look for files that have either been modified or deleted
- updateSnapshot = scanSnapshotFiles(schedule, basedir, snapshotReader, newFiles, unchangedEntries,
- removedEntries, cantReadEntries, changedEntries, changedPinnedEntries);
- } finally {
- snapshotReader.close();
- }
+ if (isPinned) {
+ // If pinned we compare against the pinned snapshot but we need to know the current snapshot version,
+ // get it from the current full snapshot.
+ ChangeSetReader currentFullSnapshotReader = null;
+ try {
+ currentFullSnapshotReader = changeSetMgr.getChangeSetReader(currentFullSnapshot);
+ newVersion = currentFullSnapshotReader.getHeaders().getVersion() + 1;
+ } finally {
+ currentFullSnapshotReader.close();
+ }
+ } else {
+ newVersion = snapshotReader.getHeaders().getVersion() + 1;
+ }
- // if necessary, re-write the pinned snapshot file because we've updated timestamp/filesize info, which
- // on subsequent detection runs will help us avoid SHA generation. It must maintain the same entries.
- if (isPinned && updateSnapshot) {
- changedPinnedEntries.addAll(unchangedEntries);
+ // First look for files that have either been changed or removed
+ updateSnapshot = scanSnapshot(schedule, basedir, snapshotReader, newFiles, unchangedEntries,
+ changedEntries, removedEntries, changedPinnedEntries);
- backupAndDeleteCurrentSnapshot(snapshotFile);
- updatePinnedSnapshot(schedule, snapshotFile, changedPinnedEntries);
- }
+ } finally {
+ snapshotReader.close();
+ }
- final List<FileEntry> snapshotEntries = new LinkedList<FileEntry>(unchangedEntries);
- snapshotEntries.addAll(changedEntries);
+ // if necessary, re-write the pinned snapshot file because we've updated timestamp/filesize info, which
+ // on subsequent detection runs will help us avoid SHA generation. It must maintain the same entries.
+ if (isPinned && updateSnapshot) {
+ changedPinnedEntries.addAll(unchangedEntries);
- final List<FileEntry> deltaEntries = new LinkedList<FileEntry>(changedEntries);
- deltaEntries.addAll(removedEntries);
- deltaEntries.addAll(cantReadEntries);
+ backupAndDeleteCurrentSnapshot(snapshotFile);
+ updatePinnedSnapshot(schedule, snapshotFile, changedPinnedEntries);
+ }
- for (File file : newFiles) {
- try {
- if (log.isInfoEnabled()) {
- log.info("Detected added file for " + schedule + " --> " + file.getAbsolutePath());
- }
+ // add new files to the snapshotEntries and deltaEntries
+ for (File file : newFiles) {
+ try {
+ if (log.isInfoEnabled()) {
+ log.info("Detected added file for " + schedule + " --> " + file.getAbsolutePath());
+ }
- FileEntry newEntry = addedFileEntry(relativePath(basedir, file), sha256(file), file.lastModified(),
- file.length());
+ FileEntry newEntry = addedFileEntry(relativePath(basedir, file), sha256(file), file.lastModified(),
+ file.length());
- deltaEntries.add(newEntry);
- snapshotEntries.add(newEntry);
+ addedEntries.add(newEntry);
- } catch (IOException e) {
- log.error("An error occurred while generating a drift change set for " + schedule + ": "
- + e.getMessage());
- throw new DriftDetectionException("An error occurred while generating a drift change set", e);
+ } catch (IOException e) {
+ log.error("An error occurred while generating a drift change set for " + schedule + ": "
+ + e.getMessage());
+ throw new DriftDetectionException("An error occurred while generating a drift change set", e);
+ }
}
- }
-
- if (deltaEntries.isEmpty()) {
- File newSnapshot = currentFullSnapshot;
- if (!isPinned) {
- // If unpinned and there is no detected drift then we generally don't need to add/update any files.
- // But, if we have timestamp/filesize updates then we want to replace the current snapshot with
- // the updated entries, so we can avoid SHA generation on subsequent runs.
- if (updateSnapshot) {
- currentFullSnapshot.delete();
- newSnapshot = updateCurrentSnapshot(schedule, snapshotEntries, newVersion - 1);
+ // The new snapshot contains all changed, unchanged and added files. Not removed files.
+ final List<FileEntry> snapshotEntries = new LinkedList<FileEntry>(unchangedEntries);
+ snapshotEntries.addAll(changedEntries);
+ snapshotEntries.addAll(addedEntries);
+
+ // The snapshot delta contains all changed, added and removed files.
+ final List<FileEntry> deltaEntries = new LinkedList<FileEntry>(changedEntries);
+ deltaEntries.addAll(removedEntries);
+ deltaEntries.addAll(addedEntries);
+
+ if (deltaEntries.isEmpty()) {
+ File newSnapshot = currentFullSnapshot;
+
+ if (!isPinned) {
+ // If unpinned and there is no detected drift then we generally don't need to add/update any files.
+ // But, if we have timestamp/filesize updates then we want to replace the current snapshot with
+ // the updated entries, so we can avoid SHA generation on subsequent runs.
+ if (updateSnapshot) {
+ currentFullSnapshot.delete();
+ newSnapshot = updateCurrentSnapshot(schedule, snapshotEntries, newVersion - 1);
+ }
+ } else {
+ // If pinned and returning to compliance (meaning no drift now but the previous snapshot did have drift)
+ // then we need to reset the current snapshot to match the pinned snapshot. Note though that we
+ // increment the snapshot version in order to let the server know about the state change.
+ if (newVersion > 1
+ && !isPreviousChangeSetEmpty(schedule.getResourceId(), schedule.getDriftDefinition())) {
+ currentFullSnapshot.delete();
+ newSnapshot = updateCurrentSnapshot(schedule, snapshotEntries, newVersion);
+
+ updateDeltaSnapshot(summary, schedule, deltaEntries, newVersion, currentFullSnapshot,
+ newSnapshot);
+ }
}
+
+ summary.setNewSnapshot(newSnapshot);
+
} else {
- // If pinned and returning to compliance (meaning no drift now but the previous snapshot did have drift)
- // then we need to reset the current snapshot to match the pinned snapshot. Note though that we
- // increment the snapshot version in order to let the server know about the state change.
- if (newVersion > 1
- && !isPreviousChangeSetEmpty(schedule.getResourceId(), schedule.getDriftDefinition())) {
- currentFullSnapshot.delete();
- newSnapshot = updateCurrentSnapshot(schedule, snapshotEntries, newVersion);
-
- updateDeltaSnapshot(summary, schedule, deltaEntries, newVersion, currentFullSnapshot, newSnapshot);
- }
- }
+ // if there is drift, but we're pinned and the drift is the same as the previous detection, just
+ // mark it as a repeat to indicate that we're out of compliance but not in any new way.
+ if (isPinned && newVersion > 1 && isSameAsPreviousChangeSet(deltaEntries, currentFullSnapshot)) {
+ summary.setVersion(newVersion - 1);
+ summary.setRepeat(true);
- summary.setNewSnapshot(newSnapshot);
+ return;
+ }
- } else {
- // if there is drift, but we're pinned and the drift is the same as the previous detection, just
- // mark it as a repeat to indicate that we're out of compliance but not in any new way.
- if (isPinned && newVersion > 1 && isSameAsPreviousChangeSet(deltaEntries, currentFullSnapshot)) {
- summary.setVersion(newVersion - 1);
- summary.setRepeat(true);
+ // otherwise, generate a new current snapshot, and a snapshot delta reflecting the latest drift
+ File oldSnapshot = backupAndDeleteCurrentSnapshot(currentFullSnapshot);
+ File newSnapshot = updateCurrentSnapshot(schedule, snapshotEntries, newVersion);
- return;
+ updateDeltaSnapshot(summary, schedule, deltaEntries, newVersion, oldSnapshot, newSnapshot);
}
+ } finally {
+ // Help out the garbage collector by clearing all of our collections
+ safeClear(newFiles, unchangedEntries, changedEntries, changedPinnedEntries);
+ }
+ }
- // otherwise, generate a new current snapshot, and a snapshot delta reflecting the latest drift
- File oldSnapshot = backupAndDeleteCurrentSnapshot(currentFullSnapshot);
- File newSnapshot = updateCurrentSnapshot(schedule, snapshotEntries, newVersion);
-
- updateDeltaSnapshot(summary, schedule, deltaEntries, newVersion, oldSnapshot, newSnapshot);
+ static private void safeClear(Collection<?>... collections) {
+ if (null == collections) {
+ return;
+ }
+ for (Collection<?> c : collections) {
+ if (null != c) {
+ c.clear();
+ }
}
}
@@ -363,10 +391,9 @@ public class DriftDetector implements Runnable {
* snapshot should be re-written to disk even if there was no drift.
* @throws IOException
*/
- private boolean scanSnapshotFiles(DriftDetectionSchedule schedule, File basedir, ChangeSetReader snapshotReader,
- Set<File> newFiles, List<FileEntry> unchangedEntries, List<FileEntry> removedEntries,
- List<FileEntry> cantReadEntries, List<FileEntry> modifiedEntries, List<FileEntry> changedPinnedEntries)
- throws IOException {
+ private boolean scanSnapshot(DriftDetectionSchedule schedule, File basedir, ChangeSetReader snapshotReader,
+ Set<File> newFiles, List<FileEntry> unchangedEntries, List<FileEntry> changedEntries,
+ List<FileEntry> removedEntries, List<FileEntry> changedPinnedEntries) throws IOException {
boolean result = false;
@@ -374,10 +401,11 @@ public class DriftDetector implements Runnable {
File file = new File(basedir, entry.getFile());
newFiles.remove(file);
- if (!file.exists()) {
- // The file has been deleted since the last scan
+ if (!(file.exists() && file.canRead())) {
+ // The file has been deleted or is no longer readable, since the last scan
if (log.isDebugEnabled()) {
- log.debug("Detected deleted file for " + schedule + " --> " + file.getAbsolutePath());
+ log.debug("Detected " + (file.exists() ? "unreadable" : "deleted") + " file for " + schedule
+ + " --> " + file.getAbsolutePath());
}
removedEntries.add(removedFileEntry(entry.getFile(), entry.getNewSHA()));
@@ -387,37 +415,25 @@ public class DriftDetector implements Runnable {
continue;
- } else if (!file.canRead()) {
- if (log.isDebugEnabled()) {
- log.debug(file.getPath() + " is no longer readable. Treating it as a deleted file.");
- }
- cantReadEntries.add(removedFileEntry(entry.getFile(), entry.getNewSHA()));
-
- if (null != changedPinnedEntries) {
- changedPinnedEntries.add(entry);
- }
-
- continue;
-
} else {
String currentSHA = null;
- boolean isModified = false;
+ boolean isChanged = false;
// perform a SHA comparison if we are unable to compare size and lastModified or if the
- // size or lastModified test fails. We may not have size of lastModified values for the
+ // size or lastModified test fails. We may not have size or lastModified values for the
// entry when the current snapshot was provided by the server, either due to a synch or
// pinning scenario. The server does not store that information and will provide -1 for defaults.
if (entry.getLastModified() == -1 || entry.getSize() == -1
|| entry.getLastModified() != file.lastModified() || entry.getSize() != file.length()) {
currentSHA = sha256(file);
- isModified = !entry.getNewSHA().equals(currentSHA);
+ isChanged = !entry.getNewSHA().equals(currentSHA);
}
- if (isModified) {
- FileEntry modifiedEntry = changedFileEntry(entry.getFile(), entry.getNewSHA(), currentSHA, file
+ if (isChanged) {
+ FileEntry changedEntry = changedFileEntry(entry.getFile(), entry.getNewSHA(), currentSHA, file
.lastModified(), file.length());
- modifiedEntries.add(modifiedEntry);
+ changedEntries.add(changedEntry);
if (null != changedPinnedEntries) {
changedPinnedEntries.add(entry);
commit b7ff74899be79a903a40483691e4916f313e3500
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Wed Dec 14 15:59:24 2011 -0500
- Add some unit tests ensuring timestamp info is updated appropriately.
- remove a bunch of IDE warnings
diff --git a/modules/core/plugin-container/src/test/java/org/rhq/core/pc/drift/DriftDetectorTest.java b/modules/core/plugin-container/src/test/java/org/rhq/core/pc/drift/DriftDetectorTest.java
index b0b1143..0f2d30a 100644
--- a/modules/core/plugin-container/src/test/java/org/rhq/core/pc/drift/DriftDetectorTest.java
+++ b/modules/core/plugin-container/src/test/java/org/rhq/core/pc/drift/DriftDetectorTest.java
@@ -19,12 +19,22 @@
package org.rhq.core.pc.drift;
-import org.rhq.common.drift.*;
-import org.rhq.core.domain.drift.DriftDefinition;
-import org.rhq.core.system.OperatingSystemType;
-import org.rhq.core.system.SystemInfoFactory;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
+import static java.util.Arrays.asList;
+import static java.util.Collections.emptyList;
+import static org.apache.commons.io.FileUtils.copyFile;
+import static org.apache.commons.io.FileUtils.deleteDirectory;
+import static org.apache.commons.io.FileUtils.touch;
+import static org.rhq.common.drift.FileEntry.addedFileEntry;
+import static org.rhq.common.drift.FileEntry.changedFileEntry;
+import static org.rhq.common.drift.FileEntry.removedFileEntry;
+import static org.rhq.core.domain.drift.DriftChangeSetCategory.COVERAGE;
+import static org.rhq.core.domain.drift.DriftChangeSetCategory.DRIFT;
+import static org.rhq.test.AssertUtils.assertCollectionMatchesNoOrder;
+import static org.rhq.test.AssertUtils.assertPropertiesMatch;
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertFalse;
+import static org.testng.Assert.assertTrue;
+import static org.testng.Assert.fail;
import java.io.BufferedReader;
import java.io.File;
@@ -33,15 +43,18 @@ import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
-import static java.util.Arrays.asList;
-import static java.util.Collections.emptyList;
-import static org.apache.commons.io.FileUtils.*;
-import static org.rhq.common.drift.FileEntry.*;
-import static org.rhq.core.domain.drift.DriftChangeSetCategory.COVERAGE;
-import static org.rhq.core.domain.drift.DriftChangeSetCategory.DRIFT;
-import static org.rhq.test.AssertUtils.assertCollectionMatchesNoOrder;
-import static org.rhq.test.AssertUtils.assertPropertiesMatch;
-import static org.testng.Assert.*;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import org.rhq.common.drift.ChangeSetReader;
+import org.rhq.common.drift.ChangeSetReaderImpl;
+import org.rhq.common.drift.ChangeSetWriter;
+import org.rhq.common.drift.ChangeSetWriterImpl;
+import org.rhq.common.drift.FileEntry;
+import org.rhq.common.drift.Headers;
+import org.rhq.core.domain.drift.DriftDefinition;
+import org.rhq.core.system.OperatingSystemType;
+import org.rhq.core.system.SystemInfoFactory;
public class DriftDetectorTest extends DriftTest {
@@ -66,7 +79,6 @@ public class DriftDetectorTest extends DriftTest {
detector.setScheduleQueue(scheduleQueue);
}
- @SuppressWarnings("unchecked")
@Test
public void excludeEmptyDirsFromCoverageChangeSet() throws Exception {
File confDir = mkdir(resourceDir, "conf");
@@ -74,6 +86,7 @@ public class DriftDetectorTest extends DriftTest {
// create an empty directory
File libDir = mkdir(resourceDir, "lib");
+ assert (libDir.isDirectory());
DriftDefinition driftDef = driftDefinition("coverage-test", resourceDir.getAbsolutePath());
@@ -82,14 +95,13 @@ public class DriftDetectorTest extends DriftTest {
File changeSet = changeSet(driftDef.getName(), COVERAGE);
Headers headers = createHeaders(driftDef, COVERAGE);
- List<FileEntry> expected = asList(addedFileEntry("conf/server.conf", sha256(serverConf),
- serverConf.lastModified(), serverConf.length()));
+ List<FileEntry> expected = asList(addedFileEntry("conf/server.conf", sha256(serverConf), serverConf
+ .lastModified(), serverConf.length()));
assertHeaderEquals(changeSet, headers);
assertFileEntriesMatch("Only files should be included in a change set.", expected, changeSet);
}
- @SuppressWarnings("unchecked")
@Test
public void includeMultipleFilesInDirInCoverageChangeSet() throws Exception {
File confDir = mkdir(resourceDir, "conf");
@@ -102,16 +114,15 @@ public class DriftDetectorTest extends DriftTest {
detector.run();
File changeSet = changeSet(def.getName(), COVERAGE);
- List<FileEntry> entries = asList(
- addedFileEntry("conf/server-1.conf", sha256(server1Conf), server1Conf.lastModified(), server1Conf.length()),
- addedFileEntry("conf/server-2.conf", sha256(server2Conf), server2Conf.lastModified(), server2Conf.length()));
+ List<FileEntry> entries = asList(addedFileEntry("conf/server-1.conf", sha256(server1Conf), server1Conf
+ .lastModified(), server1Conf.length()), addedFileEntry("conf/server-2.conf", sha256(server2Conf),
+ server2Conf.lastModified(), server2Conf.length()));
assertHeaderEquals(changeSet, createHeaders(def, COVERAGE));
assertFileEntriesMatch("Each file in a directory should be included in a coverage change set", entries,
changeSet);
}
- @SuppressWarnings("unchecked")
@Test
public void includedSiblingDirsInCoverageChangeSet() throws Exception {
File confDir = mkdir(resourceDir, "conf");
@@ -126,16 +137,15 @@ public class DriftDetectorTest extends DriftTest {
detector.run();
File changeSet = changeSet(def.getName(), COVERAGE);
- List<FileEntry> entries = asList(
- addedFileEntry("conf/server.conf", sha256(serverConf), serverConf.lastModified(), serverConf.length()),
- addedFileEntry("lib/server.jar", sha256(serverLib), serverLib.lastModified(), serverLib.length()));
+ List<FileEntry> entries = asList(addedFileEntry("conf/server.conf", sha256(serverConf), serverConf
+ .lastModified(), serverConf.length()), addedFileEntry("lib/server.jar", sha256(serverLib), serverLib
+ .lastModified(), serverLib.length()));
assertHeaderEquals(changeSet, createHeaders(def, COVERAGE));
assertFileEntriesMatch("A coverage change set should include files from multiple, sibling directories",
entries, changeSet);
}
- @SuppressWarnings("unchecked")
@Test
public void includeNestedDirsInCoverageChangeSet() throws Exception {
File confDir = mkdir(resourceDir, "conf");
@@ -150,9 +160,9 @@ public class DriftDetectorTest extends DriftTest {
detector.run();
File changeSet = changeSet(def.getName(), COVERAGE);
- List<FileEntry> entries = asList(
- addedFileEntry("conf/server-1.conf", sha256(server1Conf), server1Conf.lastModified(), server1Conf.length()),
- addedFileEntry("conf/subconf/server-2.conf", sha256(server2Conf), server2Conf.lastModified(), server2Conf.length()));
+ List<FileEntry> entries = asList(addedFileEntry("conf/server-1.conf", sha256(server1Conf), server1Conf
+ .lastModified(), server1Conf.length()), addedFileEntry("conf/subconf/server-2.conf", sha256(server2Conf),
+ server2Conf.lastModified(), server2Conf.length()));
assertHeaderEquals(changeSet, createHeaders(def, COVERAGE));
assertFileEntriesMatch("A coverage change set should include files in nested sub directories", entries,
@@ -161,12 +171,13 @@ public class DriftDetectorTest extends DriftTest {
@Test
public void updateScheduleAfterGeneratingCoverageChangeSet() throws Exception {
- DriftDefinition driftDef = driftDefinition("update-schedule-after-coverage-changeset",
- resourceDir.getAbsolutePath());
+ DriftDefinition driftDef = driftDefinition("update-schedule-after-coverage-changeset", resourceDir
+ .getAbsolutePath());
DriftDetectionSchedule schedule = new DriftDetectionSchedule(resourceId(), driftDef);
File confDir = mkdir(resourceDir, "conf");
File serverConf = createRandomFile(confDir, "server.conf");
+ assert (serverConf.exists());
long currentTime = System.currentTimeMillis();
@@ -174,8 +185,8 @@ public class DriftDetectorTest extends DriftTest {
detector.run();
assertTrue(schedule.getNextScan() >= (currentTime + driftDef.getInterval()),
- "Failed to update schedule. next scan is " + schedule.getNextScan() + " and should be greater than " +
- (currentTime + driftDef.getInterval()));
+ "Failed to update schedule. next scan is " + schedule.getNextScan() + " and should be greater than "
+ + (currentTime + driftDef.getInterval()));
}
@Test
@@ -183,11 +194,11 @@ public class DriftDetectorTest extends DriftTest {
}
- @SuppressWarnings("unchecked")
@Test
public void doNotUpdateSnapshotOrGenerateDriftChangeSetIfNothingChanges() throws Exception {
File confDir = mkdir(resourceDir, "conf");
File serverConf = createRandomFile(confDir, "server.conf");
+ assert (serverConf.exists());
DriftDefinition def = driftDefinition("nothing-to-update", resourceDir.getAbsolutePath());
@@ -217,7 +228,6 @@ public class DriftDetectorTest extends DriftTest {
+ "no drift");
}
- @SuppressWarnings("unchecked")
@Test
public void skipDetectionForScheduledThatIsDisabled() throws Exception {
detector.setDriftClient(new DriftClientTestStub() {
@@ -245,8 +255,8 @@ public class DriftDetectorTest extends DriftTest {
detector.run();
// make sure that the next scan time is not updated
- assertEquals(schedule.getNextScan(), nextScan, "The next scan time for the drift detection schedule should " +
- " not get updated if drift detection does not actually run for the definition.");
+ assertEquals(schedule.getNextScan(), nextScan, "The next scan time for the drift detection schedule should "
+ + " not get updated if drift detection does not actually run for the definition.");
}
@Test
@@ -255,7 +265,8 @@ public class DriftDetectorTest extends DriftTest {
DriftDetectionSchedule schedule = new DriftDetectionSchedule(resourceId(), driftDef);
File confDir = mkdir(resourceDir, "conf");
- File server1Conf = new File(confDir, "server.conf");
+ File server1Conf = createRandomFile(confDir, "server.conf");
+ assert (server1Conf.exists());
scheduleQueue.addSchedule(schedule);
detector.run();
@@ -263,8 +274,8 @@ public class DriftDetectorTest extends DriftTest {
long nextScan = schedule.getNextScan();
detector.run();
- assertEquals(schedule.getNextScan(), nextScan, "The next scan time for the drift detection schedule should " +
- " not get updated if drift detection does not actually run for the definition.");
+ assertEquals(schedule.getNextScan(), nextScan, "The next scan time for the drift detection schedule should "
+ + " not get updated if drift detection does not actually run for the definition.");
}
@Test
@@ -277,18 +288,17 @@ public class DriftDetectorTest extends DriftTest {
scheduleQueue.addSchedule(new DriftDetectionSchedule(resourceId(), def));
detector.run();
- assertEquals(driftClient.getReportMissingBaseDirInvocationCount(), 1, "A missing base directory should be " +
- "reported to the server if no initial snapshot has already been generated.");
- assertEquals(driftClient.getSendChangeSetInvocationCount(), 0, "No initial change set should be sent to " +
- "the server if the base directory does not exist.");
+ assertEquals(driftClient.getReportMissingBaseDirInvocationCount(), 1, "A missing base directory should be "
+ + "reported to the server if no initial snapshot has already been generated.");
+ assertEquals(driftClient.getSendChangeSetInvocationCount(), 0, "No initial change set should be sent to "
+ + "the server if the base directory does not exist.");
// verify that the initial change set was not generated
File snapshot = changeSet(def.getName(), COVERAGE);
- assertFalse(snapshot.exists(), "An initial snapshot should not be written to disk if the base directory " +
- "does not exist.");
+ assertFalse(snapshot.exists(), "An initial snapshot should not be written to disk if the base directory "
+ + "does not exist.");
}
- @SuppressWarnings("unchecked")
@Test
public void skipDetectionWhenPreviousSnapshotFileExists() throws Exception {
// The presence of a previous snapshot file means that the server has
@@ -327,7 +337,6 @@ public class DriftDetectorTest extends DriftTest {
+ "drift detection should not have run until the server acked the previous snapshot.");
}
- @SuppressWarnings("unchecked")
@Test
public void includeAddedFileInDriftChangeSet() throws Exception {
DriftDefinition def = driftDefinition("file-added-drift-test", resourceDir.getAbsolutePath());
@@ -336,8 +345,8 @@ public class DriftDetectorTest extends DriftTest {
File server1Conf = createRandomFile(confDir, "server-1.conf");
ChangeSetWriter writer = changeSetMgr.getChangeSetWriter(resourceId(), createHeaders(def, COVERAGE));
- writer.write(addedFileEntry("conf/server-1.conf", sha256(server1Conf), server1Conf.lastModified(),
- server1Conf.length()));
+ writer.write(addedFileEntry("conf/server-1.conf", sha256(server1Conf), server1Conf.lastModified(), server1Conf
+ .length()));
writer.close();
// Create some drift
@@ -347,8 +356,8 @@ public class DriftDetectorTest extends DriftTest {
detector.run();
File driftChangeSet = changeSet(def.getName(), DRIFT);
- List<FileEntry> driftEntries = asList(addedFileEntry("conf/server-2.conf", sha256(server2Conf),
- server2Conf.lastModified(), server2Conf.length()));
+ List<FileEntry> driftEntries = asList(addedFileEntry("conf/server-2.conf", sha256(server2Conf), server2Conf
+ .lastModified(), server2Conf.length()));
// verify that the drift change set was generated
assertTrue(driftChangeSet.exists(), "Expected to find drift change set " + driftChangeSet.getPath());
@@ -356,9 +365,9 @@ public class DriftDetectorTest extends DriftTest {
assertFileEntriesMatch("The drift change set does not match the expected values", driftEntries, driftChangeSet);
File coverageChangeSet = changeSet(def.getName(), COVERAGE);
- List<FileEntry> coverageEntries = asList(
- addedFileEntry("conf/server-1.conf", sha256(server1Conf), server1Conf.lastModified(), server1Conf.length()),
- addedFileEntry("conf/server-2.conf", sha256(server2Conf), server2Conf.lastModified(), server2Conf.length()));
+ List<FileEntry> coverageEntries = asList(addedFileEntry("conf/server-1.conf", sha256(server1Conf), server1Conf
+ .lastModified(), server1Conf.length()), addedFileEntry("conf/server-2.conf", sha256(server2Conf),
+ server2Conf.lastModified(), server2Conf.length()));
// verify that the coverage change set was updated
assertHeaderEquals(coverageChangeSet, createHeaders(def, COVERAGE, 1));
@@ -366,7 +375,6 @@ public class DriftDetectorTest extends DriftTest {
coverageChangeSet);
}
- @SuppressWarnings("unchecked")
@Test
public void includeModifiedFileInDriftChangeSet() throws Exception {
DriftDefinition def = driftDefinition("file-modified-drift-test", resourceDir.getAbsolutePath());
@@ -388,8 +396,8 @@ public class DriftDetectorTest extends DriftTest {
detector.run();
File driftChangeSet = changeSet(def.getName(), DRIFT);
- List<FileEntry> driftEntries = asList(changedFileEntry("conf/server-1.conf", oldHash, newHash,
- server1Conf.lastModified(), server1Conf.length()));
+ List<FileEntry> driftEntries = asList(changedFileEntry("conf/server-1.conf", oldHash, newHash, server1Conf
+ .lastModified(), server1Conf.length()));
// verify that the drift change set was generated
assertTrue(driftChangeSet.exists(), "Expected to find drift change set " + driftChangeSet.getPath());
@@ -397,8 +405,8 @@ public class DriftDetectorTest extends DriftTest {
assertFileEntriesMatch("The drift change set does not match the expected values", driftEntries, driftChangeSet);
File coverageChangeSet = changeSet(def.getName(), COVERAGE);
- List<FileEntry> coverageEntries = asList(changedFileEntry("conf/server-1.conf", oldHash, newHash,
- server1Conf.lastModified(), server1Conf.length()));
+ List<FileEntry> coverageEntries = asList(changedFileEntry("conf/server-1.conf", oldHash, newHash, server1Conf
+ .lastModified(), server1Conf.length()));
// verify that the coverage change set was updated
assertHeaderEquals(coverageChangeSet, createHeaders(def, COVERAGE, 1));
@@ -406,7 +414,6 @@ public class DriftDetectorTest extends DriftTest {
coverageChangeSet);
}
- @SuppressWarnings("unchecked")
@Test(enabled = false)
public void includeFiledAddedInNewDirectoryInDriftChangeSet() throws Exception {
DriftDefinition def = driftDefinition("file-added-in-new-dir", resourceDir.getAbsolutePath());
@@ -415,8 +422,8 @@ public class DriftDetectorTest extends DriftTest {
File server1Conf = createRandomFile(confDir, "server-1.conf");
ChangeSetWriter writer = changeSetMgr.getChangeSetWriter(resourceId(), createHeaders(def, COVERAGE));
- writer.write(addedFileEntry("conf/server-1.conf", sha256(server1Conf), server1Conf.lastModified(),
- server1Conf.length()));
+ writer.write(addedFileEntry("conf/server-1.conf", sha256(server1Conf), server1Conf.lastModified(), server1Conf
+ .length()));
writer.close();
// create some drift
@@ -436,9 +443,9 @@ public class DriftDetectorTest extends DriftTest {
assertFileEntriesMatch("The drift change set does not match the expected values", driftEntries, driftChangeSet);
File coverageChangeSet = changeSet(def.getName(), COVERAGE);
- List<FileEntry> coverageEntries = asList(
- addedFileEntry("conf/server-1.conf", sha256(server1Conf), server1Conf.lastModified(), server1Conf.length()),
- addedFileEntry("conf/subconf/server-2.conf", sha256(server2Conf), server2Conf.lastModified(), server2Conf.length()));
+ List<FileEntry> coverageEntries = asList(addedFileEntry("conf/server-1.conf", sha256(server1Conf), server1Conf
+ .lastModified(), server1Conf.length()), addedFileEntry("conf/subconf/server-2.conf", sha256(server2Conf),
+ server2Conf.lastModified(), server2Conf.length()));
// verify that the coverage change set was updated
assertHeaderEquals(coverageChangeSet, createHeaders(def, COVERAGE, 1));
@@ -446,7 +453,6 @@ public class DriftDetectorTest extends DriftTest {
coverageChangeSet);
}
- @SuppressWarnings("unchecked")
@Test
public void includeRemovedFileInDriftChangeSet() throws Exception {
DriftDefinition def = driftDefinition("file-removed-drift-test", resourceDir.getAbsolutePath());
@@ -458,10 +464,10 @@ public class DriftDetectorTest extends DriftTest {
String server2ConfHash = sha256(server2Conf);
ChangeSetWriter writer = changeSetMgr.getChangeSetWriter(resourceId(), createHeaders(def, COVERAGE));
- writer.write(addedFileEntry("conf/server-1.conf", sha256(server1Conf), server1Conf.lastModified(),
- server1Conf.length()));
- writer.write(addedFileEntry("conf/server-2.conf", server2ConfHash, server2Conf.lastModified(),
- server2Conf.length()));
+ writer.write(addedFileEntry("conf/server-1.conf", sha256(server1Conf), server1Conf.lastModified(), server1Conf
+ .length()));
+ writer.write(addedFileEntry("conf/server-2.conf", server2ConfHash, server2Conf.lastModified(), server2Conf
+ .length()));
writer.close();
// create some drift
@@ -480,8 +486,8 @@ public class DriftDetectorTest extends DriftTest {
// verify that the coverage change set was updated
File coverageChangeSet = changeSet(def.getName(), COVERAGE);
- List<FileEntry> coverageEntries = asList(addedFileEntry("conf/server-1.conf", sha256(server1Conf),
- server1Conf.lastModified(), server1Conf.length()));
+ List<FileEntry> coverageEntries = asList(addedFileEntry("conf/server-1.conf", sha256(server1Conf), server1Conf
+ .lastModified(), server1Conf.length()));
assertHeaderEquals(coverageChangeSet, createHeaders(def, COVERAGE, 1));
assertFileEntriesMatch("The coverage change set was not updated as expected", coverageEntries,
@@ -529,7 +535,6 @@ public class DriftDetectorTest extends DriftTest {
}
- @SuppressWarnings("unchecked")
@Test
public void includeFilesInRemovedDirectoryInDriftChangeSet() throws Exception {
DriftDefinition def = driftDefinition("dir-removed-test", resourceDir.getAbsolutePath());
@@ -539,7 +544,8 @@ public class DriftDetectorTest extends DriftTest {
String server1Hash = sha256(server1Conf);
ChangeSetWriter writer = changeSetMgr.getChangeSetWriter(resourceId(), createHeaders(def, COVERAGE));
- writer.write(addedFileEntry("conf/server-1.conf", server1Hash, server1Conf.lastModified(), server1Conf.length()));
+ writer
+ .write(addedFileEntry("conf/server-1.conf", server1Hash, server1Conf.lastModified(), server1Conf.length()));
writer.close();
// create some drift
@@ -566,7 +572,6 @@ public class DriftDetectorTest extends DriftTest {
coverageChangeSet);
}
- @SuppressWarnings("unchecked")
@Test
public void revertToPreviousSnapshotWhenSendingChangeSetFails() throws Exception {
DriftDefinition def = driftDefinition("revert-snapshot-test", resourceDir.getAbsolutePath());
@@ -574,6 +579,7 @@ public class DriftDetectorTest extends DriftTest {
File confDir = mkdir(resourceDir, "conf");
File server1Conf = createRandomFile(confDir, "server.conf");
+ assert (server1Conf.exists());
scheduleQueue.addSchedule(schedule);
// generate the initial snapshot
@@ -612,7 +618,6 @@ public class DriftDetectorTest extends DriftTest {
+ "should be deleted once we have reverted back to it and have a new, current snapshot file.");
}
- @SuppressWarnings("unchecked")
@Test
public void purgeSnapshotWhenSendingInitialChangeSetFails() throws Exception {
// If we have just generated the initial change set and sending it to
@@ -650,8 +655,8 @@ public class DriftDetectorTest extends DriftTest {
detector.run();
File changeSet = changeSet(def.getName(), COVERAGE);
- List<FileEntry> entries = asList(addedFileEntry("conf/server-1.conf", sha256(server1Conf),
- server1Conf.lastModified(), server1Conf.length()));
+ List<FileEntry> entries = asList(addedFileEntry("conf/server-1.conf", sha256(server1Conf), server1Conf
+ .lastModified(), server1Conf.length()));
assertHeaderEquals(changeSet, createHeaders(def, COVERAGE));
assertFileEntriesMatch("Files that are non-readable should be skipped but other, readable file should still "
@@ -760,14 +765,14 @@ public class DriftDetectorTest extends DriftTest {
String newPinnedHash = sha256(pinnedSnapshot);
- assertEquals(newPinnedHash, originalPinnedHash, "When a snapshot is pinned, it should not get updated during " +
- "drift detection");
+ assertEquals(newPinnedHash, originalPinnedHash, "When a snapshot is pinned, it should not get updated during "
+ + "drift detection");
// We always generate/update the current snapshot so we still need to verify that it
// was generated/updated correctly
- List<FileEntry> fileEntries = asList(
- addedFileEntry("conf/server1.conf", sha256(server1Conf), server1Conf.lastModified(), server1Conf.length()),
- addedFileEntry("conf/server2.conf", sha256(server2Conf), server2Conf.lastModified(), server2Conf.length()));
+ List<FileEntry> fileEntries = asList(addedFileEntry("conf/server1.conf", sha256(server1Conf), server1Conf
+ .lastModified(), server1Conf.length()), addedFileEntry("conf/server2.conf", sha256(server2Conf),
+ server2Conf.lastModified(), server2Conf.length()));
assertHeaderEquals(currentSnapshot, createHeaders(driftDef, COVERAGE, 1));
assertFileEntriesMatch("The current snapshot file should still get updated even when using a pinned snapshot",
@@ -801,10 +806,10 @@ public class DriftDetectorTest extends DriftTest {
detector.run();
// verify that the current snapshot was updated
- List<FileEntry> currentSnapshotEntries = asList(
- addedFileEntry("conf/server1.conf", sha256(server1Conf), server1Conf.lastModified(), server1Conf.length()),
- addedFileEntry("conf/server2.conf", sha256(server2Conf), server2Conf.lastModified(), server2Conf.length()),
- addedFileEntry("conf/server3.conf", sha256(server3Conf), server3Conf.lastModified(), server3Conf.length()));
+ List<FileEntry> currentSnapshotEntries = asList(addedFileEntry("conf/server1.conf", sha256(server1Conf),
+ server1Conf.lastModified(), server1Conf.length()), addedFileEntry("conf/server2.conf", sha256(server2Conf),
+ server2Conf.lastModified(), server2Conf.length()), addedFileEntry("conf/server3.conf", sha256(server3Conf),
+ server3Conf.lastModified(), server3Conf.length()));
assertHeaderEquals(currentSnapshot, createHeaders(driftDef, COVERAGE, 2));
assertFileEntriesMatch("The current snapshot file should still get updated even when using a pinned snapshot",
@@ -812,9 +817,9 @@ public class DriftDetectorTest extends DriftTest {
// verify that the drift/delta change set was generated
File driftChangeSet = changeSet(driftDef.getName(), DRIFT);
- List<FileEntry> driftEntries = asList(
- addedFileEntry("conf/server2.conf", sha256(server2Conf), server2Conf.lastModified(), server2Conf.length()),
- addedFileEntry("conf/server3.conf", sha256(server3Conf), server3Conf.lastModified(), server3Conf.length()));
+ List<FileEntry> driftEntries = asList(addedFileEntry("conf/server2.conf", sha256(server2Conf), server2Conf
+ .lastModified(), server2Conf.length()), addedFileEntry("conf/server3.conf", sha256(server3Conf),
+ server3Conf.lastModified(), server3Conf.length()));
assertHeaderEquals(driftChangeSet, createHeaders(driftDef, DRIFT, 2));
assertFileEntriesMatch("The drift change set was not generated correctly when using a pinned snapshot",
@@ -835,16 +840,16 @@ public class DriftDetectorTest extends DriftTest {
File changeSet = changeSet(driftDef.getName(), COVERAGE);
File pinnedSnapshot = new File(changeSet.getParentFile(), "snapshot.pinned");
- List<FileEntry> entries = asList(addedFileEntry("conf/server.conf", sha256(serverConf),
- serverConf.lastModified(), serverConf.length()));
+ List<FileEntry> entries = asList(addedFileEntry("conf/server.conf", sha256(serverConf), serverConf
+ .lastModified(), serverConf.length()));
assertTrue(changeSet.exists(), "An initial snapshot file should be generated even when it is pinned");
assertHeaderEquals(changeSet, createHeaders(driftDef, COVERAGE));
assertFileEntriesMatch("Initial snapshot entries are wrong for pinned snapshot", entries, changeSet);
assertTrue(pinnedSnapshot.exists(), "Pinned snapshot file should be generated when initial version is pinned");
- assertEquals(sha256(changeSet), sha256(pinnedSnapshot), "The contents of the pinned snapshot file and the " +
- "initial snapshot should be identical");
+ assertEquals(sha256(changeSet), sha256(pinnedSnapshot), "The contents of the pinned snapshot file and the "
+ + "initial snapshot should be identical");
}
@Test
@@ -895,8 +900,8 @@ public class DriftDetectorTest extends DriftTest {
public void repeatChangeSet(int resourceId, String driftDefName, int version) {
repeatChangeSetCalled.set(true);
assertEquals(resourceId, resourceId(), "The resource id for the repeat change set is wrong");
- assertEquals(driftDefName, driftDef.getName(), "The drift definition name for the repeat change set " +
- "is wrong");
+ assertEquals(driftDefName, driftDef.getName(), "The drift definition name for the repeat change set "
+ + "is wrong");
assertEquals(version, 1, "The snapshot version should not have changed since no new drift was detected");
}
});
@@ -914,8 +919,8 @@ public class DriftDetectorTest extends DriftTest {
assertTrue(repeatChangeSetCalled.get(), "Failed to notify server of repeat change set");
// verify that the previous version snapshot file has been deleted
- assertFalse(previousSnapshot.exists(), "There should be no previous version snapshot file because the " +
- "server has already acknowledged the current snapshot.");
+ assertFalse(previousSnapshot.exists(), "There should be no previous version snapshot file because the "
+ + "server has already acknowledged the current snapshot.");
}
@@ -933,7 +938,8 @@ public class DriftDetectorTest extends DriftTest {
// generate the pinned snapshot which is version zero
File pinnedSnapshot = pinnedSnapshot(driftDef.getName());
ChangeSetWriter writer = new ChangeSetWriterImpl(pinnedSnapshot, headers);
- writer.write(addedFileEntry("conf/server.conf", serverConfHash, serverConf.lastModified(), serverConf.length()));
+ writer
+ .write(addedFileEntry("conf/server.conf", serverConfHash, serverConf.lastModified(), serverConf.length()));
writer.close();
// generate the current snapshot file. we will take a shortcut here by
@@ -946,6 +952,7 @@ public class DriftDetectorTest extends DriftTest {
// now generate some drift causing the resource to go out of compliance
File newServerConf = createRandomFile(confDir, "new_server.conf");
String newServerConfHash = sha256(newServerConf);
+ assert (null != newServerConfHash);
// do a drift detection run
DriftDetectionSchedule schedule = new DriftDetectionSchedule(resourceId(), driftDef);
@@ -965,12 +972,81 @@ public class DriftDetectorTest extends DriftTest {
// verify that that current snapshot has been updated to reflect that
// the resource is back in compliance.
- List<FileEntry> entries = asList(addedFileEntry("conf/server.conf", serverConfHash,
- serverConf.lastModified(), serverConf.length()));
+ List<FileEntry> entries = asList(addedFileEntry("conf/server.conf", serverConfHash, serverConf.lastModified(),
+ serverConf.length()));
assertHeaderEquals(currentSnapshot, createHeaders(driftDef, COVERAGE, 2));
- assertFileEntriesMatch("The entries in the current snapshot should match those in the pinned snapshot " +
- "once the resource has gone back into compliance.", entries, currentSnapshot);
+ assertFileEntriesMatch("The entries in the current snapshot should match those in the pinned snapshot "
+ + "once the resource has gone back into compliance.", entries, currentSnapshot);
+ }
+
+ @Test
+ public void updateTimestampInfoNoDriftTest() throws Exception {
+ DriftDefinition def = driftDefinition("update-timestamp-nodrift-test", resourceDir.getAbsolutePath());
+
+ File confDir = mkdir(resourceDir, "conf");
+ File server1Conf = createRandomFile(confDir, "server-1.conf");
+ String server1Hash = sha256(server1Conf);
+
+ ChangeSetWriter writer = changeSetMgr.getChangeSetWriter(resourceId(), createHeaders(def, COVERAGE));
+ writer.write(addedFileEntry("conf/server-1.conf", server1Hash, -1L, -1L));
+ writer.close();
+
+ scheduleQueue.addSchedule(new DriftDetectionSchedule(resourceId(), def));
+ detector.run();
+
+ // verify that no drift change set was generated
+ File driftChangeSet = changeSet(def.getName(), DRIFT);
+ assertFalse(driftChangeSet.exists(), "Expected no drift change set " + driftChangeSet.getPath());
+
+ // verify that the coverage change set was updated with timestamp info, version is still 0
+ File coverageChangeSet = changeSet(def.getName(), COVERAGE);
+ List<FileEntry> coverageEntries = asList(addedFileEntry("conf/server-1.conf", server1Hash, server1Conf
+ .lastModified(), server1Conf.length()));
+
+ assertHeaderEquals(coverageChangeSet, createHeaders(def, COVERAGE, 0));
+ assertFileEntriesMatch("The coverage change set was not updated as expected", coverageEntries,
+ coverageChangeSet);
+ }
+
+ @Test
+ public void updateTimestampInfoDriftTest() throws Exception {
+ DriftDefinition def = driftDefinition("update-timestamp-drift-test", resourceDir.getAbsolutePath());
+
+ File confDir = mkdir(resourceDir, "conf");
+ File server1Conf = createRandomFile(confDir, "server-1.conf");
+ String server1Hash = sha256(server1Conf);
+ File server2Conf = createRandomFile(confDir, "server-2.conf");
+ String server2Hash = sha256(server2Conf);
+
+ ChangeSetWriter writer = changeSetMgr.getChangeSetWriter(resourceId(), createHeaders(def, COVERAGE));
+ writer.write(addedFileEntry("conf/server-1.conf", server1Hash, -1L, -1L));
+ writer.write(addedFileEntry("conf/server-2.conf", server2Hash, -1L, -1L));
+ writer.close();
+
+ // create some drift
+ server1Conf.delete();
+ confDir.delete();
+
+ scheduleQueue.addSchedule(new DriftDetectionSchedule(resourceId(), def));
+ detector.run();
+
+ File driftChangeSet = changeSet(def.getName(), DRIFT);
+ List<FileEntry> driftEntries = asList(removedFileEntry("conf/server-1.conf", server1Hash));
+
+ // verify that the drift change set was generated
+ assertTrue(driftChangeSet.exists(), "Expected to find drift change set " + driftChangeSet.getPath());
+ assertHeaderEquals(driftChangeSet, createHeaders(def, DRIFT, 1));
+ assertFileEntriesMatch("The drift change set does not match the expected values", driftEntries, driftChangeSet);
+
+ // verify that the coverage change set was updated with timestamp info and incremented version
+ File coverageChangeSet = changeSet(def.getName(), COVERAGE);
+ List<FileEntry> coverageEntries = asList(addedFileEntry("conf/server-2.conf", server2Hash, server2Conf
+ .lastModified(), server2Conf.length()));
+
+ assertHeaderEquals(coverageChangeSet, createHeaders(def, COVERAGE, 1));
+ assertFileEntriesMatch("The coverage change set was not updated as expected", coverageEntries,
+ coverageChangeSet);
}
private void assertHeaderEquals(File changeSet, Headers expected) throws Exception {
12 years, 5 months
[rhq] modules/enterprise
by John Sanda
modules/enterprise/pom.xml | 1 +
modules/enterprise/server/itests/pom.xml | 6 ++++++
2 files changed, 7 insertions(+)
New commits:
commit 37dac4382571c22297dc4ff7497564eb7864b1fd
Author: John Sanda <jsanda(a)redhat.com>
Date: Wed Dec 14 15:24:35 2011 -0500
Add dependency needed as a result of REST integration into server/jar
Also adding itests module to enterprise profile so that it will get
built by jenkins.
diff --git a/modules/enterprise/pom.xml b/modules/enterprise/pom.xml
index e119090..eb6a9cf 100644
--- a/modules/enterprise/pom.xml
+++ b/modules/enterprise/pom.xml
@@ -73,6 +73,7 @@
<module>server/container-lib</module>
<module>server/container</module>
<module>server/client-api</module>
+ <module>server/itests</module>
</modules>
</profile>
diff --git a/modules/enterprise/server/itests/pom.xml b/modules/enterprise/server/itests/pom.xml
index f269e77..8753e6f 100644
--- a/modules/enterprise/server/itests/pom.xml
+++ b/modules/enterprise/server/itests/pom.xml
@@ -220,6 +220,12 @@
<version>2.3.18</version>
<scope>provided</scope>
</dependency>
+
+ <dependency>
+ <groupId>org.jboss.resteasy</groupId>
+ <artifactId>resteasy-jaxrs</artifactId>
+ <version>${resteasy.version}</version>
+ </dependency>
</dependencies>
<build>
12 years, 5 months
[rhq] modules/enterprise
by ips
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/event/EventCompositeDatasource.java | 38 +++++-----
1 file changed, 21 insertions(+), 17 deletions(-)
New commits:
commit 4f832a42b5477490cdcdaaf334846980b813ff4e
Author: Ian Springer <ian.springer(a)redhat.com>
Date: Wed Dec 14 14:38:43 2011 -0500
[BZ 760185] fix NPEs that occurred if user hovered over the Severity or Source Location columns while grid data was still loading (https://bugzilla.redhat.com/show_bug.cgi?id=760185)
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/event/EventCompositeDatasource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/event/EventCompositeDatasource.java
index c935e70..afcca26 100644
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/event/EventCompositeDatasource.java
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/event/EventCompositeDatasource.java
@@ -1,6 +1,6 @@
/*
* RHQ Management Platform
- * Copyright (C) 2005-2010 Red Hat, Inc.
+ * Copyright (C) 2005-2011 Red Hat, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -99,22 +99,25 @@ public class EventCompositeDatasource extends RPCDataSource<EventComposite, Even
});
severityField.setShowHover(true);
severityField.setHoverCustomizer(new HoverCustomizer() {
- @Override
public String hoverHTML(Object value, ListGridRecord record, int rowNum, int colNum) {
- EventSeverity severity = EventSeverity.valueOf(record.getAttribute("severity"));
+ if (value == null) {
+ return null;
+ }
+ EventSeverity severity = EventSeverity.valueOf((String)value);
switch (severity) {
- case DEBUG:
- return MSG.common_severity_debug();
- case INFO:
- return MSG.common_severity_info();
- case WARN:
- return MSG.common_severity_warn();
- case ERROR:
- return MSG.common_severity_error();
- case FATAL:
- return MSG.common_severity_fatal();
+ case DEBUG:
+ return MSG.common_severity_debug();
+ case INFO:
+ return MSG.common_severity_info();
+ case WARN:
+ return MSG.common_severity_warn();
+ case ERROR:
+ return MSG.common_severity_error();
+ case FATAL:
+ return MSG.common_severity_fatal();
+ default:
+ return null;
}
- return null;
}
});
fields.add(severityField);
@@ -147,9 +150,11 @@ public class EventCompositeDatasource extends RPCDataSource<EventComposite, Even
});
sourceField.setShowHover(true);
sourceField.setHoverCustomizer(new HoverCustomizer() {
- @Override
public String hoverHTML(Object value, ListGridRecord record, int rowNum, int colNum) {
- String sourceLocation = record.getAttribute("source");
+ if (value == null) {
+ return null;
+ }
+ String sourceLocation = (String)value;
return (sourceLocation.length() > 40) ? sourceLocation : null;
}
});
@@ -166,7 +171,6 @@ public class EventCompositeDatasource extends RPCDataSource<EventComposite, Even
});
resourceNameField.setShowHover(true);
resourceNameField.setHoverCustomizer(new HoverCustomizer() {
-
public String hoverHTML(Object value, ListGridRecord listGridRecord, int rowNum, int colNum) {
return AncestryUtil.getResourceHoverHTML(listGridRecord, 0);
}
12 years, 5 months
[rhq] Branch 'stefan/backingcontent' - modules/plugins
by snegrea
modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/StandaloneManagedDeploymentComponent.java | 67 ++--
modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/util/FileContentDelegate.java | 157 +++++++++-
modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/util/JarContentDelegate.java | 22 -
3 files changed, 188 insertions(+), 58 deletions(-)
New commits:
commit d4ba67e3e1739a2b368b2cc4c8f7a2677b6e6b9e
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Wed Dec 14 11:07:02 2011 -0600
[BZ 767393] Updated the plugin code to use SHA256 as the version and persist it inside the manifest file. Also, add directory based SHA computation for default server applications.
diff --git a/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/StandaloneManagedDeploymentComponent.java b/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/StandaloneManagedDeploymentComponent.java
index ebae861..a8eba4d 100644
--- a/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/StandaloneManagedDeploymentComponent.java
+++ b/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/StandaloneManagedDeploymentComponent.java
@@ -58,11 +58,10 @@ import org.rhq.core.pluginapi.content.ContentFacet;
import org.rhq.core.pluginapi.content.ContentServices;
import org.rhq.core.pluginapi.inventory.DeleteResourceFacet;
import org.rhq.core.pluginapi.measurement.MeasurementFacet;
-import org.rhq.core.util.MessageDigestGenerator;
import org.rhq.core.util.ZipUtil;
import org.rhq.core.util.exception.ThrowableUtil;
-import org.rhq.core.util.file.JarContentFileInfo;
import org.rhq.plugins.jbossas5.util.DeploymentUtils;
+import org.rhq.plugins.jbossas5.util.FileContentDelegate;
/**
* A resource component for managing a standalone/top-level Profile Service managed deployment.
@@ -144,9 +143,8 @@ public class StandaloneManagedDeploymentComponent extends AbstractManagedDeploym
+ getResourceDescription() + " does not exist.");
String fileName = this.deploymentFile.getName();
- JarContentFileInfo fileInfo = new JarContentFileInfo(this.deploymentFile);
- String sha256 = getSHA256(fileInfo);
- String version = getVersion(fileInfo, sha256);
+ String sha256 = getSHA256(this.deploymentFile);
+ String version = getVersion(sha256);
// Package name is the deployment's file name (e.g. foo.ear).
PackageDetailsKey key = new PackageDetailsKey(fileName, version, PKG_TYPE_FILE, ARCHITECTURE);
ResourcePackageDetails packageDetails = new ResourcePackageDetails(key);
@@ -164,46 +162,29 @@ public class StandaloneManagedDeploymentComponent extends AbstractManagedDeploym
return packages;
}
- // TODO: if needed we can speed this up by looking in the ResourceContainer's installedPackage
- // list for previously discovered packages. If there use the sha256 from that record. We'd have to
- // get access to that info by adding access in org.rhq.core.pluginapi.content.ContentServices
- private String getSHA256(JarContentFileInfo fileInfo) {
-
+ /**
+ * Retrieve SHA256 for a deployed app.
+ *
+ * @param file application file
+ * @return SHA256 of the content
+ */
+ private String getSHA256(File file) {
String sha256 = null;
try {
- sha256 = fileInfo.getAttributeValue(RHQ_SHA256, null);
- if (null == sha256) {
- sha256 = new MessageDigestGenerator(MessageDigestGenerator.SHA_256).calcDigestString(fileInfo
- .getContentFile());
- }
- } catch (IOException iex) {
- //log exception but move on, discovery happens often. No reason to hold up anything.
+ FileContentDelegate fileContentDelegate = new FileContentDelegate(file, null, null);
+ sha256 = fileContentDelegate.getSHA(file);
+ } catch (Exception iex) {
if (log.isDebugEnabled()) {
- log.debug("Problem calculating digest of package [" + fileInfo.getContentFile().getPath() + "]."
- + iex.getMessage());
+ log.debug("Problem calculating digest of package [" + file.getPath() + "]." + iex.getMessage());
}
}
return sha256;
}
- private String getVersion(JarContentFileInfo fileInfo, String sha256) {
- // Version string in order of preference
- // manifestVersion + sha256, sha256, manifestVersion, "0"
- String version = "0";
- String manifestVersion = fileInfo.getVersion(null);
-
- if ((null != manifestVersion) && (null != sha256)) {
- // this protects against the occasional differing binaries with poor manifest maintenance
- version = manifestVersion + " [sha256=" + sha256 + "]";
- } else if (null != sha256) {
- version = "[sha256=" + sha256 + "]";
- } else if (null != manifestVersion) {
- version = manifestVersion;
- }
-
- return version;
+ private String getVersion(String sha256) {
+ return "[sha256=" + sha256 + "]";
}
public RemovePackagesResponse removePackages(Set<ResourcePackageDetails> packages) {
@@ -238,8 +219,11 @@ public class StandaloneManagedDeploymentComponent extends AbstractManagedDeploym
log.debug("Writing new EAR/WAR bits to temporary file...");
File tempFile;
+ String sha = null;
try {
tempFile = writeNewAppBitsToTempFile(contentServices, packageDetails);
+ FileContentDelegate fileContentDelegate = new FileContentDelegate(null, null, null);
+ sha = fileContentDelegate.computeSHAForArchive(tempFile);
} catch (Exception e) {
return failApplicationDeployment("Error writing new application bits to temporary file - cause: " + e,
packageDetails);
@@ -320,6 +304,19 @@ public class StandaloneManagedDeploymentComponent extends AbstractManagedDeploym
// Deploy was successful!
deleteBackupOfOriginalFile(backupOfOriginalFile);
+ if (this.deploymentFile.isDirectory()) {
+ FileContentDelegate fileContentDelegate = new FileContentDelegate(deploymentFile, null, null);
+ try {
+ //This is a simulation of create content from FileContentDelegate split across
+ //this deployment method because JBoss AS5 is using a different deployment model.
+ //The SHA256 was pre-computed earlier (at the time the temp content file was created).
+ //The only thing left at this point is to store it in the manifest file.
+ fileContentDelegate.writeSHAToManifest(deploymentFile, sha);
+ } catch (IOException e) {
+ log.error("Unable to save SHA to manifest file for " + this.deploymentFile.getPath() + ".", e);
+ }
+ }
+
DeployPackagesResponse response = new DeployPackagesResponse(ContentResponseResult.SUCCESS);
DeployIndividualPackageResponse packageResponse = new DeployIndividualPackageResponse(packageDetails.getKey(),
ContentResponseResult.SUCCESS);
diff --git a/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/util/FileContentDelegate.java b/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/util/FileContentDelegate.java
index 75f890a..915374a 100644
--- a/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/util/FileContentDelegate.java
+++ b/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/util/FileContentDelegate.java
@@ -26,16 +26,22 @@ import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Set;
+import java.util.Stack;
+import java.util.jar.Attributes;
+import java.util.jar.Manifest;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+
import org.rhq.core.domain.content.PackageDetails;
import org.rhq.core.domain.content.PackageDetailsKey;
import org.rhq.core.domain.content.transfer.ResourcePackageDetails;
import org.rhq.core.pluginapi.util.FileUtils;
+import org.rhq.core.util.MessageDigestGenerator;
import org.rhq.core.util.ZipUtil;
import org.rhq.core.util.file.FileUtil;
@@ -46,7 +52,9 @@ import org.rhq.core.util.file.FileUtil;
* @author Jason Dobies
*/
public class FileContentDelegate {
- // Attributes --------------------------------------------
+
+ private static final String RHQ_SHA_256 = "RHQ-Sha256";
+ private static final String MANIFEST_RELATIVE_PATH = "META-INF/MANIFEST.MF";
private final Log log = LogFactory.getLog(FileContentDelegate.class);
@@ -55,16 +63,12 @@ public class FileContentDelegate {
private final String fileEnding;
private final String packageTypeName;
- // Constructors --------------------------------------------
-
public FileContentDelegate(File directory, String fileEnding, String packageTypeName) {
this.directory = directory;
this.fileEnding = fileEnding;
this.packageTypeName = packageTypeName;
}
- // Public --------------------------------------------
-
public String getFileEnding() {
return fileEnding;
}
@@ -145,6 +149,44 @@ public class FileContentDelegate {
}
/**
+ * Retrieves the SHA256 for a deployed application.
+ * 1) If the app is exploded then return RHQ-Sha256 manifest attribute.
+ * 1.1) If RHQ-Sha256 is missing then compute it, save it and return the result.
+ * 2) If the app is an archive then compute SHA256 on fly and return it.
+ *
+ * @param deploymentFile deployment file
+ * @return
+ */
+ public String getSHA(File deploymentFile) {
+ String sha = null;
+ try {
+ if (deploymentFile.isDirectory()) {
+ File manifestFile = new File(deploymentFile.getAbsolutePath(), MANIFEST_RELATIVE_PATH);
+ if (manifestFile.exists()) {
+ InputStream manifestStream = new FileInputStream(manifestFile);
+ Manifest manifest = null;
+ try {
+ manifest = new Manifest(manifestStream);
+ sha = manifest.getMainAttributes().getValue(RHQ_SHA_256);
+ } finally {
+ manifestStream.close();
+ }
+ }
+
+ if (sha == null || sha.trim().isEmpty()) {
+ sha = computeAndSaveSHA(deploymentFile);
+ }
+ } else {
+ sha = new MessageDigestGenerator(MessageDigestGenerator.SHA_256).calcDigestString(deploymentFile);
+ }
+ } catch (IOException ex) {
+ throw new RuntimeException("Problem calculating digest of package [" + deploymentFile.getPath() + "].", ex);
+ }
+
+ return sha;
+ }
+
+ /**
* Returns a stream from which the content of the specified package can be read.
*
* @param details package being loaded
@@ -185,4 +227,109 @@ public class FileContentDelegate {
*/
return null;
}
+
+ /**
+ * Computes SHA256 for an archive.
+ *
+ * @param contentFile content archive
+ * @return SHA256 of the archive
+ */
+ public String computeSHAForArchive(File contentFile) {
+ if (!contentFile.isDirectory()) {
+ try {
+ MessageDigestGenerator messageDigest = new MessageDigestGenerator(MessageDigestGenerator.SHA_256);
+ return messageDigest.calcDigestString(contentFile);
+ } catch (Exception ex) {
+ log.error("Not able to compute SHA256 for " + contentFile.getPath() + " .");
+ }
+ }
+
+ return null;
+ }
+
+ /**
+ * Compute SHA256 for the content of an exploded war deployment. This method should be used to
+ * compute the SHA256 for content deployed outside RHQ or for the initial content delivered
+ * with the server.
+ *
+ * @param deploymentDirectory app deployment folder
+ * @return
+ */
+ private String computeAndSaveSHA(File deploymentDirectory) {
+ String sha = null;
+ try {
+ if (deploymentDirectory.isDirectory()) {
+ MessageDigestGenerator messageDigest = new MessageDigestGenerator(MessageDigestGenerator.SHA_256);
+
+ Stack<File> unvisitedFolders = new Stack<File>();
+ unvisitedFolders.add(deploymentDirectory);
+ while (!unvisitedFolders.empty()) {
+ for (File file : unvisitedFolders.pop().listFiles()) {
+ if (file.isDirectory()) {
+ unvisitedFolders.add(file);
+ } else {
+ FileInputStream inputStream = null;
+ try {
+ inputStream = new FileInputStream(file);
+ messageDigest.add(inputStream);
+ } finally {
+ if (inputStream != null) {
+ inputStream.close();
+ }
+ }
+ }
+ }
+ }
+
+ sha = messageDigest.getDigestString();
+ writeSHAToManifest(deploymentDirectory, sha);
+ }
+ } catch (IOException e) {
+ throw new RuntimeException("Error creating artifact for contentFile: " + deploymentDirectory, e);
+ }
+
+ return sha;
+ }
+
+ /**
+ * Write the SHA256 to the manifest using the RHQ-Sha256 attribute tag.
+ *
+ * @param deploymentFolder app deployment folder
+ * @param sha SHA256
+ * @throws IOException
+ */
+ public void writeSHAToManifest(File deploymentFolder, String sha) throws IOException {
+ File manifestFile = new File(deploymentFolder, MANIFEST_RELATIVE_PATH);
+ Manifest manifest;
+ if (manifestFile.exists()) {
+ FileInputStream inputStream = new FileInputStream(manifestFile);
+ try {
+ manifest = new Manifest(inputStream);
+ } finally {
+ inputStream.close();
+ }
+ } else {
+ manifest = new Manifest();
+ manifestFile.getParentFile().mkdirs();
+ manifestFile.createNewFile();
+ }
+
+ Attributes attribs = manifest.getMainAttributes();
+
+ //The main section of the manifest file does not get saved if both of
+ //these two attributes are missing. Please see Attributes implementation.
+ if (!attribs.containsKey(Attributes.Name.MANIFEST_VERSION.toString())
+ && !attribs.containsKey(Attributes.Name.SIGNATURE_VERSION.toString())) {
+ attribs.putValue(Attributes.Name.MANIFEST_VERSION.toString(), "1.0");
+ }
+
+ attribs.putValue(RHQ_SHA_256, sha);
+
+ FileOutputStream outputStream = new FileOutputStream(manifestFile);
+ try {
+ manifest.write(outputStream);
+ } finally {
+ outputStream.close();
+ }
+ }
}
diff --git a/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/util/JarContentDelegate.java b/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/util/JarContentDelegate.java
index d68f429..3bb920a 100644
--- a/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/util/JarContentDelegate.java
+++ b/modules/plugins/jboss-as-5/src/main/java/org/rhq/plugins/jbossas5/util/JarContentDelegate.java
@@ -113,9 +113,9 @@ public class JarContentDelegate extends FileContentDelegate {
} catch (Exception e) {
// leave as null
}
- String version = getVersion(manifestVersion, sha256);
+
ResourcePackageDetails details = new ResourcePackageDetails(new PackageDetailsKey(file.getName(),
- version, getPackageTypeName(), "noarch"));
+ getVersion(sha256), getPackageTypeName(), "noarch"));
packages.add(details);
details.setFileCreatedDate(file.lastModified()); // Why don't we have a last modified time?
@@ -140,21 +140,7 @@ public class JarContentDelegate extends FileContentDelegate {
return packages;
}
- private String getVersion(String manifestVersion, String sha256) {
- // Version string in order of preference
- // manifestVersion + sha256, sha256, manifestVersion, "0"
- String version = "0";
-
- if ((null != manifestVersion) && (null != sha256)) {
- // this protects against the occasional differing binaries with poor manifest maintenance
- version = manifestVersion + " [sha256=" + sha256 + "]";
- } else if (null != sha256) {
- version = "[sha256=" + sha256 + "]";
- } else if (null != manifestVersion) {
- version = manifestVersion;
- }
-
- return version;
+ private String getVersion(String sha256) {
+ return "[sha256=" + sha256 + "]";
}
-
}
12 years, 5 months
[rhq] Branch 'stefan/backingcontent' - modules/plugins
by snegrea
modules/plugins/jboss-as/src/main/java/org/rhq/plugins/jbossas/ApplicationComponent.java | 43 --
modules/plugins/jboss-as/src/main/java/org/rhq/plugins/jbossas/JBossASServerComponent.java | 5
modules/plugins/jboss-as/src/main/java/org/rhq/plugins/jbossas/util/FileContentDelegate.java | 172 ++++++++--
modules/plugins/jboss-as/src/main/java/org/rhq/plugins/jbossas/util/JarContentDelegate.java | 21 -
4 files changed, 154 insertions(+), 87 deletions(-)
New commits:
commit 7879cf11f3d447e305feb9bbfd8ef304d4181f20
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Tue Dec 13 17:17:58 2011 -0600
[BZ 767247] Updated the JBoss AS4 plugin to use the same design as Tomcat plugin with regards to version and SHA256.
diff --git a/modules/plugins/jboss-as/src/main/java/org/rhq/plugins/jbossas/ApplicationComponent.java b/modules/plugins/jboss-as/src/main/java/org/rhq/plugins/jbossas/ApplicationComponent.java
index a7c70db..bf7e925 100644
--- a/modules/plugins/jboss-as/src/main/java/org/rhq/plugins/jbossas/ApplicationComponent.java
+++ b/modules/plugins/jboss-as/src/main/java/org/rhq/plugins/jbossas/ApplicationComponent.java
@@ -52,11 +52,10 @@ import org.rhq.core.pluginapi.inventory.DeleteResourceFacet;
import org.rhq.core.pluginapi.operation.OperationFacet;
import org.rhq.core.pluginapi.operation.OperationResult;
import org.rhq.core.pluginapi.util.FileUtils;
-import org.rhq.core.util.MessageDigestGenerator;
import org.rhq.core.util.ZipUtil;
import org.rhq.core.util.exception.ThrowableUtil;
-import org.rhq.core.util.file.JarContentFileInfo;
import org.rhq.plugins.jbossas.helper.MainDeployer;
+import org.rhq.plugins.jbossas.util.FileContentDelegate;
import org.rhq.plugins.jmx.MBeanResourceComponent;
/**
@@ -128,9 +127,8 @@ public class ApplicationComponent extends MBeanResourceComponent<JBossASServerCo
// Package name and file name of the application are the same
String fileName = new File(fullFileName).getName();
- JarContentFileInfo fileInfo = new JarContentFileInfo(file);
- String sha256 = getSHA256(fileInfo);
- String version = getVersion(fileInfo, sha256);
+ String sha256 = getSHA256(file);
+ String version = getVersion(sha256);
PackageDetailsKey key = new PackageDetailsKey(fileName, version, PKG_TYPE_FILE, ARCHITECTURE);
ResourcePackageDetails details = new ResourcePackageDetails(key);
details.setFileName(fileName);
@@ -148,46 +146,25 @@ public class ApplicationComponent extends MBeanResourceComponent<JBossASServerCo
return packages;
}
- // TODO: if needed we can speed this up by looking in the ResourceContainer's installedPackage
- // list for previously discovered packages. If there use the sha256 from that record. We'd have to
- // get access to that info by adding access in org.rhq.core.pluginapi.content.ContentServices
- private String getSHA256(JarContentFileInfo fileInfo) {
+ private String getSHA256(File file) {
String sha256 = null;
try {
- sha256 = fileInfo.getAttributeValue(RHQ_SHA256, null);
- if (null == sha256) {
- sha256 = new MessageDigestGenerator(MessageDigestGenerator.SHA_256).calcDigestString(fileInfo
- .getContentFile());
- }
- } catch (IOException iex) {
+ FileContentDelegate fileContentDelegate = new FileContentDelegate(file, null, null);
+ sha256 = fileContentDelegate.getSHA(file);
+ } catch (Exception iex) {
//log exception but move on, discovery happens often. No reason to hold up anything.
if (log.isDebugEnabled()) {
- log.debug("Problem calculating digest of package [" + fileInfo.getContentFile().getPath() + "]."
- + iex.getMessage());
+ log.debug("Problem calculating digest of package [" + file.getPath() + "]." + iex.getMessage());
}
}
return sha256;
}
- private String getVersion(JarContentFileInfo fileInfo, String sha256) {
- // Version string in order of preference
- // manifestVersion + sha256, sha256, manifestVersion, "0"
- String version = "0";
- String manifestVersion = fileInfo.getVersion(null);
-
- if ((null != manifestVersion) && (null != sha256)) {
- // this protects against the occasional differing binaries with poor manifest maintenance
- version = manifestVersion + " [sha256=" + sha256 + "]";
- } else if (null != sha256) {
- version = "[sha256=" + sha256 + "]";
- } else if (null != manifestVersion) {
- version = manifestVersion;
- }
-
- return version;
+ private String getVersion(String sha256) {
+ return "[sha256=" + sha256 + "]";
}
public RemovePackagesResponse removePackages(Set<ResourcePackageDetails> packages) {
diff --git a/modules/plugins/jboss-as/src/main/java/org/rhq/plugins/jbossas/JBossASServerComponent.java b/modules/plugins/jboss-as/src/main/java/org/rhq/plugins/jbossas/JBossASServerComponent.java
index 9cc19bc..834f3c4 100644
--- a/modules/plugins/jboss-as/src/main/java/org/rhq/plugins/jbossas/JBossASServerComponent.java
+++ b/modules/plugins/jboss-as/src/main/java/org/rhq/plugins/jbossas/JBossASServerComponent.java
@@ -100,7 +100,6 @@ import org.rhq.core.pluginapi.support.SnapshotReportResults;
import org.rhq.core.pluginapi.support.SupportFacet;
import org.rhq.core.pluginapi.util.FileUtils;
import org.rhq.core.pluginapi.util.SelectiveSkippingEntityResolver;
-import org.rhq.core.util.MessageDigestGenerator;
import org.rhq.plugins.jbossas.helper.JavaSystemProperties;
import org.rhq.plugins.jbossas.helper.MainDeployer;
import org.rhq.plugins.jbossas.util.ConnectionFactoryConfigurationEditor;
@@ -896,9 +895,7 @@ public class JBossASServerComponent<T extends ResourceComponent<?>> implements M
return;
}
- InputStream isForTempDir = new BufferedInputStream(new FileInputStream(tempFile));
- String shaString = new MessageDigestGenerator(MessageDigestGenerator.SHA_256).getDigestString(tempFile);
- deployer.createContent(details, isForTempDir, !zip, createBackup, shaString);
+ deployer.createContent(details, tempFile, !zip, createBackup);
String vhost = null;
if (resourceTypeName.equals(RESOURCE_TYPE_WAR)) {
diff --git a/modules/plugins/jboss-as/src/main/java/org/rhq/plugins/jbossas/util/FileContentDelegate.java b/modules/plugins/jboss-as/src/main/java/org/rhq/plugins/jbossas/util/FileContentDelegate.java
index 6c4e83b..499d5a5 100644
--- a/modules/plugins/jboss-as/src/main/java/org/rhq/plugins/jbossas/util/FileContentDelegate.java
+++ b/modules/plugins/jboss-as/src/main/java/org/rhq/plugins/jbossas/util/FileContentDelegate.java
@@ -30,6 +30,7 @@ import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Set;
+import java.util.Stack;
import java.util.jar.Attributes;
import java.util.jar.Manifest;
@@ -40,6 +41,7 @@ import org.rhq.core.domain.content.PackageDetails;
import org.rhq.core.domain.content.PackageDetailsKey;
import org.rhq.core.domain.content.transfer.ResourcePackageDetails;
import org.rhq.core.pluginapi.util.FileUtils;
+import org.rhq.core.util.MessageDigestGenerator;
import org.rhq.core.util.ZipUtil;
import org.rhq.core.util.file.FileUtil;
@@ -50,7 +52,9 @@ import org.rhq.core.util.file.FileUtil;
* @author Jason Dobies
*/
public class FileContentDelegate {
- // Attributes --------------------------------------------
+
+ private static final String RHQ_SHA_256 = "RHQ-Sha256";
+ private static final String MANIFEST_RELATIVE_PATH = "META-INF/MANIFEST.MF";
private Log log = LogFactory.getLog(FileContentDelegate.class);
@@ -59,16 +63,12 @@ public class FileContentDelegate {
private String fileEnding;
private String packageTypeName;
- // Constructors --------------------------------------------
-
public FileContentDelegate(File directory, String fileEnding, String packageTypeName) {
this.directory = directory;
this.fileEnding = fileEnding;
this.packageTypeName = packageTypeName;
}
- // Public --------------------------------------------
-
public String getFileEnding() {
return fileEnding;
}
@@ -92,41 +92,23 @@ public class FileContentDelegate {
* @param createBackup If <code>true</code>, the original file will be backed up to file.bak
* @param shaString the SHA-256 of the specified input stream
*/
- public void createContent(PackageDetails details, InputStream content, boolean unzip, boolean createBackup,
- String shaString) {
- File contentFile = getPath(details);
+ public void createContent(PackageDetails details, File sourceContentFile, boolean unzip, boolean createBackup) {
+ File destinationContentFile = getPath(details);
try {
if (createBackup) {
- moveToBackup(contentFile, ".bak");
+ moveToBackup(destinationContentFile, ".bak");
}
if (unzip) {
- ZipUtil.unzipFile(content, contentFile);
- File manifestFile = new File(contentFile, "META-INF/MANIFEST.MF");
- Manifest manifest;
- if (manifestFile.exists()) {
- FileInputStream inputStream = new FileInputStream(manifestFile);
- try {
- manifest = new Manifest(inputStream);
- } finally {
- inputStream.close();
- }
- } else {
- manifest = new Manifest();
- }
- Attributes attribs = manifest.getMainAttributes();
- attribs.putValue("RHQ-Sha256", shaString);
- FileOutputStream outputStream = new FileOutputStream(manifestFile);
- try {
- manifest.write(outputStream);
- } finally {
- outputStream.close();
- }
+ ZipUtil.unzipFile(sourceContentFile, destinationContentFile);
+ String shaString = new MessageDigestGenerator(MessageDigestGenerator.SHA_256)
+ .calcDigestString(sourceContentFile);
+ writeSHAToManifest(destinationContentFile, shaString);
} else {
- FileUtil.writeFile(content, contentFile);
+ FileUtil.copyFile(sourceContentFile, destinationContentFile);
}
- details.setFileName(contentFile.getPath());
+ details.setFileName(destinationContentFile.getPath());
} catch (IOException e) {
- throw new RuntimeException("Error creating artifact from details: " + contentFile, e);
+ throw new RuntimeException("Error creating artifact from details: " + destinationContentFile, e);
}
}
@@ -211,4 +193,128 @@ public class FileContentDelegate {
*/
return null;
}
+
+ /**
+ * Retrieves the SHA256 for a deployed application.
+ * 1) If the app is exploded then return RHQ-Sha256 manifest attribute.
+ * 1.1) If RHQ-Sha256 is missing then compute it, save it and return the result.
+ * 2) If the app is an archive then compute SHA256 on the fly and return it.
+ *
+ * @param deploymentFile deployment file
+ * @return
+ */
+ public String getSHA(File deploymentFile) {
+ String sha = null;
+ try {
+ if (deploymentFile.isDirectory()) {
+ File manifestFile = new File(deploymentFile.getAbsolutePath(), MANIFEST_RELATIVE_PATH);
+ if (manifestFile.exists()) {
+ InputStream manifestStream = new FileInputStream(manifestFile);
+ Manifest manifest = null;
+ try {
+ manifest = new Manifest(manifestStream);
+ sha = manifest.getMainAttributes().getValue(RHQ_SHA_256);
+ } finally {
+ manifestStream.close();
+ }
+ }
+
+ if (sha == null || sha.trim().isEmpty()) {
+ sha = computeAndSaveSHA(deploymentFile);
+ }
+ } else {
+ sha = new MessageDigestGenerator(MessageDigestGenerator.SHA_256).calcDigestString(deploymentFile);
+ }
+ } catch (IOException ex) {
+ throw new RuntimeException("Problem calculating digest of package [" + deploymentFile.getPath() + "].", ex);
+ }
+
+ return sha;
+ }
+
+ /**
+ * Compute SHA256 for the content of an exploded war deployment. This method should be used to
+ * compute the SHA256 for content deployed outside RHQ or for the initial content delivered
+ * with the server.
+ *
+ * @param deploymentDirectory app deployment folder
+ * @return
+ */
+ private String computeAndSaveSHA(File deploymentDirectory) {
+ String sha = null;
+ try {
+ if (deploymentDirectory.isDirectory()) {
+ MessageDigestGenerator messageDigest = new MessageDigestGenerator(MessageDigestGenerator.SHA_256);
+
+ Stack<File> unvisitedFolders = new Stack<File>();
+ unvisitedFolders.add(deploymentDirectory);
+ while (!unvisitedFolders.empty()) {
+ for (File file : unvisitedFolders.pop().listFiles()) {
+ if (file.isDirectory()) {
+ unvisitedFolders.add(file);
+ } else {
+ FileInputStream inputStream = null;
+ try {
+ inputStream = new FileInputStream(file);
+ messageDigest.add(inputStream);
+ } finally {
+ if (inputStream != null) {
+ inputStream.close();
+ }
+ }
+ }
+ }
+ }
+
+ sha = messageDigest.getDigestString();
+ writeSHAToManifest(deploymentDirectory, sha);
+ }
+ } catch (IOException e) {
+ throw new RuntimeException("Error creating artifact for contentFile: " + deploymentDirectory, e);
+ }
+
+ return sha;
+ }
+
+ /**
+ * Write the SHA256 to the manifest using the RHQ-Sha256 attribute tag.
+ *
+ * @param deploymentFolder app deployment folder
+ * @param sha SHA256
+ * @throws IOException
+ */
+ private void writeSHAToManifest(File deploymentFolder, String sha) throws IOException {
+ File manifestFile = new File(deploymentFolder, MANIFEST_RELATIVE_PATH);
+ Manifest manifest;
+ if (manifestFile.exists()) {
+ FileInputStream inputStream = new FileInputStream(manifestFile);
+ try {
+ manifest = new Manifest(inputStream);
+ } finally {
+ inputStream.close();
+ }
+ } else {
+ manifest = new Manifest();
+ manifestFile.getParentFile().mkdirs();
+ manifestFile.createNewFile();
+ }
+
+ Attributes attribs = manifest.getMainAttributes();
+
+ //The main section of the manifest file does not get saved if both of
+ //these two attributes are missing. Please see Attributes implementation.
+ if (!attribs.containsKey(Attributes.Name.MANIFEST_VERSION.toString())
+ && !attribs.containsKey(Attributes.Name.SIGNATURE_VERSION.toString())) {
+ attribs.putValue(Attributes.Name.MANIFEST_VERSION.toString(), "1.0");
+ }
+
+ attribs.putValue(RHQ_SHA_256, sha);
+
+ FileOutputStream outputStream = new FileOutputStream(manifestFile);
+ try {
+ manifest.write(outputStream);
+ } finally {
+ outputStream.close();
+ }
+ }
}
\ No newline at end of file
diff --git a/modules/plugins/jboss-as/src/main/java/org/rhq/plugins/jbossas/util/JarContentDelegate.java b/modules/plugins/jboss-as/src/main/java/org/rhq/plugins/jbossas/util/JarContentDelegate.java
index ee3e603..9dd080e 100644
--- a/modules/plugins/jboss-as/src/main/java/org/rhq/plugins/jbossas/util/JarContentDelegate.java
+++ b/modules/plugins/jboss-as/src/main/java/org/rhq/plugins/jbossas/util/JarContentDelegate.java
@@ -111,9 +111,9 @@ public class JarContentDelegate extends FileContentDelegate {
} catch (Exception e) {
// leave as null
}
- String version = getVersion(manifestVersion, sha256);
+
ResourcePackageDetails details = new ResourcePackageDetails(new PackageDetailsKey(file.getName(),
- version, getPackageTypeName(), "noarch"));
+ getVersion(sha256), getPackageTypeName(), "noarch"));
packages.add(details);
details.setFileCreatedDate(file.lastModified()); // Why don't we have a last modified time?
@@ -138,21 +138,8 @@ public class JarContentDelegate extends FileContentDelegate {
return packages;
}
- private String getVersion(String manifestVersion, String sha256) {
- // Version string in order of preference
- // manifestVersion + sha256, sha256, manifestVersion, "0"
- String version = "0";
-
- if ((null != manifestVersion) && (null != sha256)) {
- // this protects against the occasional differing binaries with poor manifest maintenance
- version = manifestVersion + " [sha256=" + sha256 + "]";
- } else if (null != sha256) {
- version = "[sha256=" + sha256 + "]";
- } else if (null != manifestVersion) {
- version = manifestVersion;
- }
-
- return version;
+ private String getVersion(String sha256) {
+ return "[sha256=" + sha256 + "]";
}
}
\ No newline at end of file
12 years, 5 months
[rhq] Branch 'jsanda/drift' - 4 commits - modules/common modules/core
by Jay Shaughnessy
modules/common/drift/src/main/java/org/rhq/common/drift/FileEntry.java | 16
modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/ChangeSetManager.java | 8
modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/ChangeSetManagerImpl.java | 47 -
modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftDetectionSchedule.java | 1
modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftDetector.java | 332 ++++++----
modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftManager.java | 12
6 files changed, 245 insertions(+), 171 deletions(-)
New commits:
commit 2e75028c77cc24a2b097e82385d4c7dfb9b774d5
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Tue Dec 13 17:17:29 2011 -0500
[Bug 756100 - RFE: use timestamp and file size during drift detection scans]
More work on using timestamp (and filesize) info to avoid SHA digest
generation. Handle situations where the current changeset is supplied
by the server, due to pinning or agent sync. In these situations work
to replace the non-timestamped changesets with timestamped versions as
soon as possible.
diff --git a/modules/common/drift/src/main/java/org/rhq/common/drift/FileEntry.java b/modules/common/drift/src/main/java/org/rhq/common/drift/FileEntry.java
index e1766a3..564e9c5 100644
--- a/modules/common/drift/src/main/java/org/rhq/common/drift/FileEntry.java
+++ b/modules/common/drift/src/main/java/org/rhq/common/drift/FileEntry.java
@@ -29,7 +29,7 @@ import java.util.Date;
import org.rhq.core.domain.drift.DriftCategory;
import org.rhq.core.util.file.FileUtil;
-public class FileEntry implements Serializable {
+public class FileEntry implements Serializable, Comparable<FileEntry> {
private static final long serialVersionUID = 1L;
@@ -117,9 +117,23 @@ public class FileEntry implements Serializable {
return size;
}
+ public void setLastModified(Long lastModified) {
+ this.lastModified = lastModified;
+ }
+
+ public void setSize(Long size) {
+ this.size = size;
+ }
+
@Override
public String toString() {
return getClass().getSimpleName() + "[newSHA: " + newSHA + ", oldSHA: " + oldSHA + ", file: " + file
+ ", type: " + type.code() + ", lastModified: " + new Date(lastModified) + ", size: " + size + "]";
}
+
+ // Support Sets and Ordering by deferring to a String compare on the file path
+ @Override
+ public int compareTo(FileEntry o) {
+ return this.file.compareTo(o.getFile());
+ }
}
diff --git a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftDetector.java b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftDetector.java
index 10d2006..869e376 100644
--- a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftDetector.java
+++ b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftDetector.java
@@ -184,117 +184,152 @@ public class DriftDetector implements Runnable {
log.debug("Generating drift change set for " + schedule);
- File currentSnapshot = changeSetMgr.findChangeSet(schedule.getResourceId(), schedule.getDriftDefinition()
+ boolean isPinned = schedule.getDriftDefinition().isPinned();
+ final File basedir = new File(basedir(schedule.getResourceId(), schedule.getDriftDefinition()));
+
+ File currentFullSnapshot = changeSetMgr.findChangeSet(schedule.getResourceId(), schedule.getDriftDefinition()
.getName(), COVERAGE);
- File snapshotFile = currentSnapshot;
- if (schedule.getDriftDefinition().isPinned()) {
- snapshotFile = new File(snapshotFile.getParentFile(), FILE_SNAPSHOT_PINNED);
+ // unless pinned use the current full snapshot file for the definition, otherwise use the pinned snapshot
+ File snapshotFile = isPinned ? new File(currentFullSnapshot.getParentFile(), FILE_SNAPSHOT_PINNED)
+ : currentFullSnapshot;
+
+ // get a Set of all files in the detection, consider them initially new files, and we'll knock the
+ // list down as we go. As we build up FileEntries in memory this Set will shrink. It's marginally
+ // less memory than if we had both in memory at the same time.
+ final Set<File> newFiles = new HashSet<File>(1000);
+
+ // If the basedir is still valid we need to do a directory tree scan to look for newly added files
+ if (basedir.isDirectory()) {
+ DriftDefinition driftDef = schedule.getDriftDefinition();
+ List<Filter> includes = driftDef.getIncludes();
+ List<Filter> excludes = driftDef.getExcludes();
+
+ for (File dir : getScanDirectories(basedir, includes)) {
+ forEachFile(dir, new FilterFileVisitor(basedir, includes, excludes, new FileVisitor() {
+ @Override
+ public void visit(File file) {
+ if (file.canRead()) {
+ newFiles.add(file);
+ } else if (log.isDebugEnabled()) {
+ log.debug("Skipping " + file.getPath() + " as new file since it is not readable.");
+ }
+ }
+ }));
+ }
}
- final File basedir = new File(basedir(schedule.getResourceId(), schedule.getDriftDefinition()));
- final Set<File> processedFiles = new HashSet<File>();
- final List<FileEntry> snapshotEntries = new LinkedList<FileEntry>();
- final List<FileEntry> deltaEntries = new LinkedList<FileEntry>();
+ final List<FileEntry> unchangedEntries = new LinkedList<FileEntry>();
+ final List<FileEntry> removedEntries = new LinkedList<FileEntry>();
+ final List<FileEntry> cantReadEntries = new LinkedList<FileEntry>();
+ final List<FileEntry> changedEntries = new LinkedList<FileEntry>();
+ final List<FileEntry> changedPinnedEntries = isPinned ? new LinkedList<FileEntry>() : null;
- ChangeSetReader coverageReader = null;
+ ChangeSetReader snapshotReader = null;
int newVersion;
+ boolean updateSnapshot = false;
try {
- coverageReader = changeSetMgr.getChangeSetReader(snapshotFile);
+ snapshotReader = changeSetMgr.getChangeSetReader(snapshotFile);
if (!basedir.exists()) {
log.warn("The base directory [" + basedir.getAbsolutePath() + "] for " + schedule + " does not exist.");
}
- if (schedule.getDriftDefinition().isPinned()) {
- ChangeSetReader snapshotReader = null;
+ if (isPinned) {
+ // If pinned we compare against the pinned snapshot but we need to know the current snapshot version,
+ // get it from the current full snapshot.
+ ChangeSetReader currentFullSnapshotReader = null;
try {
- snapshotReader = changeSetMgr.getChangeSetReader(currentSnapshot);
- newVersion = snapshotReader.getHeaders().getVersion() + 1;
+ currentFullSnapshotReader = changeSetMgr.getChangeSetReader(currentFullSnapshot);
+ newVersion = currentFullSnapshotReader.getHeaders().getVersion() + 1;
} finally {
- snapshotReader.close();
+ currentFullSnapshotReader.close();
}
} else {
- newVersion = coverageReader.getHeaders().getVersion() + 1;
+ newVersion = snapshotReader.getHeaders().getVersion() + 1;
}
// First look for files that have either been modified or deleted
- scanForModifiedOrDeletedFiles(schedule, basedir, processedFiles, snapshotEntries, deltaEntries,
- coverageReader);
+ updateSnapshot = scanSnapshotFiles(schedule, basedir, snapshotReader, newFiles, unchangedEntries,
+ removedEntries, cantReadEntries, changedEntries, changedPinnedEntries);
+
} finally {
- coverageReader.close();
+ snapshotReader.close();
}
- // If the basedir is still valid we need to do a directory tree scan to look for newly added files
- if (basedir.isDirectory()) {
- DriftDefinition driftDef = schedule.getDriftDefinition();
- List<Filter> includes = driftDef.getIncludes();
- List<Filter> excludes = driftDef.getExcludes();
+ // if necessary, re-write the pinned snapshot file because we've updated timestamp/filesize info, which
+ // on subsequent detection runs will help us avoid SHA generation. It must maintain the same entries.
+ if (isPinned && updateSnapshot) {
+ changedPinnedEntries.addAll(unchangedEntries);
- for (File dir : getScanDirectories(basedir, includes)) {
- forEachFile(dir, new FilterFileVisitor(basedir, includes, excludes, new FileVisitor() {
- @Override
- public void visit(File file) {
- try {
- if (processedFiles.contains(file)) {
- return;
- }
+ backupAndDeleteCurrentSnapshot(snapshotFile);
+ updatePinnedSnapshot(schedule, snapshotFile, changedPinnedEntries);
+ }
- if (!file.canRead()) {
- if (log.isDebugEnabled()) {
- log.debug("Skipping " + file.getPath() + " since it is not readable.");
- }
- return;
- }
+ final List<FileEntry> snapshotEntries = new LinkedList<FileEntry>(unchangedEntries);
+ snapshotEntries.addAll(changedEntries);
- if (log.isInfoEnabled()) {
- log.info("Detected added file for " + schedule + " --> " + file.getAbsolutePath());
- }
+ final List<FileEntry> deltaEntries = new LinkedList<FileEntry>(changedEntries);
+ deltaEntries.addAll(removedEntries);
+ deltaEntries.addAll(cantReadEntries);
- FileEntry newEntry = addedFileEntry(relativePath(basedir, file), sha256(file), file
- .lastModified(), file.length());
- deltaEntries.add(newEntry);
- snapshotEntries.add(newEntry);
- } catch (IOException e) {
- log.error("An error occurred while generating a drift change set for " + schedule + ": "
- + e.getMessage());
- throw new DriftDetectionException("An error occurred while generating a drift change set",
- e);
- }
- }
- }));
+ for (File file : newFiles) {
+ try {
+ if (log.isInfoEnabled()) {
+ log.info("Detected added file for " + schedule + " --> " + file.getAbsolutePath());
+ }
+
+ FileEntry newEntry = addedFileEntry(relativePath(basedir, file), sha256(file), file.lastModified(),
+ file.length());
+
+ deltaEntries.add(newEntry);
+ snapshotEntries.add(newEntry);
+
+ } catch (IOException e) {
+ log.error("An error occurred while generating a drift change set for " + schedule + ": "
+ + e.getMessage());
+ throw new DriftDetectionException("An error occurred while generating a drift change set", e);
}
}
if (deltaEntries.isEmpty()) {
- summary.setNewSnapshot(currentSnapshot);
-
- // If nothing has changed, there is no need to add/update any files
- // unless the definition is pinned in which case we need to reset
- // the current snapshot to match the pinned snapshot. Note though
- // that we increment the snapshot version in order to let the server
- // know about the state change.
- // if no timestamp must re-gen the files.
- if (schedule.getDriftDefinition().isPinned() && newVersion > 1
- && !isPreviousChangeSetEmpty(schedule.getResourceId(), schedule.getDriftDefinition())) {
- currentSnapshot.delete();
- File newSnapshot = updateCurrentSnapshot(schedule, snapshotEntries, newVersion);
-
- updateDeltaSnapshot(summary, schedule, deltaEntries, newVersion, currentSnapshot, newSnapshot);
- // TODO report back to the server that we are back in compliance
+ File newSnapshot = currentFullSnapshot;
+
+ if (!isPinned) {
+ // If unpinned and there is no detected drift then we generally don't need to add/update any files.
+ // But, if we have timestamp/filesize updates then we want to replace the current snapshot with
+ // the updated entries, so we can avoid SHA generation on subsequent runs.
+ if (updateSnapshot) {
+ currentFullSnapshot.delete();
+ newSnapshot = updateCurrentSnapshot(schedule, snapshotEntries, newVersion - 1);
+ }
+ } else {
+ // If pinned and returning to compliance (meaning no drift now but the previous snapshot did have drift)
+ // then we need to reset the current snapshot to match the pinned snapshot. Note though that we
+ // increment the snapshot version in order to let the server know about the state change.
+ if (newVersion > 1
+ && !isPreviousChangeSetEmpty(schedule.getResourceId(), schedule.getDriftDefinition())) {
+ currentFullSnapshot.delete();
+ newSnapshot = updateCurrentSnapshot(schedule, snapshotEntries, newVersion);
+
+ updateDeltaSnapshot(summary, schedule, deltaEntries, newVersion, currentFullSnapshot, newSnapshot);
+ }
}
+
+ summary.setNewSnapshot(newSnapshot);
+
} else {
- if (schedule.getDriftDefinition().isPinned() && newVersion > 1
- && isSameAsPreviousChangeSet(deltaEntries, currentSnapshot)) {
- // if we are still out of compliance just report, we report a
- // repeat change set to indicate no changes but also still out
- // of compliance.
+ // if there is drift, but we're pinned and the drift is the same as the previous detection, just
+ // mark it as a repeat to indicate that we're out of compliance but not in any new way.
+ if (isPinned && newVersion > 1 && isSameAsPreviousChangeSet(deltaEntries, currentFullSnapshot)) {
summary.setVersion(newVersion - 1);
summary.setRepeat(true);
+
return;
}
- File oldSnapshot = backupAndDeleteCurrentSnapshot(currentSnapshot);
+ // otherwise, generate a new current snapshot, and a snapshot delta reflecting the latest drift
+ File oldSnapshot = backupAndDeleteCurrentSnapshot(currentFullSnapshot);
File newSnapshot = updateCurrentSnapshot(schedule, snapshotEntries, newVersion);
updateDeltaSnapshot(summary, schedule, deltaEntries, newVersion, oldSnapshot, newSnapshot);
@@ -321,55 +356,88 @@ public class DriftDetector implements Runnable {
return directories;
}
- private void scanForModifiedOrDeletedFiles(DriftDetectionSchedule schedule, File basedir, Set<File> processedFiles,
- List<FileEntry> snapshotEntries, List<FileEntry> deltaEntries, ChangeSetReader coverageReader)
+ /**
+ * Process the entries for the snapshotReader. Each entry will be placed in one of the various Lists depending
+ * on what bucket it falls into.
+ * @return true if unchangedEntries (meaning no drift) had timestamp/filesize info updated, in which case the
+ * snapshot should be re-written to disk even if there was no drift.
+ * @throws IOException
+ */
+ private boolean scanSnapshotFiles(DriftDetectionSchedule schedule, File basedir, ChangeSetReader snapshotReader,
+ Set<File> newFiles, List<FileEntry> unchangedEntries, List<FileEntry> removedEntries,
+ List<FileEntry> cantReadEntries, List<FileEntry> modifiedEntries, List<FileEntry> changedPinnedEntries)
throws IOException {
- for (FileEntry entry : coverageReader) {
+ boolean result = false;
+
+ for (FileEntry entry : snapshotReader) {
File file = new File(basedir, entry.getFile());
+ newFiles.remove(file);
+
if (!file.exists()) {
// The file has been deleted since the last scan
if (log.isDebugEnabled()) {
log.debug("Detected deleted file for " + schedule + " --> " + file.getAbsolutePath());
}
- deltaEntries.add(removedFileEntry(entry.getFile(), entry.getNewSHA()));
+ removedEntries.add(removedFileEntry(entry.getFile(), entry.getNewSHA()));
+
+ if (null != changedPinnedEntries) {
+ changedPinnedEntries.add(entry);
+ }
+
+ continue;
+
} else if (!file.canRead()) {
- processedFiles.add(file);
if (log.isDebugEnabled()) {
log.debug(file.getPath() + " is no longer readable. Treating it as a deleted file.");
}
- deltaEntries.add(removedFileEntry(entry.getFile(), entry.getNewSHA()));
+ cantReadEntries.add(removedFileEntry(entry.getFile(), entry.getNewSHA()));
+
+ if (null != changedPinnedEntries) {
+ changedPinnedEntries.add(entry);
+ }
+
+ continue;
+
} else {
- processedFiles.add(file);
- // if we do not have the last modification time and the file size, then we
- // have to compare SHAs. We can wind up without a timestamp of a file size
- // when the agent is restarted and the data directory is purged. The server
- // sends snapshots down to the agent and since the server does not store the
- // file modification time or size, the fields will get initialized to -1.
- if (entry.getLastModified() == -1 || entry.getSize() == -1) {
- String currentSHA = sha256(file);
- if (!entry.getNewSHA().equals(currentSHA)) {
- FileEntry modifiedEntry = changedFileEntry(entry.getFile(), entry.getNewSHA(), currentSHA, file
- .lastModified(), file.length());
- deltaEntries.add(modifiedEntry);
- snapshotEntries.add(modifiedEntry);
- }
- } else if (entry.getLastModified() != -1 && entry.getSize() != -1
- && (entry.getLastModified() != file.lastModified() || entry.getSize() != file.length())) {
- if (log.isDebugEnabled()) {
- log.debug("Detected modified file for " + schedule + " --> " + file.getAbsolutePath());
- }
- String currentSHA = sha256(file);
+ String currentSHA = null;
+ boolean isModified = false;
+
+ // perform a SHA comparison if we are unable to compare size and lastModified or if the
+ // size or lastModified test fails. We may not have size or lastModified values for the
+ // entry when the current snapshot was provided by the server, either due to a synch or
+ // pinning scenario. The server does not store that information and will provide -1 for defaults.
+ if (entry.getLastModified() == -1 || entry.getSize() == -1
+ || entry.getLastModified() != file.lastModified() || entry.getSize() != file.length()) {
+
+ currentSHA = sha256(file);
+ isModified = !entry.getNewSHA().equals(currentSHA);
+ }
+
+ if (isModified) {
FileEntry modifiedEntry = changedFileEntry(entry.getFile(), entry.getNewSHA(), currentSHA, file
.lastModified(), file.length());
- deltaEntries.add(modifiedEntry);
- snapshotEntries.add(modifiedEntry);
+ modifiedEntries.add(modifiedEntry);
+
+ if (null != changedPinnedEntries) {
+ changedPinnedEntries.add(entry);
+ }
+
} else {
- // The file has not changed
- snapshotEntries.add(entry);
+ if (-1 == entry.getLastModified()) {
+ entry.setLastModified(file.lastModified());
+ result = true;
+ }
+ if (-1 == entry.getSize()) {
+ entry.setSize(file.length());
+ result = true;
+ }
+ unchangedEntries.add(entry);
}
}
}
+
+ return result;
}
@SuppressWarnings("unused")
@@ -449,6 +517,26 @@ public class DriftDetector implements Runnable {
}
}
+ private File updatePinnedSnapshot(DriftDetectionSchedule schedule, File pinnedSnapshot,
+ List<FileEntry> snapshotEntries) throws IOException {
+
+ ChangeSetWriter newSnapshotWriter = null;
+
+ try {
+ Headers snapshotHeaders = createHeaders(schedule, COVERAGE, 0);
+ newSnapshotWriter = changeSetMgr.getChangeSetWriter(pinnedSnapshot, snapshotHeaders);
+
+ for (FileEntry entry : snapshotEntries) {
+ newSnapshotWriter.write(entry);
+ }
+
+ return pinnedSnapshot;
+
+ } finally {
+ newSnapshotWriter.close();
+ }
+ }
+
private boolean isSameAsPreviousChangeSet(List<FileEntry> entries, File currentSnapsotFile) throws IOException {
HashMap<String, FileEntry> entriesMap = new HashMap<String, FileEntry>();
for (FileEntry e : entries) {
commit bf599134469592012e7ee69a00354cc815e7db8e
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Tue Dec 13 16:22:13 2011 -0500
Make sure the drift def id is also in the copy so it can be applied to
generated changeset headers.
diff --git a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftDetectionSchedule.java b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftDetectionSchedule.java
index 8ddfc03..60fb35b 100644
--- a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftDetectionSchedule.java
+++ b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftDetectionSchedule.java
@@ -43,6 +43,7 @@ public class DriftDetectionSchedule implements Comparable<DriftDetectionSchedule
public DriftDetectionSchedule copy() {
DriftDetectionSchedule copy = new DriftDetectionSchedule(resourceId, new DriftDefinition(driftDef
.getConfiguration().deepCopyWithoutProxies()));
+ copy.driftDef.setId(driftDef.getId());
copy.nextScan = nextScan;
return copy;
}
commit 54ccb75ffb746b3e167f8fdfb2fba3cc70dc13e2
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Mon Dec 12 15:53:14 2011 -0500
Ensure we have both the timestamp and the filesize before we skip using
a SHA test.
diff --git a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftDetector.java b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftDetector.java
index 11a2d85..10d2006 100644
--- a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftDetector.java
+++ b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftDetector.java
@@ -346,7 +346,7 @@ public class DriftDetector implements Runnable {
// when the agent is restarted and the data directory is purged. The server
// sends snapshots down to the agent and since the server does not store the
// file modification time or size, the fields will get initialized to -1.
- if (entry.getLastModified() == -1 && entry.getSize() == -1) {
+ if (entry.getLastModified() == -1 || entry.getSize() == -1) {
String currentSHA = sha256(file);
if (!entry.getNewSHA().equals(currentSHA)) {
FileEntry modifiedEntry = changedFileEntry(entry.getFile(), entry.getNewSHA(), currentSHA, file
commit 610d9447060444806f5fd27c29a5f118195636c0
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Mon Dec 12 15:51:46 2011 -0500
Just some cleanup
- remove unused/obsolete methods from ChangeSetManager and the impl
- move some commonly referenced filenames into constants
diff --git a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/ChangeSetManager.java b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/ChangeSetManager.java
index 1fc9fe9..5117ab1 100644
--- a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/ChangeSetManager.java
+++ b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/ChangeSetManager.java
@@ -26,7 +26,6 @@ import org.rhq.common.drift.ChangeSetReader;
import org.rhq.common.drift.ChangeSetWriter;
import org.rhq.common.drift.Headers;
import org.rhq.core.domain.drift.DriftChangeSetCategory;
-import org.rhq.core.domain.drift.DriftDefinition;
public interface ChangeSetManager {
@@ -66,11 +65,4 @@ public interface ChangeSetManager {
ChangeSetWriter getChangeSetWriter(int resourceId, Headers headers) throws IOException;
ChangeSetWriter getChangeSetWriter(File changeSetFile, Headers headers) throws IOException;
-
- ChangeSetWriter getChangeSetWriterForUpdate(int resourceId, Headers headers) throws IOException;
-
- void updateChangeSet(int resourceId, Headers headers) throws IOException;
-
- void addFileToChangeSet(int resourceId, DriftDefinition driftDefinition, File file);
-
}
diff --git a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/ChangeSetManagerImpl.java b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/ChangeSetManagerImpl.java
index 132a7ca..97e92d3 100644
--- a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/ChangeSetManagerImpl.java
+++ b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/ChangeSetManagerImpl.java
@@ -21,11 +21,7 @@ package org.rhq.core.pc.drift;
import static org.rhq.core.domain.drift.DriftChangeSetCategory.COVERAGE;
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
import java.io.IOException;
import org.rhq.common.drift.ChangeSetReader;
@@ -34,8 +30,6 @@ import org.rhq.common.drift.ChangeSetWriter;
import org.rhq.common.drift.ChangeSetWriterImpl;
import org.rhq.common.drift.Headers;
import org.rhq.core.domain.drift.DriftChangeSetCategory;
-import org.rhq.core.domain.drift.DriftDefinition;
-import org.rhq.core.util.stream.StreamUtil;
public class ChangeSetManagerImpl implements ChangeSetManager {
@@ -51,13 +45,13 @@ public class ChangeSetManagerImpl implements ChangeSetManager {
if (changeSetDir == null || !changeSetDir.exists()) {
return false;
}
- return new File(changeSetDir, "changeset.txt").exists();
+ return new File(changeSetDir, DriftDetector.FILE_CHANGESET_FULL).exists();
}
@Override
public File findChangeSet(int resourceId, String driftDefinitionName) throws IOException {
File changeSetDir = findChangeSetDir(resourceId, driftDefinitionName);
- File changeSetFile = new File(changeSetDir, "changeset.txt");
+ File changeSetFile = new File(changeSetDir, DriftDetector.FILE_CHANGESET_FULL);
if (changeSetFile.exists()) {
return changeSetFile;
@@ -76,9 +70,9 @@ public class ChangeSetManagerImpl implements ChangeSetManager {
switch (type) {
case COVERAGE:
- return new File(changeSetDir, "changeset.txt");
+ return new File(changeSetDir, DriftDetector.FILE_CHANGESET_FULL);
case DRIFT:
- return new File(changeSetDir, "drift-changeset.txt");
+ return new File(changeSetDir, DriftDetector.FILE_CHANGESET_DELTA);
default:
throw new IllegalArgumentException(type + " is not a recognized, supported change set type.");
}
@@ -111,9 +105,9 @@ public class ChangeSetManagerImpl implements ChangeSetManager {
File changeSet;
if (headers.getType() == COVERAGE) {
- changeSet = new File(changeSetDir, "changeset.txt");
+ changeSet = new File(changeSetDir, DriftDetector.FILE_CHANGESET_FULL);
} else {
- changeSet = new File(changeSetDir, "drift-changeset.txt");
+ changeSet = new File(changeSetDir, DriftDetector.FILE_CHANGESET_DELTA);
}
return new ChangeSetWriterImpl(changeSet, headers);
}
@@ -123,35 +117,6 @@ public class ChangeSetManagerImpl implements ChangeSetManager {
return new ChangeSetWriterImpl(changeSetFile, headers);
}
- @Override
- public ChangeSetWriter getChangeSetWriterForUpdate(int resourceId, Headers headers) throws IOException {
- File resourceDir = new File(changeSetsDir, Integer.toString(resourceId));
- File changeSetDir = new File(resourceDir, headers.getDriftDefinitionName());
-
- return new ChangeSetWriterImpl(new File(changeSetDir, "changeset.working"), headers);
- }
-
- @Override
- public void updateChangeSet(int resourceId, Headers headers) throws IOException {
- File resourceDir = new File(changeSetsDir, Integer.toString(resourceId));
- File changeSetDir = new File(resourceDir, headers.getDriftDefinitionName());
- File newChangeSet = new File(changeSetDir, "changeset.working");
- File changeSet = new File(changeSetDir, "changeset.txt");
-
- if (!newChangeSet.exists()) {
- return;
- }
-
- changeSet.delete();
-
- StreamUtil.copy(new BufferedInputStream(new FileInputStream(newChangeSet)), new BufferedOutputStream(
- new FileOutputStream(changeSet)));
- }
-
- @Override
- public void addFileToChangeSet(int resourceId, DriftDefinition driftDefinition, File file) {
- }
-
private File findChangeSetDir(int resourceId, String driftDefinitionName) {
File resourceDir = new File(changeSetsDir, Integer.toString(resourceId));
if (!resourceDir.exists()) {
diff --git a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftDetector.java b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftDetector.java
index 848ae93..11a2d85 100644
--- a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftDetector.java
+++ b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftDetector.java
@@ -19,8 +19,26 @@
package org.rhq.core.pc.drift;
+import static org.rhq.common.drift.FileEntry.addedFileEntry;
+import static org.rhq.common.drift.FileEntry.changedFileEntry;
+import static org.rhq.common.drift.FileEntry.removedFileEntry;
+import static org.rhq.core.domain.drift.DriftChangeSetCategory.COVERAGE;
+import static org.rhq.core.domain.drift.DriftChangeSetCategory.DRIFT;
+import static org.rhq.core.util.file.FileUtil.copyFile;
+import static org.rhq.core.util.file.FileUtil.forEachFile;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+
import org.rhq.common.drift.ChangeSetReader;
import org.rhq.common.drift.ChangeSetWriter;
import org.rhq.common.drift.FileEntry;
@@ -31,20 +49,13 @@ import org.rhq.core.domain.drift.Filter;
import org.rhq.core.util.MessageDigestGenerator;
import org.rhq.core.util.file.FileVisitor;
-import java.io.File;
-import java.io.FilenameFilter;
-import java.io.IOException;
-import java.util.*;
-
-import static org.rhq.common.drift.FileEntry.*;
-import static org.rhq.core.domain.drift.DriftChangeSetCategory.COVERAGE;
-import static org.rhq.core.domain.drift.DriftChangeSetCategory.DRIFT;
-import static org.rhq.core.util.file.FileUtil.copyFile;
-import static org.rhq.core.util.file.FileUtil.forEachFile;
-
public class DriftDetector implements Runnable {
private Log log = LogFactory.getLog(DriftDetector.class);
+ static final String FILE_CHANGESET_FULL = "changeset.txt";
+ static final String FILE_CHANGESET_DELTA = "drift-changeset.txt";
+ static final String FILE_SNAPSHOT_PINNED = "snapshot.pinned";
+
private ScheduleQueue scheduleQueue;
private ChangeSetManager changeSetMgr;
@@ -178,7 +189,7 @@ public class DriftDetector implements Runnable {
File snapshotFile = currentSnapshot;
if (schedule.getDriftDefinition().isPinned()) {
- snapshotFile = new File(snapshotFile.getParentFile(), "snapshot.pinned");
+ snapshotFile = new File(snapshotFile.getParentFile(), FILE_SNAPSHOT_PINNED);
}
final File basedir = new File(basedir(schedule.getResourceId(), schedule.getDriftDefinition()));
@@ -240,14 +251,16 @@ public class DriftDetector implements Runnable {
log.info("Detected added file for " + schedule + " --> " + file.getAbsolutePath());
}
- FileEntry newEntry = addedFileEntry(relativePath(basedir, file), sha256(file),
- file.lastModified(), file.length());
- deltaEntries.add(newEntry);
- snapshotEntries.add(newEntry);
- } catch (IOException e) {
- log.error("An error occurred while generating a drift change set for " + schedule + ": "
- + e.getMessage());
- throw new DriftDetectionException("An error occurred while generating a drift change set", e);
+ FileEntry newEntry = addedFileEntry(relativePath(basedir, file), sha256(file), file
+ .lastModified(), file.length());
+ deltaEntries.add(newEntry);
+ snapshotEntries.add(newEntry);
+ } catch (IOException e) {
+ log.error("An error occurred while generating a drift change set for " + schedule + ": "
+ + e.getMessage());
+ throw new DriftDetectionException("An error occurred while generating a drift change set",
+ e);
+ }
}
}));
}
@@ -261,6 +274,7 @@ public class DriftDetector implements Runnable {
// the current snapshot to match the pinned snapshot. Note though
// that we increment the snapshot version in order to let the server
// know about the state change.
+ // if no timestamp must re-gen the files.
if (schedule.getDriftDefinition().isPinned() && newVersion > 1
&& !isPreviousChangeSetEmpty(schedule.getResourceId(), schedule.getDriftDefinition())) {
currentSnapshot.delete();
@@ -335,19 +349,19 @@ public class DriftDetector implements Runnable {
if (entry.getLastModified() == -1 && entry.getSize() == -1) {
String currentSHA = sha256(file);
if (!entry.getNewSHA().equals(currentSHA)) {
- FileEntry modifiedEntry = changedFileEntry(entry.getFile(), entry.getNewSHA(), currentSHA,
- file.lastModified(), file.length());
+ FileEntry modifiedEntry = changedFileEntry(entry.getFile(), entry.getNewSHA(), currentSHA, file
+ .lastModified(), file.length());
deltaEntries.add(modifiedEntry);
snapshotEntries.add(modifiedEntry);
}
- } else if (entry.getLastModified() != -1 && entry.getSize() != -1 &&
- (entry.getLastModified() != file.lastModified() || entry.getSize() != file.length())) {
+ } else if (entry.getLastModified() != -1 && entry.getSize() != -1
+ && (entry.getLastModified() != file.lastModified() || entry.getSize() != file.length())) {
if (log.isDebugEnabled()) {
log.debug("Detected modified file for " + schedule + " --> " + file.getAbsolutePath());
}
String currentSHA = sha256(file);
- FileEntry modifiedEntry = changedFileEntry(entry.getFile(), entry.getNewSHA(), currentSHA,
- file.lastModified(), file.length());
+ FileEntry modifiedEntry = changedFileEntry(entry.getFile(), entry.getNewSHA(), currentSHA, file
+ .lastModified(), file.length());
deltaEntries.add(modifiedEntry);
snapshotEntries.add(modifiedEntry);
} else {
@@ -443,7 +457,7 @@ public class DriftDetector implements Runnable {
ChangeSetReader reader = null;
try {
- File deltaChangeSet = new File(currentSnapsotFile.getParentFile(), "drift-changeset.txt");
+ File deltaChangeSet = new File(currentSnapsotFile.getParentFile(), FILE_CHANGESET_DELTA);
reader = changeSetMgr.getChangeSetReader(deltaChangeSet);
int numEntries = 0;
@@ -511,7 +525,7 @@ public class DriftDetector implements Runnable {
writer = null;
}
if (schedule.getDriftDefinition().isPinned()) {
- copyFile(snapshot, new File(snapshot.getParentFile(), "snapshot.pinned"));
+ copyFile(snapshot, new File(snapshot.getParentFile(), FILE_SNAPSHOT_PINNED));
}
summary.setNewSnapshot(snapshot);
} finally {
diff --git a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftManager.java b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftManager.java
index 1182407..386ad73 100644
--- a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftManager.java
+++ b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/drift/DriftManager.java
@@ -201,7 +201,7 @@ public class DriftManager extends AgentService implements DriftAgentService, Dri
// change set.
DriftSnapshot pinnedSnapshot = driftServer.getSnapshot(driftDefinition.getId(), 0, 0);
Headers pinnedHeaders = createHeaders(resource.getId(), driftDefinition);
- File pinnedSnapshotFile = new File(currentSnapshotFile.getParent(), "snapshot.pinned");
+ File pinnedSnapshotFile = new File(currentSnapshotFile.getParent(), DriftDetector.FILE_SNAPSHOT_PINNED);
log.info("Preparing to write pinned snapshot to disk for "
+ toString(resource.getId(), driftDefinition));
writeSnapshotToFile(pinnedSnapshot, pinnedSnapshotFile, pinnedHeaders);
@@ -212,7 +212,7 @@ public class DriftManager extends AgentService implements DriftAgentService, Dri
// drift that has already been reported to the server.
DriftSnapshot deltaSnapshot = driftServer.getSnapshot(driftDefinition.getId(), snapshot
.getVersion(), snapshot.getVersion());
- File deltaFile = new File(currentSnapshotFile.getParentFile(), "drift-changeset.txt");
+ File deltaFile = new File(currentSnapshotFile.getParentFile(), DriftDetector.FILE_CHANGESET_DELTA);
Headers deltaHeaders = createHeaders(resource.getId(), driftDefinition);
deltaHeaders.setVersion(snapshot.getVersion());
deltaHeaders.setType(DRIFT);
@@ -546,7 +546,7 @@ public class DriftManager extends AgentService implements DriftAgentService, Dri
public void run() {
File currentSnapshot = changeSetMgr.findChangeSet(schedule.getResourceId(), schedule
.getDriftDefinition().getName(), COVERAGE);
- File pinnedSnapshot = new File(currentSnapshot.getParentFile(), "snapshot.pinned");
+ File pinnedSnapshot = new File(currentSnapshot.getParentFile(), DriftDetector.FILE_SNAPSHOT_PINNED);
pinnedSnapshot.delete();
if (log.isDebugEnabled()) {
@@ -561,7 +561,7 @@ public class DriftManager extends AgentService implements DriftAgentService, Dri
@Override
public void updateDriftDetection(int resourceId, DriftDefinition driftDef, DriftSnapshot driftSnapshot) {
File currentSnapshot = changeSetMgr.findChangeSet(resourceId, driftDef.getName(), COVERAGE);
- File pinnedSnapshot = new File(currentSnapshot.getParentFile(), "snapshot.pinned");
+ File pinnedSnapshot = new File(currentSnapshot.getParentFile(), DriftDetector.FILE_SNAPSHOT_PINNED);
Headers headers = createHeaders(resourceId, driftDef);
try {
@@ -777,8 +777,8 @@ public class DriftManager extends AgentService implements DriftAgentService, Dri
writer.write(addedFileEntry(drift.getPath(), drift.getNewDriftFile().getHashId(), -1L, -1L));
break;
case FILE_CHANGED:
- writer.write(changedFileEntry(drift.getPath(), drift.getOldDriftFile().getHashId(),
- drift.getNewDriftFile().getHashId(), -1L, -1L));
+ writer.write(changedFileEntry(drift.getPath(), drift.getOldDriftFile().getHashId(), drift
+ .getNewDriftFile().getHashId(), -1L, -1L));
break;
default: // FILE_REMOVED
writer.write(removedFileEntry(drift.getPath(), drift.getOldDriftFile().getHashId()));
12 years, 5 months
[rhq] modules/enterprise
by John Sanda
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/drift/DriftTemplateManagerBean.java | 40 ++++------
1 file changed, 19 insertions(+), 21 deletions(-)
New commits:
commit ee66d10c7c3dee5a9c9bd102d9ea2db347e0e769
Author: John Sanda <jsanda(a)redhat.com>
Date: Tue Dec 13 17:12:32 2011 -0500
[BZ 767328] Make sure that each drift def is loaded into the persistence context
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/drift/DriftTemplateManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/drift/DriftTemplateManagerBean.java
index 361eb44..9a1c980 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/drift/DriftTemplateManagerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/drift/DriftTemplateManagerBean.java
@@ -19,26 +19,11 @@
package org.rhq.enterprise.server.drift;
-import static javax.ejb.TransactionAttributeType.NEVER;
-import static org.rhq.core.domain.common.EntityContext.forResource;
-import static org.rhq.core.domain.drift.DriftChangeSetCategory.COVERAGE;
-import static org.rhq.core.domain.drift.DriftConfigurationDefinition.DriftHandlingMode.normal;
-
-import javax.ejb.EJB;
-import javax.ejb.Stateless;
-import javax.ejb.TransactionAttribute;
-import javax.persistence.EntityManager;
-import javax.persistence.PersistenceContext;
-
import org.rhq.core.domain.auth.Subject;
import org.rhq.core.domain.authz.Permission;
import org.rhq.core.domain.criteria.DriftDefinitionCriteria;
import org.rhq.core.domain.criteria.DriftDefinitionTemplateCriteria;
-import org.rhq.core.domain.drift.DriftDefinition;
-import org.rhq.core.domain.drift.DriftDefinitionComparator;
-import org.rhq.core.domain.drift.DriftDefinitionTemplate;
-import org.rhq.core.domain.drift.DriftSnapshot;
-import org.rhq.core.domain.drift.DriftSnapshotRequest;
+import org.rhq.core.domain.drift.*;
import org.rhq.core.domain.drift.dto.DriftChangeSetDTO;
import org.rhq.core.domain.resource.ResourceType;
import org.rhq.core.domain.util.PageList;
@@ -49,6 +34,17 @@ import org.rhq.enterprise.server.resource.ResourceTypeNotFoundException;
import org.rhq.enterprise.server.util.CriteriaQueryGenerator;
import org.rhq.enterprise.server.util.CriteriaQueryRunner;
+import javax.ejb.EJB;
+import javax.ejb.Stateless;
+import javax.ejb.TransactionAttribute;
+import javax.persistence.EntityManager;
+import javax.persistence.PersistenceContext;
+
+import static javax.ejb.TransactionAttributeType.NEVER;
+import static org.rhq.core.domain.common.EntityContext.forResource;
+import static org.rhq.core.domain.drift.DriftChangeSetCategory.COVERAGE;
+import static org.rhq.core.domain.drift.DriftConfigurationDefinition.DriftHandlingMode.normal;
+
@Stateless
public class DriftTemplateManagerBean implements DriftTemplateManagerLocal, DriftTemplateManagerRemote {
@@ -205,13 +201,15 @@ public class DriftTemplateManagerBean implements DriftTemplateManagerLocal, Drif
DriftDefinition templateDef = template.getTemplateDefinition();
for (DriftDefinition resourceDef : template.getDriftDefinitions()) {
- if (resourceDef.isAttached()) {
- resourceDef.setInterval(templateDef.getInterval());
- resourceDef.setDriftHandlingMode(templateDef.getDriftHandlingMode());
- resourceDef.setEnabled(templateDef.isEnabled());
+ DriftDefinition driftDef = entityMgr.find(DriftDefinition.class, resourceDef.getId());
+ if (driftDef.isAttached()) {
+ driftDef.setInterval(templateDef.getInterval());
+ driftDef.setDriftHandlingMode(templateDef.getDriftHandlingMode());
+ driftDef.setEnabled(templateDef.isEnabled());
- driftMgr.updateDriftDefinition(subject, forResource(resourceDef.getResource().getId()), resourceDef);
+ driftMgr.updateDriftDefinition(subject, forResource(driftDef.getResource().getId()), driftDef);
}
}
}
+
}
12 years, 5 months
[rhq] modules/enterprise
by mazz
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/drift/DriftManagerBean.java | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
New commits:
commit 9b42a3a83b51bdce587b6841774e373b25343e5a
Author: John Mazzitelli <mazz(a)redhat.com>
Date: Tue Dec 13 14:48:51 2011 -0500
baseDir might be null, so don't assume it isn't
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/drift/DriftManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/drift/DriftManagerBean.java
index c06f7a4..12c7233 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/drift/DriftManagerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/drift/DriftManagerBean.java
@@ -68,8 +68,11 @@ import org.rhq.core.domain.drift.DriftChangeSetCategory;
import org.rhq.core.domain.drift.DriftComplianceStatus;
import org.rhq.core.domain.drift.DriftComposite;
import org.rhq.core.domain.drift.DriftConfigurationDefinition;
+import org.rhq.core.domain.drift.DriftConfigurationDefinition.DriftHandlingMode;
import org.rhq.core.domain.drift.DriftDefinition;
+import org.rhq.core.domain.drift.DriftDefinition.BaseDirectory;
import org.rhq.core.domain.drift.DriftDefinitionComparator;
+import org.rhq.core.domain.drift.DriftDefinitionComparator.CompareMode;
import org.rhq.core.domain.drift.DriftDefinitionComposite;
import org.rhq.core.domain.drift.DriftDefinitionTemplate;
import org.rhq.core.domain.drift.DriftDetails;
@@ -78,9 +81,6 @@ import org.rhq.core.domain.drift.DriftSnapshot;
import org.rhq.core.domain.drift.DriftSnapshotRequest;
import org.rhq.core.domain.drift.FileDiffReport;
import org.rhq.core.domain.drift.Filter;
-import org.rhq.core.domain.drift.DriftConfigurationDefinition.DriftHandlingMode;
-import org.rhq.core.domain.drift.DriftDefinition.BaseDirectory;
-import org.rhq.core.domain.drift.DriftDefinitionComparator.CompareMode;
import org.rhq.core.domain.drift.dto.DriftChangeSetDTO;
import org.rhq.core.domain.drift.dto.DriftDTO;
import org.rhq.core.domain.drift.dto.DriftFileDTO;
@@ -803,7 +803,7 @@ public class DriftManagerBean implements DriftManagerLocal, DriftManagerRemote {
if (null == baseDir
|| !baseDir.getValueName().matches(DriftConfigurationDefinition.PROP_BASEDIR_PATH_REGEX_PATTERN)) {
throw new IllegalArgumentException(
- "Drift definition base directory is null or contains invalid characters: " + baseDir.getValueName());
+ "Drift definition base directory is null or contains invalid characters: " + baseDir);
}
List<List<Filter>> filtersList = new ArrayList<List<Filter>>(2);
filtersList.add(driftDef.getIncludes());
12 years, 5 months
[rhq] modules/enterprise
by mazz
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/drift/DriftDefinitionDataSource.java | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
New commits:
commit 1c2b7c69036191a7a79a9421aba63dff18cf0a8b
Author: John Mazzitelli <mazz(a)redhat.com>
Date: Tue Dec 13 13:46:12 2011 -0500
[BZ 767263] don't assume there is always a template
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/drift/DriftDefinitionDataSource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/drift/DriftDefinitionDataSource.java
index b29e9a6..bece740 100644
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/drift/DriftDefinitionDataSource.java
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/drift/DriftDefinitionDataSource.java
@@ -198,6 +198,9 @@ public class DriftDefinitionDataSource extends RPCDataSource<DriftDefinitionComp
templateField.setCellFormatter(new CellFormatter() {
public String format(Object o, ListGridRecord listGridRecord, int i, int i1) {
DriftDefinition def = (DriftDefinition) listGridRecord.getAttributeAsObject(ATTR_ENTITY);
+ if (null == def.getTemplate()) {
+ return MSG.common_val_none();
+ }
if (null != globalPermissions && globalPermissions.contains(Permission.MANAGE_SETTINGS)) {
int typeId = def.getResource().getResourceType().getId();
int templateId = def.getTemplate().getId();
@@ -422,7 +425,11 @@ public class DriftDefinitionDataSource extends RPCDataSource<DriftDefinitionComp
record.setAttribute(ATTR_IS_PINNED, def.isPinned() ? ImageManager.getPinnedIcon() : ImageManager
.getUnpinnedIcon());
record.setAttribute(ATTR_ATTACHED, def.isAttached() ? MSG.common_val_yes() : MSG.common_val_no());
- record.setAttribute(ATTR_TEMPLATE, def.getTemplate().getName());
+ if (def.getTemplate() != null) {
+ record.setAttribute(ATTR_TEMPLATE, def.getTemplate().getName());
+ } else {
+ record.setAttribute(ATTR_TEMPLATE, MSG.common_val_none());
+ }
record.setAttribute(ATTR_CHANGE_SET_VERSION, (null != changeSet) ? String.valueOf(changeSet.getVersion()) : MSG
.common_label_none());
12 years, 5 months
[rhq] modules/core
by ips
modules/core/plugin-container/src/main/java/org/rhq/core/pc/PluginContainerConfiguration.java | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
New commits:
commit b664d35951234f42400d90d1b5c7b477b0e9d3d5
Author: Ian Springer <ian.springer(a)redhat.com>
Date: Tue Dec 13 13:05:19 2011 -0500
minor - fix some typos in javadoc
diff --git a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/PluginContainerConfiguration.java b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/PluginContainerConfiguration.java
index d786ee3..570f7da 100644
--- a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/PluginContainerConfiguration.java
+++ b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/PluginContainerConfiguration.java
@@ -453,7 +453,7 @@ public class PluginContainerConfiguration {
}
/**
- * When measurement's are scheduled for collection, the collection will be performed by threads from a thread pool.
+ * When measurements are scheduled for collection, the collection will be performed by threads from a thread pool.
* This defines the number of threads within that thread pool, effectively defining the number of measurements that
* can be collected concurrently.
*
@@ -465,9 +465,9 @@ public class PluginContainerConfiguration {
}
/**
- * Defines the number of threads that can concurrent collection measurements.
+ * Defines the number of threads that can concurrently collect measurements.
*
- * @param size the new size of the threadpool
+ * @param size the new size of the thread pool
*/
public void setMeasurementCollectionThreadPoolSize(int size) {
configuration.put(MEASUREMENT_COLLECTION_THREADCOUNT_PROP, Integer.valueOf(size));
@@ -609,7 +609,7 @@ public class PluginContainerConfiguration {
/**
* Defines the number of threads that can concurrent execute operations.
*
- * @param size the new size of the threadpool
+ * @param size the new size of the thread pool
*/
public void setOperationInvokerThreadPoolSize(int size) {
configuration.put(OPERATION_INVOKER_THREADCOUNT_PROP, Integer.valueOf(size));
12 years, 5 months