[rhq] Branch 'feature/cassandra-backend' - 5 commits - modules/common modules/enterprise
by snegrea
modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java | 2
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DateTimeService.java | 8
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsBaselineCalculator.java | 13
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsConfiguration.java | 13
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java | 8
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/DataMigrator.java | 715 +---------
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ScrollableDataSource.java | 38
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/Telemetry.java | 57
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/AbstractMigrationWorker.java | 123 +
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/AggregateDataMigrator.java | 262 +++
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/CallableMigrationWorker.java | 38
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/DeleteAllData.java | 89 +
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/MetricsIndexUpdateAccumulator.java | 167 ++
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/MigrationQuery.java | 71
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/RawDataMigrator.java | 250 +++
modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java | 1
16 files changed, 1227 insertions(+), 628 deletions(-)
New commits:
commit d78491ca24d15ec3ccab6fb5a3063fc3a601a3a6
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Thu May 23 01:12:07 2013 -0500
Split the big data migrator class and decouple functionality from the big main class.
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/DataMigrator.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/DataMigrator.java
index 952c454..9578e73 100644
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/DataMigrator.java
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/DataMigrator.java
@@ -20,40 +20,18 @@
package org.rhq.server.metrics.migrator;
-import static com.datastax.driver.core.querybuilder.QueryBuilder.ttl;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Queue;
-import java.util.Set;
-
import javax.persistence.EntityManager;
-import com.datastax.driver.core.BoundStatement;
-import com.datastax.driver.core.PreparedStatement;
-import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Session;
-import com.datastax.driver.core.querybuilder.Batch;
-import com.datastax.driver.core.querybuilder.QueryBuilder;
-import org.apache.commons.lang.time.StopWatch;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.hibernate.Query;
-import org.hibernate.StatelessSession;
-import org.joda.time.DateTime;
-import org.joda.time.Duration;
-
-import org.rhq.server.metrics.DateTimeService;
-import org.rhq.server.metrics.MetricsConfiguration;
-import org.rhq.server.metrics.domain.AggregateType;
+
import org.rhq.server.metrics.domain.MetricsTable;
+import org.rhq.server.metrics.migrator.workers.AggregateDataMigrator;
+import org.rhq.server.metrics.migrator.workers.CallableMigrationWorker;
+import org.rhq.server.metrics.migrator.workers.DeleteAllData;
+import org.rhq.server.metrics.migrator.workers.RawDataMigrator;
/**
@@ -66,79 +44,21 @@ public class DataMigrator {
Postgres, Oracle
}
- private final Log log = LogFactory.getLog(DataMigrator.class);
-
-
- private static final int MAX_RECORDS_TO_LOAD_FROM_SQL = 30000;
- private static final int MAX_RAW_BATCH_TO_CASSANDRA = 100;
- private static final int MAX_AGGREGATE_BATCH_TO_CASSANDRA = 50;
- private static final int MAX_NUMBER_OF_FAILURES = 5;
- private static final int NUMBER_OF_BATCHES_FOR_ESTIMATION = 4;
- private static final double UNDER_ESTIMATION_FACTOR = .15;
- public static final int SQL_TIMEOUT = 6000000;
-
-
- private enum MigrationQuery {
- SELECT_1H_DATA("SELECT schedule_id, time_stamp, value, minvalue, maxvalue FROM RHQ_MEASUREMENT_DATA_NUM_1H ORDER BY schedule_id, time_stamp"),
- SELECT_6H_DATA("SELECT schedule_id, time_stamp, value, minvalue, maxvalue FROM RHQ_MEASUREMENT_DATA_NUM_6H ORDER BY schedule_id, time_stamp"),
- SELECT_1D_DATA("SELECT schedule_id, time_stamp, value, minvalue, maxvalue FROM RHQ_MEASUREMENT_DATA_NUM_1D ORDER BY schedule_id, time_stamp"),
-
- DELETE_1H_DATA("DELETE FROM RHQ_MEASUREMENT_DATA_NUM_1H"),
- DELETE_6H_DATA("DELETE FROM RHQ_MEASUREMENT_DATA_NUM_6H"),
- DELETE_1D_DATA("DELETE FROM RHQ_MEASUREMENT_DATA_NUM_1D"),
-
- COUNT_1H_DATA("SELECT COUNT(*) FROM RHQ_MEASUREMENT_DATA_NUM_1H"),
- COUNT_6H_DATA("SELECT COUNT(*) FROM RHQ_MEASUREMENT_DATA_NUM_6H"),
- COUNT_1D_DATA("SELECT COUNT(*) FROM RHQ_MEASUREMENT_DATA_NUM_1D"),
-
- MAX_TIMESTAMP_1H_DATA("SELECT MAX(time_stamp) FROM RHQ_MEASUREMENT_DATA_NUM_1H"),
- MAX_TIMESTAMP_6H_DATA("SELECT MAX(time_stamp) FROM RHQ_MEASUREMENT_DATA_NUM_6H"),
- MAX_TIMESTAMP_1D_DATA("SELECT MAX(time_stamp) FROM RHQ_MEASUREMENT_DATA_NUM_1D"),
-
-
- COUNT_RAW("SELECT COUNT(*) FROM %s"),
- SELECT_RAW_DATA("SELECT schedule_id, time_stamp, value FROM %s ORDER BY schedule_id, time_stamp"),
- DELETE_RAW_ALL_DATA("DELETE FROM %s"),
- DELETE_RAW_ENTRY("DELETE FROM %s WHERE schedule_id = ?");
-
- public static final int SCHEDULE_INDEX = 0;
- public static final int TIMESTAMP_INDEX = 1;
- public static final int VALUE_INDEX = 2;
- public static final int MIN_VALUE_INDEX = 3;
- public static final int MAX_VALUE_INDEX = 4;
-
- private String query;
-
- private MigrationQuery(String query){
- this.query = query;
- }
-
- /**
- * @return the query
- */
- public String getQuery() {
- return query;
- }
-
- @Override
- public String toString() {
- return query;
- }
+ public enum Task {
+ Migrate, Estimate
}
- private final EntityManager entityManager;
- private final Session session;
- private final DatabaseType databaseType;
- private final boolean experimentalDataSource;
+ private interface RunnableWithException extends Runnable {
+ Exception getException();
+ }
- private boolean deleteDataImmediatelyAfterMigration;
- private boolean deleteAllDataAtEndOfMigration;
+ private final Log log = LogFactory.getLog(DataMigrator.class);
- private boolean runRawDataMigration;
- private boolean run1HAggregateDataMigration;
- private boolean run6HAggregateDataMigration;
- private boolean run1DAggregateDataMigration;
+ public static final double UNDER_ESTIMATION_FACTOR = .15;
+ public static final int SQL_TIMEOUT = 6000000;
+ public static final int MAX_NUMBER_OF_FAILURES = 5;
+ private final DataMigratorConfiguration config;
private long estimation;
public DataMigrator(EntityManager entityManager, Session session, DatabaseType databaseType) {
@@ -147,72 +67,67 @@ public class DataMigrator {
public DataMigrator(EntityManager entityManager, Session session, DatabaseType databaseType,
boolean experimentalDataSource) {
- this.entityManager = entityManager;
- this.session = session;
- this.databaseType = databaseType;
-
- this.experimentalDataSource = experimentalDataSource;
-
- this.deleteDataImmediatelyAfterMigration = false;
- this.deleteAllDataAtEndOfMigration = false;
- this.runRawDataMigration = true;
- this.run1HAggregateDataMigration = true;
- this.run6HAggregateDataMigration = true;
- this.run1DAggregateDataMigration = true;
+
+ config = new DataMigratorConfiguration(entityManager, session, databaseType, experimentalDataSource);
+ config.setDeleteDataImmediatelyAfterMigration(false);
+ config.setDeleteAllDataAtEndOfMigration(false);
+ config.setRunRawDataMigration(true);
+ config.setRun1HAggregateDataMigration(true);
+ config.setRun6HAggregateDataMigration(true);
+ config.setRun1DAggregateDataMigration(true);
}
public void runRawDataMigration(boolean value) {
- this.runRawDataMigration = value;
+ config.setRunRawDataMigration(value);
}
public void run1HAggregateDataMigration(boolean value) {
- this.run1HAggregateDataMigration = value;
+ config.setRun1HAggregateDataMigration(value);
}
public void run6HAggregateDataMigration(boolean value) {
- this.run6HAggregateDataMigration = value;
+ config.setRun6HAggregateDataMigration(value);
}
public void run1DAggregateDataMigration(boolean value) {
- this.run1DAggregateDataMigration = value;
+ config.setRun1DAggregateDataMigration(value);
}
-
public void deleteDataImmediatelyAfterMigration() {
- this.deleteDataImmediatelyAfterMigration = true;
- this.deleteAllDataAtEndOfMigration = false;
+ config.setDeleteDataImmediatelyAfterMigration(true);
+ config.setDeleteAllDataAtEndOfMigration(false);
}
public void deleteAllDataAtEndOfMigration() {
- this.deleteAllDataAtEndOfMigration = true;
- this.deleteDataImmediatelyAfterMigration = false;
+ config.setDeleteAllDataAtEndOfMigration(true);
+ config.setDeleteDataImmediatelyAfterMigration(false);
}
public void preserveData() {
- this.deleteAllDataAtEndOfMigration = false;
- this.deleteDataImmediatelyAfterMigration = false;
+ config.setDeleteAllDataAtEndOfMigration(false);
+ config.setDeleteDataImmediatelyAfterMigration(false);
}
public long estimate() throws Exception {
this.estimation = 0;
- if (runRawDataMigration) {
- retryOnFailure(new RawDataMigrator(), Task.Estimate);
+ if (config.isRunRawDataMigration()) {
+ retryOnFailure(new RawDataMigrator(config), Task.Estimate);
}
- if (run1HAggregateDataMigration) {
- retryOnFailure(new AggregateDataMigrator(MetricsTable.ONE_HOUR), Task.Estimate);
+ if (config.isRun1HAggregateDataMigration()) {
+ retryOnFailure(new AggregateDataMigrator(MetricsTable.ONE_HOUR, config), Task.Estimate);
}
- if (run6HAggregateDataMigration) {
- retryOnFailure(new AggregateDataMigrator(MetricsTable.SIX_HOUR), Task.Estimate);
+ if (config.isRun6HAggregateDataMigration()) {
+ retryOnFailure(new AggregateDataMigrator(MetricsTable.SIX_HOUR, config), Task.Estimate);
}
- if (run1DAggregateDataMigration) {
- retryOnFailure(new AggregateDataMigrator(MetricsTable.TWENTY_FOUR_HOUR), Task.Estimate);
+ if (config.isRun1DAggregateDataMigration()) {
+ retryOnFailure(new AggregateDataMigrator(MetricsTable.TWENTY_FOUR_HOUR, config), Task.Estimate);
}
- if (deleteAllDataAtEndOfMigration) {
- retryOnFailure(new DeleteAllData(), Task.Estimate);
+ if (config.isDeleteAllDataAtEndOfMigration()) {
+ retryOnFailure(new DeleteAllData(config), Task.Estimate);
}
estimation = (long) (estimation + estimation * UNDER_ESTIMATION_FACTOR);
@@ -221,30 +136,30 @@ public class DataMigrator {
}
public void migrateData() throws Exception {
- if (runRawDataMigration) {
- retryOnFailure(new RawDataMigrator(), Task.Migrate);
+ if (config.isRunRawDataMigration()) {
+ retryOnFailure(new RawDataMigrator(config), Task.Migrate);
}
- if (run1HAggregateDataMigration) {
- retryOnFailure(new AggregateDataMigrator(MetricsTable.ONE_HOUR), Task.Migrate);
+ if (config.isRun1HAggregateDataMigration()) {
+ retryOnFailure(new AggregateDataMigrator(MetricsTable.ONE_HOUR, config), Task.Migrate);
}
- if (run6HAggregateDataMigration) {
- retryOnFailure(new AggregateDataMigrator(MetricsTable.SIX_HOUR), Task.Migrate);
+ if (config.isRun6HAggregateDataMigration()) {
+ retryOnFailure(new AggregateDataMigrator(MetricsTable.SIX_HOUR, config), Task.Migrate);
}
- if (run1DAggregateDataMigration) {
- retryOnFailure(new AggregateDataMigrator(MetricsTable.TWENTY_FOUR_HOUR), Task.Migrate);
+ if (config.isRun1DAggregateDataMigration()) {
+ retryOnFailure(new AggregateDataMigrator(MetricsTable.TWENTY_FOUR_HOUR, config), Task.Migrate);
}
- if (deleteAllDataAtEndOfMigration) {
- retryOnFailure(new DeleteAllData(), Task.Migrate);
+ if (config.isDeleteAllDataAtEndOfMigration()) {
+ retryOnFailure(new DeleteAllData(config), Task.Migrate);
}
}
public void deleteOldData() throws Exception {
- if (deleteAllDataAtEndOfMigration) {
- retryOnFailure(new DeleteAllData(), Task.Migrate);
+ if (config.isDeleteAllDataAtEndOfMigration()) {
+ retryOnFailure(new DeleteAllData(config), Task.Migrate);
}
}
@@ -304,713 +219,91 @@ public class DataMigrator {
return localThread;
}
- /**
- * Returns a list of all the raw SQL metric tables.
- * There is no equivalent in Cassandra, all raw data is stored in a single column family.
- *
- * @return SQL raw metric tables
- */
- private String[] getRawDataTables() {
- int tableCount = 15;
- String tablePrefix = "RHQ_MEAS_DATA_NUM_R";
-
- String[] tables = new String[tableCount];
- for (int i = 0; i < tableCount; i++) {
- if (i < 10) {
- tables[i] = tablePrefix + "0" + i;
- } else {
- tables[i] = tablePrefix + i;
- }
- }
-
- return tables;
- }
+ public class DataMigratorConfiguration {
- private ExistingDataSource getExistingDataSource(String query, Task task) {
- if (Task.Migrate.equals(task)) {
- if (DatabaseType.Oracle.equals(this.databaseType)) {
- return new ScrollableDataSource(this.entityManager, this.databaseType, query);
- } else {
- if (!experimentalDataSource) {
- return new ScrollableDataSource(this.entityManager, this.databaseType, query);
- } else {
- return new ExistingPostgresDataBulkExportSource(this.entityManager, query);
- }
- }
- } else if (Task.Estimate.equals(task)) {
- int limit = MAX_RECORDS_TO_LOAD_FROM_SQL * (NUMBER_OF_BATCHES_FOR_ESTIMATION + 1);
-
- if (DatabaseType.Oracle.equals(this.databaseType)) {
- return new ScrollableDataSource(this.entityManager, this.databaseType, query, limit);
- } else {
- if (!experimentalDataSource) {
- return new ScrollableDataSource(this.entityManager, this.databaseType, query, limit);
- } else {
- return new ExistingPostgresDataBulkExportSource(this.entityManager, query, limit);
- }
- }
- }
+ private final EntityManager entityManager;
+ private final Session session;
+ private final DatabaseType databaseType;
+ private final boolean experimentalDataSource;
- return new ScrollableDataSource(this.entityManager, this.databaseType, query);
- }
+ private boolean deleteDataImmediatelyAfterMigration;
+ private boolean deleteAllDataAtEndOfMigration;
- private void prepareSQLSession(StatelessSession session) {
- if (DatabaseType.Postgres.equals(this.databaseType)) {
- log.debug("Preparing SQL connection with timeout: " + SQL_TIMEOUT);
+ private boolean runRawDataMigration;
+ private boolean run1HAggregateDataMigration;
+ private boolean run6HAggregateDataMigration;
+ private boolean run1DAggregateDataMigration;
- org.hibernate.Query query = session.createSQLQuery("SET LOCAL statement_timeout = " + SQL_TIMEOUT);
- query.setReadOnly(true);
- query.executeUpdate();
+ public DataMigratorConfiguration(EntityManager entityManager, Session session, DatabaseType databaseType,
+ boolean experimentalDataSource) {
+ this.entityManager = entityManager;
+ this.session = session;
+ this.databaseType = databaseType;
+ this.experimentalDataSource = experimentalDataSource;
}
- }
- private StatelessSession getSQLSession() {
- StatelessSession session = ((org.hibernate.Session) this.entityManager.getDelegate()).getSessionFactory()
- .openStatelessSession();
-
- prepareSQLSession(session);
-
- return session;
- }
-
- private void closeSQLSession(StatelessSession session) {
- try {
- if (session != null) {
- session.close();
- }
- } catch (Exception e) {
- log.debug("Unable to close SQL stateless session. " + e);
+ public boolean isDeleteDataImmediatelyAfterMigration() {
+ return deleteDataImmediatelyAfterMigration;
}
- }
-
- private enum Task {
- Migrate, Estimate
- }
-
- private class Telemetry {
- private StopWatch generalTimer;
- private StopWatch migrationTimer;
-
- public Telemetry() {
- this.generalTimer = new StopWatch();
- this.migrationTimer = new StopWatch();
+ private void setDeleteDataImmediatelyAfterMigration(boolean deleteDataImmediatelyAfterMigration) {
+ this.deleteDataImmediatelyAfterMigration = deleteDataImmediatelyAfterMigration;
}
- public StopWatch getGeneralTimer() {
- return generalTimer;
+ public boolean isDeleteAllDataAtEndOfMigration() {
+ return deleteAllDataAtEndOfMigration;
}
- public StopWatch getMigrationTimer() {
- return migrationTimer;
+ private void setDeleteAllDataAtEndOfMigration(boolean deleteAllDataAtEndOfMigration) {
+ this.deleteAllDataAtEndOfMigration = deleteAllDataAtEndOfMigration;
}
- public long getMigrationTime() {
- return migrationTimer.getTime();
+ public boolean isRunRawDataMigration() {
+ return runRawDataMigration;
}
- public long getGeneralTime() {
- return generalTimer.getTime();
+ private void setRunRawDataMigration(boolean runRawDataMigration) {
+ this.runRawDataMigration = runRawDataMigration;
}
- public long getNonMigrationTime() {
- return this.getGeneralTime() - this.getMigrationTime();
+ public boolean isRun1HAggregateDataMigration() {
+ return run1HAggregateDataMigration;
}
- }
- private class MetricsIndexUpdateAccumulator {
- private static final int MAX_SIZE = 3000;
-
- private final DateTimeService dateTimeService = new DateTimeService();
- private final MetricsConfiguration configuration = new MetricsConfiguration();
-
- private final Map<Integer, Set<Long>> accumulator = new HashMap<Integer, Set<Long>>();
- private final long timeLimit;
- private final MetricsTable table;
- private final PreparedStatement updateMetricsIndex;
- private final Duration sliceDuration;
- private final boolean validAccumulatorTable;
-
- private int currentCount = 0;
-
- public MetricsIndexUpdateAccumulator(MetricsTable table) {
- this.table = table;
-
- if (MetricsTable.RAW.equals(table) || MetricsTable.ONE_HOUR.equals(table)
- || MetricsTable.SIX_HOUR.equals(table)) {
- this.sliceDuration = configuration.getTimeSliceDuration(table);
- this.timeLimit = this.getLastAggregationTime(table) - this.sliceDuration.getMillis();
- this.updateMetricsIndex = session.prepare("INSERT INTO " + MetricsTable.INDEX.getTableName()
- + " (bucket, time, schedule_id) VALUES (?, ?, ?)");
- this.validAccumulatorTable = true;
- } else {
- this.timeLimit = Integer.MAX_VALUE;
- this.updateMetricsIndex = null;
- this.sliceDuration = null;
- this.validAccumulatorTable = false;
- }
+ private void setRun1HAggregateDataMigration(boolean run1hAggregateDataMigration) {
+ run1HAggregateDataMigration = run1hAggregateDataMigration;
}
- public void add(int scheduleId, long timestamp) throws Exception {
- if (validAccumulatorTable && timeLimit <= timestamp) {
- long alignedTimeSlice = dateTimeService.getTimeSlice(timestamp, sliceDuration).getMillis();
-
- if (accumulator.containsKey(scheduleId)) {
- Set<Long> timestamps = accumulator.get(scheduleId);
- if (!timestamps.contains(alignedTimeSlice)) {
- timestamps.add(alignedTimeSlice);
-
- currentCount++;
- }
- } else {
- Set<Long> timestamps = new HashSet<Long>();
- timestamps.add(timestamp);
- accumulator.put(scheduleId, timestamps);
-
- currentCount++;
- }
- }
-
- if (currentCount > MAX_SIZE) {
- drain();
- }
+ public boolean isRun6HAggregateDataMigration() {
+ return run6HAggregateDataMigration;
}
- public void drain() throws Exception {
- if (log.isDebugEnabled()) {
- log.debug("Draining metrics index accumulator with " + currentCount + " entries");
- }
-
- List<ResultSetFuture> resultSetFutures = new ArrayList<ResultSetFuture>();
-
- for (Map.Entry<Integer, Set<Long>> entry : accumulator.entrySet()) {
- for (Long timestamp : entry.getValue()) {
- BoundStatement statement = updateMetricsIndex.bind(this.table.getTableName(), new Date(timestamp),
- entry.getKey());
- resultSetFutures.add(session.executeAsync(statement));
- }
- }
-
- for (ResultSetFuture future : resultSetFutures) {
- future.get();
- }
-
- accumulator.clear();
- currentCount = 0;
+ private void setRun6HAggregateDataMigration(boolean run6hAggregateDataMigration) {
+ run6HAggregateDataMigration = run6hAggregateDataMigration;
}
- private long getLastAggregationTime(MetricsTable migratedTable) {
- StatelessSession session = getSQLSession();
-
- long aggregationSlice = -1;
- Duration duration = null;
- String queryString = null;
-
- if (MetricsTable.RAW.equals(migratedTable)) {
- duration = configuration.getRawTimeSliceDuration();
- queryString = MigrationQuery.MAX_TIMESTAMP_1H_DATA.toString();
- } else if (MetricsTable.ONE_HOUR.equals(migratedTable)) {
- duration = configuration.getOneHourTimeSliceDuration();
- queryString = MigrationQuery.MAX_TIMESTAMP_6H_DATA.toString();
- } else if (MetricsTable.SIX_HOUR.equals(migratedTable)) {
- duration = configuration.getSixHourTimeSliceDuration();
- queryString = MigrationQuery.MAX_TIMESTAMP_1D_DATA.toString();
- }
-
- if (duration != null && queryString != null) {
- Query query = session.createSQLQuery(queryString);
- String queryResult = query.uniqueResult().toString();
- Long timestamp = Long.parseLong(queryResult);
- aggregationSlice = dateTimeService.getTimeSlice(new DateTime(timestamp), duration).getMillis();
- }
-
- closeSQLSession(session);
-
- return aggregationSlice;
+ public boolean isRun1DAggregateDataMigration() {
+ return run1DAggregateDataMigration;
}
- }
-
- private interface CallableMigrationWorker {
-
- long estimate() throws Exception;
-
- void migrate() throws Exception;
- }
-
- private interface RunnableWithException extends Runnable {
- Exception getException();
- }
-
- private class AggregateDataMigrator implements CallableMigrationWorker {
-
- private final String selectQuery;
- private final String deleteQuery;
- private final String countQuery;
- private final MetricsTable metricsTable;
- private final MetricsIndexUpdateAccumulator metricsIndexAccumulator;
-
- /**
- * @param query
- * @param metricsTable
- */
- public AggregateDataMigrator(MetricsTable metricsTable) throws Exception {
- this.metricsTable = metricsTable;
-
- if (MetricsTable.ONE_HOUR.equals(this.metricsTable)) {
- this.selectQuery = MigrationQuery.SELECT_1H_DATA.toString();
- this.deleteQuery = MigrationQuery.DELETE_1H_DATA.toString();
- this.countQuery = MigrationQuery.COUNT_1H_DATA.toString();
- } else if (MetricsTable.SIX_HOUR.equals(this.metricsTable)) {
- this.selectQuery = MigrationQuery.SELECT_6H_DATA.toString();
- this.deleteQuery = MigrationQuery.DELETE_6H_DATA.toString();
- this.countQuery = MigrationQuery.COUNT_6H_DATA.toString();
- } else if (MetricsTable.TWENTY_FOUR_HOUR.equals(this.metricsTable)) {
- this.selectQuery = MigrationQuery.SELECT_1D_DATA.toString();
- this.deleteQuery = MigrationQuery.DELETE_1D_DATA.toString();
- this.countQuery = MigrationQuery.COUNT_1D_DATA.toString();
- } else {
- throw new Exception("MetricsTable " + metricsTable.toString() + " not supported by this migrator.");
- }
- metricsIndexAccumulator = new MetricsIndexUpdateAccumulator(metricsTable);
+ private void setRun1DAggregateDataMigration(boolean run1dAggregateDataMigration) {
+ run1DAggregateDataMigration = run1dAggregateDataMigration;
}
- @Override
- public long estimate() throws Exception {
- long recordCount = this.getRowCount(this.countQuery);
- log.debug("Retrieved record count for table " + metricsTable.toString() + " -- " + recordCount);
-
- Telemetry telemetry = this.performMigration(Task.Estimate);
- long estimatedTimeToMigrate = telemetry.getMigrationTime();
-
- long estimation = (recordCount / (long) MAX_RECORDS_TO_LOAD_FROM_SQL / (long) NUMBER_OF_BATCHES_FOR_ESTIMATION)
- * estimatedTimeToMigrate;
-
- estimation += telemetry.getNonMigrationTime();
-
- return estimation;
+ public EntityManager getEntityManager() {
+ return entityManager;
}
- public void migrate() throws Exception {
- performMigration(Task.Migrate);
- if (deleteDataImmediatelyAfterMigration) {
- deleteTableData();
- }
+ public Session getSession() {
+ return session;
}
- private long getRowCount(String countQuery) {
- StatelessSession session = getSQLSession();
-
- org.hibernate.Query query = session.createSQLQuery(countQuery);
- query.setReadOnly(true);
- query.setTimeout(SQL_TIMEOUT);
- long count = Long.parseLong(query.uniqueResult().toString());
-
- closeSQLSession(session);
-
- return count;
+ public DatabaseType getDatabaseType() {
+ return databaseType;
}
- private void deleteTableData() throws Exception {
- int failureCount = 0;
- while (failureCount < MAX_NUMBER_OF_FAILURES) {
- try {
- StatelessSession session = getSQLSession();
- session.getTransaction().begin();
- org.hibernate.Query nativeQuery = session.createSQLQuery(this.deleteQuery);
- nativeQuery.executeUpdate();
- session.getTransaction().commit();
- closeSQLSession(session);
- log.info("- " + metricsTable.toString() + " - Cleaned -");
- } catch (Exception e) {
- log.error("Failed to delete " + metricsTable.toString()
- + " data. Attempting to delete data one more time...");
-
- failureCount++;
- if (failureCount == MAX_NUMBER_OF_FAILURES) {
- throw e;
- }
- }
- }
- }
-
- private Telemetry performMigration(Task task) throws Exception {
- Telemetry telemetry = new Telemetry();
- telemetry.getGeneralTimer().start();
-
- long numberOfBatchesMigrated = 0;
-
- List<Object[]> existingData;
- int failureCount;
-
- int lastMigratedRecord = 0;
- ExistingDataSource dataSource = getExistingDataSource(selectQuery, task);
- dataSource.initialize();
-
- telemetry.getMigrationTimer().start();
- while (true) {
- existingData = dataSource.getData(lastMigratedRecord, MAX_RECORDS_TO_LOAD_FROM_SQL);
-
- if (existingData.size() == 0) {
- break;
- }
-
- lastMigratedRecord += existingData.size();
-
- failureCount = 0;
- while (failureCount < MAX_NUMBER_OF_FAILURES) {
- try {
- insertDataToCassandra(existingData);
- break;
- } catch (Exception e) {
- log.error("Failed to insert " + metricsTable.toString()
- + " data. Attempting to insert the current batch of data one more time");
- log.error(e);
-
- failureCount++;
- if (failureCount == MAX_NUMBER_OF_FAILURES) {
- throw e;
- }
- }
- }
-
- log.info("- " + metricsTable + " - " + lastMigratedRecord + " -");
-
- numberOfBatchesMigrated++;
- if (Task.Estimate.equals(task) && numberOfBatchesMigrated >= NUMBER_OF_BATCHES_FOR_ESTIMATION) {
- break;
- }
- }
-
- metricsIndexAccumulator.drain();
-
- telemetry.getMigrationTimer().stop();
-
- dataSource.close();
- telemetry.getGeneralTimer().stop();
-
- return telemetry;
- }
-
- private void insertDataToCassandra(List<Object[]> existingData)
- throws Exception {
- List<ResultSetFuture> resultSetFutures = new ArrayList<ResultSetFuture>();
- Batch batch = QueryBuilder.batch();
- int batchSize = 0;
-
- //only need approximate TTL to speed up processing
- //given that each batch is processed within seconds, getting the
- //system time once per batch has minimal impact on the record retention
- long creationTimeMillis;
- long itemTTLSeconds;
- long currentTimeMillis = System.currentTimeMillis();
- long expectedTTLMillis = metricsTable.getTTLinMilliseconds();
-
-
- for (Object[] rawMeasurement : existingData) {
- creationTimeMillis = Long.parseLong(rawMeasurement[MigrationQuery.TIMESTAMP_INDEX].toString());
- itemTTLSeconds = (expectedTTLMillis - currentTimeMillis + creationTimeMillis) / 1000l;
-
- if(itemTTLSeconds > 0 ){
- int scheduleId = Integer.parseInt(rawMeasurement[MigrationQuery.SCHEDULE_INDEX].toString());
- Date time = new Date(creationTimeMillis);
-
- batch.add(QueryBuilder.insertInto(metricsTable.toString())
- .value("schedule_id", scheduleId)
- .value("time", time)
- .value("type", AggregateType.AVG.ordinal())
- .value("value", Double.parseDouble(rawMeasurement[MigrationQuery.VALUE_INDEX].toString()))
- .using(ttl((int) itemTTLSeconds)));
-
- batch.add(QueryBuilder.insertInto(metricsTable.toString())
- .value("schedule_id", scheduleId)
- .value("time", time)
- .value("type", AggregateType.MIN.ordinal())
- .value("value", Double.parseDouble(rawMeasurement[MigrationQuery.MIN_VALUE_INDEX].toString()))
- .using(ttl((int) itemTTLSeconds)));
-
- batch.add(QueryBuilder.insertInto(metricsTable.toString())
- .value("schedule_id", scheduleId)
- .value("time", time)
- .value("type", AggregateType.MAX.ordinal())
- .value("value", Double.parseDouble(rawMeasurement[MigrationQuery.MAX_VALUE_INDEX].toString()))
- .using(ttl((int) itemTTLSeconds)));
-
- batchSize += 3;
-
- metricsIndexAccumulator.add(scheduleId, creationTimeMillis);
- }
-
- if (batchSize >= MAX_AGGREGATE_BATCH_TO_CASSANDRA) {
- resultSetFutures.add(session.executeAsync(batch));
- batch = QueryBuilder.batch();
- batchSize = 0;
- }
- }
-
- if (batchSize != 0) {
- resultSetFutures.add(session.executeAsync(batch));
- }
-
- for (ResultSetFuture future : resultSetFutures) {
- future.get();
- }
- }
- }
-
-
- private class RawDataMigrator implements CallableMigrationWorker {
-
- private final Queue<String> tablesNotProcessed = new LinkedList<String>(Arrays.asList(getRawDataTables()));
- private final MetricsIndexUpdateAccumulator metricsIndexAccumulator;
-
- public RawDataMigrator() {
- this.metricsIndexAccumulator = new MetricsIndexUpdateAccumulator(MetricsTable.RAW);
- }
-
- public long estimate() throws Exception {
- long recordCount = 0;
- for (String table : getRawDataTables()) {
- String countQuery = String.format(MigrationQuery.COUNT_RAW.toString(), table);
- long tableRecordCount = this.getRowCount(countQuery);
-
- log.debug("Retrieved record count for table " + table + " -- " + tableRecordCount);
-
- recordCount += tableRecordCount;
- }
-
- Telemetry telemetry = this.performMigration(Task.Estimate);
- long estimatedTimeToMigrate = telemetry.getMigrationTime();
- long estimation = (recordCount / (long) MAX_RECORDS_TO_LOAD_FROM_SQL / (long) NUMBER_OF_BATCHES_FOR_ESTIMATION)
- * estimatedTimeToMigrate;
- estimation += telemetry.getNonMigrationTime();
-
- return estimation;
- }
-
- public void migrate() throws Exception {
- performMigration(Task.Migrate);
- }
-
- private long getRowCount(String countQuery) {
- StatelessSession session = getSQLSession();
-
- org.hibernate.Query query = session.createSQLQuery(countQuery);
- query.setReadOnly(true);
- query.setTimeout(SQL_TIMEOUT);
-
- long count = Long.parseLong(query.uniqueResult().toString());
-
- closeSQLSession(session);
-
- return count;
- }
-
- private Telemetry performMigration(Task task) throws Exception {
- Telemetry telemetry = new Telemetry();
- telemetry.getGeneralTimer().start();
-
- long numberOfBatchesMigrated = 0;
-
- List<Object[]> existingData;
- int failureCount;
-
- telemetry.getMigrationTimer().start();
- telemetry.getMigrationTimer().suspend();
-
- while (!tablesNotProcessed.isEmpty()) {
- String table = tablesNotProcessed.peek();
-
- String selectQuery = String.format(MigrationQuery.SELECT_RAW_DATA.toString(), table);
-
- ExistingDataSource dataSource = getExistingDataSource(selectQuery, task);
- dataSource.initialize();
-
- log.info("Start migrating raw table: " + table);
-
- telemetry.getMigrationTimer().resume();
- int lastMigratedRecord = 0;
- while (true) {
- existingData = dataSource.getData(lastMigratedRecord, MAX_RECORDS_TO_LOAD_FROM_SQL);
-
- if (existingData.size() == 0) {
- break;
- }
-
- lastMigratedRecord += existingData.size();
-
- failureCount = 0;
- while (failureCount < MAX_NUMBER_OF_FAILURES) {
- try {
- insertDataToCassandra(existingData);
- break;
- } catch (Exception e) {
- log.error("Failed to insert " + MetricsTable.RAW.toString()
- + " data. Attempting to insert the current batch of data one more time");
- log.error(e);
-
-
- failureCount++;
- if (failureCount == MAX_AGGREGATE_BATCH_TO_CASSANDRA) {
- throw e;
- }
- }
- }
-
- log.info("- " + table + " - " + lastMigratedRecord + " -");
-
- numberOfBatchesMigrated++;
- if (Task.Estimate.equals(task) && numberOfBatchesMigrated >= NUMBER_OF_BATCHES_FOR_ESTIMATION) {
- break;
- }
- }
- telemetry.getMigrationTimer().suspend();
-
- if (Task.Migrate.equals(task)) {
- log.info("Done migrating raw table" + table + "---------------------");
-
- if (deleteDataImmediatelyAfterMigration) {
- deleteTableData(table);
- }
- } else if (numberOfBatchesMigrated >= NUMBER_OF_BATCHES_FOR_ESTIMATION) {
- break;
- }
-
- dataSource.close();
- tablesNotProcessed.poll();
- }
-
- telemetry.getMigrationTimer().resume();
- metricsIndexAccumulator.drain();
- telemetry.getMigrationTimer().suspend();
-
- telemetry.getGeneralTimer().stop();
- return telemetry;
- }
-
- private void deleteTableData(String table) throws Exception {
- String deleteQuery = String.format(MigrationQuery.DELETE_RAW_ENTRY.toString(), table);
- int failureCount = 0;
- while (failureCount < MAX_NUMBER_OF_FAILURES) {
- try {
- StatelessSession session = getSQLSession();
- session.getTransaction().begin();
- org.hibernate.Query nativeQuery = session.createSQLQuery(deleteQuery);
- nativeQuery.executeUpdate();
- session.getTransaction().commit();
- closeSQLSession(session);
- log.info("- " + table + " - Cleaned -");
- } catch (Exception e) {
- log.error("Failed to delete " + table + " data. Attempting to delete data one more time...");
-
- failureCount++;
- if (failureCount == MAX_NUMBER_OF_FAILURES) {
- throw e;
- }
- }
- }
- }
-
- private void insertDataToCassandra(List<Object[]> existingData) throws Exception {
- List<ResultSetFuture> resultSetFutures = new ArrayList<ResultSetFuture>();
- Batch batch = QueryBuilder.batch();
- int batchSize = 0;
-
- //only need approximate TTL to speed up processing
- //given that each batch is processed within seconds, getting the
- //system time once per batch has minimal impact on the record retention
- long creationTimeMillis;
- long itemTTLSeconds;
- long currentTimeMillis = System.currentTimeMillis();
- long expectedTTLMillis = MetricsTable.RAW.getTTLinMilliseconds();
-
-
- for (Object[] rawDataPoint : existingData) {
- creationTimeMillis = Long.parseLong(rawDataPoint[MigrationQuery.TIMESTAMP_INDEX].toString());
- itemTTLSeconds = (expectedTTLMillis - currentTimeMillis + creationTimeMillis) / 1000l;
-
- if (itemTTLSeconds > 0) {
- int scheduleId = Integer.parseInt(rawDataPoint[MigrationQuery.SCHEDULE_INDEX].toString());
- Date creationTime = new Date(creationTimeMillis);
-
- batch.add(QueryBuilder.insertInto(MetricsTable.RAW.toString())
- .value("schedule_id", scheduleId)
- .value("time", creationTime)
- .value("value", Double.parseDouble(rawDataPoint[MigrationQuery.VALUE_INDEX].toString()))
- .using(ttl((int) itemTTLSeconds)));
- batchSize++;
-
- metricsIndexAccumulator.add(scheduleId, creationTimeMillis);
- }
-
- if (batchSize >= MAX_RAW_BATCH_TO_CASSANDRA) {
- resultSetFutures.add(session.executeAsync(batch));
- batch = QueryBuilder.batch();
- batchSize = 0;
- }
- }
-
- if (batchSize != 0) {
- resultSetFutures.add(session.executeAsync(batch));
- }
-
- for (ResultSetFuture future : resultSetFutures) {
- future.get();
- }
- }
- }
-
-
- private class DeleteAllData implements CallableMigrationWorker {
-
- public void migrate() {
- org.hibernate.Query nativeQuery;
-
- StatelessSession session = getSQLSession();
-
- if (run1HAggregateDataMigration) {
- session.getTransaction().begin();
- nativeQuery = session.createSQLQuery(MigrationQuery.DELETE_1H_DATA.toString());
- nativeQuery.executeUpdate();
- session.getTransaction().commit();
- log.info("- RHQ_MEASUREMENT_DATA_NUM_1H - Cleaned -");
- }
-
- if (run6HAggregateDataMigration) {
- session.getTransaction().begin();
- nativeQuery = session.createSQLQuery(MigrationQuery.DELETE_6H_DATA.toString());
- nativeQuery.executeUpdate();
- session.getTransaction().commit();
- log.info("- RHQ_MEASUREMENT_DATA_NUM_6H - Cleaned -");
- }
-
- if (run1DAggregateDataMigration) {
- session.getTransaction().begin();
- nativeQuery = session.createSQLQuery(MigrationQuery.DELETE_1D_DATA.toString());
- nativeQuery.executeUpdate();
- session.getTransaction().commit();
- log.info("- RHQ_MEASUREMENT_DATA_NUM_1D - Cleaned -");
- }
-
- if (runRawDataMigration) {
- for (String table : getRawDataTables()) {
- session.getTransaction().begin();
- String deleteAllData = String.format(MigrationQuery.DELETE_RAW_ALL_DATA.toString(), table);
- nativeQuery = session.createSQLQuery(deleteAllData);
- nativeQuery.executeUpdate();
- session.getTransaction().commit();
- log.info("- " + table + " - Cleaned -");
- }
- }
-
- closeSQLSession(session);
- }
-
- @Override
- public long estimate() throws Exception {
- return 300000; // return return 5 minutes for now without any database side checks.
+ public boolean isExperimentalDataSource() {
+ return experimentalDataSource;
}
}
}
-
-
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/Telemetry.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/Telemetry.java
new file mode 100644
index 0000000..92bf37a
--- /dev/null
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/Telemetry.java
@@ -0,0 +1,57 @@
+/*
+ * RHQ Management Platform
+ * Copyright 2013, Red Hat Middleware LLC, and individual contributors
+ * as indicated by the @author tags. See the copyright.txt file in the
+ * distribution for a full listing of individual contributors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+package org.rhq.server.metrics.migrator;
+
+import org.apache.commons.lang.time.StopWatch;
+
+/**
+ * @author Stefan Negrea
+ *
+ */
+public class Telemetry {
+ private StopWatch generalTimer;
+ private StopWatch migrationTimer;
+
+ public Telemetry() {
+ this.generalTimer = new StopWatch();
+ this.migrationTimer = new StopWatch();
+ }
+
+ public StopWatch getGeneralTimer() {
+ return generalTimer;
+ }
+
+ public StopWatch getMigrationTimer() {
+ return migrationTimer;
+ }
+
+ public long getMigrationTime() {
+ return migrationTimer.getTime();
+ }
+
+ public long getGeneralTime() {
+ return generalTimer.getTime();
+ }
+
+ public long getNonMigrationTime() {
+ return this.getGeneralTime() - this.getMigrationTime();
+ }
+}
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/AbstractMigrationWorker.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/AbstractMigrationWorker.java
new file mode 100644
index 0000000..3c006be
--- /dev/null
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/AbstractMigrationWorker.java
@@ -0,0 +1,123 @@
+/*
+ * RHQ Management Platform
+ * Copyright 2013, Red Hat Middleware LLC, and individual contributors
+ * as indicated by the @author tags. See the copyright.txt file in the
+ * distribution for a full listing of individual contributors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+package org.rhq.server.metrics.migrator.workers;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.hibernate.StatelessSession;
+
+import org.rhq.server.metrics.migrator.DataMigrator;
+import org.rhq.server.metrics.migrator.DataMigrator.DataMigratorConfiguration;
+import org.rhq.server.metrics.migrator.DataMigrator.DatabaseType;
+import org.rhq.server.metrics.migrator.DataMigrator.Task;
+import org.rhq.server.metrics.migrator.ExistingDataSource;
+import org.rhq.server.metrics.migrator.ExistingPostgresDataBulkExportSource;
+import org.rhq.server.metrics.migrator.ScrollableDataSource;
+
+/**
+ * @author Stefan Negrea
+ *
+ */
+public abstract class AbstractMigrationWorker {
+ private final Log log = LogFactory.getLog(AbstractMigrationWorker.class);
+
+ /**
+ * Returns a list of all the raw SQL metric tables.
+ * There is no equivalent in Cassandra, all raw data is stored in a single column family.
+ *
+ * @return SQL raw metric tables
+ */
+ protected static String[] getRawDataTables() {
+ int tableCount = 15;
+ String tablePrefix = "RHQ_MEAS_DATA_NUM_R";
+
+ String[] tables = new String[tableCount];
+ for (int i = 0; i < tableCount; i++) {
+ if (i < 10) {
+ tables[i] = tablePrefix + "0" + i;
+ } else {
+ tables[i] = tablePrefix + i;
+ }
+ }
+
+ return tables;
+ }
+
+ protected ExistingDataSource getExistingDataSource(String query, Task task, DataMigratorConfiguration config) {
+ if (Task.Migrate.equals(task)) {
+ if (DatabaseType.Oracle.equals(config.getDatabaseType())) {
+ return new ScrollableDataSource(config.getEntityManager(), config.getDatabaseType(), query);
+ } else {
+ if (!config.isExperimentalDataSource()) {
+ return new ScrollableDataSource(config.getEntityManager(), config.getDatabaseType(), query);
+ } else {
+ return new ExistingPostgresDataBulkExportSource(config.getEntityManager(), query);
+ }
+ }
+ } else if (Task.Estimate.equals(task)) {
+ int limit = CallableMigrationWorker.MAX_RECORDS_TO_LOAD_FROM_SQL
+ * (CallableMigrationWorker.NUMBER_OF_BATCHES_FOR_ESTIMATION + 1);
+
+ if (DatabaseType.Oracle.equals(config.getDatabaseType())) {
+ return new ScrollableDataSource(config.getEntityManager(), config.getDatabaseType(), query, limit);
+ } else {
+ if (!config.isExperimentalDataSource()) {
+ return new ScrollableDataSource(config.getEntityManager(), config.getDatabaseType(), query, limit);
+ } else {
+ return new ExistingPostgresDataBulkExportSource(config.getEntityManager(), query, limit);
+ }
+ }
+ }
+
+ return new ScrollableDataSource(config.getEntityManager(), config.getDatabaseType(), query);
+ }
+
+ protected void prepareSQLSession(StatelessSession session, DataMigratorConfiguration config) {
+ if (DatabaseType.Postgres.equals(config.getDatabaseType())) {
+ log.debug("Preparing SQL connection with timeout: " + DataMigrator.SQL_TIMEOUT);
+
+ org.hibernate.Query query = session.createSQLQuery("SET LOCAL statement_timeout = "
+ + DataMigrator.SQL_TIMEOUT);
+ query.setReadOnly(true);
+ query.executeUpdate();
+ }
+ }
+
+ protected StatelessSession getSQLSession(DataMigratorConfiguration config) {
+ StatelessSession session = ((org.hibernate.Session) config.getEntityManager().getDelegate())
+ .getSessionFactory().openStatelessSession();
+
+ prepareSQLSession(session, config);
+
+ return session;
+ }
+
+ protected void closeSQLSession(StatelessSession session) {
+ try {
+ if (session != null) {
+ session.close();
+ }
+ } catch (Exception e) {
+ //log.debug("Unable to close SQL stateless session. " + e);
+ }
+ }
+
+}
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/AggregateDataMigrator.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/AggregateDataMigrator.java
new file mode 100644
index 0000000..5511321
--- /dev/null
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/AggregateDataMigrator.java
@@ -0,0 +1,262 @@
+/*
+ * RHQ Management Platform
+ * Copyright 2013, Red Hat Middleware LLC, and individual contributors
+ * as indicated by the @author tags. See the copyright.txt file in the
+ * distribution for a full listing of individual contributors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+package org.rhq.server.metrics.migrator.workers;
+
+import static com.datastax.driver.core.querybuilder.QueryBuilder.ttl;
+
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+
+import com.datastax.driver.core.ResultSetFuture;
+import com.datastax.driver.core.querybuilder.Batch;
+import com.datastax.driver.core.querybuilder.QueryBuilder;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.hibernate.StatelessSession;
+
+import org.rhq.server.metrics.domain.AggregateType;
+import org.rhq.server.metrics.domain.MetricsTable;
+import org.rhq.server.metrics.migrator.DataMigrator;
+import org.rhq.server.metrics.migrator.DataMigrator.DataMigratorConfiguration;
+import org.rhq.server.metrics.migrator.DataMigrator.Task;
+import org.rhq.server.metrics.migrator.ExistingDataSource;
+import org.rhq.server.metrics.migrator.Telemetry;
+
+/**
+ * @author Stefan Negrea
+ *
+ */
+public class AggregateDataMigrator extends AbstractMigrationWorker implements CallableMigrationWorker {
+
+ private final Log log = LogFactory.getLog(AggregateDataMigrator.class);
+
+ private final DataMigratorConfiguration config;
+ private final String selectQuery;
+ private final String deleteQuery;
+ private final String countQuery;
+ private final MetricsTable metricsTable;
+ private final MetricsIndexUpdateAccumulator metricsIndexAccumulator;
+
+ /**
+ * @param query
+ * @param metricsTable
+ */
+ public AggregateDataMigrator(MetricsTable metricsTable, DataMigratorConfiguration config)
+ throws Exception {
+
+ this.metricsTable = metricsTable;
+ this.config = config;
+
+ if (MetricsTable.ONE_HOUR.equals(this.metricsTable)) {
+ this.selectQuery = MigrationQuery.SELECT_1H_DATA.toString();
+ this.deleteQuery = MigrationQuery.DELETE_1H_DATA.toString();
+ this.countQuery = MigrationQuery.COUNT_1H_DATA.toString();
+ } else if (MetricsTable.SIX_HOUR.equals(this.metricsTable)) {
+ this.selectQuery = MigrationQuery.SELECT_6H_DATA.toString();
+ this.deleteQuery = MigrationQuery.DELETE_6H_DATA.toString();
+ this.countQuery = MigrationQuery.COUNT_6H_DATA.toString();
+ } else if (MetricsTable.TWENTY_FOUR_HOUR.equals(this.metricsTable)) {
+ this.selectQuery = MigrationQuery.SELECT_1D_DATA.toString();
+ this.deleteQuery = MigrationQuery.DELETE_1D_DATA.toString();
+ this.countQuery = MigrationQuery.COUNT_1D_DATA.toString();
+ } else {
+ throw new Exception("MetricsTable " + metricsTable.toString() + " not supported by this migrator.");
+ }
+
+ metricsIndexAccumulator = new MetricsIndexUpdateAccumulator(metricsTable, config);
+ }
+
+ @Override
+ public long estimate() throws Exception {
+ long recordCount = this.getRowCount(this.countQuery);
+ log.debug("Retrieved record count for table " + metricsTable.toString() + " -- " + recordCount);
+
+ Telemetry telemetry = this.performMigration(Task.Estimate);
+ long estimatedTimeToMigrate = telemetry.getMigrationTime();
+
+ long estimation = (recordCount / (long) MAX_RECORDS_TO_LOAD_FROM_SQL / (long) NUMBER_OF_BATCHES_FOR_ESTIMATION)
+ * estimatedTimeToMigrate;
+
+ estimation += telemetry.getNonMigrationTime();
+
+ return estimation;
+ }
+
+ public void migrate() throws Exception {
+ performMigration(Task.Migrate);
+ if (config.isDeleteDataImmediatelyAfterMigration()) {
+ deleteTableData();
+ }
+ }
+
+ private long getRowCount(String countQuery) {
+ StatelessSession session = getSQLSession(config);
+
+ org.hibernate.Query query = session.createSQLQuery(countQuery);
+ query.setReadOnly(true);
+ query.setTimeout(DataMigrator.SQL_TIMEOUT);
+ long count = Long.parseLong(query.uniqueResult().toString());
+
+ closeSQLSession(session);
+
+ return count;
+ }
+
+ private void deleteTableData() throws Exception {
+ int failureCount = 0;
+ while (failureCount < MAX_NUMBER_OF_FAILURES) {
+ try {
+ StatelessSession session = getSQLSession(config);
+ session.getTransaction().begin();
+ org.hibernate.Query nativeQuery = session.createSQLQuery(this.deleteQuery);
+ nativeQuery.executeUpdate();
+ session.getTransaction().commit();
+ closeSQLSession(session);
+                log.info("- " + metricsTable.toString() + " - Cleaned -"); return; // success: exit, otherwise the retry loop never terminates
+ } catch (Exception e) {
+ log.error("Failed to delete " + metricsTable.toString()
+ + " data. Attempting to delete data one more time...");
+
+ failureCount++;
+ if (failureCount == MAX_NUMBER_OF_FAILURES) {
+ throw e;
+ }
+ }
+ }
+ }
+
+ private Telemetry performMigration(Task task) throws Exception {
+ Telemetry telemetry = new Telemetry();
+ telemetry.getGeneralTimer().start();
+
+ long numberOfBatchesMigrated = 0;
+
+ List<Object[]> existingData;
+ int failureCount;
+
+ int lastMigratedRecord = 0;
+ ExistingDataSource dataSource = getExistingDataSource(selectQuery, task, config);
+ dataSource.initialize();
+
+ telemetry.getMigrationTimer().start();
+ while (true) {
+ existingData = dataSource.getData(lastMigratedRecord, MAX_RECORDS_TO_LOAD_FROM_SQL);
+
+ if (existingData.size() == 0) {
+ break;
+ }
+
+ lastMigratedRecord += existingData.size();
+
+ failureCount = 0;
+ while (failureCount < MAX_NUMBER_OF_FAILURES) {
+ try {
+ insertDataToCassandra(existingData);
+ break;
+ } catch (Exception e) {
+ log.error("Failed to insert " + metricsTable.toString()
+ + " data. Attempting to insert the current batch of data one more time");
+ log.error(e);
+
+ failureCount++;
+ if (failureCount == MAX_NUMBER_OF_FAILURES) {
+ throw e;
+ }
+ }
+ }
+
+ log.info("- " + metricsTable + " - " + lastMigratedRecord + " -");
+
+ numberOfBatchesMigrated++;
+ if (Task.Estimate.equals(task) && numberOfBatchesMigrated >= NUMBER_OF_BATCHES_FOR_ESTIMATION) {
+ break;
+ }
+ }
+
+ metricsIndexAccumulator.drain();
+
+ telemetry.getMigrationTimer().stop();
+
+ dataSource.close();
+ telemetry.getGeneralTimer().stop();
+
+ return telemetry;
+ }
+
+ private void insertDataToCassandra(List<Object[]> existingData) throws Exception {
+ List<ResultSetFuture> resultSetFutures = new ArrayList<ResultSetFuture>();
+ Batch batch = QueryBuilder.batch();
+ int batchSize = 0;
+
+ //only need approximate TTL to speed up processing
+ //given that each batch is processed within seconds, getting the
+ //system time once per batch has minimal impact on the record retention
+ long creationTimeMillis;
+ long itemTTLSeconds;
+ long currentTimeMillis = System.currentTimeMillis();
+ long expectedTTLMillis = metricsTable.getTTLinMilliseconds();
+
+ for (Object[] rawMeasurement : existingData) {
+ creationTimeMillis = Long.parseLong(rawMeasurement[MigrationQuery.TIMESTAMP_INDEX].toString());
+ itemTTLSeconds = (expectedTTLMillis - currentTimeMillis + creationTimeMillis) / 1000l;
+
+ if (itemTTLSeconds > 0) {
+ int scheduleId = Integer.parseInt(rawMeasurement[MigrationQuery.SCHEDULE_INDEX].toString());
+ Date time = new Date(creationTimeMillis);
+
+ batch.add(QueryBuilder.insertInto(metricsTable.toString()).value("schedule_id", scheduleId)
+ .value("time", time).value("type", AggregateType.AVG.ordinal())
+ .value("value", Double.parseDouble(rawMeasurement[MigrationQuery.VALUE_INDEX].toString()))
+ .using(ttl((int) itemTTLSeconds)));
+
+ batch.add(QueryBuilder.insertInto(metricsTable.toString()).value("schedule_id", scheduleId)
+ .value("time", time).value("type", AggregateType.MIN.ordinal())
+ .value("value", Double.parseDouble(rawMeasurement[MigrationQuery.MIN_VALUE_INDEX].toString()))
+ .using(ttl((int) itemTTLSeconds)));
+
+ batch.add(QueryBuilder.insertInto(metricsTable.toString()).value("schedule_id", scheduleId)
+ .value("time", time).value("type", AggregateType.MAX.ordinal())
+ .value("value", Double.parseDouble(rawMeasurement[MigrationQuery.MAX_VALUE_INDEX].toString()))
+ .using(ttl((int) itemTTLSeconds)));
+
+ batchSize += 3;
+
+ metricsIndexAccumulator.add(scheduleId, creationTimeMillis);
+ }
+
+ if (batchSize >= MAX_AGGREGATE_BATCH_TO_CASSANDRA) {
+ resultSetFutures.add(config.getSession().executeAsync(batch));
+ batch = QueryBuilder.batch();
+ batchSize = 0;
+ }
+ }
+
+ if (batchSize != 0) {
+ resultSetFutures.add(config.getSession().executeAsync(batch));
+ }
+
+ for (ResultSetFuture future : resultSetFutures) {
+ future.get();
+ }
+ }
+}
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/CallableMigrationWorker.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/CallableMigrationWorker.java
new file mode 100644
index 0000000..05a7ac6
--- /dev/null
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/CallableMigrationWorker.java
@@ -0,0 +1,38 @@
+/*
+ * RHQ Management Platform
+ * Copyright 2013, Red Hat Middleware LLC, and individual contributors
+ * as indicated by the @author tags. See the copyright.txt file in the
+ * distribution for a full listing of individual contributors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+package org.rhq.server.metrics.migrator.workers;
+
+/**
+ * @author Stefan Negrea
+ *
+ */
+public interface CallableMigrationWorker {
+
+ public static final int MAX_RECORDS_TO_LOAD_FROM_SQL = 30000;
+ public static final int MAX_RAW_BATCH_TO_CASSANDRA = 100;
+ public static final int MAX_AGGREGATE_BATCH_TO_CASSANDRA = 50;
+ public static final int NUMBER_OF_BATCHES_FOR_ESTIMATION = 4;
+ public static final int MAX_NUMBER_OF_FAILURES = 5;
+
+ long estimate() throws Exception;
+
+ void migrate() throws Exception;
+}
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/DeleteAllData.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/DeleteAllData.java
new file mode 100644
index 0000000..c1cb62c
--- /dev/null
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/DeleteAllData.java
@@ -0,0 +1,89 @@
+/*
+ * RHQ Management Platform
+ * Copyright 2013, Red Hat Middleware LLC, and individual contributors
+ * as indicated by the @author tags. See the copyright.txt file in the
+ * distribution for a full listing of individual contributors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+package org.rhq.server.metrics.migrator.workers;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.hibernate.StatelessSession;
+
+import org.rhq.server.metrics.migrator.DataMigrator.DataMigratorConfiguration;
+
+/**
+ * @author Stefan Negrea
+ *
+ */
+public class DeleteAllData extends AbstractMigrationWorker implements CallableMigrationWorker {
+
+ private final Log log = LogFactory.getLog(DeleteAllData.class);
+
+ private final DataMigratorConfiguration config;
+
+ public DeleteAllData(DataMigratorConfiguration config) {
+ this.config = config;
+ }
+
+ public void migrate() {
+ org.hibernate.Query nativeQuery;
+ StatelessSession session = getSQLSession(config);
+
+ if (config.isRun1HAggregateDataMigration()) {
+ session.getTransaction().begin();
+ nativeQuery = session.createSQLQuery(MigrationQuery.DELETE_1H_DATA.toString());
+ nativeQuery.executeUpdate();
+ session.getTransaction().commit();
+ log.info("- RHQ_MEASUREMENT_DATA_NUM_1H - Cleaned -");
+ }
+
+ if (config.isRun6HAggregateDataMigration()) {
+ session.getTransaction().begin();
+ nativeQuery = session.createSQLQuery(MigrationQuery.DELETE_6H_DATA.toString());
+ nativeQuery.executeUpdate();
+ session.getTransaction().commit();
+ log.info("- RHQ_MEASUREMENT_DATA_NUM_6H - Cleaned -");
+ }
+
+ if (config.isRun1DAggregateDataMigration()) {
+ session.getTransaction().begin();
+ nativeQuery = session.createSQLQuery(MigrationQuery.DELETE_1D_DATA.toString());
+ nativeQuery.executeUpdate();
+ session.getTransaction().commit();
+ log.info("- RHQ_MEASUREMENT_DATA_NUM_1D - Cleaned -");
+ }
+
+ if (config.isRunRawDataMigration()) {
+ for (String table : getRawDataTables()) {
+ session.getTransaction().begin();
+ String deleteAllData = String.format(MigrationQuery.DELETE_RAW_ALL_DATA.toString(), table);
+ nativeQuery = session.createSQLQuery(deleteAllData);
+ nativeQuery.executeUpdate();
+ session.getTransaction().commit();
+ log.info("- " + table + " - Cleaned -");
+ }
+ }
+
+ closeSQLSession(session);
+ }
+
+ @Override
+ public long estimate() throws Exception {
+        return 300000; // return 5 minutes for now without any database-side checks.
+ }
+}
\ No newline at end of file
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/MetricsIndexUpdateAccumulator.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/MetricsIndexUpdateAccumulator.java
new file mode 100644
index 0000000..a126d7e
--- /dev/null
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/MetricsIndexUpdateAccumulator.java
@@ -0,0 +1,167 @@
+/*
+ * RHQ Management Platform
+ * Copyright 2013, Red Hat Middleware LLC, and individual contributors
+ * as indicated by the @author tags. See the copyright.txt file in the
+ * distribution for a full listing of individual contributors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+package org.rhq.server.metrics.migrator.workers;
+
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import com.datastax.driver.core.BoundStatement;
+import com.datastax.driver.core.PreparedStatement;
+import com.datastax.driver.core.ResultSetFuture;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.hibernate.Query;
+import org.hibernate.StatelessSession;
+import org.joda.time.DateTime;
+import org.joda.time.Duration;
+
+import org.rhq.server.metrics.DateTimeService;
+import org.rhq.server.metrics.MetricsConfiguration;
+import org.rhq.server.metrics.domain.MetricsTable;
+import org.rhq.server.metrics.migrator.DataMigrator.DataMigratorConfiguration;
+
+/**
+ * @author Stefan Negrea
+ *
+ */
+public class MetricsIndexUpdateAccumulator extends AbstractMigrationWorker {
+ private static final int MAX_SIZE = 3000;
+
+ private final Log log = LogFactory.getLog(MetricsIndexUpdateAccumulator.class);
+
+ private final DateTimeService dateTimeService = new DateTimeService();
+ private final MetricsConfiguration configuration = new MetricsConfiguration();
+ private final Map<Integer, Set<Long>> accumulator = new HashMap<Integer, Set<Long>>();
+
+ private final MetricsTable table;
+ private final DataMigratorConfiguration config;
+
+ private final long timeLimit;
+ private final PreparedStatement updateMetricsIndex;
+ private final Duration sliceDuration;
+ private final boolean validAccumulatorTable;
+
+ private int currentCount = 0;
+
+ public MetricsIndexUpdateAccumulator(MetricsTable table, DataMigratorConfiguration config) {
+ this.table = table;
+ this.config = config;
+
+ if (MetricsTable.RAW.equals(table) || MetricsTable.ONE_HOUR.equals(table)
+ || MetricsTable.SIX_HOUR.equals(table)) {
+ this.sliceDuration = configuration.getTimeSliceDuration(table);
+ this.timeLimit = this.getLastAggregationTime(table) - this.sliceDuration.getMillis();
+ this.updateMetricsIndex = config.getSession().prepare(
+ "INSERT INTO " + MetricsTable.INDEX.getTableName()
+ + " (bucket, time, schedule_id) VALUES (?, ?, ?)");
+ this.validAccumulatorTable = true;
+ } else {
+ this.timeLimit = Integer.MAX_VALUE;
+ this.updateMetricsIndex = null;
+ this.sliceDuration = null;
+ this.validAccumulatorTable = false;
+ }
+ }
+
+ public void add(int scheduleId, long timestamp) throws Exception {
+ if (validAccumulatorTable && timeLimit <= timestamp) {
+ long alignedTimeSlice = dateTimeService.getTimeSlice(timestamp, sliceDuration).getMillis();
+
+ if (accumulator.containsKey(scheduleId)) {
+ Set<Long> timestamps = accumulator.get(scheduleId);
+ if (!timestamps.contains(alignedTimeSlice)) {
+ timestamps.add(alignedTimeSlice);
+
+ currentCount++;
+ }
+ } else {
+ Set<Long> timestamps = new HashSet<Long>();
+                timestamps.add(alignedTimeSlice);
+ accumulator.put(scheduleId, timestamps);
+
+ currentCount++;
+ }
+ }
+
+ if (currentCount > MAX_SIZE) {
+ drain();
+ }
+ }
+
+ public void drain() throws Exception {
+ if (log.isDebugEnabled()) {
+ log.debug("Draining metrics index accumulator with " + currentCount + " entries");
+ }
+
+ List<ResultSetFuture> resultSetFutures = new ArrayList<ResultSetFuture>();
+
+ for (Map.Entry<Integer, Set<Long>> entry : accumulator.entrySet()) {
+ for (Long timestamp : entry.getValue()) {
+ BoundStatement statement = updateMetricsIndex.bind(this.table.getTableName(), new Date(timestamp),
+ entry.getKey());
+ resultSetFutures.add(config.getSession().executeAsync(statement));
+ }
+ }
+
+ for (ResultSetFuture future : resultSetFutures) {
+ future.get();
+ }
+
+ accumulator.clear();
+ currentCount = 0;
+ }
+
+ private long getLastAggregationTime(MetricsTable migratedTable) {
+ StatelessSession session = getSQLSession(config);
+
+ long aggregationSlice = -1;
+ Duration duration = null;
+ String queryString = null;
+
+ if (MetricsTable.RAW.equals(migratedTable)) {
+ duration = configuration.getRawTimeSliceDuration();
+ queryString = MigrationQuery.MAX_TIMESTAMP_1H_DATA.toString();
+ } else if (MetricsTable.ONE_HOUR.equals(migratedTable)) {
+ duration = configuration.getOneHourTimeSliceDuration();
+ queryString = MigrationQuery.MAX_TIMESTAMP_6H_DATA.toString();
+ } else if (MetricsTable.SIX_HOUR.equals(migratedTable)) {
+ duration = configuration.getSixHourTimeSliceDuration();
+ queryString = MigrationQuery.MAX_TIMESTAMP_1D_DATA.toString();
+ }
+
+ if (duration != null && queryString != null) {
+ Query query = session.createSQLQuery(queryString);
+ String queryResult = query.uniqueResult().toString();
+ Long timestamp = Long.parseLong(queryResult);
+ aggregationSlice = dateTimeService.getTimeSlice(new DateTime(timestamp), duration).getMillis();
+ }
+
+ closeSQLSession(session);
+
+ return aggregationSlice;
+ }
+}
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/MigrationQuery.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/MigrationQuery.java
new file mode 100644
index 0000000..7f13349
--- /dev/null
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/MigrationQuery.java
@@ -0,0 +1,71 @@
+/*
+ * RHQ Management Platform
+ * Copyright 2013, Red Hat Middleware LLC, and individual contributors
+ * as indicated by the @author tags. See the copyright.txt file in the
+ * distribution for a full listing of individual contributors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+package org.rhq.server.metrics.migrator.workers;
+
+/**
+ * @author Stefan Negrea
+ *
+ */
+public enum MigrationQuery {
+ SELECT_1H_DATA(
+ "SELECT schedule_id, time_stamp, value, minvalue, maxvalue FROM RHQ_MEASUREMENT_DATA_NUM_1H ORDER BY schedule_id, time_stamp"), SELECT_6H_DATA(
+ "SELECT schedule_id, time_stamp, value, minvalue, maxvalue FROM RHQ_MEASUREMENT_DATA_NUM_6H ORDER BY schedule_id, time_stamp"), SELECT_1D_DATA(
+ "SELECT schedule_id, time_stamp, value, minvalue, maxvalue FROM RHQ_MEASUREMENT_DATA_NUM_1D ORDER BY schedule_id, time_stamp"),
+
+ DELETE_1H_DATA("DELETE FROM RHQ_MEASUREMENT_DATA_NUM_1H"), DELETE_6H_DATA("DELETE FROM RHQ_MEASUREMENT_DATA_NUM_6H"), DELETE_1D_DATA(
+ "DELETE FROM RHQ_MEASUREMENT_DATA_NUM_1D"),
+
+ COUNT_1H_DATA("SELECT COUNT(*) FROM RHQ_MEASUREMENT_DATA_NUM_1H"), COUNT_6H_DATA(
+ "SELECT COUNT(*) FROM RHQ_MEASUREMENT_DATA_NUM_6H"), COUNT_1D_DATA(
+ "SELECT COUNT(*) FROM RHQ_MEASUREMENT_DATA_NUM_1D"),
+
+ MAX_TIMESTAMP_1H_DATA("SELECT MAX(time_stamp) FROM RHQ_MEASUREMENT_DATA_NUM_1H"), MAX_TIMESTAMP_6H_DATA(
+ "SELECT MAX(time_stamp) FROM RHQ_MEASUREMENT_DATA_NUM_6H"), MAX_TIMESTAMP_1D_DATA(
+ "SELECT MAX(time_stamp) FROM RHQ_MEASUREMENT_DATA_NUM_1D"),
+
+ COUNT_RAW("SELECT COUNT(*) FROM %s"), SELECT_RAW_DATA(
+ "SELECT schedule_id, time_stamp, value FROM %s ORDER BY schedule_id, time_stamp"), DELETE_RAW_ALL_DATA(
+ "DELETE FROM %s"), DELETE_RAW_ENTRY("DELETE FROM %s WHERE schedule_id = ?");
+
+ public static final int SCHEDULE_INDEX = 0;
+ public static final int TIMESTAMP_INDEX = 1;
+ public static final int VALUE_INDEX = 2;
+ public static final int MIN_VALUE_INDEX = 3;
+ public static final int MAX_VALUE_INDEX = 4;
+
+ private String query;
+
+ private MigrationQuery(String query) {
+ this.query = query;
+ }
+
+ /**
+ * @return the query
+ */
+ public String getQuery() {
+ return query;
+ }
+
+ @Override
+ public String toString() {
+ return query;
+ }
+}
\ No newline at end of file
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/RawDataMigrator.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/RawDataMigrator.java
new file mode 100644
index 0000000..6680fd2
--- /dev/null
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/workers/RawDataMigrator.java
@@ -0,0 +1,250 @@
+/*
+ * RHQ Management Platform
+ * Copyright 2013, Red Hat Middleware LLC, and individual contributors
+ * as indicated by the @author tags. See the copyright.txt file in the
+ * distribution for a full listing of individual contributors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+package org.rhq.server.metrics.migrator.workers;
+
+import static com.datastax.driver.core.querybuilder.QueryBuilder.ttl;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Date;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Queue;
+
+import com.datastax.driver.core.ResultSetFuture;
+import com.datastax.driver.core.querybuilder.Batch;
+import com.datastax.driver.core.querybuilder.QueryBuilder;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.hibernate.StatelessSession;
+
+import org.rhq.server.metrics.domain.MetricsTable;
+import org.rhq.server.metrics.migrator.DataMigrator;
+import org.rhq.server.metrics.migrator.DataMigrator.DataMigratorConfiguration;
+import org.rhq.server.metrics.migrator.DataMigrator.Task;
+import org.rhq.server.metrics.migrator.ExistingDataSource;
+import org.rhq.server.metrics.migrator.Telemetry;
+
+/**
+ * @author Stefan Negrea
+ *
+ */
+public class RawDataMigrator extends AbstractMigrationWorker implements CallableMigrationWorker {
+ private final Log log = LogFactory.getLog(RawDataMigrator.class);
+
+ private final Queue<String> tablesNotProcessed = new LinkedList<String>(Arrays.asList(getRawDataTables()));
+ private final MetricsIndexUpdateAccumulator metricsIndexAccumulator;
+ private final DataMigratorConfiguration config;
+
+ public RawDataMigrator(DataMigratorConfiguration config) {
+ this.config = config;
+ this.metricsIndexAccumulator = new MetricsIndexUpdateAccumulator(MetricsTable.RAW, config);
+ }
+
+ public long estimate() throws Exception {
+ long recordCount = 0;
+ for (String table : getRawDataTables()) {
+ String countQuery = String.format(MigrationQuery.COUNT_RAW.toString(), table);
+ long tableRecordCount = this.getRowCount(countQuery);
+
+ log.debug("Retrieved record count for table " + table + " -- " + tableRecordCount);
+
+ recordCount += tableRecordCount;
+ }
+
+ Telemetry telemetry = this.performMigration(Task.Estimate);
+ long estimatedTimeToMigrate = telemetry.getMigrationTime();
+ long estimation = (recordCount / (long) MAX_RECORDS_TO_LOAD_FROM_SQL / (long) NUMBER_OF_BATCHES_FOR_ESTIMATION)
+ * estimatedTimeToMigrate;
+ estimation += telemetry.getNonMigrationTime();
+
+ return estimation;
+ }
+
+ public void migrate() throws Exception {
+ performMigration(Task.Migrate);
+ }
+
+ private long getRowCount(String countQuery) {
+ StatelessSession session = getSQLSession(config);
+
+ org.hibernate.Query query = session.createSQLQuery(countQuery);
+ query.setReadOnly(true);
+ query.setTimeout(DataMigrator.SQL_TIMEOUT);
+
+ long count = Long.parseLong(query.uniqueResult().toString());
+
+ closeSQLSession(session);
+
+ return count;
+ }
+
+ private Telemetry performMigration(Task task) throws Exception {
+ Telemetry telemetry = new Telemetry();
+ telemetry.getGeneralTimer().start();
+
+ long numberOfBatchesMigrated = 0;
+
+ List<Object[]> existingData;
+ int failureCount;
+
+ telemetry.getMigrationTimer().start();
+ telemetry.getMigrationTimer().suspend();
+
+ while (!tablesNotProcessed.isEmpty()) {
+ String table = tablesNotProcessed.peek();
+
+ String selectQuery = String.format(MigrationQuery.SELECT_RAW_DATA.toString(), table);
+
+ ExistingDataSource dataSource = getExistingDataSource(selectQuery, task, config);
+ dataSource.initialize();
+
+ log.info("Start migrating raw table: " + table);
+
+ telemetry.getMigrationTimer().resume();
+ int lastMigratedRecord = 0;
+ while (true) {
+ existingData = dataSource.getData(lastMigratedRecord, MAX_RECORDS_TO_LOAD_FROM_SQL);
+
+ if (existingData.size() == 0) {
+ break;
+ }
+
+ lastMigratedRecord += existingData.size();
+
+ failureCount = 0;
+ while (failureCount < MAX_NUMBER_OF_FAILURES) {
+ try {
+ insertDataToCassandra(existingData);
+ break;
+ } catch (Exception e) {
+ log.error("Failed to insert " + MetricsTable.RAW.toString()
+ + " data. Attempting to insert the current batch of data one more time");
+ log.error(e);
+
+ failureCount++;
+ if (failureCount == MAX_AGGREGATE_BATCH_TO_CASSANDRA) {
+ throw e;
+ }
+ }
+ }
+
+ log.info("- " + table + " - " + lastMigratedRecord + " -");
+
+ numberOfBatchesMigrated++;
+ if (Task.Estimate.equals(task) && numberOfBatchesMigrated >= NUMBER_OF_BATCHES_FOR_ESTIMATION) {
+ break;
+ }
+ }
+ telemetry.getMigrationTimer().suspend();
+
+ if (Task.Migrate.equals(task)) {
+ log.info("Done migrating raw table" + table + "---------------------");
+
+ if (config.isDeleteDataImmediatelyAfterMigration()) {
+ deleteTableData(table);
+ }
+ } else if (numberOfBatchesMigrated >= NUMBER_OF_BATCHES_FOR_ESTIMATION) {
+ break;
+ }
+
+ dataSource.close();
+ tablesNotProcessed.poll();
+ }
+
+ telemetry.getMigrationTimer().resume();
+ metricsIndexAccumulator.drain();
+ telemetry.getMigrationTimer().suspend();
+
+ telemetry.getGeneralTimer().stop();
+ return telemetry;
+ }
+
+ private void deleteTableData(String table) throws Exception {
+ String deleteQuery = String.format(MigrationQuery.DELETE_RAW_ENTRY.toString(), table);
+ int failureCount = 0;
+ while (failureCount < MAX_NUMBER_OF_FAILURES) {
+ try {
+ StatelessSession session = getSQLSession(config);
+ session.getTransaction().begin();
+ org.hibernate.Query nativeQuery = session.createSQLQuery(deleteQuery);
+ nativeQuery.executeUpdate();
+ session.getTransaction().commit();
+ closeSQLSession(session);
+ log.info("- " + table + " - Cleaned -");
+ } catch (Exception e) {
+ log.error("Failed to delete " + table + " data. Attempting to delete data one more time...");
+
+ failureCount++;
+ if (failureCount == MAX_NUMBER_OF_FAILURES) {
+ throw e;
+ }
+ }
+ }
+ }
+
+ private void insertDataToCassandra(List<Object[]> existingData) throws Exception {
+ List<ResultSetFuture> resultSetFutures = new ArrayList<ResultSetFuture>();
+ Batch batch = QueryBuilder.batch();
+ int batchSize = 0;
+
+ //only need approximate TTL to speed up processing
+ //given that each batch is processed within seconds, getting the
+ //system time once per batch has minimal impact on the record retention
+ long creationTimeMillis;
+ long itemTTLSeconds;
+ long currentTimeMillis = System.currentTimeMillis();
+ long expectedTTLMillis = MetricsTable.RAW.getTTLinMilliseconds();
+
+ for (Object[] rawDataPoint : existingData) {
+ creationTimeMillis = Long.parseLong(rawDataPoint[MigrationQuery.TIMESTAMP_INDEX].toString());
+ itemTTLSeconds = (expectedTTLMillis - currentTimeMillis + creationTimeMillis) / 1000l;
+
+ if (itemTTLSeconds > 0) {
+ int scheduleId = Integer.parseInt(rawDataPoint[MigrationQuery.SCHEDULE_INDEX].toString());
+ Date creationTime = new Date(creationTimeMillis);
+
+ batch.add(QueryBuilder.insertInto(MetricsTable.RAW.toString()).value("schedule_id", scheduleId)
+ .value("time", creationTime)
+ .value("value", Double.parseDouble(rawDataPoint[MigrationQuery.VALUE_INDEX].toString()))
+ .using(ttl((int) itemTTLSeconds)));
+ batchSize++;
+
+ metricsIndexAccumulator.add(scheduleId, creationTimeMillis);
+ }
+
+ if (batchSize >= MAX_RAW_BATCH_TO_CASSANDRA) {
+ resultSetFutures.add(config.getSession().executeAsync(batch));
+ batch = QueryBuilder.batch();
+ batchSize = 0;
+ }
+ }
+
+ if (batchSize != 0) {
+ resultSetFutures.add(config.getSession().executeAsync(batch));
+ }
+
+ for (ResultSetFuture future : resultSetFutures) {
+ future.get();
+ }
+ }
+}
commit 8147d8460585122942b9ae5aa22f7a8bc9306623
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Wed May 22 15:22:09 2013 -0500
Update the metrics index table during the migration to insert metrics index entries. This will trigger aggregation on the recently migrated data if needed. The period to be added for aggregation starts one aggregation slice before the last aggregation.
Also, added manual connection timeouts to Postgres connections since the driver does not honor them properly when set in Hibernate only.
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DateTimeService.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DateTimeService.java
index 8f4e5bb..01b6f71 100644
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DateTimeService.java
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DateTimeService.java
@@ -51,6 +51,10 @@ public class DateTimeService {
this.configuration = configuration;
}
+ public DateTime getTimeSlice(long timestamp, Minutes interval) {
+ return getTimeSlice(new DateTime(timestamp), interval);
+ }
+
public DateTime getTimeSlice(DateTime dateTime, Minutes interval) {
Chronology chronology = GregorianChronology.getInstance();
DateTimeField hourField = chronology.hourOfDay();
@@ -61,6 +65,10 @@ public class DateTimeService {
return new DateTime(timestamp);
}
+ public DateTime getTimeSlice(long timestamp, Duration duration) {
+ return getTimeSlice(new DateTime(timestamp), duration);
+ }
+
public DateTime getTimeSlice(DateTime dt, Duration duration) {
Period p = duration.toPeriod();
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsConfiguration.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsConfiguration.java
index 16ac2b6..80fc401 100644
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsConfiguration.java
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsConfiguration.java
@@ -150,4 +150,17 @@ public class MetricsConfiguration {
public void setSixHourTimeSliceDuration(Duration sixHourTimeSliceDuration) {
this.sixHourTimeSliceDuration = sixHourTimeSliceDuration;
}
+
+ public Duration getTimeSliceDuration(MetricsTable table) {
+ if (MetricsTable.RAW.equals(table)) {
+ return this.getRawTimeSliceDuration();
+ } else if (MetricsTable.ONE_HOUR.equals(table)) {
+ return this.getOneHourTimeSliceDuration();
+ } else if (MetricsTable.SIX_HOUR.equals(table)) {
+ return this.getSixHourTimeSliceDuration();
+ }
+
+ throw new IllegalArgumentException("Time slice duration for " + table.getTableName()
+ + " table is not supported");
+ }
}
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/DataMigrator.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/DataMigrator.java
index e4aa391..952c454 100644
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/DataMigrator.java
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/DataMigrator.java
@@ -25,13 +25,18 @@ import static com.datastax.driver.core.querybuilder.QueryBuilder.ttl;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
+import java.util.Map;
import java.util.Queue;
+import java.util.Set;
import javax.persistence.EntityManager;
-import javax.persistence.Query;
+import com.datastax.driver.core.BoundStatement;
+import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.Batch;
@@ -40,8 +45,13 @@ import com.datastax.driver.core.querybuilder.QueryBuilder;
import org.apache.commons.lang.time.StopWatch;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.hibernate.Query;
import org.hibernate.StatelessSession;
+import org.joda.time.DateTime;
+import org.joda.time.Duration;
+import org.rhq.server.metrics.DateTimeService;
+import org.rhq.server.metrics.MetricsConfiguration;
import org.rhq.server.metrics.domain.AggregateType;
import org.rhq.server.metrics.domain.MetricsTable;
@@ -81,6 +91,11 @@ public class DataMigrator {
COUNT_6H_DATA("SELECT COUNT(*) FROM RHQ_MEASUREMENT_DATA_NUM_6H"),
COUNT_1D_DATA("SELECT COUNT(*) FROM RHQ_MEASUREMENT_DATA_NUM_1D"),
+ MAX_TIMESTAMP_1H_DATA("SELECT MAX(time_stamp) FROM RHQ_MEASUREMENT_DATA_NUM_1H"),
+ MAX_TIMESTAMP_6H_DATA("SELECT MAX(time_stamp) FROM RHQ_MEASUREMENT_DATA_NUM_6H"),
+ MAX_TIMESTAMP_1D_DATA("SELECT MAX(time_stamp) FROM RHQ_MEASUREMENT_DATA_NUM_1D"),
+
+
COUNT_RAW("SELECT COUNT(*) FROM %s"),
SELECT_RAW_DATA("SELECT schedule_id, time_stamp, value FROM %s ORDER BY schedule_id, time_stamp"),
DELETE_RAW_ALL_DATA("DELETE FROM %s"),
@@ -311,33 +326,32 @@ public class DataMigrator {
return tables;
}
- private ExistingDataSource getExistingDataSource(EntityManager entityManager, String query, Task task) {
-
+ private ExistingDataSource getExistingDataSource(String query, Task task) {
if (Task.Migrate.equals(task)) {
if (DatabaseType.Oracle.equals(this.databaseType)) {
- return new ScrollableDataSource(entityManager, query);
+ return new ScrollableDataSource(this.entityManager, this.databaseType, query);
} else {
if (!experimentalDataSource) {
- return new ScrollableDataSource(entityManager, query);
+ return new ScrollableDataSource(this.entityManager, this.databaseType, query);
} else {
- return new ExistingPostgresDataBulkExportSource(entityManager, query);
+ return new ExistingPostgresDataBulkExportSource(this.entityManager, query);
}
}
} else if (Task.Estimate.equals(task)) {
int limit = MAX_RECORDS_TO_LOAD_FROM_SQL * (NUMBER_OF_BATCHES_FOR_ESTIMATION + 1);
if (DatabaseType.Oracle.equals(this.databaseType)) {
- return new ScrollableDataSource(entityManager, query, limit);
+ return new ScrollableDataSource(this.entityManager, this.databaseType, query, limit);
} else {
if (!experimentalDataSource) {
- return new ScrollableDataSource(entityManager, query, limit);
+ return new ScrollableDataSource(this.entityManager, this.databaseType, query, limit);
} else {
- return new ExistingPostgresDataBulkExportSource(entityManager, query, limit);
+ return new ExistingPostgresDataBulkExportSource(this.entityManager, query, limit);
}
}
}
- return new ScrollableDataSource(entityManager, query);
+ return new ScrollableDataSource(this.entityManager, this.databaseType, query);
}
private void prepareSQLSession(StatelessSession session) {
@@ -350,6 +364,26 @@ public class DataMigrator {
}
}
+ private StatelessSession getSQLSession() {
+ StatelessSession session = ((org.hibernate.Session) this.entityManager.getDelegate()).getSessionFactory()
+ .openStatelessSession();
+
+ prepareSQLSession(session);
+
+ return session;
+ }
+
+ private void closeSQLSession(StatelessSession session) {
+ try {
+ if (session != null) {
+ session.close();
+ }
+ } catch (Exception e) {
+ log.debug("Unable to close SQL stateless session. " + e);
+ }
+ }
+
+
private enum Task {
Migrate, Estimate
}
@@ -384,8 +418,119 @@ public class DataMigrator {
}
}
- private interface CallableMigrationWorker {
+ private class MetricsIndexUpdateAccumulator {
+ private static final int MAX_SIZE = 3000;
+
+ private final DateTimeService dateTimeService = new DateTimeService();
+ private final MetricsConfiguration configuration = new MetricsConfiguration();
+
+ private final Map<Integer, Set<Long>> accumulator = new HashMap<Integer, Set<Long>>();
+ private final long timeLimit;
+ private final MetricsTable table;
+ private final PreparedStatement updateMetricsIndex;
+ private final Duration sliceDuration;
+ private final boolean validAccumulatorTable;
+ private int currentCount = 0;
+
+ public MetricsIndexUpdateAccumulator(MetricsTable table) {
+ this.table = table;
+
+ if (MetricsTable.RAW.equals(table) || MetricsTable.ONE_HOUR.equals(table)
+ || MetricsTable.SIX_HOUR.equals(table)) {
+ this.sliceDuration = configuration.getTimeSliceDuration(table);
+ this.timeLimit = this.getLastAggregationTime(table) - this.sliceDuration.getMillis();
+ this.updateMetricsIndex = session.prepare("INSERT INTO " + MetricsTable.INDEX.getTableName()
+ + " (bucket, time, schedule_id) VALUES (?, ?, ?)");
+ this.validAccumulatorTable = true;
+ } else {
+ this.timeLimit = Integer.MAX_VALUE;
+ this.updateMetricsIndex = null;
+ this.sliceDuration = null;
+ this.validAccumulatorTable = false;
+ }
+ }
+
+ public void add(int scheduleId, long timestamp) throws Exception {
+ if (validAccumulatorTable && timeLimit <= timestamp) {
+ long alignedTimeSlice = dateTimeService.getTimeSlice(timestamp, sliceDuration).getMillis();
+
+ if (accumulator.containsKey(scheduleId)) {
+ Set<Long> timestamps = accumulator.get(scheduleId);
+ if (!timestamps.contains(alignedTimeSlice)) {
+ timestamps.add(alignedTimeSlice);
+
+ currentCount++;
+ }
+ } else {
+ Set<Long> timestamps = new HashSet<Long>();
+ timestamps.add(timestamp);
+ accumulator.put(scheduleId, timestamps);
+
+ currentCount++;
+ }
+ }
+
+ if (currentCount > MAX_SIZE) {
+ drain();
+ }
+ }
+
+ public void drain() throws Exception {
+ if (log.isDebugEnabled()) {
+ log.debug("Draining metrics index accumulator with " + currentCount + " entries");
+ }
+
+ List<ResultSetFuture> resultSetFutures = new ArrayList<ResultSetFuture>();
+
+ for (Map.Entry<Integer, Set<Long>> entry : accumulator.entrySet()) {
+ for (Long timestamp : entry.getValue()) {
+ BoundStatement statement = updateMetricsIndex.bind(this.table.getTableName(), new Date(timestamp),
+ entry.getKey());
+ resultSetFutures.add(session.executeAsync(statement));
+ }
+ }
+
+ for (ResultSetFuture future : resultSetFutures) {
+ future.get();
+ }
+
+ accumulator.clear();
+ currentCount = 0;
+ }
+
+ private long getLastAggregationTime(MetricsTable migratedTable) {
+ StatelessSession session = getSQLSession();
+
+ long aggregationSlice = -1;
+ Duration duration = null;
+ String queryString = null;
+
+ if (MetricsTable.RAW.equals(migratedTable)) {
+ duration = configuration.getRawTimeSliceDuration();
+ queryString = MigrationQuery.MAX_TIMESTAMP_1H_DATA.toString();
+ } else if (MetricsTable.ONE_HOUR.equals(migratedTable)) {
+ duration = configuration.getOneHourTimeSliceDuration();
+ queryString = MigrationQuery.MAX_TIMESTAMP_6H_DATA.toString();
+ } else if (MetricsTable.SIX_HOUR.equals(migratedTable)) {
+ duration = configuration.getSixHourTimeSliceDuration();
+ queryString = MigrationQuery.MAX_TIMESTAMP_1D_DATA.toString();
+ }
+
+ if (duration != null && queryString != null) {
+ Query query = session.createSQLQuery(queryString);
+ String queryResult = query.uniqueResult().toString();
+ Long timestamp = Long.parseLong(queryResult);
+ aggregationSlice = dateTimeService.getTimeSlice(new DateTime(timestamp), duration).getMillis();
+ }
+
+ closeSQLSession(session);
+
+ return aggregationSlice;
+ }
+ }
+
+ private interface CallableMigrationWorker {
long estimate() throws Exception;
@@ -402,6 +547,7 @@ public class DataMigrator {
private final String deleteQuery;
private final String countQuery;
private final MetricsTable metricsTable;
+ private final MetricsIndexUpdateAccumulator metricsIndexAccumulator;
/**
* @param query
@@ -425,6 +571,8 @@ public class DataMigrator {
} else {
throw new Exception("MetricsTable " + metricsTable.toString() + " not supported by this migrator.");
}
+
+ metricsIndexAccumulator = new MetricsIndexUpdateAccumulator(metricsTable);
}
@Override
@@ -451,27 +599,28 @@ public class DataMigrator {
}
private long getRowCount(String countQuery) {
- StatelessSession session = ((org.hibernate.Session) entityManager.getDelegate())
- .getSessionFactory().openStatelessSession();
-
- prepareSQLSession(session);
+ StatelessSession session = getSQLSession();
org.hibernate.Query query = session.createSQLQuery(countQuery);
query.setReadOnly(true);
query.setTimeout(SQL_TIMEOUT);
+ long count = Long.parseLong(query.uniqueResult().toString());
+ closeSQLSession(session);
- return Long.parseLong(query.uniqueResult().toString());
+ return count;
}
private void deleteTableData() throws Exception {
int failureCount = 0;
while (failureCount < MAX_NUMBER_OF_FAILURES) {
try {
- entityManager.getTransaction().begin();
- Query nativeQuery = entityManager.createNativeQuery(this.deleteQuery);
+ StatelessSession session = getSQLSession();
+ session.getTransaction().begin();
+ org.hibernate.Query nativeQuery = session.createSQLQuery(this.deleteQuery);
nativeQuery.executeUpdate();
- entityManager.getTransaction().commit();
+ session.getTransaction().commit();
+ closeSQLSession(session);
log.info("- " + metricsTable.toString() + " - Cleaned -");
} catch (Exception e) {
log.error("Failed to delete " + metricsTable.toString()
@@ -495,7 +644,7 @@ public class DataMigrator {
int failureCount;
int lastMigratedRecord = 0;
- ExistingDataSource dataSource = getExistingDataSource(entityManager, selectQuery, task);
+ ExistingDataSource dataSource = getExistingDataSource(selectQuery, task);
dataSource.initialize();
telemetry.getMigrationTimer().start();
@@ -532,6 +681,9 @@ public class DataMigrator {
break;
}
}
+
+ metricsIndexAccumulator.drain();
+
telemetry.getMigrationTimer().stop();
dataSource.close();
@@ -560,28 +712,33 @@ public class DataMigrator {
itemTTLSeconds = (expectedTTLMillis - currentTimeMillis + creationTimeMillis) / 1000l;
if(itemTTLSeconds > 0 ){
+ int scheduleId = Integer.parseInt(rawMeasurement[MigrationQuery.SCHEDULE_INDEX].toString());
+ Date time = new Date(creationTimeMillis);
+
batch.add(QueryBuilder.insertInto(metricsTable.toString())
- .value("schedule_id",Integer.parseInt(rawMeasurement[MigrationQuery.SCHEDULE_INDEX].toString()))
- .value("time", new Date(creationTimeMillis))
+ .value("schedule_id", scheduleId)
+ .value("time", time)
.value("type", AggregateType.AVG.ordinal())
.value("value", Double.parseDouble(rawMeasurement[MigrationQuery.VALUE_INDEX].toString()))
.using(ttl((int) itemTTLSeconds)));
batch.add(QueryBuilder.insertInto(metricsTable.toString())
- .value("schedule_id", Integer.parseInt(rawMeasurement[MigrationQuery.SCHEDULE_INDEX].toString()))
- .value("time", new Date(creationTimeMillis))
+ .value("schedule_id", scheduleId)
+ .value("time", time)
.value("type", AggregateType.MIN.ordinal())
.value("value", Double.parseDouble(rawMeasurement[MigrationQuery.MIN_VALUE_INDEX].toString()))
.using(ttl((int) itemTTLSeconds)));
batch.add(QueryBuilder.insertInto(metricsTable.toString())
- .value("schedule_id", Integer.parseInt(rawMeasurement[MigrationQuery.SCHEDULE_INDEX].toString()))
- .value("time", new Date(creationTimeMillis))
+ .value("schedule_id", scheduleId)
+ .value("time", time)
.value("type", AggregateType.MAX.ordinal())
.value("value", Double.parseDouble(rawMeasurement[MigrationQuery.MAX_VALUE_INDEX].toString()))
.using(ttl((int) itemTTLSeconds)));
batchSize += 3;
+
+ metricsIndexAccumulator.add(scheduleId, creationTimeMillis);
}
if (batchSize >= MAX_AGGREGATE_BATCH_TO_CASSANDRA) {
@@ -604,7 +761,12 @@ public class DataMigrator {
private class RawDataMigrator implements CallableMigrationWorker {
- Queue<String> tablesNotProcessed = new LinkedList<String>(Arrays.asList(getRawDataTables()));
+ private final Queue<String> tablesNotProcessed = new LinkedList<String>(Arrays.asList(getRawDataTables()));
+ private final MetricsIndexUpdateAccumulator metricsIndexAccumulator;
+
+ public RawDataMigrator() {
+ this.metricsIndexAccumulator = new MetricsIndexUpdateAccumulator(MetricsTable.RAW);
+ }
public long estimate() throws Exception {
long recordCount = 0;
@@ -631,16 +793,17 @@ public class DataMigrator {
}
private long getRowCount(String countQuery) {
- StatelessSession session = ((org.hibernate.Session) entityManager.getDelegate()).getSessionFactory()
- .openStatelessSession();
-
- prepareSQLSession(session);
+ StatelessSession session = getSQLSession();
org.hibernate.Query query = session.createSQLQuery(countQuery);
query.setReadOnly(true);
query.setTimeout(SQL_TIMEOUT);
- return Long.parseLong(query.uniqueResult().toString());
+ long count = Long.parseLong(query.uniqueResult().toString());
+
+ closeSQLSession(session);
+
+ return count;
}
private Telemetry performMigration(Task task) throws Exception {
@@ -660,7 +823,7 @@ public class DataMigrator {
String selectQuery = String.format(MigrationQuery.SELECT_RAW_DATA.toString(), table);
- ExistingDataSource dataSource = getExistingDataSource(entityManager, selectQuery, task);
+ ExistingDataSource dataSource = getExistingDataSource(selectQuery, task);
dataSource.initialize();
log.info("Start migrating raw table: " + table);
@@ -717,6 +880,10 @@ public class DataMigrator {
tablesNotProcessed.poll();
}
+ telemetry.getMigrationTimer().resume();
+ metricsIndexAccumulator.drain();
+ telemetry.getMigrationTimer().suspend();
+
telemetry.getGeneralTimer().stop();
return telemetry;
}
@@ -726,10 +893,12 @@ public class DataMigrator {
int failureCount = 0;
while (failureCount < MAX_NUMBER_OF_FAILURES) {
try {
- entityManager.getTransaction().begin();
- Query nativeQuery = entityManager.createNativeQuery(deleteQuery);
+ StatelessSession session = getSQLSession();
+ session.getTransaction().begin();
+ org.hibernate.Query nativeQuery = session.createSQLQuery(deleteQuery);
nativeQuery.executeUpdate();
- entityManager.getTransaction().commit();
+ session.getTransaction().commit();
+ closeSQLSession(session);
log.info("- " + table + " - Cleaned -");
} catch (Exception e) {
log.error("Failed to delete " + table + " data. Attempting to delete data one more time...");
@@ -761,12 +930,17 @@ public class DataMigrator {
itemTTLSeconds = (expectedTTLMillis - currentTimeMillis + creationTimeMillis) / 1000l;
if (itemTTLSeconds > 0) {
+ int scheduleId = Integer.parseInt(rawDataPoint[MigrationQuery.SCHEDULE_INDEX].toString());
+ Date creationTime = new Date(creationTimeMillis);
+
batch.add(QueryBuilder.insertInto(MetricsTable.RAW.toString())
- .value("schedule_id", Integer.parseInt(rawDataPoint[MigrationQuery.SCHEDULE_INDEX].toString()))
- .value("time", new Date(creationTimeMillis))
+ .value("schedule_id", scheduleId)
+ .value("time", creationTime)
.value("value", Double.parseDouble(rawDataPoint[MigrationQuery.VALUE_INDEX].toString()))
.using(ttl((int) itemTTLSeconds)));
batchSize++;
+
+ metricsIndexAccumulator.add(scheduleId, creationTimeMillis);
}
if (batchSize >= MAX_RAW_BATCH_TO_CASSANDRA) {
@@ -790,42 +964,46 @@ public class DataMigrator {
private class DeleteAllData implements CallableMigrationWorker {
public void migrate() {
- Query nativeQuery;
+ org.hibernate.Query nativeQuery;
+
+ StatelessSession session = getSQLSession();
if (run1HAggregateDataMigration) {
- entityManager.getTransaction().begin();
- nativeQuery = entityManager.createNativeQuery(MigrationQuery.DELETE_1H_DATA.toString());
+ session.getTransaction().begin();
+ nativeQuery = session.createSQLQuery(MigrationQuery.DELETE_1H_DATA.toString());
nativeQuery.executeUpdate();
- entityManager.getTransaction().commit();
+ session.getTransaction().commit();
log.info("- RHQ_MEASUREMENT_DATA_NUM_1H - Cleaned -");
}
if (run6HAggregateDataMigration) {
- entityManager.getTransaction().begin();
- nativeQuery = entityManager.createNativeQuery(MigrationQuery.DELETE_6H_DATA.toString());
+ session.getTransaction().begin();
+ nativeQuery = session.createSQLQuery(MigrationQuery.DELETE_6H_DATA.toString());
nativeQuery.executeUpdate();
- entityManager.getTransaction().commit();
+ session.getTransaction().commit();
log.info("- RHQ_MEASUREMENT_DATA_NUM_6H - Cleaned -");
}
if (run1DAggregateDataMigration) {
- entityManager.getTransaction().begin();
- nativeQuery = entityManager.createNativeQuery(MigrationQuery.DELETE_1D_DATA.toString());
+ session.getTransaction().begin();
+ nativeQuery = session.createSQLQuery(MigrationQuery.DELETE_1D_DATA.toString());
nativeQuery.executeUpdate();
- entityManager.getTransaction().commit();
+ session.getTransaction().commit();
log.info("- RHQ_MEASUREMENT_DATA_NUM_1D - Cleaned -");
}
if (runRawDataMigration) {
for (String table : getRawDataTables()) {
- entityManager.getTransaction().begin();
+ session.getTransaction().begin();
String deleteAllData = String.format(MigrationQuery.DELETE_RAW_ALL_DATA.toString(), table);
- nativeQuery = entityManager.createNativeQuery(deleteAllData);
+ nativeQuery = session.createSQLQuery(deleteAllData);
nativeQuery.executeUpdate();
- entityManager.getTransaction().commit();
+ session.getTransaction().commit();
log.info("- " + table + " - Cleaned -");
}
}
+
+ closeSQLSession(session);
}
@Override
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ScrollableDataSource.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ScrollableDataSource.java
index 09215b8..42161a4 100644
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ScrollableDataSource.java
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ScrollableDataSource.java
@@ -32,6 +32,8 @@ import org.hibernate.ScrollableResults;
import org.hibernate.Session;
import org.hibernate.StatelessSession;
+import org.rhq.server.metrics.migrator.DataMigrator.DatabaseType;
+
/**
* @author Stefan Negrea
*/
@@ -40,6 +42,7 @@ public class ScrollableDataSource implements ExistingDataSource {
private static final Log log = LogFactory.getLog(ScrollableDataSource.class);
private final EntityManager entityManager;
+ private final DatabaseType databaseType;
private final String selectNativeQuery;
private final int maxResults;
@@ -48,12 +51,15 @@ public class ScrollableDataSource implements ExistingDataSource {
private int lastMigratedItemIndex;
- public ScrollableDataSource(EntityManager entityManager, String selectNativeQuery) {
- this(entityManager, selectNativeQuery, -1);
+ public ScrollableDataSource(EntityManager entityManager, DatabaseType databaseType,
+ String selectNativeQuery) {
+ this(entityManager, databaseType, selectNativeQuery, -1);
}
- public ScrollableDataSource(EntityManager entityManager, String selectNativeQuery, int maxResults) {
+ public ScrollableDataSource(EntityManager entityManager, DatabaseType databaseType, String selectNativeQuery,
+ int maxResults) {
this.entityManager = entityManager;
+ this.databaseType = databaseType;
this.selectNativeQuery = selectNativeQuery;
this.maxResults = maxResults;
}
@@ -95,12 +101,18 @@ public class ScrollableDataSource implements ExistingDataSource {
session = ((Session) entityManager.getDelegate()).getSessionFactory().openStatelessSession();
+ this.prepareSQLSession();
+
if (log.isDebugEnabled()) {
- log.debug("Preparing the query with " + maxResults + " results.");
+ if (maxResults >= 0) {
+ log.debug("Preparing the query with " + maxResults + " results.");
+ } else {
+ log.debug("Preparing the query with all the results.");
+ }
}
Query query = session.createSQLQuery(selectNativeQuery);
- if (maxResults > 0) {
+ if (maxResults >= 0) {
query.setMaxResults(maxResults);
}
query.setFetchSize(30000);
@@ -111,7 +123,11 @@ public class ScrollableDataSource implements ExistingDataSource {
lastMigratedItemIndex = -1;
if (log.isDebugEnabled()) {
- log.debug("Query prepared with " + maxResults + " results.");
+ if (maxResults >= 0) {
+ log.debug("Query prepared with " + maxResults + " results.");
+ } else {
+ log.debug("Query prepared with all the results.");
+ }
}
}
@@ -127,4 +143,14 @@ public class ScrollableDataSource implements ExistingDataSource {
session = null;
}
}
+
+ private void prepareSQLSession() {
+ if (DatabaseType.Postgres.equals(this.databaseType)) {
+ log.debug("Preparing SQL connection with timeout: " + DataMigrator.SQL_TIMEOUT);
+
+ Query query = session.createSQLQuery("SET LOCAL statement_timeout = " + DataMigrator.SQL_TIMEOUT);
+ query.setReadOnly(true);
+ query.executeUpdate();
+ }
+ }
}
commit 1266a549591f7143cdc6063bbe26393f36c5dfe0
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Tue May 21 12:10:57 2013 -0500
One more attempt to enforce session timeouts for Postgres due to driver ignoring hibernate imposed settings.
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/DataMigrator.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/DataMigrator.java
index 148f82f..e4aa391 100644
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/DataMigrator.java
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/DataMigrator.java
@@ -314,7 +314,7 @@ public class DataMigrator {
private ExistingDataSource getExistingDataSource(EntityManager entityManager, String query, Task task) {
if (Task.Migrate.equals(task)) {
- if (this.databaseType == DatabaseType.Oracle) {
+ if (DatabaseType.Oracle.equals(this.databaseType)) {
return new ScrollableDataSource(entityManager, query);
} else {
if (!experimentalDataSource) {
@@ -326,7 +326,7 @@ public class DataMigrator {
} else if (Task.Estimate.equals(task)) {
int limit = MAX_RECORDS_TO_LOAD_FROM_SQL * (NUMBER_OF_BATCHES_FOR_ESTIMATION + 1);
- if (this.databaseType == DatabaseType.Oracle) {
+ if (DatabaseType.Oracle.equals(this.databaseType)) {
return new ScrollableDataSource(entityManager, query, limit);
} else {
if (!experimentalDataSource) {
@@ -340,6 +340,16 @@ public class DataMigrator {
return new ScrollableDataSource(entityManager, query);
}
+ private void prepareSQLSession(StatelessSession session) {
+ if (DatabaseType.Postgres.equals(this.databaseType)) {
+ log.debug("Preparing SQL connection with timeout: " + SQL_TIMEOUT);
+
+ org.hibernate.Query query = session.createSQLQuery("SET LOCAL statement_timeout = " + SQL_TIMEOUT);
+ query.setReadOnly(true);
+ query.executeUpdate();
+ }
+ }
+
private enum Task {
Migrate, Estimate
}
@@ -420,6 +430,7 @@ public class DataMigrator {
@Override
public long estimate() throws Exception {
long recordCount = this.getRowCount(this.countQuery);
+ log.debug("Retrieved record count for table " + metricsTable.toString() + " -- " + recordCount);
Telemetry telemetry = this.performMigration(Task.Estimate);
long estimatedTimeToMigrate = telemetry.getMigrationTime();
@@ -443,10 +454,13 @@ public class DataMigrator {
StatelessSession session = ((org.hibernate.Session) entityManager.getDelegate())
.getSessionFactory().openStatelessSession();
+ prepareSQLSession(session);
+
org.hibernate.Query query = session.createSQLQuery(countQuery);
query.setReadOnly(true);
query.setTimeout(SQL_TIMEOUT);
+
return Long.parseLong(query.uniqueResult().toString());
}
@@ -596,7 +610,11 @@ public class DataMigrator {
long recordCount = 0;
for (String table : getRawDataTables()) {
String countQuery = String.format(MigrationQuery.COUNT_RAW.toString(), table);
- recordCount += this.getRowCount(countQuery);
+ long tableRecordCount = this.getRowCount(countQuery);
+
+ log.debug("Retrieved record count for table " + table + " -- " + tableRecordCount);
+
+ recordCount += tableRecordCount;
}
Telemetry telemetry = this.performMigration(Task.Estimate);
@@ -616,6 +634,8 @@ public class DataMigrator {
StatelessSession session = ((org.hibernate.Session) entityManager.getDelegate()).getSessionFactory()
.openStatelessSession();
+ prepareSQLSession(session);
+
org.hibernate.Query query = session.createSQLQuery(countQuery);
query.setReadOnly(true);
query.setTimeout(SQL_TIMEOUT);
commit ca03d63081757aca343fc0b99b31f92505fa9dbb
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Tue May 21 11:26:06 2013 -0500
Add debug logging to server metrics classes. Also, a little cleanup for the metrics server class.
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsBaselineCalculator.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsBaselineCalculator.java
index 235e157..3aa5441 100644
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsBaselineCalculator.java
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsBaselineCalculator.java
@@ -27,6 +27,9 @@ package org.rhq.server.metrics;
import java.util.ArrayList;
import java.util.List;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
import org.rhq.core.domain.measurement.MeasurementBaseline;
import org.rhq.core.domain.measurement.MeasurementSchedule;
import org.rhq.server.metrics.domain.AggregateSimpleNumericMetric;
@@ -37,11 +40,9 @@ import org.rhq.server.metrics.domain.AggregateType;
*/
public class MetricsBaselineCalculator {
- private MetricsDAO metricsDAO;
+ private final Log log = LogFactory.getLog(MetricsServer.class);
-// public MetricsBaselineCalculator(Session session) {
-// this.metricsDAO = new MetricsDAO(session);
-// }
+ private MetricsDAO metricsDAO;
public MetricsBaselineCalculator(MetricsDAO metricsDAO) {
this.metricsDAO = metricsDAO;
@@ -105,6 +106,10 @@ public class MetricsBaselineCalculator {
baseline.setMean(mean.getArithmeticMean());
baseline.setSchedule(schedule);
+ if (log.isDebugEnabled()) {
+ log.debug("Calculated baseline: " + baseline.toString());
+ }
+
return baseline;
}
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java
index 882ab15..0e07d4d 100644
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java
@@ -251,11 +251,11 @@ public class MetricsServer {
updates.put(rawMetric.getScheduleId(), dateTimeService.getTimeSlice(
new DateTime(rawMetric.getTimestamp()), configuration.getRawTimeSliceDuration()).getMillis());
}
- Set<Date> dates = new HashSet<Date>();
- for (Long ts : updates.values()) {
- dates.add(new Date(ts));
+
+ if (log.isDebugEnabled()) {
+ log.debug("Updating one hour index with time slices " + StringUtil.collectionToString(updates.values()));
}
- log.info("Updating one hour index with time slices " + dates);
+
dao.updateMetricsIndex(MetricsTable.ONE_HOUR, updates);
}
diff --git a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java
index d895753..d34e945 100644
--- a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java
+++ b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java
@@ -101,7 +101,6 @@ public class MetricsServerTest extends CassandraIntegrationTest {
@BeforeMethod
public void initServer() throws Exception {
metricsServer = new MetricsServerStub();
- metricsServer.setSession(session);
metricsServer.setConfiguration(configuration);
dateTimeService = new DateTimeService();
commit e853df11b0759a8318b3c5d9a2b89543b94db8c1
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Tue May 21 11:24:14 2013 -0500
Update storage installer for Linux to start Cassandra with a relative classpath. Before this change the Cassandra daemon was started with all the jar classpaths as absolute.
diff --git a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java
index 45c85cb..b205aae 100644
--- a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java
+++ b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java
@@ -442,7 +442,7 @@ public class StorageInstaller {
File basedir = new File(deploymentOptions.getBasedir());
File binDir = new File(basedir, "bin");
- cmdLine = new org.apache.commons.exec.CommandLine(new File(binDir, "cassandra"));
+ cmdLine = new org.apache.commons.exec.CommandLine("./cassandra");
cmdLine.addArgument("-p");
cmdLine.addArgument(new File(binDir, "cassandra.pid").getAbsolutePath());
errOutput = exec(binDir, cmdLine);
11 years
[rhq] Branch 'feature/cassandra-backend' - 3 commits - modules/enterprise modules/plugins
by Jay Shaughnessy
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java | 2
modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java | 9 ++
modules/plugins/rhq-storage/pom.xml | 37 ----------
3 files changed, 10 insertions(+), 38 deletions(-)
New commits:
commit fc89e3816cba7467ab4b808d28c47691b5d9a108
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Wed May 22 17:10:12 2013 -0400
CassandraNodes (and StorageNodes) can't defer to JmxDiscoveryComponent.upgrade()
because the resource keys are not JvmResourceKeys. Override upgrade() and
just return null, there is no resource upgrade, these are new resources.
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java
index 596d10e..82d8be1 100644
--- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java
+++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java
@@ -37,9 +37,11 @@ import org.yaml.snakeyaml.Yaml;
import org.rhq.core.domain.configuration.Configuration;
import org.rhq.core.domain.configuration.PropertySimple;
+import org.rhq.core.domain.resource.ResourceUpgradeReport;
import org.rhq.core.pluginapi.inventory.DiscoveredResourceDetails;
import org.rhq.core.pluginapi.inventory.ProcessScanResult;
import org.rhq.core.pluginapi.inventory.ResourceDiscoveryContext;
+import org.rhq.core.pluginapi.upgrade.ResourceUpgradeContext;
import org.rhq.core.system.ProcessInfo;
import org.rhq.plugins.jmx.JMXDiscoveryComponent;
@@ -210,4 +212,11 @@ public class CassandraNodeDiscoveryComponent extends JMXDiscoveryComponent {
pluginConfig, processInfo);
}
+ @Override
+ public ResourceUpgradeReport upgrade(ResourceUpgradeContext inventoriedResource) {
+
+ // don't use super's impl because the resource key is not a JvmResourceKey
+ return null;
+ }
+
}
commit 69580863665836ade6d4c8518108ad84b3e5bf8f
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Wed May 22 17:08:36 2013 -0400
remove placing the dep plugins in the jar lib. The useClasses feature will
get the classes from the previously loaded plugins as needed.
diff --git a/modules/plugins/rhq-storage/pom.xml b/modules/plugins/rhq-storage/pom.xml
index bd254f6..4a88e0f 100644
--- a/modules/plugins/rhq-storage/pom.xml
+++ b/modules/plugins/rhq-storage/pom.xml
@@ -37,43 +37,6 @@
</dependency>
</dependencies>
- <build>
- <plugins>
- <plugin>
- <artifactId>maven-dependency-plugin</artifactId>
- <executions>
- <execution>
- <id>copy-deps</id>
- <phase>process-resources</phase>
- <goals>
- <goal>copy</goal>
- </goals>
- <configuration>
- <artifactItems>
- <artifactItem>
- <groupId>${rhq.groupId}</groupId>
- <artifactId>rhq-cassandra-plugin</artifactId>
- <version>${project.version}</version>
- </artifactItem>
- <artifactItem>
- <groupId>${rhq.groupId}</groupId>
- <artifactId>rhq-jmx-plugin</artifactId>
- <version>${project.version}</version>
- </artifactItem>
- <artifactItem>
- <groupId>mc4j</groupId>
- <artifactId>org-mc4j-ems</artifactId>
- <version>${ems.version}</version>
- </artifactItem>
- </artifactItems>
- <outputDirectory>${project.build.outputDirectory}/lib</outputDirectory>
- </configuration>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </build>
-
<profiles>
<profile>
<id>integration-tests</id>
commit 85826fcd4b15b37bc7199c7c865affa03ca68330
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Wed May 22 16:26:07 2013 -0400
trivial - fix typo in log message
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java
index fd285ee..882ab15 100644
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsServer.java
@@ -255,7 +255,7 @@ public class MetricsServer {
for (Long ts : updates.values()) {
dates.add(new Date(ts));
}
- log.info("Updating one hour index wtih time slices " + dates);
+ log.info("Updating one hour index with time slices " + dates);
dao.updateMetricsIndex(MetricsTable.ONE_HOUR, updates);
}
11 years
[rhq] Branch 'feature/cassandra-backend' - modules/common
by Jay Shaughnessy
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java | 39 ++++++++--
1 file changed, 35 insertions(+), 4 deletions(-)
New commits:
commit 65734cb0b6e03a3dc3da7e6594dee0b23f531c57
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Wed May 22 16:20:17 2013 -0400
- Fix issue with storage node setup
- Go back to using pid files to kill cassandra processes on linux
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java
index 222406d..7496d08 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java
@@ -29,8 +29,11 @@ import static org.rhq.core.util.StringUtil.collectionToString;
import java.io.ByteArrayInputStream;
import java.io.File;
+import java.io.FileReader;
import java.io.IOException;
+import java.io.StringWriter;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -48,6 +51,7 @@ import org.rhq.core.system.ProcessExecutionResults;
import org.rhq.core.system.SystemInfo;
import org.rhq.core.system.SystemInfoFactory;
import org.rhq.core.util.file.FileUtil;
+import org.rhq.core.util.stream.StreamUtil;
/**
* @author John Sanda
@@ -159,14 +163,14 @@ public class CassandraClusterManager {
}
private String getLocalIPAddress(int i) {
- Set<String> addresses = new HashSet<String>();
String seeds = deploymentOptions.getSeeds();
- if (null == seeds || seeds.isEmpty()) {
+ if (null == seeds || seeds.isEmpty() || "localhost".equals(seeds)) {
return "127.0.0." + i;
}
- return seeds.split(",")[i - 1];
+ String[] seedsArray = seeds.split(",");
+ return i <= seedsArray.length ? seedsArray[i - 1] : ("127.0.0." + i);
}
private List<StorageNode> calculateNodes() {
@@ -223,14 +227,17 @@ public class CassandraClusterManager {
File binDir = new File(basedir, "bin");
File startScript;
SystemInfo systemInfo = SystemInfoFactory.createSystemInfo();
+ ProcessExecution startScriptExe;
if (systemInfo.getOperatingSystemType() == OperatingSystemType.WINDOWS) {
startScript = new File(binDir, "cassandra.bat");
+ startScriptExe = ProcessExecutionUtility.createProcessExecution(startScript);
} else {
startScript = new File(binDir, "cassandra");
+ startScriptExe = ProcessExecutionUtility.createProcessExecution(startScript);
+ startScriptExe.addArguments(Arrays.asList("-p", "cassandra.pid"));
}
- ProcessExecution startScriptExe = ProcessExecutionUtility.createProcessExecution(startScript);
startScriptExe.setWaitForCompletion(0);
ProcessExecutionResults results = systemInfo.executeProcess(startScriptExe);
@@ -262,6 +269,18 @@ public class CassandraClusterManager {
continue;
}
+ try {
+ killNode(nodeDir);
+ } catch (Throwable t) {
+ log.warn("Unable to kill nodeDir [" + nodeDir + "]", t);
+ }
+
+ // This nodeProcess stuff is unlikely to be useful. I added it for Windows
+ // support but we don't actually use this code anymore for Windows, we
+ // use an external storage node. I'll leave it on the very off chance that
+ // it kills some hanging around process. On Linux it will not kill
+ // the cassandra process (see killNode above) because this is the launching
+ // process, not the cassandra process itself.
Process nodeProcess = nodeProcessMap.get(nodeId);
if (null != nodeProcess) {
try {
@@ -284,4 +303,16 @@ public class CassandraClusterManager {
return nodeIds;
}
+ private void killNode(File nodeDir) throws Exception {
+ long pid = getPid(nodeDir);
+ CLibrary.kill((int) pid, 9);
+ }
+
+ private long getPid(File nodeDir) throws IOException {
+ File binDir = new File(nodeDir, "bin");
+ StringWriter writer = new StringWriter();
+ StreamUtil.copy(new FileReader(new File(binDir, "cassandra.pid")), writer);
+
+ return Long.parseLong(writer.getBuffer().toString());
+ }
}
11 years
[rhq] etc/rhq-ircBot
by Jiri Kremser
etc/rhq-ircBot/pom.xml | 13
etc/rhq-ircBot/src/main/java/org/rhq/etc/ircbot/RhqIrcBot.java | 270 ++-----
etc/rhq-ircBot/src/main/java/org/rhq/etc/ircbot/RhqIrcBotListener.java | 357 ++++++++++
etc/rhq-ircBot/src/main/resources/org/rhq/etc/ircbot/cacerts.jks |binary
etc/rhq-ircBot/src/main/resources/org/rhq/etc/ircbot/rhq-ircbot.properties | 2
5 files changed, 474 insertions(+), 168 deletions(-)
New commits:
commit e725ed0658bae7bd5f33dbbc4a2b30eb02a6057f
Author: Jirka Kremser <jkremser(a)redhat.com>
Date: Wed May 22 19:36:09 2013 +0200
Improvements to RHQ irc bot. New set of commands is now supported.
diff --git a/etc/rhq-ircBot/pom.xml b/etc/rhq-ircBot/pom.xml
index 40b6952..6e66187 100644
--- a/etc/rhq-ircBot/pom.xml
+++ b/etc/rhq-ircBot/pom.xml
@@ -41,7 +41,7 @@
<dependency>
<groupId>org.pircbotx</groupId>
<artifactId>pircbotx</artifactId>
- <version>1.7</version>
+ <version>1.9</version>
</dependency>
<dependency>
@@ -50,6 +50,17 @@
<version>2.0</version>
</dependency>
+ <dependency>
+ <groupId>org.jsoup</groupId>
+ <artifactId>jsoup</artifactId>
+ <version>1.7.2</version>
+ </dependency>
+
+ <dependency>
+ <groupId>commons-codec</groupId>
+ <artifactId>commons-codec</artifactId>
+ <version>1.4</version>
+ </dependency>
</dependencies>
<build>
diff --git a/etc/rhq-ircBot/src/main/java/org/rhq/etc/ircbot/RhqIrcBot.java b/etc/rhq-ircBot/src/main/java/org/rhq/etc/ircbot/RhqIrcBot.java
index ce7535f..7e8f563 100644
--- a/etc/rhq-ircBot/src/main/java/org/rhq/etc/ircbot/RhqIrcBot.java
+++ b/etc/rhq-ircBot/src/main/java/org/rhq/etc/ircbot/RhqIrcBot.java
@@ -1,163 +1,63 @@
+/*
+ * RHQ Management Platform
+ * Copyright (C) 2005-2013 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
package org.rhq.etc.ircbot;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import com.j2bugzilla.base.Bug;
-import com.j2bugzilla.base.BugzillaConnector;
-import com.j2bugzilla.base.BugzillaException;
-import com.j2bugzilla.rpc.GetBug;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.security.KeyManagementException;
+import java.security.KeyStore;
+import java.security.KeyStoreException;
+import java.security.NoSuchAlgorithmException;
+import java.security.cert.CertificateException;
+import java.util.Properties;
+
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.TrustManager;
+import javax.net.ssl.TrustManagerFactory;
-import org.apache.xmlrpc.XmlRpcException;
import org.pircbotx.PircBotX;
-import org.pircbotx.User;
-import org.pircbotx.hooks.Listener;
-import org.pircbotx.hooks.ListenerAdapter;
-import org.pircbotx.hooks.events.DisconnectEvent;
-import org.pircbotx.hooks.events.MessageEvent;
-import org.pircbotx.hooks.events.PrivateMessageEvent;
/**
- * An IRC bot for doing helpful stuff on the Freenode #rhq channel.
+ * @author Jirka Kremser
*
- * @author Ian Springer
*/
-public class RhqIrcBot extends ListenerAdapter {
-
- private static final Pattern BUG_PATTERN = Pattern.compile("(?i)(bz|bug)[ ]*(\\d{6,7})");
- private static final Pattern ECHO_PATTERN = Pattern.compile("(?i)echo[ ]+(.+)");
-
- private static final Set<String> JON_DEVS = new HashSet<String>();
- static {
- JON_DEVS.add("ccrouch");
- JON_DEVS.add("ips");
- JON_DEVS.add("jkremser");
- JON_DEVS.add("jsanda");
- JON_DEVS.add("jshaughn");
- JON_DEVS.add("lkrejci");
- JON_DEVS.add("mazz");
- JON_DEVS.add("mtho11");
- JON_DEVS.add("pilhuhn");
- JON_DEVS.add("spinder");
- JON_DEVS.add("stefan_n");
- }
-
- private String server;
- private String channel;
- private BugzillaConnector bzConnector = new BugzillaConnector();
- private Map<Integer, Long> bugLogTimestamps = new HashMap<Integer, Long>();
-
- public RhqIrcBot(String server, String channel) {
- this.server = server;
- this.channel = channel;
- }
-
- @Override
- public void onMessage(MessageEvent event) throws Exception {
- PircBotX bot = event.getBot();
- if (!bot.getNick().equals(bot.getName())) {
- bot.changeNick(bot.getName());
- }
-
- String message = event.getMessage();
- Matcher bugMatcher = BUG_PATTERN.matcher(message);
- while (bugMatcher.find()) {
- int bugId = Integer.valueOf(bugMatcher.group(2));
- GetBug getBug = new GetBug(bugId);
- try {
- bzConnector.executeMethod(getBug);
- } catch (Exception e) {
- bzConnector = new BugzillaConnector();
- bzConnector.connectTo("https://bugzilla.redhat.com");
- try {
- bzConnector.executeMethod(getBug);
- } catch (BugzillaException e1) {
- //e1.printStackTrace();
- Throwable cause = e1.getCause();
- String details = (cause instanceof XmlRpcException) ? cause.getMessage() : e1.getMessage();
- bot.sendMessage(event.getChannel(), "Failed to access BZ " + bugId + ": " + details);
- continue;
- }
- }
- Bug bug = getBug.getBug();
- if (bug != null) {
- String product = bug.getProduct();
- if (product.equals("RHQ Project")) {
- product = "RHQ";
- } else if (product.equals("JBoss Operations Network")) {
- product = "JON";
- }
- Long timestamp = bugLogTimestamps.get(bugId);
- if ((timestamp == null) || ((System.currentTimeMillis() - timestamp) > (5 * 60 * 1000L))) {
- bot.sendMessage(event.getChannel(), "BZ " + bugId + " [product=" + product
- + ", priority=" + bug.getPriority() + ", status=" + bug.getStatus() + "] "
- + bug.getSummary() + " [ https://bugzilla.redhat.com/" + bugId + " ]");
- }
- bugLogTimestamps.put(bugId, System.currentTimeMillis());
- } else {
- bot.sendMessage(event.getChannel(), "BZ " + bugId + " does not exist.");
- }
- }
-
- if (message.matches(".*\\b(jon-team|jboss-on-team)\\b.*")) {
- Set<User> users = bot.getUsers(event.getChannel());
- StringBuilder presentJonDevs = new StringBuilder();
- for (User user : users) {
- String nick = user.getNick();
- if (JON_DEVS.contains(nick) && !nick.equals(event.getUser().getNick())) {
- presentJonDevs.append(nick).append(' ');
- }
- }
- bot.sendMessage(event.getChannel(), presentJonDevs + ": see message from "
- + event.getUser().getNick() + " above");
- }
+public class RhqIrcBot extends PircBotX {
+
+ private static final String TRUSTSTORE_NAME = "cacerts.jks";
+
+ public RhqIrcBot(RhqIrcBotListener rhqBot) {
+ setName("rhq-bot");
+ setVersion("1.1");
+ setFinger("RHQ IRC bot (source code in RHQ git under etc/rhq-ircBot/)");
+
+ setVerbose(true);
+ setAutoNickChange(true);
+
+ getListenerManager().addListener(rhqBot);
+ setSocketTimeout(1 * 60 * 1000); // 1 minute
}
-
- @Override
- public void onPrivateMessage(PrivateMessageEvent privateMessageEvent) throws Exception {
- PircBotX bot = privateMessageEvent.getBot();
- String message = privateMessageEvent.getMessage();
- Matcher echoMatcher = ECHO_PATTERN.matcher(message);
- if (echoMatcher.matches()) {
- String echoMessage = echoMatcher.group(1);
- bot.sendMessage(this.channel, echoMessage);
- } else {
- bot.sendMessage(privateMessageEvent.getUser(), "Hi, I am " + bot.getFinger() + ".");
- }
- // TODO: Implement a HELP command.
- }
-
- @Override
- public void onDisconnect(DisconnectEvent disconnectEvent) throws Exception {
- boolean connected = false;
- while (!connected) {
- Thread.sleep(60 * 1000L); // 1 minute
- try {
- PircBotX newBot = createBot(this);
- newBot.connect(this.server);
- newBot.joinChannel(this.channel);
-
- connected = true;
- } catch (Exception e) {
- System.err.println("Failed to reconnect to " + disconnectEvent.getBot().getServer() + " IRC server: " + e);
- }
- }
-
- // Try to clean up the old bot, so it can release any memory, sockets, etc. it's using.
- PircBotX oldBot = disconnectEvent.getBot();
- Set<Listener> oldListeners = new HashSet<Listener>(oldBot.getListenerManager().getListeners());
- for (Listener oldListener : oldListeners) {
- oldBot.getListenerManager().removeListener(oldListener);
- }
- }
-
+
public static void main(String[] args) throws Exception {
- if (args.length != 2) {
- System.err.println("Usage: RhqIrcBot IRC_SERVER IRC_CHANNEL");
+ if (args.length != 2 && args.length != 3) {
+ System.err.println("Usage: RhqIrcBot IRC_SERVER IRC_CHANNEL [rhq-ircBot.properties]");
System.err.println(" e.g.: RhqIrcBot irc.freenode.net '#rhq'");
System.exit(1);
}
@@ -167,26 +67,62 @@ public class RhqIrcBot extends ListenerAdapter {
channel = '#' + channel;
}
- RhqIrcBot rhqBot = new RhqIrcBot(server, channel);
+ RhqIrcBotListener rhqBotListener = new RhqIrcBotListener(server, channel);
+ if (args.length == 3) {
+ File propertyFile = new File(args[2]);
+ if (!propertyFile.exists()) {
+ System.err.println("Provided property file [" + args[2] + "] does not exist");
+ System.exit(2);
+ }
+ Properties properties = new Properties();
+ FileInputStream fis = new FileInputStream(propertyFile);
+ properties.load(fis);
+ String docspaceLogin = properties.getProperty("docspace_login");
+ String docspacePassword = properties.getProperty("docspace_password");
+ if (docspaceLogin == null || docspaceLogin.isEmpty() || docspacePassword == null || docspacePassword.isEmpty()) {
+ System.err.println("The property format has bad format");
+ System.err.println("It must contain following key-value pairs\n");
+ System.err.println("docspace_login=X");
+ System.err.println("docspace_password=Y");
+ System.exit(3);
+ }
+ fis.close();
+
+ setupTrustStore();
+
+ rhqBotListener.setDocspaceLogin(docspaceLogin);
+ rhqBotListener.setDocspacePassword(docspacePassword);
+ }
- PircBotX bot = createBot(rhqBot);
+ PircBotX bot = new RhqIrcBot(rhqBotListener);
bot.connect(server);
bot.joinChannel(channel);
}
-
- private static PircBotX createBot(RhqIrcBot rhqBot) {
- PircBotX bot = new PircBotX();
-
- bot.setName("rhq-bot");
- bot.setVersion("1.0");
- bot.setFinger("RHQ IRC bot (source code in RHQ git under etc/rhq-ircBot/)");
-
- bot.setVerbose(true);
- bot.setAutoNickChange(true);
-
- bot.getListenerManager().addListener(rhqBot);
- bot.setSocketTimeout(1 * 60 * 1000); // 1 minute
- return bot;
+
+ private static void setupTrustStore() {
+ TrustManagerFactory trustManagerFactory;
+ try {
+ trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
+ KeyStore keystore;
+ keystore = KeyStore.getInstance(KeyStore.getDefaultType());
+ InputStream keystoreStream = RhqIrcBot.class.getResourceAsStream(TRUSTSTORE_NAME);
+ keystore.load(keystoreStream, "rhqirc".toCharArray());
+ trustManagerFactory.init(keystore);
+ TrustManager[] trustManagers = trustManagerFactory.getTrustManagers();
+ SSLContext ctx = SSLContext.getInstance("SSL");
+ ctx.init(null, trustManagers, null);
+
+ SSLContext.setDefault(ctx);
+ } catch (NoSuchAlgorithmException e) {
+ e.printStackTrace();
+ } catch (KeyStoreException e) {
+ e.printStackTrace();
+ } catch (CertificateException e) {
+ e.printStackTrace();
+ } catch (IOException e) {
+ e.printStackTrace();
+ } catch (KeyManagementException e) {
+ e.printStackTrace();
+ }
}
-
}
diff --git a/etc/rhq-ircBot/src/main/java/org/rhq/etc/ircbot/RhqIrcBotListener.java b/etc/rhq-ircBot/src/main/java/org/rhq/etc/ircbot/RhqIrcBotListener.java
new file mode 100644
index 0000000..8a7e578
--- /dev/null
+++ b/etc/rhq-ircBot/src/main/java/org/rhq/etc/ircbot/RhqIrcBotListener.java
@@ -0,0 +1,357 @@
+package org.rhq.etc.ircbot;
+
+import java.io.IOException;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import com.j2bugzilla.base.Bug;
+import com.j2bugzilla.base.BugzillaConnector;
+import com.j2bugzilla.base.BugzillaException;
+import com.j2bugzilla.rpc.GetBug;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.xmlrpc.XmlRpcException;
+import org.jsoup.Jsoup;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+import org.pircbotx.PircBotX;
+import org.pircbotx.User;
+import org.pircbotx.hooks.Listener;
+import org.pircbotx.hooks.ListenerAdapter;
+import org.pircbotx.hooks.events.DisconnectEvent;
+import org.pircbotx.hooks.events.MessageEvent;
+import org.pircbotx.hooks.events.NickChangeEvent;
+import org.pircbotx.hooks.events.PrivateMessageEvent;
+
+/**
+ * An IRC bot for doing helpful stuff on the Freenode #rhq channel.
+ *
+ * @author Ian Springer
+ * @author Jiri Kremser
+ */
+public class RhqIrcBotListener extends ListenerAdapter<RhqIrcBot> {
+
+ private static final Pattern BUG_PATTERN = Pattern.compile("(?i)(bz|bug)[ ]*(\\d{6,7})");
+ private static final Pattern COMMIT_PATTERN = Pattern.compile("(?i)(\\!commit|cm)[ ]*([0-9a-f]{3,40})");
+ private static final Pattern ECHO_PATTERN = Pattern.compile("(?i)echo[ ]+(.+)");
+ private static final String SUPPORT_LINK = "https://docspace.corp.redhat.com/docs/DOC-124477";
+ private static final String COMMIT_LINK = "https://git.fedorahosted.org/cgit/rhq/rhq.git/commit/?id=%s";
+ private static final String PTO_LINK = "https://mail.corp.redhat.com/home/ccrouch@redhat.com/JBoss%20ON%20OOO?fmt...";
+ private static final DateFormat monthFormat = new SimpleDateFormat("MMM");
+ private static final DateFormat dayInMonthFormat = new SimpleDateFormat("d");
+
+ private static enum Command {
+
+ FORUM("Our forum is available from https://community.jboss.org/en/rhq?view=discussions", true),
+ HELP("You can use one of the following commands: ", true),
+ LISTS("Feel free to enroll to the user list https://lists.fedorahosted.org/mailman/listinfo/rhq-users"
+ + " or the devel list https://lists.fedorahosted.org/mailman/listinfo/rhq-devel", true),
+ LOGS("IRC logs are available from http://transcripts.jboss.org/channel/irc.freenode.org/%23rhq/index.html", true),
+ PTO,
+ SOURCE("The code could be viewed/cloned on https://github.com/rhq-project or https://git.fedorahosted.org/cgit/rhq/rhq.git/", true),
+ SUPPORT,
+ WIKI("Our wiki is available from https://docs.jboss.org/author/display/RHQ/Home", true);
+
+ public static final char PREFIX = '!';
+ private final String staticRespond;
+ private final boolean includeInHelp;
+
+ Command(String staticRespond, boolean includeInHelp) {
+ this.staticRespond = staticRespond;
+ this.includeInHelp = includeInHelp;
+ }
+
+ Command() {
+ this(null, false);
+ }
+ }
+
+ private static final Set<String> JON_DEVS = new HashSet<String>();
+ static {
+ JON_DEVS.add("ccrouch");
+ JON_DEVS.add("ips");
+ JON_DEVS.add("jkremser");
+ JON_DEVS.add("jsanda");
+ JON_DEVS.add("jshaughn");
+ JON_DEVS.add("lkrejci");
+ JON_DEVS.add("mazz");
+ JON_DEVS.add("mtho11");
+ JON_DEVS.add("pilhuhn");
+ JON_DEVS.add("spinder");
+ JON_DEVS.add("stefan_n");
+ JON_DEVS.add("tsegismont");
+ }
+
+ private final String server;
+ private final String channel;
+ private String docspaceLogin;
+ private String docspacePassword;
+ private BugzillaConnector bzConnector = new BugzillaConnector();
+ private final Map<Integer, Long> bugLogTimestamps = new HashMap<Integer, Long>();
+ private final Map<String, String> names = new HashMap<String, String>();
+ private final Pattern commandPattern;
+
+ public RhqIrcBotListener(String server, String channel) {
+ this.server = server;
+ this.channel = channel;
+ StringBuilder commandRegExp = new StringBuilder();
+ commandRegExp.append("(?i)\\").append(Command.PREFIX).append("[ ]*(");
+ for (Command command : Command.values()) {
+ commandRegExp.append(command.name()).append('|');
+ }
+ commandRegExp.deleteCharAt(commandRegExp.length() - 1);
+ commandRegExp.append(')');
+ commandPattern = Pattern.compile(commandRegExp.toString());
+ }
+
+ @Override
+ public void onMessage(MessageEvent<RhqIrcBot> event) throws Exception {
+ PircBotX bot = event.getBot();
+ if (!bot.getNick().equals(bot.getName())) {
+ bot.changeNick(bot.getName());
+ }
+
+ // react to BZs in the messages
+ String message = event.getMessage();
+ Matcher bugMatcher = BUG_PATTERN.matcher(message);
+ while (bugMatcher.find()) {
+ int bugId = Integer.valueOf(bugMatcher.group(2));
+ GetBug getBug = new GetBug(bugId);
+ try {
+ bzConnector.executeMethod(getBug);
+ } catch (Exception e) {
+ bzConnector = new BugzillaConnector();
+ bzConnector.connectTo("https://bugzilla.redhat.com");
+ try {
+ bzConnector.executeMethod(getBug);
+ } catch (BugzillaException e1) {
+ //e1.printStackTrace();
+ Throwable cause = e1.getCause();
+ String details = (cause instanceof XmlRpcException) ? cause.getMessage() : e1.getMessage();
+ bot.sendMessage(event.getChannel(), "Failed to access BZ " + bugId + ": " + details);
+ continue;
+ }
+ }
+ Bug bug = getBug.getBug();
+ if (bug != null) {
+ String product = bug.getProduct();
+ if (product.equals("RHQ Project")) {
+ product = "RHQ";
+ } else if (product.equals("JBoss Operations Network")) {
+ product = "JON";
+ }
+ Long timestamp = bugLogTimestamps.get(bugId);
+ if ((timestamp == null) || ((System.currentTimeMillis() - timestamp) > (5 * 60 * 1000L))) {
+ bot.sendMessage(
+ event.getChannel(),
+ "BZ " + bugId + " [product=" + product + ", priority=" + bug.getPriority() + ", status="
+ + bug.getStatus() + "] " + bug.getSummary() + " [ https://bugzilla.redhat.com/" + bugId
+ + " ]");
+ }
+ bugLogTimestamps.put(bugId, System.currentTimeMillis());
+ } else {
+ bot.sendMessage(event.getChannel(), "BZ " + bugId + " does not exist.");
+ }
+ }
+
+ // react to the commit hashs included in the messages
+ Matcher commitMatcher = COMMIT_PATTERN.matcher(message);
+ while (commitMatcher.find()) {
+ String shaHash = commitMatcher.group(2);
+ String response = String.format(COMMIT_LINK, shaHash);
+ bot.sendMessage(event.getChannel(), event.getUser().getNick() + ": " + response);
+ }
+
+ // react to commands included in the messages
+ Matcher commandMatcher = commandPattern.matcher(message);
+ while (commandMatcher.find()) {
+ Command command = Command.valueOf(commandMatcher.group(1).toUpperCase());
+ String response = prepareResponseForCommand(command);
+ bot.sendMessage(event.getChannel(), event.getUser().getNick() + ": " + response);
+ }
+
+ // ping JON devs
+ if (message.matches(".*\\b(jon-team|jboss-on-team)\\b.*")) {
+ Set<User> users = bot.getUsers(event.getChannel());
+ StringBuilder presentJonDevs = new StringBuilder();
+ for (User user : users) {
+ String nick = user.getNick();
+ if (JON_DEVS.contains(nick) && !nick.equals(event.getUser().getNick())) {
+ presentJonDevs.append(nick).append(' ');
+ }
+ }
+ bot.sendMessage(event.getChannel(), presentJonDevs + ": see message from " + event.getUser().getNick()
+ + " above");
+ }
+ }
+
+ @Override
+ public void onPrivateMessage(PrivateMessageEvent<RhqIrcBot> privateMessageEvent) throws Exception {
+ PircBotX bot = privateMessageEvent.getBot();
+ String message = privateMessageEvent.getMessage();
+ Matcher echoMatcher = ECHO_PATTERN.matcher(message);
+ if (echoMatcher.matches()) {
+ String echoMessage = echoMatcher.group(1);
+ bot.sendMessage(this.channel, echoMessage);
+ } else if (message.equalsIgnoreCase(Command.PREFIX + "listrenames")) {
+ //Generate a list of renames in the form of old1 changed to new1, old2 changed to new2, etc
+ StringBuilder users = new StringBuilder();
+ for (Map.Entry<String, String> curUser : names.entrySet()) {
+ users.append(curUser.getKey()).append(" changed to ").append(curUser.getValue()).append(", ");
+ }
+ String usersString = users.substring(0, users.length() - 3);
+ privateMessageEvent.respond("Renamed users: " + usersString);
+ } else {
+ boolean isCommand = false;
+ // react to commands included in the messages
+ Matcher commandMatcher = commandPattern.matcher(message);
+ while (commandMatcher.find()) {
+ Command command = Command.valueOf(commandMatcher.group(1).toUpperCase());
+ String response = prepareResponseForCommand(command);
+ bot.sendMessage(privateMessageEvent.getUser(), response);
+ }
+ if (!isCommand) {
+ bot.sendMessage(privateMessageEvent.getUser(), "Hi, I am " + bot.getFinger() + ".\n"
+ + prepareResponseForCommand(Command.HELP));
+ }
+ }
+ }
+
+ @Override
+ public void onDisconnect(DisconnectEvent<RhqIrcBot> disconnectEvent) throws Exception {
+ boolean connected = false;
+ while (!connected) {
+ Thread.sleep(60 * 1000L); // 1 minute
+ try {
+ PircBotX newBot = new RhqIrcBot(this);
+ newBot.connect(this.server);
+ newBot.joinChannel(this.channel);
+
+ connected = true;
+ } catch (Exception e) {
+ System.err.println("Failed to reconnect to " + disconnectEvent.getBot().getServer() + " IRC server: "
+ + e);
+ }
+ }
+
+ // Try to clean up the old bot, so it can release any memory, sockets, etc. it's using.
+ PircBotX oldBot = disconnectEvent.getBot();
+ Set<Listener> oldListeners = new HashSet<Listener>(oldBot.getListenerManager().getListeners());
+ for (Listener oldListener : oldListeners) {
+ oldBot.getListenerManager().removeListener(oldListener);
+ }
+ }
+
+ @Override
+ public void onNickChange(NickChangeEvent<RhqIrcBot> event) throws Exception {
+ //Store the result
+ names.put(event.getOldNick(), event.getNewNick());
+ }
+
+ private String prepareResponseForCommand(Command command) {
+ if (command.staticRespond != null) {
+ String response = command.staticRespond;
+ if (command == Command.HELP) {
+ for (Command com : Command.values()) {
+ if (com.includeInHelp) {
+ response += Command.PREFIX + com.toString().toLowerCase() + " ";
+ }
+ }
+ }
+ return response;
+ }
+ switch (command) {
+ case SUPPORT:
+ return whoIsOnSupport(SUPPORT_LINK);
+ case PTO:
+ return whoIsOnPto(PTO_LINK);
+ default:
+ System.err.println("Unknown command:" + command);
+ break;
+ }
+ return null;
+ }
+
+ private String whoIsOnSupport(String link) {
+ if (docspaceLogin == null || docspaceLogin.isEmpty() || docspacePassword == null || docspacePassword.isEmpty()) {
+ return "This command is not supported.";
+ }
+ String month = monthFormat.format(new Date());
+ String dayInMonth = dayInMonthFormat.format(new Date());
+ int dayInMonthInt = Integer.parseInt(dayInMonth);
+ try {
+ boolean monthFound = false;
+ String login = docspaceLogin + ":" + docspacePassword;
+ String base64login = new String(Base64.encodeBase64(login.getBytes()));
+ Document doc = Jsoup.connect(link).header("Authorization", "Basic " + base64login).get();
+ Elements cells = doc.select("tr td");
+ for (Element cell : cells) {
+ String cellText = cell.text().toLowerCase();
+ if (cellText.startsWith(month.toLowerCase())) {
+ monthFound = true;
+ if (cellText.substring(cellText.length() - 1, cellText.length()).equals(dayInMonth)) {
+ return cell.firstElementSibling().text() + " is on support this week";
+ }
+ continue;
+ }
+ if (monthFound && cellText.equals(dayInMonth)) {
+ return cell.firstElementSibling().text() + " is on support this week";
+ } else if (monthFound) {
+ if (cell.equals(cell.firstElementSibling()) || cell.equals(cell.lastElementSibling())) {
+ continue; //the first row with name or the last row with a comment
+ }
+ int day = -1;
+ try {
+ day = Integer.parseInt(cellText);
+ if (day > dayInMonthInt) {
+ return cell.parent().previousElementSibling().child(0).text() + " is on support this week";
+ }
+ } catch (NumberFormatException nfe) {
+ break; // next month
+ }
+ }
+ }
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ // fallback solution if SSL is not set correctly
+ String randomDevel = JON_DEVS.toArray(new String[JON_DEVS.size()])[new Random().nextInt(JON_DEVS.size())];
+ return "404 Developer Not Found, selecting randomly " + randomDevel + ". Check the " + SUPPORT_LINK;
+ }
+
+ private static String whoIsOnPto(String link) {
+ String month = monthFormat.format(new Date());
+ String dayInMonth = dayInMonthFormat.format(new Date());
+ try {
+ String onPto = "";
+ Document doc = Jsoup.connect(link).ignoreContentType(true).get();
+ Elements dates = doc.getElementsContainingOwnText(dayInMonth + " " + month);
+ for (Element date : dates) {
+ onPto += date.firstElementSibling().text() + ", ";
+ }
+ if (!onPto.isEmpty()) {
+ return onPto.substring(0, onPto.length() - 2);
+ }
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ return "noone is on PTO today";
+ }
+
+ public void setDocspaceLogin(String docspaceLogin) {
+ this.docspaceLogin = docspaceLogin;
+ }
+
+ public void setDocspacePassword(String docspacePassword) {
+ this.docspacePassword = docspacePassword;
+ }
+}
diff --git a/etc/rhq-ircBot/src/main/resources/org/rhq/etc/ircbot/cacerts.jks b/etc/rhq-ircBot/src/main/resources/org/rhq/etc/ircbot/cacerts.jks
new file mode 100644
index 0000000..3d73cbc
Binary files /dev/null and b/etc/rhq-ircBot/src/main/resources/org/rhq/etc/ircbot/cacerts.jks differ
diff --git a/etc/rhq-ircBot/src/main/resources/org/rhq/etc/ircbot/rhq-ircbot.properties b/etc/rhq-ircBot/src/main/resources/org/rhq/etc/ircbot/rhq-ircbot.properties
new file mode 100644
index 0000000..25e325c
--- /dev/null
+++ b/etc/rhq-ircBot/src/main/resources/org/rhq/etc/ircbot/rhq-ircbot.properties
@@ -0,0 +1,2 @@
+docspace_login=
+docspace_password=
11 years
[rhq] Changes to 'lkrejci/configuration-builder'
by lkrejci
New branch 'lkrejci/configuration-builder' available with the following commits:
commit 20beed909fc2a647434244bd5bcb00839a683606
Author: Lukas Krejci <lkrejci(a)redhat.com>
Date: Wed May 22 19:24:00 2013 +0200
A configuration instance builder.
This should greatly simplify the process of creating complex configuration
objects.
11 years
[rhq] Branch 'feature/cassandra-backend' - 11 commits - .classpath modules/common modules/core modules/enterprise modules/plugins
by Jay Shaughnessy
.classpath | 2
modules/common/cassandra-ccm/cassandra-ccm-arquillian/pom.xml | 2
modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java | 131 ++++++----
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java | 51 ++-
modules/core/dbutils/pom.xml | 2
modules/core/native-system/src/main/java/org/rhq/core/system/JavaSystemInfo.java | 1
modules/core/native-system/src/main/java/org/rhq/core/system/ProcessExecution.java | 32 +-
modules/core/native-system/src/main/java/org/rhq/core/system/ProcessExecutionResults.java | 56 ++--
modules/core/util/src/main/java/org/rhq/core/util/exec/ProcessExecutor.java | 10
modules/core/util/src/main/java/org/rhq/core/util/exec/ProcessExecutorResults.java | 56 ++--
modules/enterprise/agent/src/etc/java-service-wrapper/rhq-agent-wrapper.conf | 5
modules/enterprise/agent/src/etc/rhq-agent.bat | 3
modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml | 14 -
modules/enterprise/server/ear/pom.xml | 5
modules/enterprise/server/itests-2/pom.xml | 2
modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/discovery/DiscoveryBossBeanTest.java | 107 ++++++--
modules/enterprise/server/itests-2/src/test/resources/org/rhq/enterprise/server/discovery/DiscoveryBossBeanTest.xml | 78 ++++-
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/discovery/DiscoveryBossBean.java | 58 +++-
modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java | 42 +--
modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java | 4
20 files changed, 444 insertions(+), 217 deletions(-)
New commits:
commit 2597e0e38686ff127302539d405189870c01242b
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Wed May 22 13:02:50 2013 -0400
Fix artifact name for storage plugin
diff --git a/modules/enterprise/server/ear/pom.xml b/modules/enterprise/server/ear/pom.xml
index a23298a..36e92e1 100644
--- a/modules/enterprise/server/ear/pom.xml
+++ b/modules/enterprise/server/ear/pom.xml
@@ -421,7 +421,7 @@
</artifactItem>
<artifactItem>
<groupId>${project.groupId}</groupId>
- <artifactId>rhq-storage-plugin</artifactId>
+ <artifactId>rhq-rhqstorage-plugin</artifactId>
<version>${project.version}</version>
</artifactItem>
</artifactItems>
commit 1a72eab200bcc460f4e009eeb5cfb52af3d6c253
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Tue May 21 17:57:02 2013 -0400
add rhq storage plugin to ear
diff --git a/modules/enterprise/server/ear/pom.xml b/modules/enterprise/server/ear/pom.xml
index 9d2f7f3..a23298a 100644
--- a/modules/enterprise/server/ear/pom.xml
+++ b/modules/enterprise/server/ear/pom.xml
@@ -419,6 +419,11 @@
<artifactId>rhq-cassandra-plugin</artifactId>
<version>${project.version}</version>
</artifactItem>
+ <artifactItem>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>rhq-storage-plugin</artifactId>
+ <version>${project.version}</version>
+ </artifactItem>
</artifactItems>
<outputDirectory>${earDirectory}/rhq-downloads/rhq-plugins</outputDirectory>
</configuration>
commit aac3ec9cef8b317619c09b33d22ff61112d69465
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Tue May 21 17:43:15 2013 -0400
Fix discovery issues
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java
index cd5e64e..596d10e 100644
--- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java
+++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java
@@ -119,7 +119,7 @@ public class CassandraNodeDiscoveryComponent extends JMXDiscoveryComponent {
String[] jmxPortArg = arg.split("=");
jmxPort = jmxPortArg[1];
}
- if (arg.startsWith("-cp")) {
+ if (arg.startsWith("-cp") || (arg.startsWith("-classpath"))) {
classpathIndex = i;
}
@@ -130,7 +130,7 @@ public class CassandraNodeDiscoveryComponent extends JMXDiscoveryComponent {
pluginConfig.put(new PropertySimple("commandLine", commandLineBuilder.toString()));
if (classpathIndex != -1 && classpathIndex + 1 < arguments.length) {
- String[] classpathEntries = arguments[classpathIndex + 1].split(":");
+ String[] classpathEntries = arguments[classpathIndex + 1].split(File.pathSeparator);
File yamlConfigurationPath = null;
for (String classpathEntry : classpathEntries) {
commit deea3c3e324e23a8a0ec3dc769b3e6b289dbd62c
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Tue May 21 15:43:10 2013 -0400
turn off default use of FilePreferences for agent wrapper
diff --git a/modules/enterprise/agent/src/etc/java-service-wrapper/rhq-agent-wrapper.conf b/modules/enterprise/agent/src/etc/java-service-wrapper/rhq-agent-wrapper.conf
index 132a7b8..1fffe6a 100644
--- a/modules/enterprise/agent/src/etc/java-service-wrapper/rhq-agent-wrapper.conf
+++ b/modules/enterprise/agent/src/etc/java-service-wrapper/rhq-agent-wrapper.conf
@@ -70,8 +70,9 @@ wrapper.java.additional.3=-Xmx128m
wrapper.java.additional.4=-Di18nlog.dump-stack-traces=false
wrapper.java.additional.5=-Dsigar.nativeLogging=false
wrapper.java.additional.6="-Djava.endorsed.dirs=%RHQ_AGENT_HOME%/lib/endorsed"
-wrapper.java.additional.7="-Djava.util.prefs.PreferencesFactory=org.rhq.core.util.preferences.FilePreferencesFactory"
-wrapper.java.additional.8="-Drhq.preferences.file=%RHQ_AGENT_HOME%/conf/agent-prefs.properties"
+# To use the file preferences as opposed to the registry, uncomment the next two lines.
+# wrapper.java.additional.7="-Djava.util.prefs.PreferencesFactory=org.rhq.core.util.preferences.FilePreferencesFactory"
+# wrapper.java.additional.8="-Drhq.preferences.file=%RHQ_AGENT_HOME%/conf/agent-prefs.properties"
# We want to make sure the agent starts in its install directory (quotes not needed)
wrapper.working.dir=%RHQ_AGENT_HOME%
diff --git a/modules/enterprise/agent/src/etc/rhq-agent.bat b/modules/enterprise/agent/src/etc/rhq-agent.bat
index f94603c..ee81cc8 100644
--- a/modules/enterprise/agent/src/etc/rhq-agent.bat
+++ b/modules/enterprise/agent/src/etc/rhq-agent.bat
@@ -175,7 +175,8 @@ if not defined RHQ_AGENT_MAINCLASS (
set RHQ_AGENT_MAINCLASS=org.rhq.enterprise.agent.AgentMain
)
-rem note - currently not using custom Java Prefs as the default, use commented command line to activate
+rem note - currently not using custom Java Prefs as the default, use commented command line to activate. If installing
+rem note - the agent as a windows service, you must also uncomment lines in wrapper/rhq-agent-wrapper.conf.
rem set CMD="%RHQ_AGENT_JAVA_EXE_FILE_PATH%" %_JAVA_ENDORSED_DIRS_OPT% %_JAVA_LIBRARY_PATH_OPT% %_JAVA_PREFERENCES_FACTORY_OPT% %RHQ_AGENT_JAVA_OPTS% %RHQ_AGENT_ADDITIONAL_JAVA_OPTS% %_LOG_CONFIG% -cp "%CLASSPATH%" %RHQ_AGENT_MAINCLASS% %RHQ_AGENT_CMDLINE_OPTS%
set CMD="%RHQ_AGENT_JAVA_EXE_FILE_PATH%" %_JAVA_ENDORSED_DIRS_OPT% %_JAVA_LIBRARY_PATH_OPT% %RHQ_AGENT_JAVA_OPTS% %RHQ_AGENT_ADDITIONAL_JAVA_OPTS% %_LOG_CONFIG% -cp "%CLASSPATH%" %RHQ_AGENT_MAINCLASS% %RHQ_AGENT_CMDLINE_OPTS%
commit 7b03bd4664c63d3a3ec950b976695b8d4226bf31
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Tue May 21 09:50:02 2013 -0400
Add support for server itests on Windows. Arquillian+Surefire+Cassandra+Windows
did not like the way we used the suite deployment extension as a hook for
starting and stopping a test cassandra cluster. The spawned cassandra
process seems to confuse things such that the arquillian process and its
deployment (eap) process would hang, and the surefire report would
not be generated. Likely due to the fact that Windows does not deal with
"background" processes the same way as linux. This was after already trying
to trim the test cluster from 2 nodes to 1 node, for similar process-related
issues.
So, on Windows we have decided that to run the i-tests you must use an external
storage node (for example, a dev container instance, or a test product install).
To do this you specify:
-Ditest.use-external-storage-node=true
By default the external storage node seed is 127.0.0.1|7199|9042. To specify
a different storage node you can specify:
-Drhq.cassandra.seeds="host|jmxport|cqlport"
This should also work for linux, if desired.
Also:
- Cassandra on Windows will not generate the pid files that were in
place to handle the killing of the test cassandra. So, I added an enhancement
to the ProcessExecution utilities to return the actual Process, which is now
used to destroy the spawned cassandra processes. Note that did not end up
being used in the final windows solution. But I left it as is, because I think
even for linux it's more simple and safer than the pid file approach.
- Added autoimport of "RHQ Storage Node"
- Added test for autoimport of "RHQ Storage Node"
- Also, for windows, I had changed around the way we hooked the storage node
handling into CCMSuiteDeploymentExtension. Again, not used in the final
solution but I think it's a bit safer/clearer so I'll leave as is.
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java b/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java
index 8bd8206..e6ac0cb 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java
+++ b/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java
@@ -39,6 +39,8 @@ import org.jboss.arquillian.container.spi.event.DeployDeployment;
import org.jboss.arquillian.container.spi.event.DeploymentEvent;
import org.jboss.arquillian.container.spi.event.UnDeployDeployment;
import org.jboss.arquillian.container.spi.event.container.AfterStart;
+import org.jboss.arquillian.container.spi.event.container.AfterStop;
+import org.jboss.arquillian.container.spi.event.container.BeforeStart;
import org.jboss.arquillian.container.spi.event.container.BeforeStop;
import org.jboss.arquillian.container.test.impl.client.deployment.event.GenerateDeployment;
import org.jboss.arquillian.core.api.Event;
@@ -75,22 +77,8 @@ public class CCMSuiteDeploymentExtension implements LoadableExtension {
private Class<?> deploymentClass;
private DeploymentScenario suiteDeploymentScenario;
-
private CassandraClusterManager ccm;
- public SuiteDeployer() {
- File basedir = new File("target");
- File clusterDir = new File(basedir, "cassandra");
-
- DeploymentOptionsFactory factory = new DeploymentOptionsFactory();
- DeploymentOptions options = factory.newDeploymentOptions();
- options.setClusterDir(clusterDir.getAbsolutePath());
- options.setUsername("cassandra");
- options.setPassword("cassandra");
-
- ccm = new CassandraClusterManager(options);
- }
-
@Inject
@ClassScoped
private InstanceProducer<DeploymentScenario> classDeploymentScenario;
@@ -102,30 +90,13 @@ public class CCMSuiteDeploymentExtension implements LoadableExtension {
private Event<GenerateDeployment> generateDeploymentEvent;
@Inject
- // Active some form of ClassContext around our deployments due to assumption bug in AS7 extension.
+ // Active some form of ClassContext around our deployments due to assumption bug in AS7 extension.
private Instance<ClassContext> classContext;
public void startup(@Observes(precedence = -100)
ManagerStarted event, ArquillianDescriptor descriptor) {
deploymentClass = getDeploymentClass(descriptor);
- List<StorageNode> nodes = ccm.createCluster();
- ccm.startCluster(false);
- try {
- ClusterInitService clusterInitService = new ClusterInitService();
- clusterInitService.waitForClusterToStart(nodes, nodes.size(), 1500, 20, 5);
-
- SchemaManager schemaManager = new SchemaManager("cassandra", "cassandra", nodes);
- if (!schemaManager.schemaExists()) {
- schemaManager.createSchema();
- }
- schemaManager.updateSchema();
- schemaManager.shutdown();
- } catch (Exception e) {
- ccm.shutdownCluster();
- throw new RuntimeException("Cassandra cluster initialization failed", e);
- }
-
executeInClassScope(new Callable<Void>() {
public Void call() throws Exception {
generateDeploymentEvent.fire(new GenerateDeployment(new TestClass(deploymentClass)));
@@ -135,20 +106,79 @@ public class CCMSuiteDeploymentExtension implements LoadableExtension {
});
}
- public void deploy(@Observes
- final AfterStart event, final ContainerRegistry registry) {
+ public void initCassandra(@Observes(precedence = -100)
+ final BeforeStart event, ArquillianDescriptor descriptor) {
+
executeInClassScope(new Callable<Void>() {
public Void call() throws Exception {
+
+ SchemaManager schemaManager;
+
+ if (!Boolean.valueOf(System.getProperty("itest.use-external-storage-node", "false"))) {
+
+ DeploymentOptionsFactory factory = new DeploymentOptionsFactory();
+ DeploymentOptions options = factory.newDeploymentOptions();
+ File basedir = new File("target");
+ File clusterDir = new File(basedir, "cassandra");
+
+ options.setUsername("cassandra");
+ options.setPassword("cassandra");
+ options.setClusterDir(clusterDir.getAbsolutePath());
+
+ ccm = new CassandraClusterManager(options);
+ List<StorageNode> nodes = ccm.createCluster();
+
+ ccm.startCluster(false);
+
+ try {
+ ClusterInitService clusterInitService = new ClusterInitService();
+ clusterInitService.waitForClusterToStart(nodes, nodes.size(), 1500, 20, 5);
+ schemaManager = new SchemaManager("cassandra", "cassandra", nodes);
+
+ } catch (Exception e) {
+ if (null != ccm) {
+ ccm.shutdownCluster();
+ }
+ throw new RuntimeException("Cassandra cluster initialization failed", e);
+ }
+ } else {
+ try {
+ String seed = System.getProperty("rhq.cassandra.seeds", "127.0.0.1|7199|9042");
+ schemaManager = new SchemaManager("cassandra", "cassandra", seed);
+
+ } catch (Exception e) {
+ throw new RuntimeException("External Cassandra initialization failed", e);
+ }
+ }
+
try {
- for (Deployment d : suiteDeploymentScenario.deployments()) {
- deploymentEvent.fire(new DeployDeployment(findContainer(registry,
- event.getDeployableContainer()), d));
+ if (!schemaManager.schemaExists()) {
+ schemaManager.createSchema();
}
- return null;
+ schemaManager.updateSchema();
+ schemaManager.shutdown();
+
} catch (Exception e) {
- ccm.shutdownCluster();
- throw e;
+ if (null != ccm) {
+ ccm.shutdownCluster();
+ }
+ throw new RuntimeException("Cassandra schema initialization failed", e);
}
+
+ return null;
+ }
+ });
+ }
+
+ public void deploy(@Observes
+ final AfterStart event, final ContainerRegistry registry) {
+ executeInClassScope(new Callable<Void>() {
+ public Void call() throws Exception {
+ for (Deployment d : suiteDeploymentScenario.deployments()) {
+ deploymentEvent.fire(new DeployDeployment(findContainer(registry,
+ event.getDeployableContainer()), d));
+ }
+ return null;
}
});
}
@@ -164,12 +194,25 @@ public class CCMSuiteDeploymentExtension implements LoadableExtension {
return null;
}
});
- ccm.shutdownCluster();
+ }
+
+ public void shutdownCassandra(@Observes
+ final AfterStop event, ArquillianDescriptor descriptor) {
+ executeInClassScope(new Callable<Void>() {
+ public Void call() throws Exception {
+
+ if (null != ccm) {
+ ccm.shutdownCluster();
+ }
+
+ return null;
+ }
+ });
}
public void overrideBefore(@Observes
EventContext<BeforeClass> event) {
- // Don't continue TestClass's BeforeClass context as normal.
+ // Don't continue TestClass's BeforeClass context as normal.
// No DeploymentGeneration or Deploy will take place.
classDeploymentScenario.set(suiteDeploymentScenario);
@@ -177,7 +220,7 @@ public class CCMSuiteDeploymentExtension implements LoadableExtension {
public void overrideAfter(@Observes
EventContext<AfterClass> event) {
- // Don't continue TestClass's AfterClass context as normal.
+ // Don't continue TestClass's AfterClass context as normal.
// No UnDeploy will take place.
}
@@ -217,4 +260,4 @@ public class CCMSuiteDeploymentExtension implements LoadableExtension {
}
}
}
-}
+}
\ No newline at end of file
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java
index 9a25532..222406d 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java
@@ -25,17 +25,16 @@
package org.rhq.cassandra;
-import static java.util.Arrays.asList;
import static org.rhq.core.util.StringUtil.collectionToString;
import java.io.ByteArrayInputStream;
import java.io.File;
-import java.io.FileReader;
import java.io.IOException;
-import java.io.StringWriter;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
+import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
@@ -49,7 +48,6 @@ import org.rhq.core.system.ProcessExecutionResults;
import org.rhq.core.system.SystemInfo;
import org.rhq.core.system.SystemInfoFactory;
import org.rhq.core.util.file.FileUtil;
-import org.rhq.core.util.stream.StreamUtil;
/**
* @author John Sanda
@@ -60,6 +58,7 @@ public class CassandraClusterManager {
private DeploymentOptions deploymentOptions;
private List<File> installedNodeDirs = new ArrayList<File>();
+ private Map<Integer, Process> nodeProcessMap = new HashMap<Integer, Process>();
public CassandraClusterManager() {
this(new DeploymentOptionsFactory().newDeploymentOptions());
@@ -85,8 +84,8 @@ public class CassandraClusterManager {
public List<StorageNode> createCluster() {
if (log.isDebugEnabled()) {
- log.debug("Installing embedded " + deploymentOptions.getNumNodes() + " node cluster to " +
- deploymentOptions.getClusterDir());
+ log.debug("Installing embedded " + deploymentOptions.getNumNodes() + " node cluster to "
+ + deploymentOptions.getClusterDir());
} else {
log.info("Installing embedded cluster");
}
@@ -124,6 +123,7 @@ public class CassandraClusterManager {
nodeOptions.load();
Deployer deployer = new Deployer();
deployer.setDeploymentOptions(nodeOptions);
+
deployer.unzipDistro();
deployer.applyConfigChanges();
deployer.updateFilePerms();
@@ -141,7 +141,7 @@ public class CassandraClusterManager {
}
}
try {
- FileUtil.writeFile(new ByteArrayInputStream(new byte[]{0}), installedMarker);
+ FileUtil.writeFile(new ByteArrayInputStream(new byte[] { 0 }), installedMarker);
} catch (IOException e) {
log.warn("Failed to write installed file marker to " + installedMarker, e);
}
@@ -150,14 +150,23 @@ public class CassandraClusterManager {
private Set<String> calculateLocalIPAddresses(int numNodes) {
Set<String> addresses = new HashSet<String>();
+
for (int i = 1; i <= numNodes; ++i) {
addresses.add(getLocalIPAddress(i));
}
+
return addresses;
}
private String getLocalIPAddress(int i) {
- return "127.0.0." + i;
+ Set<String> addresses = new HashSet<String>();
+ String seeds = deploymentOptions.getSeeds();
+
+ if (null == seeds || seeds.isEmpty()) {
+ return "127.0.0." + i;
+ }
+
+ return seeds.split(",")[i - 1];
}
private List<StorageNode> calculateNodes() {
@@ -199,6 +208,8 @@ public class CassandraClusterManager {
ProcessExecutionResults results = startNode(nodeDir);
if (results.getError() != null) {
log.warn("An unexpected error occurred while starting the node at " + nodeDir, results.getError());
+ } else {
+ nodeProcessMap.put(nodeId, results.getProcess());
}
}
long end = System.currentTimeMillis();
@@ -220,7 +231,6 @@ public class CassandraClusterManager {
}
ProcessExecution startScriptExe = ProcessExecutionUtility.createProcessExecution(startScript);
- startScriptExe.setArguments(asList("-p", "cassandra.pid"));
startScriptExe.setWaitForCompletion(0);
ProcessExecutionResults results = systemInfo.executeProcess(startScriptExe);
@@ -251,7 +261,15 @@ public class CassandraClusterManager {
log.warn("No shutdown to perform. " + nodeDir + " does not exist.");
continue;
}
- killNode(nodeDir);
+
+ Process nodeProcess = nodeProcessMap.get(nodeId);
+ if (null != nodeProcess) {
+ try {
+ nodeProcess.destroy();
+ } catch (Throwable t) {
+ log.warn("Failed to kill Cassandra node " + nodeDir, t);
+ }
+ }
} catch (Exception e) {
log.warn("An error occurred trying to shutdown node at " + nodeDir);
}
@@ -266,17 +284,4 @@ public class CassandraClusterManager {
return nodeIds;
}
- private void killNode(File nodeDir) throws Exception {
- long pid = getPid(nodeDir);
- CLibrary.kill((int) pid, 9);
- }
-
- private long getPid(File nodeDir) throws IOException {
- File binDir = new File(nodeDir, "bin");
- StringWriter writer = new StringWriter();
- StreamUtil.copy(new FileReader(new File(binDir, "cassandra.pid")), writer);
-
- return Long.parseLong(writer.getBuffer().toString());
- }
-
}
diff --git a/modules/core/dbutils/pom.xml b/modules/core/dbutils/pom.xml
index 76f4e1f..11eb831 100644
--- a/modules/core/dbutils/pom.xml
+++ b/modules/core/dbutils/pom.xml
@@ -280,7 +280,7 @@
self.log('PERFORMING STORAGE NODE SETUP TO LATEST SCHEMA')
username = project.getProperty('rhq.dev.cassandra.username') ?: "cassandra"
password = project.getProperty('rhq.dev.cassandra.password') ?: "cassandra"
- seeds = project.getProperty('rhq.dev.cassandra.seeds') ?: "127.0.0.1|9160|9142"
+ seeds = project.getProperty('rhq.dev.cassandra.seeds') ?: "127.0.0.1|7199|9142"
schemaManager = new SchemaManager(username, password, seeds)
schemaManager.resetSchema()
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/discovery/DiscoveryBossBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/discovery/DiscoveryBossBeanTest.java
index e4dbe18..c4df956 100644
--- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/discovery/DiscoveryBossBeanTest.java
+++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/discovery/DiscoveryBossBeanTest.java
@@ -98,6 +98,10 @@ public class DiscoveryBossBeanTest extends AbstractEJB3Test {
private ResourceType serviceType2;
+ private ResourceType storagePlatformType;
+
+ private ResourceType storageServerType;
+
private Agent agent;
private TestServerCommunicationsService agentServiceContainer;
@@ -117,6 +121,9 @@ public class DiscoveryBossBeanTest extends AbstractEJB3Test {
serviceType2 = getEntityManager().find(ResourceType.class, 15644);
agent = getEntityManager().find(Agent.class, 15641);
+ storagePlatformType = getEntityManager().find(ResourceType.class, 15651);
+ storageServerType = getEntityManager().find(ResourceType.class, 15652);
+
agentServiceContainer = prepareForTestAgents();
agentServiceContainer.discoveryService = Mockito.mock(DiscoveryAgentService.class);
when(
@@ -454,6 +461,39 @@ public class DiscoveryBossBeanTest extends AbstractEJB3Test {
assertEquals(platformSyncInfo.getChildSyncInfos().size(), 0); // notice there are no server children now
}
+ @Test(groups = "integration.ejb3")
+ public void testAutoImportStorageNode() throws Exception {
+
+ // create an inventory report for a storage node
+ InventoryReport inventoryReport = new InventoryReport(agent);
+
+ Resource storagePlatform = new Resource(prefix("storagePlatform"), prefix("storagePlatform"),
+ storagePlatformType);
+ storagePlatform.setUuid(String.valueOf(new Random().nextInt()));
+
+ Resource storageNode = new Resource(prefix("storageNode"), prefix("storageNode"), storageServerType);
+ storageNode.setUuid(String.valueOf(new Random().nextInt()));
+ storagePlatform.addChildResource(storageNode);
+
+ inventoryReport.addAddedRoot(storagePlatform);
+
+ // Merge this inventory report
+ MergeInventoryReportResults mergeResults = discoveryBoss.mergeInventoryReport(serialize(inventoryReport));
+ assert mergeResults != null;
+ assert mergeResults.getIgnoredResourceTypes() == null : "nothing should have been ignored";
+ ResourceSyncInfo platformSyncInfo = mergeResults.getResourceSyncInfo();
+ assert platformSyncInfo != null;
+
+ // Check merge result
+ assertEquals(InventoryStatus.COMMITTED, platformSyncInfo.getInventoryStatus());
+ assertEquals(storagePlatform.getChildResources().size(), platformSyncInfo.getChildSyncInfos().size());
+
+ storageNode = resourceManager.getResourceById(subjectManager.getOverlord(), platformSyncInfo
+ .getChildSyncInfos().iterator().next().getId());
+ assertNotNull(storageNode);
+ assertEquals(InventoryStatus.COMMITTED, storageNode.getInventoryStatus());
+ }
+
/**
* Use this to fake like your remoting objects. Can be used to keep your own copy of objects locally transient.
*
@@ -500,8 +540,11 @@ public class DiscoveryBossBeanTest extends AbstractEJB3Test {
Query q;
List<?> doomed;
- q = em.createQuery("SELECT r FROM Resource r WHERE r.resourceType.name LIKE '" + getPrefix()
- + "%' ORDER BY r.id DESC");
+ q = em.createQuery("" //
+ + "SELECT r FROM Resource r" //
+ + " WHERE r.resourceType.name LIKE '" + getPrefix() + "%'" //
+ + " OR r.resourceType.name = 'RHQ Storage Node'" //
+ + " ORDER BY r.id DESC");
doomed = q.getResultList();
for (Object removeMe : doomed) {
Resource res = em.getReference(Resource.class, ((Resource) removeMe).getId());
diff --git a/modules/enterprise/server/itests-2/src/test/resources/org/rhq/enterprise/server/discovery/DiscoveryBossBeanTest.xml b/modules/enterprise/server/itests-2/src/test/resources/org/rhq/enterprise/server/discovery/DiscoveryBossBeanTest.xml
index aeaa239..2bc51ec 100644
--- a/modules/enterprise/server/itests-2/src/test/resources/org/rhq/enterprise/server/discovery/DiscoveryBossBeanTest.xml
+++ b/modules/enterprise/server/itests-2/src/test/resources/org/rhq/enterprise/server/discovery/DiscoveryBossBeanTest.xml
@@ -56,12 +56,34 @@
plugin="DiscoveryBossBeanTest-test"
deleted="0"/>
+ <rhq_resource_type id="15651"
+ name="DiscoveryBossBeanTest-test storage platform"
+ category="PLATFORM"
+ creation_data_type="CONFIGURATION"
+ create_delete_policy="BOTH"
+ supports_manual_add="1"
+ singleton="0"
+ plugin="RHQStorage"
+ deleted="0"/>
+ <rhq_resource_type id="15652"
+ name="RHQ Storage Node"
+ category="SERVER"
+ creation_data_type="CONFIGURATION"
+ create_delete_policy="BOTH"
+ supports_manual_add="1"
+ singleton="1"
+ plugin="RHQStorage"
+ deleted="0"/>
+
<rhq_resource_type_parents resource_type_id="15642"
parent_resource_type_id="15641"/>
<rhq_resource_type_parents resource_type_id="15643"
parent_resource_type_id="15642"/>
<rhq_resource_type_parents resource_type_id="15644"
parent_resource_type_id="15642"/>
+
+ <rhq_resource_type_parents resource_type_id="15652"
+ parent_resource_type_id="15651"/>
<rhq_process_scan/>
<rhq_event_def/>
@@ -88,11 +110,23 @@
<rhq_plugin id="15641"
deployment="AGENT"
name="DiscoveryBossBeanTest-test"
- display_name="test"
+ display_name="DiscoveryBossBeanTest-test"
+ enabled="1"
+ status="INSTALLED"
+ path="/plugins/test.jar"
+ md5="1234567"
+ ctime="12345"
+ mtime="123456"/>
+
+ <rhq_plugin id="15651"
+ deployment="AGENT"
+ name="RHQStorage"
+ display_name="DiscoveryBossBeanTest-RHQStorage"
enabled="1"
status="INSTALLED"
path="/plugins/test.jar"
md5="1234567"
ctime="12345"
mtime="123456"/>
+
</dataset>
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/discovery/DiscoveryBossBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/discovery/DiscoveryBossBean.java
index eb4c593..f1ea109 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/discovery/DiscoveryBossBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/discovery/DiscoveryBossBean.java
@@ -1171,9 +1171,10 @@ public class DiscoveryBossBean implements DiscoveryBossLocal, DiscoveryBossRemot
// Find the parent resource entity
parentResource = entityManager.find(Resource.class, parentId);
}
- // if the parent exists, create the parent-child relationship
+ // if the parent exists, create the parent-child relationship and add it to the map
if (null != parentResource) {
parentResource.addChildResource(resource);
+ parentMap.put(parentId, parentResource);
}
}
@@ -1192,23 +1193,56 @@ public class DiscoveryBossBean implements DiscoveryBossLocal, DiscoveryBossRemot
resource.setItime(System.currentTimeMillis());
resource.setModifiedBy(overlord.getName());
- // Ensure the new resource has the proper inventory status
- if ((null != parentResource)
- && (parentResource.getInventoryStatus() == InventoryStatus.COMMITTED)
- && ((resource.getResourceType().getCategory() == ResourceCategory.SERVICE) || (parentResource
- .getResourceType().getCategory() == ResourceCategory.SERVER))) {
+ setInventoryStatus(parentResource, resource);
- // Auto-commit services whose parent resources have already been imported by the user
- resource.setInventoryStatus(InventoryStatus.COMMITTED);
+ // Extend implicit (recursive) group membership of the parent to the new child
+ if (null != parentResource) {
+ groupManager.updateImplicitGroupMembership(overlord, resource);
+ }
+ }
- } else {
+ // Resources are set to either NEW or COMMITTED
+ // We autocommit in the following scenarios:
+ // - The parent resource is not a platform and is already committed
+ // - The resource is a platform and has an RHQ Storage Node child
+ // - The resource is an RHQ Storage Node child
+ // Ensure the new resource has the proper inventory status
+ private void setInventoryStatus(Resource parentResource, Resource resource) {
+ // never autocommit a platform
+ if (null == parentResource) {
resource.setInventoryStatus(InventoryStatus.NEW);
+ return;
}
- // Extend implicit (recursive) group membership of the parent to the new child
- if (null != parentResource) {
- groupManager.updateImplicitGroupMembership(overlord, resource);
+ ResourceType resourceType = resource.getResourceType();
+ boolean isParentCommitted = InventoryStatus.COMMITTED == parentResource.getInventoryStatus();
+ boolean isService = ResourceCategory.SERVICE == resourceType.getCategory();
+ boolean isParentServer = ResourceCategory.SERVER == parentResource.getResourceType().getCategory();
+
+ // always autocommit non-top-level-server children of committed parents
+ if (isParentCommitted && (isService || isParentServer)) {
+ resource.setInventoryStatus(InventoryStatus.COMMITTED);
+ return;
+ }
+
+ // always autocommit top-level-server if it's an RHQ Storage Node (and the platform, if necessary)
+ boolean isStorageNodePlugin = "RHQStorage".equals(resourceType.getPlugin());
+ boolean isStorageNode = (isStorageNodePlugin && "RHQ Storage Node".equals(resourceType.getName()));
+
+ if (isStorageNode) {
+ resource.setInventoryStatus(InventoryStatus.COMMITTED);
+
+ if (!isParentCommitted) {
+ parentResource.setInventoryStatus(InventoryStatus.COMMITTED);
+ }
+
+ return;
}
+
+ // otherwise, set NEW
+ resource.setInventoryStatus(InventoryStatus.NEW);
+
+ return;
}
public void importResources(Subject subject, int[] resourceIds) {
commit 2cb4a33f2e09a9e44fe323e8d3573ae368fe2dbd
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Tue May 21 09:24:54 2013 -0400
eclipse: update stax cassandra driver version
diff --git a/.classpath b/.classpath
index c967876..2bd1e82 100644
--- a/.classpath
+++ b/.classpath
@@ -375,7 +375,7 @@
<classpathentry exported="true" kind="var" path="M2_REPO/org/jboss/byteman/byteman-submit/1.5.2/byteman-submit-1.5.2.jar" sourcepath="M2_REPO/org/jboss/byteman/byteman-submit/1.5.2/byteman-submit-1.5.2-sources.jar"/>
<classpathentry exported="true" kind="var" path="M2_REPO/org/jboss/byteman/byteman-bmunit/1.5.2/byteman-bmunit-1.5.2.jar" sourcepath="M2_REPO/org/jboss/byteman/byteman-bmunit/1.5.2/byteman-bmunit-1.5.2-sources.jar"/>
<classpathentry kind="var" path="M2_REPO/org/apache/cassandra/cassandra-all/1.2.2/cassandra-all-1.2.2.jar"/>
- <classpathentry kind="var" path="M2_REPO/com/datastax/cassandra/cassandra-driver-core/1.0.0-beta2-rhq-1.2.2-2/cassandra-driver-core-1.0.0-beta2-rhq-1.2.2-2.jar"/>
+ <classpathentry kind="var" path="M2_REPO/com/datastax/cassandra/cassandra-driver-core/1.0.0-rhq-1.2.4/cassandra-driver-core-1.0.0-rhq-1.2.4.jar"/>
<classpathentry kind="var" path="M2_REPO/org/apache/thrift/libthrift/0.7.0/libthrift-0.7.0.jar"/>
<classpathentry kind="var" path="M2_REPO/commons-cli/commons-cli/1.2/commons-cli-1.2.jar"/>
<classpathentry kind="var" path="M2_REPO/com/google/guava/guava/12.0/guava-12.0.jar"/>
commit 1c8098677aba1098289eebd485c330a3a4ca5fed
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Mon May 20 15:27:18 2013 -0400
Make sure we always use forward slashes when generating the dev-container
rhq-storage.properties file. Backslashes are treated as escapes by
Properties.load().
diff --git a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
index 05717ae..c88298d 100644
--- a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
+++ b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
@@ -779,11 +779,17 @@ rhq.cassandra.cluster.num-nodes=${rhq.cassandra.cluster.num-nodes}
rhq.cassandra.logging.level=${rhq.cassandra.logging.level}
</echo>
+<!-- We must use forward slashes when writing paths to a properties files. Properties.load()
+ treats the values as Java Strings so backslashes will be parsed as escapes. -->
+<path id="path.datadir">
+ <pathelement location="${rhq.dev.data.dir}" />
+</path>
+<pathconvert targetos="unix" property="safe.datadir" refid="path.datadir"/>
+
<echo file="${project.build.outputDirectory}/bin/rhq-storage.properties" append="true"># storage installer options for dev deployment
-hostname=127.0.0.1
-commitlog=${rhq.dev.data.dir}/storage/commit_log
-data=${rhq.dev.data.dir}/storage/data
-saved-caches=${rhq.dev.data.dir}/storage/saved_caches
+commitlog=${safe.datadir}/storage/commit_log
+data=${safe.datadir}/storage/data
+saved-caches=${safe.datadir}/storage/saved_caches
heap-size=256M
heap-new-size=64M
</echo>
commit 1642947db7ad70112346db8cd2b2f3e634966611
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Mon May 20 09:23:17 2013 -0400
- fix spelling of artifactId: rhq-cassandra-ccm-arquillian
- add ProcessExecution.addArguments() which is needed for Windows, as
setArguments() will blow away the internally added [cmd.exe, /c] args.
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-arquillian/pom.xml b/modules/common/cassandra-ccm/cassandra-ccm-arquillian/pom.xml
index ce6b7ac..c1f864d 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-arquillian/pom.xml
+++ b/modules/common/cassandra-ccm/cassandra-ccm-arquillian/pom.xml
@@ -9,7 +9,7 @@
<version>4.8.0-SNAPSHOT</version>
</parent>
- <artifactId>rhq-cassandra-ccm-arquiliian</artifactId>
+ <artifactId>rhq-cassandra-ccm-arquillian</artifactId>
<name>RHQ Cassandra CCM Arquillian</name>
<dependencies>
diff --git a/modules/core/native-system/src/main/java/org/rhq/core/system/ProcessExecution.java b/modules/core/native-system/src/main/java/org/rhq/core/system/ProcessExecution.java
index 9e4acf2..6ab53bc 100644
--- a/modules/core/native-system/src/main/java/org/rhq/core/system/ProcessExecution.java
+++ b/modules/core/native-system/src/main/java/org/rhq/core/system/ProcessExecution.java
@@ -76,7 +76,8 @@ public class ProcessExecution {
*
* @param executable the full path to the executable that will be run
*/
- public void setExecutable(@NotNull String executable) {
+ public void setExecutable(@NotNull
+ String executable) {
this.executable = executable;
}
@@ -107,19 +108,36 @@ public class ProcessExecution {
/**
* Sets an optional set of arguments to pass to the executable.
- *
+ * <p/>
+ * Windows Note! This will overwrite internal arguments set by the constructor. Use {@link #addArguments} on Windows.
+ *
* @param arguments an optional set of arguments to pass to the executable
*/
- public void setArguments(@Nullable List<String> arguments) {
+ public void setArguments(@Nullable
+ List<String> arguments) {
this.arguments = arguments;
}
/**
+ * Adds an optional set of arguments to the current arguments passed to the executable.
+ *
+ * @param arguments an optional set of arguments to pass to the executable. Not null.
+ */
+ public void addArguments(List<String> arguments) {
+ if (null == this.arguments) {
+ this.arguments = arguments;
+ } else {
+ this.arguments.addAll(arguments);
+ }
+ }
+
+ /**
* Sets an optional set of arguments to pass to the executable.
*
* @param arguments an optional set of arguments to pass to the executable
*/
- public void setArguments(@Nullable String[] arguments) {
+ public void setArguments(@Nullable
+ String[] arguments) {
this.arguments = new ArrayList<String>(Arrays.asList(arguments));
}
@@ -157,7 +175,8 @@ public class ProcessExecution {
*
* @param environmentVariables an optional set of environment variables to pass to the process
*/
- public void setEnvironmentVariables(@Nullable Map<String, String> environmentVariables) {
+ public void setEnvironmentVariables(@Nullable
+ Map<String, String> environmentVariables) {
this.environmentVariables = environmentVariables;
}
@@ -172,7 +191,8 @@ public class ProcessExecution {
*
* @param workingDirectory The directory the process should get as working directory.
*/
- public void setWorkingDirectory(@Nullable String workingDirectory) {
+ public void setWorkingDirectory(@Nullable
+ String workingDirectory) {
this.workingDirectory = workingDirectory;
}
diff --git a/modules/enterprise/server/itests-2/pom.xml b/modules/enterprise/server/itests-2/pom.xml
index 4def101..20df0ad 100644
--- a/modules/enterprise/server/itests-2/pom.xml
+++ b/modules/enterprise/server/itests-2/pom.xml
@@ -645,7 +645,7 @@
<dependency>
<groupId>org.rhq</groupId>
- <artifactId>rhq-cassandra-ccm-arquiliian</artifactId>
+ <artifactId>rhq-cassandra-ccm-arquillian</artifactId>
<version>${project.version}</version>
<scope>test</scope>
</dependency>
commit f7d5f3dfa58601d185196944b1e2b3fda7b95462
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Thu May 16 15:27:36 2013 -0400
Add the actual Process to the ProcessExecutionResults and
ProcessExecutorResults. This allows the caller to inspect or destroy
the process if necessary.
Can be useful when spawning a process that does not run to completion, like
a Cassandra instance, and then stopping it later.
diff --git a/modules/core/native-system/src/main/java/org/rhq/core/system/JavaSystemInfo.java b/modules/core/native-system/src/main/java/org/rhq/core/system/JavaSystemInfo.java
index 61ede0c..4659945 100644
--- a/modules/core/native-system/src/main/java/org/rhq/core/system/JavaSystemInfo.java
+++ b/modules/core/native-system/src/main/java/org/rhq/core/system/JavaSystemInfo.java
@@ -198,6 +198,7 @@ public class JavaSystemInfo implements SystemInfo {
ProcessExecutorResults javaExecResults = javaExec.execute(process);
executionResults.setExitCode(javaExecResults.getExitCode());
executionResults.setError(javaExecResults.getError());
+ executionResults.setProcess(javaExecResults.getProcess());
return executionResults;
}
diff --git a/modules/core/native-system/src/main/java/org/rhq/core/system/ProcessExecutionResults.java b/modules/core/native-system/src/main/java/org/rhq/core/system/ProcessExecutionResults.java
index a8e8023..65dc41d 100644
--- a/modules/core/native-system/src/main/java/org/rhq/core/system/ProcessExecutionResults.java
+++ b/modules/core/native-system/src/main/java/org/rhq/core/system/ProcessExecutionResults.java
@@ -1,25 +1,25 @@
- /*
- * RHQ Management Platform
- * Copyright (C) 2005-2008 Red Hat, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation, and/or the GNU Lesser
- * General Public License, version 2.1, also as published by the Free
- * Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License and the GNU Lesser General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * and the GNU Lesser General Public License along with this program;
- * if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
+/*
+ * RHQ Management Platform
+ * Copyright (C) 2005-2008 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation, and/or the GNU Lesser
+ * General Public License, version 2.1, also as published by the Free
+ * Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License and the GNU Lesser General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * and the GNU Lesser General Public License along with this program;
+ * if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
package org.rhq.core.system;
import java.io.ByteArrayOutputStream;
@@ -33,6 +33,7 @@ public class ProcessExecutionResults {
private Integer exitCode;
private Throwable error;
private ByteArrayOutputStream output;
+ private Process process;
/**
* If the process finished, this is its exit code. Its numeric value has specific meaning that is custom to the
@@ -84,6 +85,17 @@ public class ProcessExecutionResults {
this.output = output;
}
+ /**
+     * @return The spawned Process. Typically used only to destroy the process, if necessary.
+ */
+ public Process getProcess() {
+ return process;
+ }
+
+ public void setProcess(Process process) {
+ this.process = process;
+ }
+
@Override
public String toString() {
return "ProcessExecutionResults: exit-code=[" + exitCode + "], error=[" + error + "]";
diff --git a/modules/core/util/src/main/java/org/rhq/core/util/exec/ProcessExecutor.java b/modules/core/util/src/main/java/org/rhq/core/util/exec/ProcessExecutor.java
index e74bcce..eb64ecb 100644
--- a/modules/core/util/src/main/java/org/rhq/core/util/exec/ProcessExecutor.java
+++ b/modules/core/util/src/main/java/org/rhq/core/util/exec/ProcessExecutor.java
@@ -68,8 +68,8 @@ public class ProcessExecutor {
ProcessExecutorResults results = new ProcessExecutorResults();
try {
- Integer exitCode = startProgram(processToStart);
- results.setExitCode(exitCode);
+ startProgram(processToStart, results);
+
} catch (Throwable t) {
results.setError(t);
}
@@ -89,7 +89,7 @@ public class ProcessExecutor {
* start the process but not wait or was to wait and the wait time expired before the process exited
* @throws Exception if any error occurs while trying to start the child process
*/
- protected Integer startProgram(final ProcessToStart process) throws Exception {
+ protected void startProgram(final ProcessToStart process, ProcessExecutorResults results) throws Exception {
// prepare the process command line and environment
String[] cmdline = getCommandLine(process);
File workingDir = getWorkingDirectory(process);
@@ -97,6 +97,7 @@ public class ProcessExecutor {
// execute the program
final Process childProcess = Runtime.getRuntime().exec(cmdline, environment, workingDir);
+ results.setProcess(childProcess);
// redirect the program's streams
final RedirectThreads redirect = redirectAllStreams(process, childProcess);
@@ -138,7 +139,8 @@ public class ProcessExecutor {
}
}
- return exitCode;
+ results.setExitCode(exitCode);
+ ;
}
/**
diff --git a/modules/core/util/src/main/java/org/rhq/core/util/exec/ProcessExecutorResults.java b/modules/core/util/src/main/java/org/rhq/core/util/exec/ProcessExecutorResults.java
index 63ed82e..fc8fcbd 100644
--- a/modules/core/util/src/main/java/org/rhq/core/util/exec/ProcessExecutorResults.java
+++ b/modules/core/util/src/main/java/org/rhq/core/util/exec/ProcessExecutorResults.java
@@ -1,25 +1,25 @@
- /*
- * RHQ Management Platform
- * Copyright (C) 2005-2008 Red Hat, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation, and/or the GNU Lesser
- * General Public License, version 2.1, also as published by the Free
- * Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License and the GNU Lesser General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * and the GNU Lesser General Public License along with this program;
- * if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
+/*
+ * RHQ Management Platform
+ * Copyright (C) 2005-2008 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation, and/or the GNU Lesser
+ * General Public License, version 2.1, also as published by the Free
+ * Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License and the GNU Lesser General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * and the GNU Lesser General Public License along with this program;
+ * if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
package org.rhq.core.util.exec;
/**
@@ -32,6 +32,7 @@ package org.rhq.core.util.exec;
public class ProcessExecutorResults {
private Integer exitCode;
private Throwable error;
+ private Process process;
/**
* The exit code of the process. Note that if the {@link ProcessToStart} did not want to wait for the process to
@@ -62,6 +63,17 @@ public class ProcessExecutorResults {
error = t;
}
+ /**
+     * @return The spawned Process. Typically used only to destroy the process, if necessary.
+ */
+ public Process getProcess() {
+ return process;
+ }
+
+ public void setProcess(Process process) {
+ this.process = process;
+ }
+
public String toString() {
return "ProcessExecResults: exit-code=[" + exitCode + "], error=[" + error + "]";
}
commit 7dec4afb33f177b8b1b6adb5e9b26082d80a8101
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Thu May 16 15:22:23 2013 -0400
Fix issues in DiscoveryBossBeanTest that could cause test interaction
problems.
- Incorporate test class name into plugin/entity naming
- use unusual ids
- delete entities based on unique naming, not in ways that could wipe another
test class's data
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/discovery/DiscoveryBossBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/discovery/DiscoveryBossBeanTest.java
index 04c3417..e4dbe18 100644
--- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/discovery/DiscoveryBossBeanTest.java
+++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/discovery/DiscoveryBossBeanTest.java
@@ -111,11 +111,11 @@ public class DiscoveryBossBeanTest extends AbstractEJB3Test {
initDB();
- platformType = getEntityManager().find(ResourceType.class, 1);
- serverType = getEntityManager().find(ResourceType.class, 2);
- serviceType1 = getEntityManager().find(ResourceType.class, 3);
- serviceType2 = getEntityManager().find(ResourceType.class, 4);
- agent = getEntityManager().find(Agent.class, 1);
+ platformType = getEntityManager().find(ResourceType.class, 15641);
+ serverType = getEntityManager().find(ResourceType.class, 15642);
+ serviceType1 = getEntityManager().find(ResourceType.class, 15643);
+ serviceType2 = getEntityManager().find(ResourceType.class, 15644);
+ agent = getEntityManager().find(Agent.class, 15641);
agentServiceContainer = prepareForTestAgents();
agentServiceContainer.discoveryService = Mockito.mock(DiscoveryAgentService.class);
@@ -128,8 +128,8 @@ public class DiscoveryBossBeanTest extends AbstractEJB3Test {
ResourceType resourceType = (ResourceType) invocation.getArguments()[0];
resource.setResourceType(resourceType);
long randomLong = UUID.randomUUID().getLeastSignificantBits();
- resource.setResourceKey(String.valueOf("key-" + randomLong));
- resource.setName("name-" + randomLong);
+ resource.setResourceKey(prefix("key-" + randomLong));
+ resource.setName(prefix("name-" + randomLong));
int parentResourceId = (Integer) invocation.getArguments()[1];
Resource parentResource = resourceManager.getResource(subjectManager.getOverlord(), parentResourceId);
resource.setParentResource(parentResource);
@@ -156,11 +156,11 @@ public class DiscoveryBossBeanTest extends AbstractEJB3Test {
public void testBasicInventoryReport() throws Exception {
InventoryReport inventoryReport = new InventoryReport(agent);
- Resource platform = new Resource("alpha", "platform", platformType);
- Resource server = new Resource("bravo", "server", serverType);
+ Resource platform = new Resource(prefix("alpha"), prefix("platform"), platformType);
+ Resource server = new Resource(prefix("bravo"), prefix("server"), serverType);
platform.addChildResource(server);
- Resource service1 = new Resource("charlie", "service 1", serviceType1);
- Resource service2 = new Resource("delta", "service 2", serviceType2);
+ Resource service1 = new Resource(prefix("charlie"), prefix("service 1"), serviceType1);
+ Resource service2 = new Resource(prefix("delta"), prefix("service 2"), serviceType2);
server.addChildResource(service1);
server.addChildResource(service2);
@@ -182,7 +182,7 @@ public class DiscoveryBossBeanTest extends AbstractEJB3Test {
public void testUpdateInventoryReport() throws Exception {
// First just submit the platform
InventoryReport inventoryReport = new InventoryReport(agent);
- Resource platform = new Resource("alpha", "platform", platformType);
+ Resource platform = new Resource(prefix("alpha"), prefix("platform"), platformType);
platform.setUuid(String.valueOf(new Random().nextInt()));
inventoryReport.addAddedRoot(platform);
MergeInventoryReportResults results = discoveryBoss.mergeInventoryReport(serialize(inventoryReport));
@@ -195,10 +195,10 @@ public class DiscoveryBossBeanTest extends AbstractEJB3Test {
// Now submit the server and its children as an update report
inventoryReport = new InventoryReport(agent);
- Resource server = new Resource("bravo", "server", serverType);
+ Resource server = new Resource(prefix("bravo"), prefix("server"), serverType);
platform.addChildResource(server);
- Resource service1 = new Resource("charlie", "service 1", serviceType1);
- Resource service2 = new Resource("delta", "service 2", serviceType2);
+ Resource service1 = new Resource(prefix("charlie"), prefix("service 1"), serviceType1);
+ Resource service2 = new Resource(prefix("delta"), prefix("service 2"), serviceType2);
server.addChildResource(service1);
server.addChildResource(service2);
@@ -219,10 +219,10 @@ public class DiscoveryBossBeanTest extends AbstractEJB3Test {
public void testManuallyAddResource() throws Exception {
InventoryReport inventoryReport = new InventoryReport(agent);
- Resource platform = new Resource("alpha", "platform", platformType);
- Resource server = new Resource("bravo", "server", serverType);
+ Resource platform = new Resource(prefix("alpha"), prefix("platform"), platformType);
+ Resource server = new Resource(prefix("bravo"), prefix("server"), serverType);
platform.addChildResource(server);
- Resource service2 = new Resource("delta", "service 2", serviceType2);
+ Resource service2 = new Resource(prefix("delta"), prefix("service 2"), serviceType2);
server.addChildResource(service2);
platform.setUuid(String.valueOf(new Random().nextInt()));
@@ -258,10 +258,10 @@ public class DiscoveryBossBeanTest extends AbstractEJB3Test {
InventoryReport inventoryReport = new InventoryReport(agent);
- Resource platform = new Resource("platform", "platform", platformType);
+ Resource platform = new Resource(prefix("platform"), prefix("platform"), platformType);
platform.setUuid(String.valueOf(new Random().nextInt()));
for (int i = 0; i < 17; i++) {
- String serverString = "server " + String.valueOf(i);
+ String serverString = prefix("server " + String.valueOf(i));
Resource server = new Resource(serverString, serverString, serverType);
server.setUuid(String.valueOf(new Random().nextInt()));
platform.addChildResource(server);
@@ -333,9 +333,9 @@ public class DiscoveryBossBeanTest extends AbstractEJB3Test {
// create an inventory report with a platform and a server - the server will be of the ignored type
InventoryReport inventoryReport = new InventoryReport(agent);
- Resource platform = new Resource("platform", "platform", platformType);
+ Resource platform = new Resource(prefix("platform"), prefix("platform"), platformType);
platform.setUuid(String.valueOf(new Random().nextInt()));
- Resource server = new Resource("server0", "server0", serverType);
+ Resource server = new Resource(prefix("server0"), prefix("server0"), serverType);
server.setUuid(String.valueOf(new Random().nextInt()));
platform.addChildResource(server);
inventoryReport.addAddedRoot(platform);
@@ -365,9 +365,9 @@ public class DiscoveryBossBeanTest extends AbstractEJB3Test {
// create an inventory report with just a platform and a server
InventoryReport inventoryReport = new InventoryReport(agent);
- Resource platform = new Resource("platform", "platform", platformType);
+ Resource platform = new Resource(prefix("platform"), prefix("platform"), platformType);
platform.setUuid(String.valueOf(new Random().nextInt()));
- Resource server = new Resource("server0", "server0", serverType);
+ Resource server = new Resource(prefix("server0"), prefix("server0"), serverType);
server.setUuid(String.valueOf(new Random().nextInt()));
platform.addChildResource(server);
inventoryReport.addAddedRoot(platform);
@@ -391,10 +391,10 @@ public class DiscoveryBossBeanTest extends AbstractEJB3Test {
// First create an inventory report for a new platform with servers - nothing is ignored yet
InventoryReport inventoryReport = new InventoryReport(agent);
- Resource platform = new Resource("platform", "platform", platformType);
+ Resource platform = new Resource(prefix("platform"), prefix("platform"), platformType);
platform.setUuid(String.valueOf(new Random().nextInt()));
for (int i = 0; i < 17; i++) {
- String serverString = "server " + String.valueOf(i);
+ String serverString = prefix("server " + String.valueOf(i));
Resource server = new Resource(serverString, serverString, serverType);
server.setUuid(String.valueOf(new Random().nextInt()));
platform.addChildResource(server);
@@ -500,7 +500,8 @@ public class DiscoveryBossBeanTest extends AbstractEJB3Test {
Query q;
List<?> doomed;
- q = em.createQuery("SELECT r FROM Resource r WHERE r.resourceType.id <= 4 ORDER BY r.id DESC");
+ q = em.createQuery("SELECT r FROM Resource r WHERE r.resourceType.name LIKE '" + getPrefix()
+ + "%' ORDER BY r.id DESC");
doomed = q.getResultList();
for (Object removeMe : doomed) {
Resource res = em.getReference(Resource.class, ((Resource) removeMe).getId());
@@ -558,7 +559,14 @@ public class DiscoveryBossBeanTest extends AbstractEJB3Test {
}
String getDataSetFile() {
- return getClass().getSimpleName() + ".xml";
+ return getPrefix() + ".xml";
}
+ String prefix(String suffix) {
+ return getPrefix() + "-" + suffix;
+ }
+
+ String getPrefix() {
+ return getClass().getSimpleName();
+ }
}
diff --git a/modules/enterprise/server/itests-2/src/test/resources/org/rhq/enterprise/server/discovery/DiscoveryBossBeanTest.xml b/modules/enterprise/server/itests-2/src/test/resources/org/rhq/enterprise/server/discovery/DiscoveryBossBeanTest.xml
index 4891276..aeaa239 100644
--- a/modules/enterprise/server/itests-2/src/test/resources/org/rhq/enterprise/server/discovery/DiscoveryBossBeanTest.xml
+++ b/modules/enterprise/server/itests-2/src/test/resources/org/rhq/enterprise/server/discovery/DiscoveryBossBeanTest.xml
@@ -1,5 +1,5 @@
<dataset>
- <rhq_agent id="1"
+ <rhq_agent id="15641"
name="TestAgent-dbbt"
address="0.0.0.0"
port="16163"
@@ -19,49 +19,49 @@
<rhq_config_property/>
<rhq_config_template/>
- <rhq_resource_type id="1"
- name="test platform"
+ <rhq_resource_type id="15641"
+ name="DiscoveryBossBeanTest-test platform"
category="PLATFORM"
creation_data_type="CONFIGURATION"
create_delete_policy="BOTH"
supports_manual_add="1"
singleton="0"
- plugin="test"
+ plugin="DiscoveryBossBeanTest-test"
deleted="0"/>
- <rhq_resource_type id="2"
- name="test server"
+ <rhq_resource_type id="15642"
+ name="DiscoveryBossBeanTest-test server"
category="SERVER"
creation_data_type="CONFIGURATION"
create_delete_policy="BOTH"
supports_manual_add="1"
singleton="0"
- plugin="test"
+ plugin="DiscoveryBossBeanTest-test"
deleted="0"/>
- <rhq_resource_type id="3"
- name="test service 1"
+ <rhq_resource_type id="15643"
+ name="DiscoveryBossBeanTest-test service 1"
category="SERVICE"
creation_data_type="CONFIGURATION"
create_delete_policy="BOTH"
supports_manual_add="1"
singleton="0"
- plugin="test"
+ plugin="DiscoveryBossBeanTest-test"
deleted="0"/>
- <rhq_resource_type id="4"
- name="test service 2"
+ <rhq_resource_type id="15644"
+ name="DiscoveryBossBeanTest-test service 2"
category="SERVICE"
creation_data_type="CONFIGURATION"
create_delete_policy="BOTH"
supports_manual_add="1"
singleton="1"
- plugin="test"
+ plugin="DiscoveryBossBeanTest-test"
deleted="0"/>
- <rhq_resource_type_parents resource_type_id="2"
- parent_resource_type_id="1"/>
- <rhq_resource_type_parents resource_type_id="3"
- parent_resource_type_id="2"/>
- <rhq_resource_type_parents resource_type_id="4"
- parent_resource_type_id="2"/>
+ <rhq_resource_type_parents resource_type_id="15642"
+ parent_resource_type_id="15641"/>
+ <rhq_resource_type_parents resource_type_id="15643"
+ parent_resource_type_id="15642"/>
+ <rhq_resource_type_parents resource_type_id="15644"
+ parent_resource_type_id="15642"/>
<rhq_process_scan/>
<rhq_event_def/>
@@ -85,9 +85,9 @@
<rhq_bundle/>
<rhq_drift_def_template/>
- <rhq_plugin id="1"
+ <rhq_plugin id="15641"
deployment="AGENT"
- name="test"
+ name="DiscoveryBossBeanTest-test"
display_name="test"
enabled="1"
status="INSTALLED"
commit bef844987458f9c2918cdce9a0bebb5b8d521d1a
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Mon May 13 16:08:47 2013 -0400
just some name changes to distinguish templates and maybe make things more clear.
diff --git a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java
index 76c4e28..ce14f17 100644
--- a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java
+++ b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java
@@ -49,7 +49,7 @@ import org.rhq.enterprise.server.resource.ResourceTypeManagerLocal;
import org.rhq.enterprise.server.util.LookupUtil;
/**
- * An alert def server-side plugin component that the server uses to inject alert defintions.
+ * An alert definition server-side plugin component that the server uses to inject "factory-installed" alert definitions.
*
* @author Jay Shaughnessy
*/
@@ -57,18 +57,18 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone
private final Log log = LogFactory.getLog(AlertDefinitionServerPluginComponent.class);
- static private final List<InjectedAlertDef> injectedAlertDefs;
- static private final InjectedAlertDef testTemplate;
+ static private final List<InjectedTemplate> injectedTemplates;
+ static private final InjectedTemplate testTemplate;
static {
- testTemplate = new InjectedAlertDef( //
+ testTemplate = new InjectedTemplate( //
"RHQAgent", //
"RHQ Agent", //
"TestTemplate", //
"A test template injection");
- injectedAlertDefs = new ArrayList<InjectedAlertDef>();
- injectedAlertDefs.add(testTemplate);
+ injectedTemplates = new ArrayList<InjectedTemplate>();
+ injectedTemplates.add(testTemplate);
}
private ServerPluginContext context;
@@ -111,12 +111,12 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone
try {
if (name.equals("listInjectedAlertDefinitions")) {
PropertyList result = new PropertyList("injectedAlertDefinitions");
- for (InjectedAlertDef iad : injectedAlertDefs) {
+ for (InjectedTemplate iad : injectedTemplates) {
PropertyMap map = new PropertyMap("injectedAlertDefinition");
- map.put(new PropertySimple(InjectedAlertDef.FIELD_PLUGIN_NAME, iad.getPluginName()));
- map.put(new PropertySimple(InjectedAlertDef.FIELD_RESOURCE_TYPE_NAME, iad.getResourceTypeName()));
- map.put(new PropertySimple(InjectedAlertDef.FIELD_NAME, iad.getName()));
- map.put(new PropertySimple(InjectedAlertDef.FIELD_DESCRIPTION, iad.getDescription()));
+ map.put(new PropertySimple(InjectedTemplate.FIELD_PLUGIN_NAME, iad.getPluginName()));
+ map.put(new PropertySimple(InjectedTemplate.FIELD_RESOURCE_TYPE_NAME, iad.getResourceTypeName()));
+ map.put(new PropertySimple(InjectedTemplate.FIELD_NAME, iad.getName()));
+ map.put(new PropertySimple(InjectedTemplate.FIELD_DESCRIPTION, iad.getDescription()));
result.add(map);
}
controlResults.getComplexResults().put(result);
@@ -125,12 +125,12 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone
injectAllAlertDefs(Boolean.valueOf(parameters.getSimpleValue("replaceIfExists")));
} else if (name.equals("injectAlertDefinition")) {
- InjectedAlertDef requestedInjection = new InjectedAlertDef( //
- parameters.getSimpleValue(InjectedAlertDef.FIELD_PLUGIN_NAME), //
- parameters.getSimpleValue(InjectedAlertDef.FIELD_RESOURCE_TYPE_NAME), //
- parameters.getSimpleValue(InjectedAlertDef.FIELD_NAME), null);
+ InjectedTemplate requestedInjection = new InjectedTemplate( //
+ parameters.getSimpleValue(InjectedTemplate.FIELD_PLUGIN_NAME), //
+ parameters.getSimpleValue(InjectedTemplate.FIELD_RESOURCE_TYPE_NAME), //
+ parameters.getSimpleValue(InjectedTemplate.FIELD_NAME), null);
boolean injected = false;
- for (InjectedAlertDef iad : injectedAlertDefs) {
+ for (InjectedTemplate iad : injectedTemplates) {
if (iad.equals(requestedInjection)) {
injectAlertDef(iad, Boolean.valueOf(parameters.getSimpleValue("replaceIfExists", "false")));
injected = true;
@@ -157,7 +157,7 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone
List<AlertDefinition> result = new ArrayList<AlertDefinition>();
- for (InjectedAlertDef iad : injectedAlertDefs) {
+ for (InjectedTemplate iad : injectedTemplates) {
AlertDefinition newAlertDef = injectAlertDef(iad, replaceIfExists);
if (null != newAlertDef) {
result.add(newAlertDef);
@@ -167,7 +167,7 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone
return result;
}
- private AlertDefinition injectAlertDef(InjectedAlertDef injectedAlertDef, boolean replaceIfExists) {
+ private AlertDefinition injectAlertDef(InjectedTemplate injectedAlertDef, boolean replaceIfExists) {
AlertDefinition result = null;
ResourceTypeManagerLocal typeManager = LookupUtil.getResourceTypeManager();
@@ -243,7 +243,7 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone
return newTemplateId;
}
- private static class InjectedAlertDef {
+ private static class InjectedTemplate {
static public final String FIELD_PLUGIN_NAME = "plugin";
static public final String FIELD_RESOURCE_TYPE_NAME = "type";
static public final String FIELD_NAME = "name";
@@ -254,7 +254,7 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone
private String name;
private String description;
- public InjectedAlertDef(String pluginName, String resourceTypeName, String name, String description) {
+ public InjectedTemplate(String pluginName, String resourceTypeName, String name, String description) {
super();
this.pluginName = pluginName;
this.resourceTypeName = resourceTypeName;
@@ -296,7 +296,7 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone
return false;
if (getClass() != obj.getClass())
return false;
- InjectedAlertDef other = (InjectedAlertDef) obj;
+ InjectedTemplate other = (InjectedTemplate) obj;
if (name == null) {
if (other.name != null)
return false;
11 years
[rhq] modules/enterprise modules/integration-tests
by Heiko W. Rupp
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/AbstractRestBean.java | 49 +++
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/AlertDefinitionHandlerBean.java | 25 +
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/AlertHandlerBean.java | 12
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/EventHandlerBean.java | 57 +++-
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/GroupHandlerBean.java | 7
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/OperationsHandlerBean.java | 16 -
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java | 58 +---
modules/enterprise/server/jar/src/test/java/org/rhq/enterprise/server/configuration/ConfigurationHelperTest.java | 2
modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/AlertTest.java | 23 +
modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/EventTest.java | 129 ++++++++++
modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/GroupTest.java | 11
modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ResourcesTest.java | 25 +
12 files changed, 347 insertions(+), 67 deletions(-)
New commits:
commit 6742d1fda892047e45a0b5cffe9df42957318a54
Author: Heiko W. Rupp <hwr(a)redhat.com>
Date: Wed May 22 14:12:32 2013 +0200
Enable paging on (some) endpoints that return lists
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/AbstractRestBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/AbstractRestBean.java
index 0038099..8e4a826 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/AbstractRestBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/AbstractRestBean.java
@@ -59,6 +59,8 @@ import org.rhq.core.domain.resource.Resource;
import org.rhq.core.domain.resource.ResourceType;
import org.rhq.core.domain.resource.group.GroupCategory;
import org.rhq.core.domain.resource.group.ResourceGroup;
+import org.rhq.core.domain.util.PageControl;
+import org.rhq.core.domain.util.PageList;
import org.rhq.enterprise.server.resource.ResourceManagerLocal;
import org.rhq.enterprise.server.resource.group.ResourceGroupManagerLocal;
import org.rhq.enterprise.server.rest.domain.GroupRest;
@@ -419,6 +421,53 @@ public class AbstractRestBean {
}
/**
+ * Create the paging headers for collections and attach them to the passed builder. Those are represented as
+ * <i>Link:</i> http headers that carry the URL for the pages and the respective relation.
+ * <br/>In addition an <i>X-collection-size</i> header is created that contains the whole collection size.
+ * @param builder The ResponseBuilder that receives the headers
+ * @param uriInfo The uriInfo of the incoming request to build the urls
+ * @param resultList The collection with its paging information
+ */
+ protected void createPagingHeader(final Response.ResponseBuilder builder, final UriInfo uriInfo, final PageList<?> resultList) {
+
+ UriBuilder uriBuilder;
+
+ PageControl pc = resultList.getPageControl();
+ int page = pc.getPageNumber();
+
+ if (resultList.getTotalSize()> (pc.getPageNumber() +1 ) * pc.getPageSize()) {
+ int nextPage = page+1;
+ uriBuilder = uriInfo.getRequestUriBuilder(); // adds ?q, ?ps and ?category if needed
+ uriBuilder.replaceQueryParam("page",nextPage);
+
+ builder.header("Link",new Link("next",uriBuilder.build().toString()));
+ }
+
+ if (page>0) {
+ int prevPage = page -1;
+ uriBuilder = uriInfo.getRequestUriBuilder(); // adds ?q, ?ps and ?category if needed
+ uriBuilder.replaceQueryParam("page",prevPage);
+ builder.header("Link", new Link("prev",uriBuilder.build().toString()));
+ }
+
+ // A link to the last page
+ if (!pc.isUnlimited()) {
+ int lastPage = resultList.getTotalSize() / pc.getPageSize();
+ uriBuilder = uriInfo.getRequestUriBuilder(); // adds ?q, ?ps and ?category if needed
+ uriBuilder.replaceQueryParam("page",lastPage);
+ builder.header("Link", new Link("last",uriBuilder.build().toString()));
+ }
+
+ // A link to the current page
+ uriBuilder = uriInfo.getRequestUriBuilder(); // adds ?q, ?ps and ?category if needed
+ builder.header("Link", new Link("current",uriBuilder.build().toString()));
+
+
+ // Create a total size header
+ builder.header("X-collection-size",resultList.getTotalSize());
+ }
+
+ /**
* Fetch the group with the passed id
*
* @param groupId id of the resource group
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/AlertDefinitionHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/AlertDefinitionHandlerBean.java
index d203cd3..6ea3d03 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/AlertDefinitionHandlerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/AlertDefinitionHandlerBean.java
@@ -77,6 +77,7 @@ import org.rhq.core.domain.criteria.AlertDefinitionCriteria;
import org.rhq.core.domain.measurement.MeasurementDefinition;
import org.rhq.core.domain.resource.ResourceType;
import org.rhq.core.domain.resource.group.ResourceGroup;
+import org.rhq.core.domain.util.PageList;
import org.rhq.enterprise.server.RHQConstants;
import org.rhq.enterprise.server.alert.AlertConditionManagerLocal;
import org.rhq.enterprise.server.alert.AlertDefinitionManagerLocal;
@@ -143,21 +144,31 @@ public class AlertDefinitionHandlerBean extends AbstractRestBean {
@GZIP
@GET
@Path("/definitions")
- @ApiOperation("List all Alert Definition")
- public List<AlertDefinitionRest> listAlertDefinitions(
- @ApiParam(value = "Page number", defaultValue = "0") @QueryParam("page") int page,
- @ApiParam(value = "Limit to status, UNUSED AT THE MOMENT ") @QueryParam("status") String status,
+ @ApiOperation(value = "List all Alert Definition", responseClass = "AlertDefinitionRest", multiValueResponse = true)
+ public Response listAlertDefinitions(
+ @ApiParam(value = "Page number") @QueryParam("page") Integer page,
+ @ApiParam(value = "Page size") @DefaultValue("20") @QueryParam("ps") int pageSize,
+ @ApiParam(value = "Limit to status, UNUSED AT THE MOMENT ") @QueryParam("status") String status, // TODO
@Context UriInfo uriInfo) {
AlertDefinitionCriteria criteria = new AlertDefinitionCriteria();
- criteria.setPaging(page,20); // TODO add link to next page
- List<AlertDefinition> defs = alertDefinitionManager.findAlertDefinitionsByCriteria(caller, criteria);
+ if (page!=null) {
+ criteria.setPaging(page,pageSize);
+ }
+
+ PageList<AlertDefinition> defs = alertDefinitionManager.findAlertDefinitionsByCriteria(caller, criteria);
List<AlertDefinitionRest> ret = new ArrayList<AlertDefinitionRest>(defs.size());
for (AlertDefinition def : defs) {
AlertDefinitionRest adr = definitionToDomain(def, false, uriInfo);
ret.add(adr);
}
- return ret;
+
+ Response.ResponseBuilder builder = Response.ok(ret);
+ createPagingHeader(builder,uriInfo,defs);
+
+ // TODO media type etc
+
+ return builder.build();
}
@GET
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/AlertHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/AlertHandlerBean.java
index 4c80bd1..5da3fb9 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/AlertHandlerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/AlertHandlerBean.java
@@ -90,10 +90,10 @@ public class AlertHandlerBean extends AbstractRestBean {
@ApiErrors({
@ApiError(code = 406, reason = "There are 'resourceId' and 'definitionId' passed as query parameters"),
@ApiError(code = 406, reason = "Page size was 0"),
- @ApiError(code = 406, reason = "Page number was < 1")
+ @ApiError(code = 406, reason = "Page number was < 0")
})
public Response listAlerts(
- @ApiParam(value = "Page number") @QueryParam("page") @DefaultValue("1") int page,
+ @ApiParam(value = "Page number") @QueryParam("page") @DefaultValue("0") int page,
@ApiParam(value = "Page size; use -1 for 'unlimited'") @QueryParam("size") @DefaultValue("100")int size,
@ApiParam(value = "Limit to priority", allowableValues = "High, Medium, Low, All") @DefaultValue("All") @QueryParam("prio") String prio,
@ApiParam(value = "Should full resources and definitions be sent") @QueryParam("slim") @DefaultValue("false") boolean slim,
@@ -108,7 +108,7 @@ public class AlertHandlerBean extends AbstractRestBean {
if (size==0) {
throw new BadArgumentException("size","Must not be 0");
}
- if (page<1) {
+ if (page<0) {
throw new BadArgumentException("page","Must be >=1");
}
@@ -120,7 +120,7 @@ public class AlertHandlerBean extends AbstractRestBean {
criteria.setPageControl(pageControl);
}
else {
- criteria.setPaging(page-1, size); // TODO implement linking to next page
+ criteria.setPaging(page, size);
}
if (since!=null) {
@@ -157,6 +157,8 @@ public class AlertHandlerBean extends AbstractRestBean {
builder = Response.ok(entity);
}
+ createPagingHeader(builder,uriInfo,alerts);
+
return builder.build();
}
@@ -306,7 +308,7 @@ public class AlertHandlerBean extends AbstractRestBean {
@DELETE
@Path("/{id}")
- @ApiOperation(value = "Remove the alert from the lit of alerts")
+ @ApiOperation(value = "Remove the alert from the list of alerts")
public void purgeAlert(@ApiParam(value = "Id of the alert to remove") @PathParam("id") int id) {
alertManager.deleteAlerts(caller, new int[]{id});
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/EventHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/EventHandlerBean.java
index becb680..7402550 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/EventHandlerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/EventHandlerBean.java
@@ -32,6 +32,7 @@ import javax.persistence.EntityManager;
import javax.persistence.PersistenceContext;
import javax.persistence.Query;
import javax.ws.rs.DELETE;
+import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
@@ -44,6 +45,7 @@ import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Request;
import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
import com.wordnik.swagger.annotations.Api;
import com.wordnik.swagger.annotations.ApiError;
@@ -61,6 +63,7 @@ import org.rhq.core.domain.event.EventSource;
import org.rhq.core.domain.resource.Resource;
import org.rhq.core.domain.resource.ResourceType;
import org.rhq.core.domain.util.PageControl;
+import org.rhq.core.domain.util.PageList;
import org.rhq.enterprise.server.RHQConstants;
import org.rhq.enterprise.server.event.EventManagerLocal;
import org.rhq.enterprise.server.rest.domain.EventDefinitionRest;
@@ -227,9 +230,19 @@ public class EventHandlerBean extends AbstractRestBean {
@QueryParam("endTime") long endTime,
@ApiParam(value="Select the severity to display. Default is to show all",
allowableValues = "DEBUG, INFO, WARN, ERROR, FATAL") @QueryParam("severity") String severity,
- @Context Request request,
+ @ApiParam("Page size for paging") @QueryParam("ps") @DefaultValue("20") int pageSize,
+ @ApiParam("Page for paging, 0-based") @QueryParam("page") Integer page,
+ @Context UriInfo uriInfo,
@Context HttpHeaders headers) {
+ if (severity!=null) {
+ try {
+ EventSeverity.valueOf(severity.toUpperCase());
+ } catch (Exception e) {
+ throw new BadArgumentException("severity",severity + " is bad. Allowed values are DEBUG, INFO, WARN, ERROR, FATAL");
+ }
+ }
+
EventSource source = findEventSourceById(sourceId);
EventCriteria criteria = new EventCriteria();
@@ -240,13 +253,19 @@ public class EventHandlerBean extends AbstractRestBean {
if (endTime>0) {
criteria.addFilterEndTime(endTime);
}
- if (startTime==0 && endTime==0) {
+ if (page!=null) {
+ criteria.setPaging(page,pageSize);
+ }
+ else if (startTime==0 && endTime==0) {
PageControl pageControl = new PageControl();
pageControl.setPageSize(200);
criteria.setPageControl(pageControl);
}
+ if (severity!=null) {
+ criteria.addFilterSeverities(EventSeverity.valueOf(severity.toUpperCase()));
+ }
- Response.ResponseBuilder builder = getEventsAsBuilderForCriteria(headers, criteria);
+ Response.ResponseBuilder builder = getEventsAsBuilderForCriteria(headers, criteria, uriInfo);
return builder.build();
}
@@ -259,10 +278,21 @@ public class EventHandlerBean extends AbstractRestBean {
public Response getEventsForResource(@PathParam("id") int resourceId,
@QueryParam("startTime") long startTime,
@QueryParam("endTime") long endTime,
- @QueryParam("severity") String severity,
- @Context Request request,
+ @ApiParam("Page size for paging") @QueryParam("ps") @DefaultValue("20") int pageSize,
+ @ApiParam("Page for paging, 0-based") @QueryParam("page") Integer page,
+ @ApiParam(value="Select the severity to display. Default is to show all",
+ allowableValues = "DEBUG, INFO, WARN, ERROR, FATAL") @QueryParam("severity") String severity,
+ @Context UriInfo uriInfo,
@Context HttpHeaders headers) {
+ if (severity!=null) {
+ try {
+ EventSeverity.valueOf(severity.toUpperCase());
+ } catch (Exception e) {
+ throw new BadArgumentException("severity",severity + " is bad. Allowed values are DEBUG, INFO, WARN, ERROR, FATAL");
+ }
+ }
+
EventCriteria criteria = new EventCriteria();
criteria.addFilterResourceId(resourceId);
if (startTime>0) {
@@ -271,13 +301,19 @@ public class EventHandlerBean extends AbstractRestBean {
if (endTime>0) {
criteria.addFilterEndTime(endTime);
}
- if (startTime==0 && endTime==0) {
+ if (page!=null) {
+ criteria.setPaging(page,pageSize);
+ }
+ else if (startTime==0 && endTime==0) {
PageControl pageControl = new PageControl();
pageControl.setPageSize(200);
criteria.setPageControl(pageControl);
}
+ if (severity!=null) {
+ criteria.addFilterSeverities(EventSeverity.valueOf(severity.toUpperCase()));
+ }
- Response.ResponseBuilder builder = getEventsAsBuilderForCriteria(headers, criteria);
+ Response.ResponseBuilder builder = getEventsAsBuilderForCriteria(headers, criteria, uriInfo);
return builder.build();
@@ -304,8 +340,8 @@ public class EventHandlerBean extends AbstractRestBean {
}
- private Response.ResponseBuilder getEventsAsBuilderForCriteria(HttpHeaders headers, EventCriteria criteria) {
- List<Event> eventList = eventManager.findEventsByCriteria(caller, criteria);
+ private Response.ResponseBuilder getEventsAsBuilderForCriteria(HttpHeaders headers, EventCriteria criteria, UriInfo uriInfo) {
+ PageList<Event> eventList = eventManager.findEventsByCriteria(caller, criteria);
List<EventRest> restEvents = new ArrayList<EventRest>(eventList.size());
for (Event event : eventList) {
restEvents.add(convertEvent(event));
@@ -320,6 +356,9 @@ public class EventHandlerBean extends AbstractRestBean {
else {
builder = Response.ok(restEvents, mediaType);
}
+
+ createPagingHeader(builder,uriInfo,eventList);
+
return builder;
}
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/GroupHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/GroupHandlerBean.java
index 6791bdf..be2419b 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/GroupHandlerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/GroupHandlerBean.java
@@ -116,12 +116,17 @@ public class GroupHandlerBean extends AbstractRestBean {
@Path("/")
@ApiOperation(value = "List all groups", multiValueResponse = true, responseClass = "GroupRest")
public Response getGroups(@ApiParam("String to search in the group name") @QueryParam("q") String q,
+ @ApiParam("Page size for paging") @QueryParam("ps") @DefaultValue("20") int pageSize,
+ @ApiParam("Page for paging, 0-based") @QueryParam("page") Integer page,
@Context HttpHeaders headers, @Context UriInfo uriInfo) {
ResourceGroupCriteria criteria = new ResourceGroupCriteria();
if (q!=null) {
criteria.addFilterName(q);
}
+ if (page!=null) {
+ criteria.setPaging(page,pageSize);
+ }
PageList<ResourceGroup> groups = resourceGroupManager.findResourceGroupsByCriteria(caller, criteria);
@@ -142,6 +147,8 @@ public class GroupHandlerBean extends AbstractRestBean {
builder.entity(ret);
}
+ createPagingHeader(builder,uriInfo,groups);
+
return builder.build();
}
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/OperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/OperationsHandlerBean.java
index 8bc2cb9..e32ea03 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/OperationsHandlerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/OperationsHandlerBean.java
@@ -29,6 +29,7 @@ import javax.ejb.Stateless;
import javax.interceptor.Interceptors;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
+import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
@@ -353,7 +354,6 @@ public class OperationsHandlerBean extends AbstractRestBean {
public Response outcome(
@ApiParam("Name of the submitted job.") @PathParam("id") String jobName,
@Context UriInfo uriInfo,
- @Context Request request,
@Context HttpHeaders httpHeaders) {
MediaType mediaType = httpHeaders.getAcceptableMediaTypes().get(0);
@@ -395,21 +395,26 @@ public class OperationsHandlerBean extends AbstractRestBean {
@Produces({MediaType.APPLICATION_JSON,MediaType.APPLICATION_XML,MediaType.TEXT_HTML})
public Response listHistory(
@ApiParam("Id of a resource to limit to") @QueryParam("resourceId") int resourceId,
+ @ApiParam("Page size for paging") @QueryParam("ps") @DefaultValue("20") int pageSize,
+ @ApiParam("Page for paging, 0-based") @QueryParam("page") Integer page,
@Context UriInfo uriInfo,
- @Context Request request,
@Context HttpHeaders httpHeaders) {
ResourceOperationHistoryCriteria criteria = new ResourceOperationHistoryCriteria();
if (resourceId>0) {
criteria.addFilterResourceIds(resourceId);
}
+ if (page!=null) {
+ criteria.setPaging(page,pageSize);
+ criteria.addSortStartTime(PageOrdering.ASC);
+ }
criteria.addSortEndTime(PageOrdering.DESC);
- PageList<ResourceOperationHistory> list = opsManager.findResourceOperationHistoriesByCriteria(caller, criteria);
+ PageList<ResourceOperationHistory> histories = opsManager.findResourceOperationHistoriesByCriteria(caller, criteria);
List<OperationHistoryRest> result = new ArrayList<OperationHistoryRest>();
- for (ResourceOperationHistory roh : list) {
+ for (ResourceOperationHistory roh : histories) {
OperationHistoryRest historyRest = historyToHistoryRest(roh,uriInfo);
result.add(historyRest);
}
@@ -422,6 +427,9 @@ public class OperationsHandlerBean extends AbstractRestBean {
GenericEntity<List<OperationHistoryRest>> res = new GenericEntity<List<OperationHistoryRest>>(result) {};
builder = Response.ok(res);
}
+
+ createPagingHeader(builder,uriInfo,histories);
+
return builder.build();
}
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java
index 70cc588..a783ac8 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java
@@ -211,7 +211,7 @@ public class ResourceHandlerBean extends AbstractRestBean {
}
PageList<Resource> ret = resMgr.findResourcesByCriteria(caller,criteria);
- Response.ResponseBuilder builder = getResponseBuilderForResourceList(headers,uriInfo,ret, page, pageSize);
+ Response.ResponseBuilder builder = getResponseBuilderForResourceList(headers,uriInfo,ret);
return builder.build();
}
@@ -221,13 +221,22 @@ public class ResourceHandlerBean extends AbstractRestBean {
@Path("/platforms")
@Cache(isPrivate = true,maxAge = 300)
@ApiOperation(value = "List all platforms in the system", multiValueResponse = true, responseClass = "ResourceWithType")
- public Response getPlatforms(@Context HttpHeaders headers,
- @Context UriInfo uriInfo) {
+ public Response getPlatforms(
+ @ApiParam("Page size for paging") @QueryParam("ps") @DefaultValue("20") int pageSize,
+ @ApiParam("Page for paging, 0-based") @QueryParam("page") Integer page,
+ @Context HttpHeaders headers, @Context UriInfo uriInfo) {
+
+ PageControl pc;
+ if (page!=null) {
+ pc = new PageControl(page,pageSize);
+ }
+ else {
+ pc = PageControl.getUnlimitedInstance();
+ }
- PageControl pc = new PageControl();
PageList<Resource> ret = resMgr.findResourcesByCategory(caller, ResourceCategory.PLATFORM,
InventoryStatus.COMMITTED, pc);
- Response.ResponseBuilder builder = getResponseBuilderForResourceList(headers, uriInfo, ret, null, 20);
+ Response.ResponseBuilder builder = getResponseBuilderForResourceList(headers, uriInfo, ret);
return builder.build();
}
@@ -239,13 +248,10 @@ public class ResourceHandlerBean extends AbstractRestBean {
* @param headers HttpHeaders from the request
* @param uriInfo Uri from the request
* @param resources List of resources
- * @param page Page of pageSize. If null, paging is ignored
- * @param pageSize number of elements on a page
* @return An initialized ResponseBuilder
*/
private Response.ResponseBuilder getResponseBuilderForResourceList(HttpHeaders headers, UriInfo uriInfo,
- PageList<Resource> resources, Integer page,
- int pageSize) {
+ PageList<Resource> resources) {
List<ResourceWithType> rwtList = new ArrayList<ResourceWithType>(resources.size());
for (Resource r : resources) {
putToCache(r.getId(), Resource.class, r);
@@ -256,25 +262,8 @@ public class ResourceHandlerBean extends AbstractRestBean {
MediaType mediaType = headers.getAcceptableMediaTypes().get(0);
Response.ResponseBuilder builder = Response.ok();
builder.type(mediaType);
- UriBuilder uriBuilder;
- if (page!=null) {
- // TODO look a the page control and check if there is a next page at all
- if (resources.getTotalSize()> page*pageSize) {
- int nextPage = page+1;
- uriBuilder = uriInfo.getRequestUriBuilder(); // adds ?q, ?ps and ?category if needed
- uriBuilder.replaceQueryParam("page",nextPage);
-
- builder.header("Link",new Link("next",uriBuilder.build().toString()));
- }
-
- if (page>1) {
- int prevPage = page -1;
- uriBuilder = uriInfo.getRequestUriBuilder(); // adds ?q, ?ps and ?category if needed
- uriBuilder.replaceQueryParam("page",prevPage);
- builder.header("prev",uriBuilder.build().toString());
- }
- }
+ createPagingHeader(builder,uriInfo,resources);
if (mediaType.equals(MediaType.TEXT_HTML_TYPE)) {
builder.entity(renderTemplate("listResourceWithType", rwtList));
@@ -294,7 +283,7 @@ public class ResourceHandlerBean extends AbstractRestBean {
@ApiError(code = 404, reason = NO_RESOURCE_FOR_ID)
public ResourceWithChildren getHierarchy(@ApiParam("Id of the resource to start with") @PathParam("id")int baseResourceId) {
// TODO optimize to do less recursion
- Resource start = obtainResource(baseResourceId);
+ Resource start = fetchResource(baseResourceId);
return getHierarchy(start);
}
@@ -425,7 +414,7 @@ public class ResourceHandlerBean extends AbstractRestBean {
if (avail.getResourceId() != resourceId)
throw new IllegalArgumentException("Resource Ids do not match");
- Resource resource = obtainResource(resourceId);
+ Resource resource = fetchResource(resourceId);
AvailabilityType at;
at = AvailabilityType.valueOf(avail.getType());
@@ -541,15 +530,6 @@ public class ResourceHandlerBean extends AbstractRestBean {
}
- private Resource obtainResource(int resourceId) {
- Resource resource = resMgr.getResource(caller, resourceId);
- if (resource == null) {
- resource = resMgr.getResource(caller, resourceId);
- if (resource != null)
- putToCache(resourceId, Resource.class, resource);
- }
- return resource;
- }
@GZIP
@AddLinks
@@ -565,7 +545,7 @@ public class ResourceHandlerBean extends AbstractRestBean {
criteria.addFilterResourceIds(resourceId);
- List<Alert> alerts = alertManager.findAlertsByCriteria(caller, criteria);
+ PageList<Alert> alerts = alertManager.findAlertsByCriteria(caller, criteria);
List<Link> links = new ArrayList<Link>(alerts.size());
for (Alert al : alerts) {
Link link = new Link();
diff --git a/modules/enterprise/server/jar/src/test/java/org/rhq/enterprise/server/configuration/ConfigurationHelperTest.java b/modules/enterprise/server/jar/src/test/java/org/rhq/enterprise/server/configuration/ConfigurationHelperTest.java
index 3a7e961..7e20630 100644
--- a/modules/enterprise/server/jar/src/test/java/org/rhq/enterprise/server/configuration/ConfigurationHelperTest.java
+++ b/modules/enterprise/server/jar/src/test/java/org/rhq/enterprise/server/configuration/ConfigurationHelperTest.java
@@ -703,7 +703,7 @@ public class ConfigurationHelperTest {
}
- @Test(enabled = false)
+ @Test
public void testConfigToMapComplexMapWithBadSetupLenient() throws Exception {
Configuration config = new Configuration();
diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/AlertTest.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/AlertTest.java
index fca0f50..e02a723 100644
--- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/AlertTest.java
+++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/AlertTest.java
@@ -23,6 +23,7 @@ import com.jayway.restassured.http.ContentType;
import com.jayway.restassured.path.xml.XmlPath;
import com.jayway.restassured.response.Response;
+import org.hamcrest.Matchers;
import org.junit.Test;
import org.rhq.modules.integrationTests.restApi.d.AlertCondition;
@@ -34,7 +35,10 @@ import org.rhq.modules.integrationTests.restApi.d.Group;
import static com.jayway.restassured.RestAssured.delete;
import static com.jayway.restassured.RestAssured.expect;
import static com.jayway.restassured.RestAssured.given;
+import static org.hamcrest.CoreMatchers.anyOf;
+import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.not;
+import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.emptyIterable;
import static org.hamcrest.Matchers.instanceOf;
@@ -54,6 +58,7 @@ public class AlertTest extends AbstractBase {
.header(acceptJson)
.expect()
.statusCode(200)
+ .log().ifError()
.when()
.get("/alert");
@@ -66,6 +71,7 @@ public class AlertTest extends AbstractBase {
.header(acceptXml)
.expect()
.statusCode(200)
+ .log().ifError()
.when()
.get("/alert");
}
@@ -95,6 +101,23 @@ public class AlertTest extends AbstractBase {
}
@Test
+ public void testListAlertsWithPaging() throws Exception {
+
+ given()
+ .header(acceptJson)
+ .queryParam("ps", 2)
+ .queryParam("page", 0)
+ .expect()
+ .statusCode(200)
+ .header("Link", anyOf(containsString("current"), Matchers.containsString("last")))
+ .header("X-collection-size", notNullValue())
+ .log().ifError()
+ .when()
+ .get("/alert");
+ }
+
+
+ @Test
public void testGetAlertCountJson() throws Exception {
given()
diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/EventTest.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/EventTest.java
index 2e74181..40ae7b0 100644
--- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/EventTest.java
+++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/EventTest.java
@@ -23,13 +23,17 @@ import java.util.List;
import java.util.Map;
import com.jayway.restassured.http.ContentType;
+import com.jayway.restassured.response.Headers;
import com.jayway.restassured.response.Response;
+import org.hamcrest.CoreMatchers;
import org.junit.Test;
import org.rhq.modules.integrationTests.restApi.d.Event;
import org.rhq.modules.integrationTests.restApi.d.EventSource;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.not;
import static com.jayway.restassured.RestAssured.given;
/**
@@ -261,6 +265,131 @@ public class EventTest extends AbstractBase {
}
@Test
+ public void testAddGetEventOnSourceWithPaging() throws Exception {
+
+ EventSource es = new EventSource();
+ es.setResourceId(_platformId);
+ es.setName("Event Log"); // Name of the event definition
+ es.setLocation("-x-test-location");
+
+ Response response =
+ given()
+ .header(acceptJson)
+ .contentType(ContentType.JSON)
+ .pathParam("id",_platformId)
+ .body(es)
+ .expect()
+ .statusCode(200)
+ .log().ifError()
+ .when()
+ .post("/event/{id}/sources");
+
+ EventSource eventSource = response.as(EventSource.class);
+
+ long now = System.currentTimeMillis();
+ try {
+
+ // Add an event
+ Event event = new Event(eventSource.getId(),now,"Li la lu 1:->");
+ Event event1 = new Event(eventSource.getId(),now,"Li la lu 2:->");
+ Event event2 = new Event(eventSource.getId(),now,"Li la lu 3:->");
+ Event event3 = new Event(eventSource.getId(),now,"Li la lu 4:->");
+ List<Event> events = new ArrayList<Event>(4);
+ events.add(event);
+ events.add(event1);
+ events.add(event2);
+ events.add(event3);
+
+ given()
+ .header(acceptJson)
+ .contentType(ContentType.JSON)
+ .pathParam("id",eventSource.getId())
+ .body(events)
+ .expect()
+ .statusCode(204) // no content returned
+ .log().ifError()
+ .when()
+ .post("/event/source/{id}/events");
+
+
+ // and retrieve it again from the event source
+ response =
+ given()
+ .header(acceptJson)
+ .pathParam("id", eventSource.getId())
+ .queryParam("startTime",now - 10)
+ .queryParam("endTime",now + 10)
+ .expect()
+ .statusCode(200)
+ .log().ifError()
+ .header("X-collection-size", CoreMatchers.is("4"))
+ .when()
+ .get("/event/source/{id}/events");
+ List list = response.as(List.class);
+ assert list.size()>0;
+
+ // Get the list of events from the resource
+ response =
+ given()
+ .header(acceptJson)
+ .pathParam("id", _platformId)
+ .queryParam("startTime",now - 10)
+ .queryParam("endTime",now + 10)
+ .queryParam("page",0)
+ .queryParam("ps",2)
+ .expect()
+ .statusCode(200)
+ .log().ifError()
+ .header("X-collection-size", CoreMatchers.is("4"))
+ .header("Link",not(containsString("prev")))
+ .when()
+ .get("/event/{id}/events");
+ list = response.as(List.class);
+ assert list.size()==2;
+
+ response =
+ given()
+ .header(acceptJson)
+ .pathParam("id", _platformId)
+ .queryParam("startTime",now - 10)
+ .queryParam("endTime",now + 10)
+ .queryParam("page",1)
+ .queryParam("ps",2)
+ .expect()
+ .statusCode(200)
+ .log().ifError()
+ .header("X-collection-size", CoreMatchers.is("4"))
+ .header("Link",not(containsString("next")))
+ .when()
+ .get("/event/{id}/events");
+
+ Headers headers = response.getHeaders();
+ int found = 0;
+ for (String link: headers.getValues("Link")) {
+ if (link.contains("rel='last'"))
+ found++;
+ if (link.contains("rel='prev'"))
+ found++;
+ if (link.contains("rel='current'"))
+ found++;
+ assert !link.contains("rel='next");
+ }
+ assert found == 3;
+
+ }
+ finally {
+
+ // Delete the source again
+ given()
+ .pathParam("id", eventSource.getId())
+ .expect()
+ .statusCode(204)
+ .when()
+ .delete("/event/source/{id}");
+ }
+ }
+
+ @Test
public void testDeleteUnknownSource() throws Exception {
given()
.pathParam("id", 123)
diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/GroupTest.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/GroupTest.java
index 05bae96..a4ef048 100644
--- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/GroupTest.java
+++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/GroupTest.java
@@ -74,6 +74,17 @@ public class GroupTest extends AbstractBase {
}
@Test
+ public void testGetGroupsWithPaging() throws Exception {
+ given()
+ .queryParam("page",0)
+ .queryParam("ps",2)
+ .expect()
+ .statusCode(200)
+ .when()
+ .get("/group");
+ }
+
+ @Test
public void testGetGroupsQuery() throws Exception {
given()
.queryParam("q","lala")
diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ResourcesTest.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ResourcesTest.java
index 23480a0..b66d9b5 100644
--- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ResourcesTest.java
+++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ResourcesTest.java
@@ -44,6 +44,8 @@ import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.notNullValue;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.not;
/**
* Test the resources part
@@ -242,7 +244,7 @@ public class ResourcesTest extends AbstractBase {
}
@Test
- public void testPaging() throws Exception {
+ public void testGetResourcesWithPaging() throws Exception {
given()
.header("Accept", "application/json")
@@ -252,12 +254,31 @@ public class ResourcesTest extends AbstractBase {
.queryParam("category", "service")
.expect()
.statusCode(200)
- .header("Link", containsString("page=2"))
+ .log().everything()
+ // .header("Link", allOf(containsString("page=2"), containsString("current")))
+ .header("Link",not(containsString("prev")))
.body("links.self", notNullValue())
.when().get("/resource");
}
@Test
+ public void testGetPlatformsWithPaging() throws Exception {
+
+ given()
+ .header("Accept", "application/json")
+ .with()
+ .queryParam("page", 0)
+ .queryParam("ps", 5)
+ .expect()
+ .statusCode(200)
+ .log().ifError()
+ .body("links.self", notNullValue())
+ .header("Link", not(containsString("prev=")))
+ .header("Link", anyOf(containsString("current"),containsString("last")))
+ .when().get("/resource/platforms");
+ }
+
+ @Test
public void testGetPlatformXml() {
assert _platformId!=0 : "Setup did not run or was no success";
11 years
[rhq] 2 commits - modules/enterprise modules/integration-tests
by Heiko W. Rupp
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/domain/ResourceWithType.java | 20 -----
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/CriteriaQueryGenerator.java | 20 ++---
modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/AbstractBase.java | 7 -
modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ContentTest.java | 37 +++++++++-
modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ResourcesTest.java | 35 +++++++--
modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/UserTest.java | 4 -
6 files changed, 82 insertions(+), 41 deletions(-)
New commits:
commit e5d22cac1559f8f2dc699d3ebe3f54d0abf74a1a
Author: Heiko W. Rupp <hwr(a)redhat.com>
Date: Tue May 21 22:10:26 2013 +0200
BZ 962853 - make getResourceId return an int instead of a string.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/domain/ResourceWithType.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/domain/ResourceWithType.java
index 703e897..8273737 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/domain/ResourceWithType.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/domain/ResourceWithType.java
@@ -25,19 +25,11 @@ package org.rhq.enterprise.server.rest.domain;
import java.util.ArrayList;
import java.util.List;
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlAttribute;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlElementRef;
-import javax.xml.bind.annotation.XmlID;
import javax.xml.bind.annotation.XmlRootElement;
import com.wordnik.swagger.annotations.ApiClass;
import com.wordnik.swagger.annotations.ApiProperty;
-import org.jboss.resteasy.spi.touri.URITemplate;
-
/**
* A (partial) resource with some type information
* @author Heiko W. Rupp
@@ -69,7 +61,6 @@ public class ResourceWithType {
}
@ApiProperty("Name of the resource")
- @XmlElement
public String getResourceName() {
return resourceName;
}
@@ -79,17 +70,15 @@ public class ResourceWithType {
}
@ApiProperty("ID of the resource")
- @XmlID
- public String getResourceId() {
- return String.valueOf(resourceId);
+ public int getResourceId() {
+ return resourceId;
}
public void setResourceId(int resourceId) {
this.resourceId = resourceId;
}
- @ApiProperty("Name of the resource type of teh resource")
- @XmlElement
+ @ApiProperty("Name of the resource type of the resource")
public String getTypeName() {
return typeName;
}
@@ -99,7 +88,6 @@ public class ResourceWithType {
}
@ApiProperty("Id of the resource type of the resource")
- @XmlElement
public Integer getTypeId() {
return typeId;
}
@@ -109,7 +97,6 @@ public class ResourceWithType {
}
@ApiProperty("Name of the plugin defining the resource type")
- @XmlElement
public String getPluginName() {
return pluginName;
}
@@ -136,7 +123,6 @@ public class ResourceWithType {
this.status = status;
}
- @XmlElementRef
public List<Link> getLinks() {
return links;
}
diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/AbstractBase.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/AbstractBase.java
index 9f6a02b..ae4e81d 100644
--- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/AbstractBase.java
+++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/AbstractBase.java
@@ -98,8 +98,7 @@ public abstract class AbstractBase {
if (res!=null && res.get(0)!=null) {
- String tmp = ((Map <String,String>)res.get(0)).get("resourceId");
- int pid =Integer.valueOf(tmp);
+ Integer pid = ((Map <String,Integer>)res.get(0)).get("resourceId");
given()
.pathParam("id", pid)
@@ -125,9 +124,9 @@ public abstract class AbstractBase {
assert res != null;
for (Object entry : res) {
if (entry instanceof Map) {
- Map<String,String> map = (Map<String, String>) entry;
+ Map<String,Object> map = (Map<String, Object>) entry;
if (!map.get("resourceName").equals(REST_TEST_DUMMY)) {
- return Integer.valueOf(map.get("resourceId"));
+ return (Integer)map.get("resourceId");
}
}
}
diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ContentTest.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ContentTest.java
index 7e3f45a..3515fac 100644
--- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ContentTest.java
+++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ContentTest.java
@@ -30,6 +30,7 @@ import com.jayway.restassured.response.Response;
import org.junit.Test;
import org.rhq.modules.integrationTests.restApi.d.CreateCBRRequest;
+import org.rhq.modules.integrationTests.restApi.d.Resource;
import static com.jayway.restassured.RestAssured.given;
import static org.hamcrest.Matchers.*;
@@ -41,6 +42,8 @@ import static org.hamcrest.Matchers.isOneOf;
*/
public class ContentTest extends AbstractBase {
+ private static final String DEPLOYED_WAR_NAME = "test-simple.war";
+
@Test
public void testUpload() throws Exception {
@@ -144,6 +147,8 @@ public class ContentTest extends AbstractBase {
@Test
public void testCreatePackageBasedResource() throws Exception {
+ wipeWarArchiveIfNecessary();
+
InputStream in =
getClass().getClassLoader().getResourceAsStream("test-simple.war");
@@ -187,7 +192,7 @@ public class ContentTest extends AbstractBase {
assert resources.size()>0;
- int as7Id = Integer.valueOf((String)resources.get(0).get("resourceId"));
+ int as7Id = (Integer)resources.get(0).get("resourceId");
int createdResourceId=-1;
// create child of eap6 as deployment
@@ -203,7 +208,7 @@ public class ContentTest extends AbstractBase {
// set plugin config (path) and deploy config (runtime-name)
resource.getPluginConfig().put("path","deployment");
- resource.getResourceConfig().put("runtimeName","test-simple.war");
+ resource.getResourceConfig().put("runtimeName", DEPLOYED_WAR_NAME);
Response response =
given()
@@ -285,6 +290,34 @@ public class ContentTest extends AbstractBase {
}
+
+ private void wipeWarArchiveIfNecessary() {
+
+ @SuppressWarnings("unchecked")
+ List<Resource> resources =
+ given()
+ .queryParam("q",DEPLOYED_WAR_NAME)
+ .queryParam("category", "SERVICE")
+ .header(acceptJson)
+ .expect()
+ .log().everything()
+ .when()
+ .get("/resource")
+ .as(List.class);
+
+ if (resources!=null && resources.size()>0) {
+ int resourceId = resources.get(0).getResourceId();
+
+ given()
+ .pathParam("id", resourceId)
+ .queryParam("physical", "true") // Also remove target on the EAP instance
+ .expect()
+ .statusCode(200)
+ .when()
+ .delete("/resource/{id}");
+ }
+ }
+
@Test
public void testCreateCBRBadHandle() throws Exception {
diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ResourcesTest.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ResourcesTest.java
index cc9558b..23480a0 100644
--- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ResourcesTest.java
+++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ResourcesTest.java
@@ -69,8 +69,11 @@ public class ResourcesTest extends AbstractBase {
.expect()
.statusCode(200)
.contentType(ContentType.JSON)
- .log().ifError()
+ .log().everything()
.body("links.self", notNullValue())
+ .body("resourceId",is(_platformId))
+ .body("typeId",is(_platformTypeId))
+ .body("parentId",is(0))
.when()
.get("/resource/{id}");
@@ -101,9 +104,9 @@ public class ResourcesTest extends AbstractBase {
.log().everything()
.expect()
.statusCode(200)
- .body("id",is(typeId))
- .body("name",is("Linux"))
- .body("pluginName",is("Platforms"))
+ .body("id", is(typeId))
+ .body("name", is("Linux"))
+ .body("pluginName", is("Platforms"))
.log().everything()
.when()
.get("/resource/type/{typeId}");
@@ -187,7 +190,7 @@ public class ResourcesTest extends AbstractBase {
given()
.header("Accept", "application/json")
.with()
- .queryParam("status","NeW")
+ .queryParam("status", "NeW")
.expect()
.statusCode(200)
.when()
@@ -203,7 +206,7 @@ public class ResourcesTest extends AbstractBase {
.header("Accept", "application/json")
.with()
.queryParam("q", platformName)
- .queryParam("status","Frobnitz")
+ .queryParam("status", "Frobnitz")
.queryParam("category", "platform")
.expect()
.statusCode(406)
@@ -322,6 +325,26 @@ public class ResourcesTest extends AbstractBase {
}
@Test
+ public void testCreatePlatformJson() throws Exception {
+
+ Resource resource = new Resource();
+ resource.setResourceName("dummy-test");
+ resource.setTypeName("Linux");
+
+ given()
+ .header(acceptJson)
+ .contentType(ContentType.JSON)
+ .body(resource)
+ .expect()
+ .statusCode(201)
+ .log().everything()
+ .body("resourceId",instanceOf(Number.class))
+ .when()
+ .post("/resource/platforms");
+
+ }
+
+ @Test
public void testCreatePlatformWithBadType() throws Exception {
Resource resource = new Resource();
diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/UserTest.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/UserTest.java
index ea78d09..e7e1b9e 100644
--- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/UserTest.java
+++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/UserTest.java
@@ -94,7 +94,7 @@ public class UserTest extends AbstractBase {
.when()
.get("/user/favorites/resource");
JsonPath jp = r.jsonPath();
- assert jp.getList("resourceId").contains(String.valueOf(_platformId));
+ assert jp.getList("resourceId").contains(_platformId);
}
finally {
given()
@@ -131,7 +131,7 @@ public class UserTest extends AbstractBase {
.when()
.get("/user/favorites/resource");
JsonPath jp = r.jsonPath();
- assert jp.getList("resourceId").contains(String.valueOf(_platformId));
+ assert jp.getList("resourceId").contains(_platformId);
}
finally {
given()
commit ec4f0228529bf490d44fc3be5407080f9360ef6c
Author: Heiko W. Rupp <hwr(a)redhat.com>
Date: Tue May 21 21:45:23 2013 +0200
Print the failing class along with the failed field.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/CriteriaQueryGenerator.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/CriteriaQueryGenerator.java
index d7cc470..867ea16 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/CriteriaQueryGenerator.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/CriteriaQueryGenerator.java
@@ -67,8 +67,8 @@ public final class CriteriaQueryGenerator {
private static final Log LOG = LogFactory.getLog(CriteriaQueryGenerator.class);
public enum AuthorizationTokenType {
- RESOURCE, // specifies the resource alias to join on for standard res-group-role-subject authorization checking
- GROUP; // specifies the group alias to join on for standard group-role-subject authorization checking
+ RESOURCE, // specifies the resource alias to join on for standard res-group-role-subject authorization checking
+ GROUP; // specifies the group alias to join on for standard group-role-subject authorization checking
}
private Criteria criteria;
@@ -333,8 +333,8 @@ public final class CriteriaQueryGenerator {
results.append("FROM ").append(className).append(' ').append(alias).append(NL);
if (countQuery == false) {
- /*
- * don't fetch in the count query to avoid: "query specified join fetching,
+ /*
+ * don't fetch in the count query to avoid: "query specified join fetching,
* but the owner of the fetched association was not present in the select list"
*/
for (String fetchField : getFetchFields(criteria)) {
@@ -383,7 +383,7 @@ public final class CriteriaQueryGenerator {
/*
* do not prefix the alias when:
- *
+ *
* 1) if the suffix is numerical, which allows us to sort by column ordinal
* 2) if the user wants full control and has explicitly chosen to disable alias prepending
*/
@@ -655,7 +655,7 @@ public final class CriteriaQueryGenerator {
} catch (RuntimeException re) {
LOG.error("Could not get JPQL translation for '" + searchExpression + "': "
+ ThrowableUtil.getAllMessages(re, true));
- throw re; // don't wrap exceptions that are already RuntimeExceptions in another RuntimeException
+ throw re; // don't wrap exceptions that are already RuntimeExceptions in another RuntimeException
} catch (Exception e) {
LOG.error("Could not get JPQL translation for '" + searchExpression + "': "
+ ThrowableUtil.getAllMessages(e, true));
@@ -694,7 +694,7 @@ public final class CriteriaQueryGenerator {
Field field = criteria.getPersistentClass().getDeclaredField(fieldName);
persistentBagFields.add(field);
} catch (NoSuchFieldException e) {
- LOG.warn("Failed to add persistent bag collection.", e);
+ LOG.warn("Failed to add persistent bag collection on class [" + criteria.getPersistentClass().getName() +"]: ", e);
}
}
@@ -703,7 +703,7 @@ public final class CriteriaQueryGenerator {
Field field = criteria.getPersistentClass().getDeclaredField(fieldName);
joinFetchFields.add(field);
} catch (NoSuchFieldException e) {
- LOG.warn("Failed to add join fetch field.", e);
+ LOG.warn("Failed to add join fetch field on class [" + criteria.getPersistentClass().getName() + "]: ", e);
}
}
@@ -741,7 +741,7 @@ public final class CriteriaQueryGenerator {
/**
* The groupBy clause can be set if and only if the projection is altered. The passed argument should not be
- * prefixed with 'group by'; that part of the query will be auto-generated if the argument is non-null. The
+ * prefixed with 'group by'; that part of the query will be auto-generated if the argument is non-null. The
* new projection must follow standard rules as they apply to statements with groupBy clauses.
*/
public void setGroupByClause(String groupByClause) {
@@ -753,7 +753,7 @@ public final class CriteriaQueryGenerator {
/**
* The having clause can be set if and only if the groupBy clause is set. The passed argument should not be
- * prefixed with 'having'; that part of the query will be auto-generated if the argument is non-null. The
+ * prefixed with 'having'; that part of the query will be auto-generated if the argument is non-null. The
* having clause must follow standard rules as they apply to statements with groupBy clauses.
*/
public void setHavingClause(String havingClause) {
11 years
[rhq] .classpath
by mazz
.classpath | 1 +
1 file changed, 1 insertion(+)
New commits:
commit 18aa1bc699a26717acfa2ba9af2ac072aafc2ce3
Author: John Mazzitelli <mazz(a)redhat.com>
Date: Tue May 21 15:06:55 2013 -0400
add jboss logging to eclipse classpath
diff --git a/.classpath b/.classpath
index 53037dd..2487128 100644
--- a/.classpath
+++ b/.classpath
@@ -230,6 +230,7 @@
<classpathentry exported="true" kind="var" path="M2_REPO/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar"/>
<classpathentry exported="true" kind="var" path="M2_REPO/taglibs/standard/1.1.2/standard-1.1.2.jar"/>
<classpathentry exported="true" kind="var" path="M2_REPO/org/jboss/jboss-common-core/2.2.17.GA/jboss-common-core-2.2.17.GA.jar"/>
+ <classpathentry exported="true" kind="var" path="M2_REPO/org/jboss/logging/jboss-logging/3.1.2.GA/jboss-logging-3.1.2.GA.jar"/>
<classpathentry exported="true" kind="var" path="M2_REPO/javax/servlet/jstl/1.1.2/jstl-1.1.2.jar"/>
<classpathentry exported="true" kind="var" path="M2_REPO/struts-menu/struts-menu/2.3/struts-menu-2.3.jar"/>
<classpathentry exported="true" kind="var" path="M2_REPO/junit/junit/3.8.2/junit-3.8.2.jar"/>
11 years
[rhq] Branch 'feature/cassandra-backend' - modules/plugins
by Jiri Kremser
modules/plugins/validate-all-plugins/pom.xml | 2 ++
1 file changed, 2 insertions(+)
New commits:
commit 31cd9599c80c6219571fcc6b40ed44beaa35c331
Author: Jirka Kremser <jkremser(a)redhat.com>
Date: Tue May 21 16:44:08 2013 +0200
Just adding the cassandra plugins to plugin validator (validating the plugin descriptor, resource type, etc.)
diff --git a/modules/plugins/validate-all-plugins/pom.xml b/modules/plugins/validate-all-plugins/pom.xml
index 63eb6ae..5154e19 100644
--- a/modules/plugins/validate-all-plugins/pom.xml
+++ b/modules/plugins/validate-all-plugins/pom.xml
@@ -60,6 +60,8 @@
<pathelement location="../kickstart/target/rhq-kickstart-plugin-${project.version}.jar" />
<pathelement location="../filetemplate-bundle/target/rhq-filetemplate-bundle-plugin-${project.version}.jar" />
<pathelement location="../ant-bundle/target/rhq-ant-bundle-plugin-${project.version}.jar" />
+ <pathelement location="../cassandra/target/rhq-cassandra-plugin-${project.version}.jar" />
+ <pathelement location="../rhq-storage/target/rhq-rhqstorage-${project.version}.jar" />
</classpath>
<sysproperty key="org.apache.commons.logging.Log" value="org.apache.commons.logging.impl.SimpleLog" />
<!--
11 years