From: Ondrej Lichtner <olichtne@redhat.com>
The Perf.RecipeConf class now tracks a list of evaluators for each of its measurements and provides a register_evaluators method to add them.

At the same time, the Perf.Recipe class now calls these evaluators from its evaluate_results method, or logs a debug message when no evaluators were registered for a measurement.
This commit also removes the now obsolete evaluate_results classmethod from the BaseMeasurement class and its derived classes.
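For illustration, registering evaluators from a recipe could look like the following sketch (MyEvaluator is a hypothetical example class, not part of this patch; any object providing an evaluate_results(recipe, results) method can be registered):

    # hypothetical usage sketch -- MyEvaluator is an assumed example class
    perf_conf = RecipeConf(measurements=[flow_measurement], iterations=5)
    perf_conf.register_evaluators(flow_measurement, [MyEvaluator()])
    # registering evaluators for a measurement that is not part of the
    # configuration raises LnstError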
Signed-off-by: Ondrej Lichtner <olichtne@redhat.com>
---
 .../Perf/Evaluators/EvaluationError.py        |  4 ++++
 .../Perf/Measurements/BaseCPUMeasurement.py   | 11 ---------
 .../Perf/Measurements/BaseFlowMeasurement.py  |  5 ----
 .../Perf/Measurements/BaseMeasurement.py      |  5 ----
 lnst/RecipeCommon/Perf/Recipe.py              | 24 ++++++++++++++++++-
 5 files changed, 27 insertions(+), 22 deletions(-)
 create mode 100644 lnst/RecipeCommon/Perf/Evaluators/EvaluationError.py
diff --git a/lnst/RecipeCommon/Perf/Evaluators/EvaluationError.py b/lnst/RecipeCommon/Perf/Evaluators/EvaluationError.py
new file mode 100644
index 0000000..40aa294
--- /dev/null
+++ b/lnst/RecipeCommon/Perf/Evaluators/EvaluationError.py
@@ -0,0 +1,4 @@
+from lnst.Common.LnstError import LnstError
+
+class EvaluationError(LnstError):
+    pass
diff --git a/lnst/RecipeCommon/Perf/Measurements/BaseCPUMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/BaseCPUMeasurement.py
index f81e8df..fc47091 100644
--- a/lnst/RecipeCommon/Perf/Measurements/BaseCPUMeasurement.py
+++ b/lnst/RecipeCommon/Perf/Measurements/BaseCPUMeasurement.py
@@ -61,17 +61,6 @@ class BaseCPUMeasurement(BaseMeasurement):
         for host_results in results_by_host.values():
             cls._report_host_results(recipe, host_results)
 
-    @classmethod
-    def evaluate_results(cls, recipe, results):
-        #TODO split off into a separate evaluator class
-        hosts = []
-        for result in results:
-            if result.host.hostid not in hosts:
-                hosts.append(result.host.hostid)
-        recipe.add_result(True,
-            "CPU evaluation for results from hosts {} not implemented"
-            .format(hosts))
-
     @classmethod
     def _divide_results_by_host(cls, results):
         results_by_host = {}
diff --git a/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py
index eda36a9..aba0e02 100644
--- a/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py
+++ b/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py
@@ -156,11 +156,6 @@ class BaseFlowMeasurement(BaseMeasurement):
         for flow_results in results:
             cls._report_flow_results(recipe, flow_results)
 
-    @classmethod
-    def evaluate_results(cls, recipe, results):
-        #TODO split off into a separate evaluator class
-        recipe.add_result(True, "Flow result evaluation not implemented")
-
     @classmethod
     def _report_flow_results(cls, recipe, flow_results):
         generator = flow_results.generator_results
diff --git a/lnst/RecipeCommon/Perf/Measurements/BaseMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/BaseMeasurement.py
index 8059308..782ffa5 100644
--- a/lnst/RecipeCommon/Perf/Measurements/BaseMeasurement.py
+++ b/lnst/RecipeCommon/Perf/Measurements/BaseMeasurement.py
@@ -19,11 +19,6 @@ class BaseMeasurement(object):
     def report_results(recipe, results):
         raise NotImplementedError()
 
-    @classmethod
-    def evaluate_results(recipe, results):
-        #TODO split off into separate evaluator classes
-        raise NotImplementedError()
-
     @classmethod
     def aggregate_results(first, second):
         raise NotImplementedError()
diff --git a/lnst/RecipeCommon/Perf/Recipe.py b/lnst/RecipeCommon/Perf/Recipe.py
index e305310..13fc35b 100644
--- a/lnst/RecipeCommon/Perf/Recipe.py
+++ b/lnst/RecipeCommon/Perf/Recipe.py
@@ -1,3 +1,6 @@
+import logging
+
+from lnst.Common.LnstError import LnstError
 from lnst.Controller.Recipe import BaseRecipe
 from lnst.RecipeCommon.Perf.Results import SequentialPerfResult
 from lnst.RecipeCommon.Perf.Results import ParallelPerfResult
@@ -5,12 +8,23 @@ from lnst.RecipeCommon.Perf.Results import ParallelPerfResult
 class RecipeConf(object):
     def __init__(self, measurements, iterations):
         self._measurements = measurements
+        self._evaluators = dict()
         self._iterations = iterations
 
     @property
     def measurements(self):
         return self._measurements
 
+    @property
+    def evaluators(self):
+        return dict(self._evaluators)
+
+    def register_evaluators(self, measurement, evaluators):
+        if measurement not in self.measurements:
+            raise LnstError("Can't register evaluators for an unknown measurement")
+
+        self._evaluators[measurement] = list(evaluators)
+
     @property
     def iterations(self):
         return self._iterations
@@ -69,5 +83,13 @@ class Recipe(BaseRecipe):
             self.add_result(False, "No results available to evaluate.")
             return
 
+        perf_conf = recipe_results.perf_conf
+
         for measurement, results in recipe_results.results.items():
-            measurement.evaluate_results(self, results)
+            evaluators = perf_conf.evaluators.get(measurement, [])
+            for evaluator in evaluators:
+                evaluator.evaluate_results(self, results)
+
+            if len(evaluators) == 0:
+                logging.debug("No evaluator registered for measurement {}"
+                        .format(measurement))
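For reviewers, a minimal evaluator satisfying the interface this loop expects might look like the sketch below (the class name and the always-passing result are assumptions for illustration, not part of this patch):

    class ExampleEvaluator(object):
        # hypothetical example; the evaluation loop above only relies
        # on the registered object having this method
        def evaluate_results(self, recipe, results):
            for result in results:
                # recipe.add_result(success, description) is the same
                # reporting call the removed classmethods used
                recipe.add_result(True, "evaluated {}".format(result))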