From: Ondrej Lichtner <olichtne(a)redhat.com>
Most of this is centered around adding a reference to the overall
configuration of the Recipe to the Perf.* package code. I also started
adding a number of type hints to make it easier to understand which
"conf" variable refers to which type of "*Conf" instance...

This is a preliminary version that will require at least one more round
of refactoring to add the type hints that were skipped here; ideally
the commit should also be split into multiple parts.
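For illustration, an evaluator written against the new interface would
look roughly like the sketch below (MyEvaluator is a hypothetical
example, not part of this patch; everything else follows the code in
this series):

    from typing import List, Tuple

    from lnst.Controller.Recipe import BaseRecipe
    from lnst.RecipeCommon.Perf.Recipe import RecipeConf as PerfRecipeConf
    from lnst.RecipeCommon.Perf.Measurements.BaseMeasurement import (
        BaseMeasurementResults as PerfMeasurementResults,
    )
    from lnst.RecipeCommon.Perf.Evaluators.BaselineEvaluator import (
        BaselineEvaluator,
    )

    class MyEvaluator(BaselineEvaluator):
        def get_baseline(
            self,
            recipe: BaseRecipe,
            recipe_conf: PerfRecipeConf,
            result: PerfMeasurementResults,
        ) -> PerfMeasurementResults:
            # the perf RecipeConf now carries a (deep-copied) reference
            # to the parent recipe configuration that generated it
            parent_config = recipe_conf.parent_recipe_config
            # a real evaluator would look up a stored baseline for this
            # result here; returning None means "no baseline available"
            return None
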
Signed-off-by: Ondrej Lichtner <olichtne(a)redhat.com>
---
lnst/RecipeCommon/BaseResultEvaluator.py | 9 ++-
.../Evaluators/BaselineCPUAverageEvaluator.py | 39 ++++++++--
.../Perf/Evaluators/BaselineEvaluator.py | 77 +++++++++++++++----
.../BaselineFlowAverageEvaluator.py | 25 +++++-
.../Perf/Evaluators/NonzeroFlowEvaluator.py | 18 +++--
lnst/RecipeCommon/Perf/Recipe.py | 64 ++++++++++-----
lnst/Recipes/ENRT/BaseEnrtRecipe.py | 2 +
7 files changed, 185 insertions(+), 49 deletions(-)
diff --git a/lnst/RecipeCommon/BaseResultEvaluator.py b/lnst/RecipeCommon/BaseResultEvaluator.py
index f8902b9..ef7a170 100644
--- a/lnst/RecipeCommon/BaseResultEvaluator.py
+++ b/lnst/RecipeCommon/BaseResultEvaluator.py
@@ -1,3 +1,10 @@
+from typing import Any
+
+from lnst.Controller.Recipe import BaseRecipe
+
+
class BaseResultEvaluator(object):
- def evaluate_results(self, recipe, results):
+ def evaluate_results(
+ self, recipe: BaseRecipe, recipe_conf: Any, results: Any
+ ):
raise NotImplementedError()
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py
index 0125a31..03ac72b 100644
--- a/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py
+++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py
@@ -1,18 +1,29 @@
from __future__ import division
+from typing import List, Tuple
+
+from lnst.Controller.Recipe import BaseRecipe
+from lnst.RecipeCommon.Perf.Recipe import RecipeConf as PerfRecipeConf
+from lnst.RecipeCommon.Perf.Results import result_averages_difference
+from lnst.RecipeCommon.Perf.Measurements.BaseMeasurement import (
+ BaseMeasurementResults as PerfMeasurementResults,
+)
from lnst.RecipeCommon.Perf.Evaluators.BaselineEvaluator import (
BaselineEvaluator,
)
-from lnst.RecipeCommon.Perf.Results import result_averages_difference
-
class BaselineCPUAverageEvaluator(BaselineEvaluator):
def __init__(self, pass_difference, evaluation_filter=None):
self._pass_difference = pass_difference
self._evaluation_filter = evaluation_filter
- def filter_results(self, recipe, results):
+ def filter_results(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ results: List[PerfMeasurementResults],
+ ) -> List[PerfMeasurementResults]:
if self._evaluation_filter is None:
return results
@@ -25,7 +36,12 @@ def filter_results(self, recipe, results):
filtered.append(result)
return filtered
- def group_results(self, recipe, results):
+ def group_results(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ results: List[PerfMeasurementResults],
+ ) -> List[List[PerfMeasurementResults]]:
results_by_host = self._divide_results_by_host(results)
for host_results in results_by_host.values():
yield host_results
@@ -38,7 +54,12 @@ def _divide_results_by_host(self, results):
results_by_host[result.host].append(result)
return results_by_host
- def describe_group_results(self, recipe, results):
+ def describe_group_results(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ results: List[PerfMeasurementResults],
+ ) -> List[str]:
return [
"CPU Baseline average evaluation for Host {hostid}:".format(
hostid=results[0].host.hostid
@@ -48,7 +69,13 @@ def describe_group_results(self, recipe, results):
),
]
- def compare_result_with_baseline(self, recipe, result, baseline):
+ def compare_result_with_baseline(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ result: PerfMeasurementResults,
+ baseline: PerfMeasurementResults,
+ ) -> Tuple[bool, List[str]]:
comparison = True
text = []
if baseline is None:
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineEvaluator.py
index 20e3887..d2ff7f8 100644
--- a/lnst/RecipeCommon/Perf/Evaluators/BaselineEvaluator.py
+++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineEvaluator.py
@@ -1,42 +1,89 @@
+from typing import List, Tuple
+from lnst.Controller.Recipe import BaseRecipe
from lnst.RecipeCommon.BaseResultEvaluator import BaseResultEvaluator
+from lnst.RecipeCommon.Perf.Recipe import RecipeConf as PerfRecipeConf
+from lnst.RecipeCommon.Perf.Measurements.BaseMeasurement import (
+ BaseMeasurementResults as PerfMeasurementResults,
+)
class BaselineEvaluator(BaseResultEvaluator):
- def evaluate_results(self, recipe, results):
- filtered_results = self.filter_results(recipe, results)
+ def evaluate_results(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ results: List[PerfMeasurementResults],
+ ):
+ filtered_results = self.filter_results(recipe, recipe_conf, results)
- for group in self.group_results(recipe, filtered_results):
- self.evaluate_group_results(recipe, group)
+ for group in self.group_results(recipe, recipe_conf, filtered_results):
+ self.evaluate_group_results(recipe, recipe_conf, group)
- def filter_results(self, recipe, results):
+ def filter_results(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ results: List[PerfMeasurementResults],
+ ) -> List[PerfMeasurementResults]:
return results
- def group_results(self, recipe, results):
+ def group_results(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ results: List[PerfMeasurementResults],
+ ) -> List[List[PerfMeasurementResults]]:
for result in results:
yield [result]
- def evaluate_group_results(self, recipe, results):
+ def evaluate_group_results(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ results: List[PerfMeasurementResults],
+ ):
comparison_result = True
- result_text = self.describe_group_results(recipe, results)
+ result_text = self.describe_group_results(recipe, recipe_conf, results)
- baselines = self.get_baselines(recipe, results)
+ baselines = self.get_baselines(recipe, recipe_conf, results)
for result, baseline in zip(results, baselines):
comparison, text = self.compare_result_with_baseline(
- recipe, result, baseline
+ recipe, recipe_conf, result, baseline
)
comparison_result = comparison_result and comparison
result_text.extend(text)
recipe.add_result(comparison_result, "\n".join(result_text))
- def describe_group_results(self, recipe, results):
+ def describe_group_results(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ results: List[PerfMeasurementResults],
+ ) -> List[str]:
return []
- def get_baselines(self, recipe, results):
- return [self.get_baseline(recipe, result) for result in results]
+ def get_baselines(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ results: List[PerfMeasurementResults],
+ ) -> List[PerfMeasurementResults]:
+ return [self.get_baseline(recipe, recipe_conf, result) for result in results]
- def get_baseline(self, recipe, result):
+ def get_baseline(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ result: PerfMeasurementResults,
+ ) -> PerfMeasurementResults:
return None
- def compare_result_with_baseline(self, recipe, result, baseline):
+ def compare_result_with_baseline(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ result: PerfMeasurementResults,
+ baseline: PerfMeasurementResults,
+ ) -> Tuple[bool, List[str]]:
return False, ["Result to baseline comparison not implemented"]
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py
index f7a2fb1..05ee567 100644
--- a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py
+++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py
@@ -1,11 +1,17 @@
from __future__ import division
+from typing import List, Tuple
+from lnst.Controller.Recipe import BaseRecipe
+
+from lnst.RecipeCommon.Perf.Recipe import RecipeConf as PerfRecipeConf
+from lnst.RecipeCommon.Perf.Results import result_averages_difference
+from lnst.RecipeCommon.Perf.Measurements.BaseMeasurement import (
+ BaseMeasurementResults as PerfMeasurementResults,
+)
from lnst.RecipeCommon.Perf.Evaluators.BaselineEvaluator import (
BaselineEvaluator,
)
-from lnst.RecipeCommon.Perf.Results import result_averages_difference
-
class BaselineFlowAverageEvaluator(BaselineEvaluator):
def __init__(self, pass_difference, metrics_to_evaluate=None):
@@ -21,7 +27,12 @@ def __init__(self, pass_difference, metrics_to_evaluate=None):
"receiver_cpu_stats",
]
- def describe_group_results(self, recipe, results):
+ def describe_group_results(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ results: List[PerfMeasurementResults],
+ ) -> List[str]:
result = results[0]
return [
"Baseline average evaluation of flow:",
@@ -31,7 +42,13 @@ def describe_group_results(self, recipe, results):
),
]
- def compare_result_with_baseline(self, recipe, result, baseline):
+ def compare_result_with_baseline(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ result: PerfMeasurementResults,
+ baseline: PerfMeasurementResults,
+ ) -> Tuple[bool, List[str]]:
comparison_result = True
result_text = []
if baseline is None:
diff --git a/lnst/RecipeCommon/Perf/Evaluators/NonzeroFlowEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/NonzeroFlowEvaluator.py
index 0d0921c..8837807 100644
--- a/lnst/RecipeCommon/Perf/Evaluators/NonzeroFlowEvaluator.py
+++ b/lnst/RecipeCommon/Perf/Evaluators/NonzeroFlowEvaluator.py
@@ -1,13 +1,21 @@
-from lnst.RecipeCommon.BaseResultEvaluator import BaseResultEvaluator
+from typing import List, Tuple
+
+from lnst.Controller.Recipe import BaseRecipe
-from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import (
- FlowMeasurementResults,
- AggregatedFlowMeasurementResults,
+from lnst.RecipeCommon.Perf.Recipe import RecipeConf as PerfRecipeConf
+from lnst.RecipeCommon.Perf.Measurements.BaseMeasurement import (
+ BaseMeasurementResults as PerfMeasurementResults,
)
+from lnst.RecipeCommon.BaseResultEvaluator import BaseResultEvaluator
class NonzeroFlowEvaluator(BaseResultEvaluator):
- def evaluate_results(self, recipe, results):
+ def evaluate_results(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ results: List[PerfMeasurementResults],
+ ):
for flow_results in results:
result = True
result_text = [
diff --git a/lnst/RecipeCommon/Perf/Recipe.py b/lnst/RecipeCommon/Perf/Recipe.py
index d1098fa..99aa786 100644
--- a/lnst/RecipeCommon/Perf/Recipe.py
+++ b/lnst/RecipeCommon/Perf/Recipe.py
@@ -1,21 +1,30 @@
import logging
from collections import OrderedDict
+from typing import Any, List
from lnst.Common.LnstError import LnstError
from lnst.Controller.Recipe import BaseRecipe
+from lnst.RecipeCommon.Perf.Measurements.BaseMeasurement import BaseMeasurement
from lnst.RecipeCommon.Perf.Results import SequentialPerfResult
from lnst.RecipeCommon.Perf.Results import ParallelPerfResult
from lnst.RecipeCommon.Perf.PerfTestMixins import (
- BasePerfTestTweakMixin,
- BasePerfTestIterationTweakMixin,
+ BasePerfTestTweakMixin,
+ BasePerfTestIterationTweakMixin,
)
+
class RecipeConf(object):
- def __init__(self, measurements, iterations):
+ def __init__(
+ self,
+ measurements: List[BaseMeasurement],
+ iterations: int,
+ parent_recipe_config: Any = None,
+ ):
self._measurements = measurements
self._evaluators = dict()
self._iterations = iterations
+ self._parent_recipe_config = parent_recipe_config
@property
def measurements(self):
@@ -27,7 +36,9 @@ def evaluators(self):
def register_evaluators(self, measurement, evaluators):
if measurement not in self.measurements:
- raise LnstError("Can't register evaluators for an unknown measurement")
+ raise LnstError(
+ "Can't register evaluators for an unknown measurement"
+ )
self._evaluators[measurement] = list(evaluators)
@@ -35,8 +46,13 @@ def register_evaluators(self, measurement, evaluators):
def iterations(self):
return self._iterations
+ @property
+ def parent_recipe_config(self):
+ return self._parent_recipe_config
+
+
class RecipeResults(object):
- def __init__(self, recipe_conf):
+ def __init__(self, recipe_conf: RecipeConf):
self._recipe_conf = recipe_conf
self._results = OrderedDict()
@@ -51,11 +67,15 @@ def results(self):
def add_measurement_results(self, measurement, new_results):
aggregated_results = self._results.get(measurement, None)
aggregated_results = measurement.aggregate_results(
- aggregated_results, new_results)
+ aggregated_results, new_results
+ )
self._results[measurement] = aggregated_results
-class Recipe(BasePerfTestTweakMixin, BasePerfTestIterationTweakMixin, BaseRecipe):
- def perf_test(self, recipe_conf):
+
+class Recipe(
+ BasePerfTestTweakMixin, BasePerfTestIterationTweakMixin, BaseRecipe
+):
+ def perf_test(self, recipe_conf: RecipeConf):
results = RecipeResults(recipe_conf)
self.apply_perf_test_tweak(recipe_conf)
@@ -69,7 +89,9 @@ def perf_test(self, recipe_conf):
return results
- def perf_test_iteration(self, recipe_conf, results):
+ def perf_test_iteration(
+ self, recipe_conf: RecipeConf, results: RecipeResults
+ ):
self.apply_perf_test_iteration_tweak(recipe_conf)
self.describe_perf_test_iteration_tweak(recipe_conf)
@@ -81,20 +103,23 @@ def perf_test_iteration(self, recipe_conf, results):
for measurement in recipe_conf.measurements:
measurement_results = measurement.collect_results()
results.add_measurement_results(
- measurement, measurement_results)
+ measurement, measurement_results
+ )
finally:
self.remove_perf_test_iteration_tweak(recipe_conf)
- def describe_perf_test_iteration_tweak(self, perf_config):
- description = self.generate_perf_test_iteration_tweak_description(perf_config)
+ def describe_perf_test_iteration_tweak(self, recipe_conf: RecipeConf):
+ description = self.generate_perf_test_iteration_tweak_description(
+ recipe_conf
+ )
self.add_result(True, "\n".join(description))
- def perf_report_and_evaluate(self, results):
+ def perf_report_and_evaluate(self, results: RecipeResults):
self.perf_report(results)
self.perf_evaluate(results)
- def perf_report(self, recipe_results):
+ def perf_report(self, recipe_results: RecipeResults):
if not recipe_results:
self.add_result(False, "No results available to report.")
return
@@ -102,7 +127,7 @@ def perf_report(self, recipe_results):
for measurement, results in list(recipe_results.results.items()):
measurement.report_results(self, results)
- def perf_evaluate(self, recipe_results):
+ def perf_evaluate(self, recipe_results: RecipeResults):
if not recipe_results:
self.add_result(False, "No results available to evaluate.")
return
@@ -112,8 +137,11 @@ def perf_evaluate(self, recipe_results):
for measurement, results in list(recipe_results.results.items()):
evaluators = recipe_conf.evaluators.get(measurement, [])
for evaluator in evaluators:
- evaluator.evaluate_results(self, results)
+ evaluator.evaluate_results(self, recipe_conf, results)
if len(evaluators) == 0:
- logging.debug("No evaluator registered for measurement {}"
- .format(measurement))
+ logging.debug(
+ "No evaluator registered for measurement {}".format(
+ measurement
+ )
+ )
diff --git a/lnst/Recipes/ENRT/BaseEnrtRecipe.py b/lnst/Recipes/ENRT/BaseEnrtRecipe.py
index 110ee88..9813ba6 100644
--- a/lnst/Recipes/ENRT/BaseEnrtRecipe.py
+++ b/lnst/Recipes/ENRT/BaseEnrtRecipe.py
@@ -1,4 +1,5 @@
import pprint
+import copy
from contextlib import contextmanager
from lnst.Common.LnstError import LnstError
@@ -399,6 +400,7 @@ def generate_perf_configurations(self, config):
perf_conf = PerfRecipeConf(
measurements=measurements,
iterations=self.params.perf_iterations,
+ parent_recipe_config=copy.deepcopy(config),
)
self.register_perf_evaluators(perf_conf)
--
2.29.2