From: Ondrej Lichtner <olichtne(a)redhat.com>
Type hinting helps with understanding the code in this package, due to the
number of "conf", "measurement", and other types that have similar names
but different uses.
Signed-off-by: Ondrej Lichtner <olichtne(a)redhat.com>
---
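Note for reviewers (not part of the commit): a minimal sketch of how the new
annotations are meant to be read when writing a custom evaluator. The
MyEvaluator class and its filtering rule are hypothetical and only illustrate
which "conf" and "results" types each parameter refers to; the imports and
aliases are the ones introduced by this patch.

    from typing import List

    from lnst.Controller.Recipe import BaseRecipe
    from lnst.RecipeCommon.Perf.Recipe import RecipeConf as PerfRecipeConf
    from lnst.RecipeCommon.Perf.Measurements.BaseMeasurement import (
        BaseMeasurementResults as PerfMeasurementResults,
    )
    from lnst.RecipeCommon.Perf.Evaluators.BaselineEvaluator import (
        BaselineEvaluator,
    )


    class MyEvaluator(BaselineEvaluator):  # hypothetical example subclass
        def filter_results(
            self,
            recipe: BaseRecipe,
            # the Perf RecipeConf, not the Controller recipe configuration
            recipe_conf: PerfRecipeConf,
            results: List[PerfMeasurementResults],
        ) -> List[PerfMeasurementResults]:
            # keep only results whose measurement is registered in this
            # perf recipe configuration
            return [
                r for r in results
                if r.measurement in recipe_conf.measurements
            ]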
lnst/RecipeCommon/BaseResultEvaluator.py | 12 +-
.../Evaluators/BaselineCPUAverageEvaluator.py | 45 +++++--
.../Perf/Evaluators/BaselineEvaluator.py | 63 +++++++--
.../BaselineFlowAverageEvaluator.py | 37 +++++-
.../Perf/Evaluators/NonzeroFlowEvaluator.py | 18 ++-
.../Perf/Measurements/BaseMeasurement.py | 4 +-
lnst/RecipeCommon/Perf/Recipe.py | 121 ++++++++++++------
7 files changed, 233 insertions(+), 67 deletions(-)
diff --git a/lnst/RecipeCommon/BaseResultEvaluator.py b/lnst/RecipeCommon/BaseResultEvaluator.py
index 523a857..6c5e653 100644
--- a/lnst/RecipeCommon/BaseResultEvaluator.py
+++ b/lnst/RecipeCommon/BaseResultEvaluator.py
@@ -1,3 +1,13 @@
+from typing import List, Any
+
+from lnst.Controller.Recipe import BaseRecipe
+
+
class BaseResultEvaluator(object):
- def evaluate_results(self, recipe, recipe_conf, results):
+ def evaluate_results(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: Any,
+ results: List[Any],
+ ):
raise NotImplementedError()
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py
index 3e001b5..ca826a2 100644
--- a/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py
+++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py
@@ -1,18 +1,31 @@
from __future__ import division
+from typing import List, Tuple, Dict
+
+from lnst.Controller.Recipe import BaseRecipe
+from lnst.RecipeCommon.Perf.Recipe import RecipeConf as PerfRecipeConf
+from lnst.RecipeCommon.Perf.Results import result_averages_difference
+from lnst.RecipeCommon.Perf.Measurements.BaseMeasurement import (
+ BaseMeasurementResults as PerfMeasurementResults,
+)
from lnst.RecipeCommon.Perf.Evaluators.BaselineEvaluator import (
BaselineEvaluator,
)
-from lnst.RecipeCommon.Perf.Results import result_averages_difference
-
class BaselineCPUAverageEvaluator(BaselineEvaluator):
- def __init__(self, pass_difference, evaluation_filter=None):
+ def __init__(
+ self, pass_difference: int, evaluation_filter: Dict[str, str] = None
+ ):
self._pass_difference = pass_difference
self._evaluation_filter = evaluation_filter
- def filter_results(self, recipe, recipe_conf, results):
+ def filter_results(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ results: List[PerfMeasurementResults],
+ ) -> List[PerfMeasurementResults]:
if self._evaluation_filter is None:
return results
@@ -25,12 +38,17 @@ def filter_results(self, recipe, recipe_conf, results):
filtered.append(result)
return filtered
- def group_results(self, recipe, recipe_conf, results):
+ def group_results(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ results: List[PerfMeasurementResults],
+ ) -> List[List[PerfMeasurementResults]]:
results_by_host = self._divide_results_by_host(results)
for host_results in results_by_host.values():
yield host_results
- def _divide_results_by_host(self, results):
+ def _divide_results_by_host(self, results: List[PerfMeasurementResults]):
results_by_host = {}
for result in results:
if result.host not in results_by_host:
@@ -38,7 +56,12 @@ def _divide_results_by_host(self, results):
results_by_host[result.host].append(result)
return results_by_host
- def describe_group_results(self, recipe, recipe_conf, results):
+ def describe_group_results(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ results: List[PerfMeasurementResults],
+ ) -> List[str]:
return [
"CPU Baseline average evaluation for Host {hostid}:".format(
hostid=results[0].host.hostid
@@ -48,7 +71,13 @@ def describe_group_results(self, recipe, recipe_conf, results):
),
]
- def compare_result_with_baseline(self, recipe, recipe_conf, result, baseline):
+ def compare_result_with_baseline(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ result: PerfMeasurementResults,
+ baseline: PerfMeasurementResults,
+ ) -> Tuple[bool, List[str]]:
comparison = True
text = []
if baseline is None:
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineEvaluator.py
index 096e2ec..d2ff7f8 100644
--- a/lnst/RecipeCommon/Perf/Evaluators/BaselineEvaluator.py
+++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineEvaluator.py
@@ -1,21 +1,47 @@
+from typing import List, Tuple
+from lnst.Controller.Recipe import BaseRecipe
from lnst.RecipeCommon.BaseResultEvaluator import BaseResultEvaluator
+from lnst.RecipeCommon.Perf.Recipe import RecipeConf as PerfRecipeConf
+from lnst.RecipeCommon.Perf.Measurements.BaseMeasurement import (
+ BaseMeasurementResults as PerfMeasurementResults,
+)
class BaselineEvaluator(BaseResultEvaluator):
- def evaluate_results(self, recipe, recipe_conf, results):
+ def evaluate_results(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ results: List[PerfMeasurementResults],
+ ):
filtered_results = self.filter_results(recipe, recipe_conf, results)
for group in self.group_results(recipe, recipe_conf, filtered_results):
self.evaluate_group_results(recipe, recipe_conf, group)
- def filter_results(self, recipe, recipe_conf, results):
+ def filter_results(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ results: List[PerfMeasurementResults],
+ ) -> List[PerfMeasurementResults]:
return results
- def group_results(self, recipe, recipe_conf, results):
+ def group_results(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ results: List[PerfMeasurementResults],
+ ) -> List[List[PerfMeasurementResults]]:
for result in results:
yield [result]
- def evaluate_group_results(self, recipe, recipe_conf, results):
+ def evaluate_group_results(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ results: List[PerfMeasurementResults],
+ ):
comparison_result = True
result_text = self.describe_group_results(recipe, recipe_conf, results)
@@ -29,14 +55,35 @@ def evaluate_group_results(self, recipe, recipe_conf, results):
recipe.add_result(comparison_result, "\n".join(result_text))
- def describe_group_results(self, recipe, recipe_conf, results):
+ def describe_group_results(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ results: List[PerfMeasurementResults],
+ ) -> List[str]:
return []
- def get_baselines(self, recipe, recipe_conf, results):
+ def get_baselines(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ results: List[PerfMeasurementResults],
+ ) -> List[PerfMeasurementResults]:
return [self.get_baseline(recipe, recipe_conf, result) for result in results]
- def get_baseline(self, recipe, recipe_conf, result):
+ def get_baseline(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ result: PerfMeasurementResults,
+ ) -> PerfMeasurementResults:
return None
- def compare_result_with_baseline(self, recipe, recipe_conf, result, baseline):
+ def compare_result_with_baseline(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ result: PerfMeasurementResults,
+ baseline: PerfMeasurementResults,
+ ) -> Tuple[bool, List[str]]:
return False, ["Result to baseline comparison not implemented"]
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py
index 7b8f168..5d2c014 100644
--- a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py
+++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py
@@ -1,14 +1,23 @@
from __future__ import division
+from typing import List, Tuple
+from lnst.Controller.Recipe import BaseRecipe
+
+from lnst.RecipeCommon.Perf.Recipe import RecipeConf as PerfRecipeConf
+from lnst.RecipeCommon.Perf.Results import result_averages_difference
+from lnst.RecipeCommon.Perf.Results import SequentialPerfResult
+from lnst.RecipeCommon.Perf.Measurements.BaseMeasurement import (
+ BaseMeasurementResults as PerfMeasurementResults,
+)
from lnst.RecipeCommon.Perf.Evaluators.BaselineEvaluator import (
BaselineEvaluator,
)
-from lnst.RecipeCommon.Perf.Results import result_averages_difference
-
class BaselineFlowAverageEvaluator(BaselineEvaluator):
- def __init__(self, pass_difference, metrics_to_evaluate=None):
+ def __init__(
+ self, pass_difference: int, metrics_to_evaluate: List[str] = None
+ ):
self._pass_difference = pass_difference
if metrics_to_evaluate is not None:
@@ -21,7 +30,12 @@ def __init__(self, pass_difference, metrics_to_evaluate=None):
"receiver_cpu_stats",
]
- def describe_group_results(self, recipe, recipe_conf, results):
+ def describe_group_results(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ results: List[PerfMeasurementResults],
+ ) -> List[str]:
result = results[0]
return [
"Baseline average evaluation of flow:",
@@ -31,7 +45,13 @@ def describe_group_results(self, recipe, recipe_conf, results):
),
]
- def compare_result_with_baseline(self, recipe, recipe_conf, result, baseline):
+ def compare_result_with_baseline(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ result: PerfMeasurementResults,
+ baseline: PerfMeasurementResults,
+ ) -> Tuple[bool, List[str]]:
comparison_result = True
result_text = []
if baseline is None:
@@ -48,7 +68,12 @@ def compare_result_with_baseline(self, recipe, recipe_conf, result, baseline):
comparison_result = comparison_result and comparison
return comparison_result, result_text
- def _average_diff_comparison(self, name, target, baseline):
+ def _average_diff_comparison(
+ self,
+ name: str,
+ target: SequentialPerfResult,
+ baseline: SequentialPerfResult,
+ ):
difference = result_averages_difference(target, baseline)
result_text = "New {name} is {diff:.2f}% {direction} from the baseline {name}".format(
name=name,
diff --git a/lnst/RecipeCommon/Perf/Evaluators/NonzeroFlowEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/NonzeroFlowEvaluator.py
index 259b0b0..897d1e2 100644
--- a/lnst/RecipeCommon/Perf/Evaluators/NonzeroFlowEvaluator.py
+++ b/lnst/RecipeCommon/Perf/Evaluators/NonzeroFlowEvaluator.py
@@ -1,13 +1,21 @@
-from lnst.RecipeCommon.BaseResultEvaluator import BaseResultEvaluator
+from typing import List
+
+from lnst.Controller.Recipe import BaseRecipe
-from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import (
- FlowMeasurementResults,
- AggregatedFlowMeasurementResults,
+from lnst.RecipeCommon.Perf.Recipe import RecipeConf as PerfRecipeConf
+from lnst.RecipeCommon.Perf.Measurements.BaseMeasurement import (
+ BaseMeasurementResults as PerfMeasurementResults,
)
+from lnst.RecipeCommon.BaseResultEvaluator import BaseResultEvaluator
class NonzeroFlowEvaluator(BaseResultEvaluator):
- def evaluate_results(self, recipe, recipe_conf, results):
+ def evaluate_results(
+ self,
+ recipe: BaseRecipe,
+ recipe_conf: PerfRecipeConf,
+ results: List[PerfMeasurementResults],
+ ):
for flow_results in results:
result = True
result_text = [
diff --git a/lnst/RecipeCommon/Perf/Measurements/BaseMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/BaseMeasurement.py
index b6ef243..4c0b093 100644
--- a/lnst/RecipeCommon/Perf/Measurements/BaseMeasurement.py
+++ b/lnst/RecipeCommon/Perf/Measurements/BaseMeasurement.py
@@ -44,11 +44,11 @@ def __repr__(self):
class BaseMeasurementResults(object):
- def __init__(self, measurement):
+ def __init__(self, measurement: BaseMeasurement):
self._measurement = measurement
@property
- def measurement(self):
+ def measurement(self) -> BaseMeasurement:
return self._measurement
def align_data(self, start, end):
diff --git a/lnst/RecipeCommon/Perf/Recipe.py b/lnst/RecipeCommon/Perf/Recipe.py
index e8fff4a..82f9649 100644
--- a/lnst/RecipeCommon/Perf/Recipe.py
+++ b/lnst/RecipeCommon/Perf/Recipe.py
@@ -1,18 +1,30 @@
import logging
from collections import OrderedDict
+from typing import Any, List, Dict
from lnst.Common.LnstError import LnstError
from lnst.Controller.Recipe import BaseRecipe
+from lnst.RecipeCommon.Perf.Measurements.BaseMeasurement import (
+ BaseMeasurement,
+ BaseMeasurementResults,
+)
from lnst.RecipeCommon.Perf.Results import SequentialPerfResult
from lnst.RecipeCommon.Perf.Results import ParallelPerfResult
from lnst.RecipeCommon.Perf.PerfTestMixins import (
- BasePerfTestTweakMixin,
- BasePerfTestIterationTweakMixin,
+ BasePerfTestTweakMixin,
+ BasePerfTestIterationTweakMixin,
)
+from lnst.RecipeCommon.BaseResultEvaluator import BaseResultEvaluator
+
class RecipeConf(object):
- def __init__(self, measurements, iterations, parent_recipe_config = None):
+ def __init__(
+ self,
+ measurements: List[BaseMeasurement],
+ iterations: int,
+ parent_recipe_config: Any = None,
+ ):
self._measurements = measurements
self._evaluators = dict()
self._iterations = iterations
@@ -26,9 +38,15 @@ def measurements(self):
def evaluators(self):
return dict(self._evaluators)
- def register_evaluators(self, measurement, evaluators):
+ def register_evaluators(
+ self,
+ measurement: BaseMeasurement,
+ evaluators: List[BaseResultEvaluator],
+ ):
if measurement not in self.measurements:
- raise LnstError("Can't register evaluators for an unknown measurement")
+ raise LnstError(
+ "Can't register evaluators for an unknown measurement"
+ )
self._evaluators[measurement] = list(evaluators)
@@ -42,24 +60,28 @@ def parent_recipe_config(self):
class RecipeResults(object):
- def __init__(self, recipe_conf):
+ def __init__(self, recipe_conf: RecipeConf):
self._recipe_conf = recipe_conf
self._results = OrderedDict()
self._aggregated_results = OrderedDict()
@property
- def recipe_conf(self):
+ def recipe_conf(self) -> RecipeConf:
return self._recipe_conf
@property
- def results(self):
+ def results(self) -> Dict[BaseMeasurement, List[BaseMeasurementResults]]:
return self._results
@property
- def aggregated_results(self):
+ def aggregated_results(
+ self,
+ ) -> Dict[BaseMeasurement, BaseMeasurementResults]:
return self._aggregated_results
- def add_measurement_results(self, measurement, new_results):
+ def add_measurement_results(
+ self, measurement: BaseMeasurement, new_results: BaseMeasurementResults
+ ):
if measurement not in self._results:
self._results[measurement] = [new_results]
else:
@@ -67,39 +89,52 @@ def add_measurement_results(self, measurement, new_results):
aggregated_results = self._aggregated_results.get(measurement, None)
aggregated_results = measurement.aggregate_results(
- aggregated_results, new_results)
+ aggregated_results, new_results
+ )
self._aggregated_results[measurement] = aggregated_results
@property
- def time_aligned_results(self):
+ def time_aligned_results(self) -> "RecipeResults":
timestamps = []
for i in range(self.recipe_conf.iterations):
iteration_results_group = [
- measurement_iteration_result
- for measurement_results in self.results.values()
- for measurement_iteration_result in measurement_results[i]
- ]
-
- timestamps.append((
- max([res.start_timestamp for res in iteration_results_group]),
- min([res.end_timestamp for res in iteration_results_group])
- ))
+ measurement_iteration_result
+ for measurement_results in self.results.values()
+ for measurement_iteration_result in measurement_results[i]
+ ]
+
+ timestamps.append(
+ (
+ max(
+ [res.start_timestamp for res in iteration_results_group]
+ ),
+ min([res.end_timestamp for res in iteration_results_group]),
+ )
+ )
aligned_recipe_results = RecipeResults(self._recipe_conf)
for measurement, measurement_results in self.results.items():
for i, measurement_iteration in enumerate(measurement_results):
aligned_measurement_results = []
for result in measurement_iteration:
- aligned_measurement_result = result.align_data(timestamps[i][0], timestamps[i][1])
- aligned_measurement_results.append(aligned_measurement_result)
+ aligned_measurement_result = result.align_data(
+ timestamps[i][0], timestamps[i][1]
+ )
+ aligned_measurement_results.append(
+ aligned_measurement_result
+ )
- aligned_recipe_results.add_measurement_results(measurement, aligned_measurement_results)
+ aligned_recipe_results.add_measurement_results(
+ measurement, aligned_measurement_results
+ )
return aligned_recipe_results
-class Recipe(BasePerfTestTweakMixin, BasePerfTestIterationTweakMixin, BaseRecipe):
- def perf_test(self, recipe_conf):
+class Recipe(
+ BasePerfTestTweakMixin, BasePerfTestIterationTweakMixin, BaseRecipe
+):
+ def perf_test(self, recipe_conf: RecipeConf):
results = RecipeResults(recipe_conf)
self.apply_perf_test_tweak(recipe_conf)
@@ -113,7 +148,9 @@ def perf_test(self, recipe_conf):
return results
- def perf_test_iteration(self, recipe_conf, results):
+ def perf_test_iteration(
+ self, recipe_conf: RecipeConf, results: RecipeResults
+ ):
self.apply_perf_test_iteration_tweak(recipe_conf)
self.describe_perf_test_iteration_tweak(recipe_conf)
@@ -125,40 +162,50 @@ def perf_test_iteration(self, recipe_conf, results):
for measurement in recipe_conf.measurements:
measurement_results = measurement.collect_results()
results.add_measurement_results(
- measurement, measurement_results)
+ measurement, measurement_results
+ )
finally:
self.remove_perf_test_iteration_tweak(recipe_conf)
- def describe_perf_test_iteration_tweak(self, perf_config):
- description = self.generate_perf_test_iteration_tweak_description(perf_config)
+ def describe_perf_test_iteration_tweak(self, recipe_conf: RecipeConf):
+ description = self.generate_perf_test_iteration_tweak_description(
+ recipe_conf
+ )
self.add_result(True, "\n".join(description))
- def perf_report_and_evaluate(self, results):
+ def perf_report_and_evaluate(self, results: RecipeResults):
aligned_results = results.time_aligned_results
self.perf_report(aligned_results)
self.perf_evaluate(aligned_results)
- def perf_report(self, recipe_results):
+ def perf_report(self, recipe_results: RecipeResults):
if not recipe_results:
self.add_result(False, "No results available to report.")
return
- for measurement, results in list(recipe_results.aggregated_results.items()):
+ for measurement, results in list(
+ recipe_results.aggregated_results.items()
+ ):
measurement.report_results(self, results)
- def perf_evaluate(self, recipe_results):
+ def perf_evaluate(self, recipe_results: RecipeResults):
if not recipe_results:
self.add_result(False, "No results available to evaluate.")
return
recipe_conf = recipe_results.recipe_conf
- for measurement, results in list(recipe_results.aggregated_results.items()):
+ for measurement, results in list(
+ recipe_results.aggregated_results.items()
+ ):
evaluators = recipe_conf.evaluators.get(measurement, [])
for evaluator in evaluators:
evaluator.evaluate_results(self, recipe_conf, results)
if len(evaluators) == 0:
- logging.debug("No evaluator registered for measurement {}"
- .format(measurement))
+ logging.debug(
+ "No evaluator registered for measurement {}".format(
+ measurement
+ )
+ )
--
2.30.0