From: Ondrej Lichtner <olichtne(a)redhat.com>
This commit adds "recipe_conf" parameters to the Perf.Evaluators code.
The value of this parameter is passed to the Evaluators code from the
Perf.Recipe::perf_evaluate method which provides the reference to a
recipe configuration that was stored in the *results* that are being
evaluated.
These results should be a *snapshot* of the recipe configuration that
was current at the point in time when the measurement was run.
As such, the "recipe_conf" parameter in the evaluator code can be used
to inspect the *historical* state of the recipe configuration, which is
important for properly understanding a result during evaluation.
It is also important to understand that this "recipe_conf" object is of
type lnst.RecipeCommon.Perf.Recipe.RecipeConf, so it refers directly to
the configuration of the *Perf.Recipe*, not to the overall configuration
of the entire recipe.
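As an example, a custom evaluator can now inspect the snapshot when
describing results. This is just a minimal sketch; the "iterations"
attribute is assumed here purely as an illustrative piece of RecipeConf
state:

    from lnst.RecipeCommon.Perf.Evaluators.BaselineEvaluator import (
        BaselineEvaluator,
    )

    class ExampleEvaluator(BaselineEvaluator):
        def describe_group_results(self, recipe, recipe_conf, results):
            # recipe_conf is the Perf.Recipe.RecipeConf snapshot stored
            # with the measured results, not the live recipe state
            return [
                "Evaluating results measured with {} iterations".format(
                    recipe_conf.iterations
                )
            ]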
Signed-off-by: Ondrej Lichtner <olichtne(a)redhat.com>
---
lnst/RecipeCommon/BaseResultEvaluator.py | 2 +-
.../Evaluators/BaselineCPUAverageEvaluator.py | 8 ++---
.../Perf/Evaluators/BaselineEvaluator.py | 30 +++++++++----------
.../BaselineFlowAverageEvaluator.py | 4 +--
.../Perf/Evaluators/NonzeroFlowEvaluator.py | 2 +-
lnst/RecipeCommon/Perf/Recipe.py | 2 +-
6 files changed, 24 insertions(+), 24 deletions(-)
diff --git a/lnst/RecipeCommon/BaseResultEvaluator.py b/lnst/RecipeCommon/BaseResultEvaluator.py
index f8902b9..523a857 100644
--- a/lnst/RecipeCommon/BaseResultEvaluator.py
+++ b/lnst/RecipeCommon/BaseResultEvaluator.py
@@ -1,3 +1,3 @@
class BaseResultEvaluator(object):
- def evaluate_results(self, recipe, results):
+ def evaluate_results(self, recipe, recipe_conf, results):
raise NotImplementedError()
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py
index 0125a31..3e001b5 100644
--- a/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py
+++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py
@@ -12,7 +12,7 @@ def __init__(self, pass_difference, evaluation_filter=None):
self._pass_difference = pass_difference
self._evaluation_filter = evaluation_filter
- def filter_results(self, recipe, results):
+ def filter_results(self, recipe, recipe_conf, results):
if self._evaluation_filter is None:
return results
@@ -25,7 +25,7 @@ def filter_results(self, recipe, results):
filtered.append(result)
return filtered
- def group_results(self, recipe, results):
+ def group_results(self, recipe, recipe_conf, results):
results_by_host = self._divide_results_by_host(results)
for host_results in results_by_host.values():
yield host_results
@@ -38,7 +38,7 @@ def _divide_results_by_host(self, results):
results_by_host[result.host].append(result)
return results_by_host
- def describe_group_results(self, recipe, results):
+ def describe_group_results(self, recipe, recipe_conf, results):
return [
"CPU Baseline average evaluation for Host {hostid}:".format(
hostid=results[0].host.hostid
@@ -48,7 +48,7 @@ def describe_group_results(self, recipe, results):
),
]
- def compare_result_with_baseline(self, recipe, result, baseline):
+ def compare_result_with_baseline(self, recipe, recipe_conf, result, baseline):
comparison = True
text = []
if baseline is None:
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineEvaluator.py
index 20e3887..096e2ec 100644
--- a/lnst/RecipeCommon/Perf/Evaluators/BaselineEvaluator.py
+++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineEvaluator.py
@@ -2,41 +2,41 @@
class BaselineEvaluator(BaseResultEvaluator):
- def evaluate_results(self, recipe, results):
- filtered_results = self.filter_results(recipe, results)
+ def evaluate_results(self, recipe, recipe_conf, results):
+ filtered_results = self.filter_results(recipe, recipe_conf, results)
- for group in self.group_results(recipe, filtered_results):
- self.evaluate_group_results(recipe, group)
+ for group in self.group_results(recipe, recipe_conf, filtered_results):
+ self.evaluate_group_results(recipe, recipe_conf, group)
- def filter_results(self, recipe, results):
+ def filter_results(self, recipe, recipe_conf, results):
return results
- def group_results(self, recipe, results):
+ def group_results(self, recipe, recipe_conf, results):
for result in results:
yield [result]
- def evaluate_group_results(self, recipe, results):
+ def evaluate_group_results(self, recipe, recipe_conf, results):
comparison_result = True
- result_text = self.describe_group_results(recipe, results)
+ result_text = self.describe_group_results(recipe, recipe_conf, results)
- baselines = self.get_baselines(recipe, results)
+ baselines = self.get_baselines(recipe, recipe_conf, results)
for result, baseline in zip(results, baselines):
comparison, text = self.compare_result_with_baseline(
- recipe, result, baseline
+ recipe, recipe_conf, result, baseline
)
comparison_result = comparison_result and comparison
result_text.extend(text)
recipe.add_result(comparison_result, "\n".join(result_text))
- def describe_group_results(self, recipe, results):
+ def describe_group_results(self, recipe, recipe_conf, results):
return []
- def get_baselines(self, recipe, results):
- return [self.get_baseline(recipe, result) for result in results]
+ def get_baselines(self, recipe, recipe_conf, results):
+ return [self.get_baseline(recipe, recipe_conf, result) for result in results]
- def get_baseline(self, recipe, result):
+ def get_baseline(self, recipe, recipe_conf, result):
return None
- def compare_result_with_baseline(self, recipe, result, baseline):
+ def compare_result_with_baseline(self, recipe, recipe_conf, result, baseline):
return False, ["Result to baseline comparison not implemented"]
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py
index f7a2fb1..7b8f168 100644
--- a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py
+++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py
@@ -21,7 +21,7 @@ def __init__(self, pass_difference, metrics_to_evaluate=None):
"receiver_cpu_stats",
]
- def describe_group_results(self, recipe, results):
+ def describe_group_results(self, recipe, recipe_conf, results):
result = results[0]
return [
"Baseline average evaluation of flow:",
@@ -31,7 +31,7 @@ def describe_group_results(self, recipe, results):
),
]
- def compare_result_with_baseline(self, recipe, result, baseline):
+ def compare_result_with_baseline(self, recipe, recipe_conf, result, baseline):
comparison_result = True
result_text = []
if baseline is None:
diff --git a/lnst/RecipeCommon/Perf/Evaluators/NonzeroFlowEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/NonzeroFlowEvaluator.py
index 0d0921c..259b0b0 100644
--- a/lnst/RecipeCommon/Perf/Evaluators/NonzeroFlowEvaluator.py
+++ b/lnst/RecipeCommon/Perf/Evaluators/NonzeroFlowEvaluator.py
@@ -7,7 +7,7 @@
class NonzeroFlowEvaluator(BaseResultEvaluator):
- def evaluate_results(self, recipe, results):
+ def evaluate_results(self, recipe, recipe_conf, results):
for flow_results in results:
result = True
result_text = [
diff --git a/lnst/RecipeCommon/Perf/Recipe.py b/lnst/RecipeCommon/Perf/Recipe.py
index 9170246..041f3b5 100644
--- a/lnst/RecipeCommon/Perf/Recipe.py
+++ b/lnst/RecipeCommon/Perf/Recipe.py
@@ -151,7 +151,7 @@ def perf_evaluate(self, recipe_results):
for measurement, results in list(recipe_results.aggregated_results.items()):
evaluators = recipe_conf.evaluators.get(measurement, [])
for evaluator in evaluators:
- evaluator.evaluate_results(self, results)
+ evaluator.evaluate_results(self, recipe_conf, results)
if len(evaluators) == 0:
logging.debug("No evaluator registered for measurement {}"
--
2.30.0