From: Ondrej Lichtner <olichtne(a)redhat.com>
The RecipeCommon Perf and Ping modules define BaseRecipe-derived
classes (PerfTestAndEvaluate and PingTestAndEvaluate) that provide the
common methods used by a recipe testing performance or connectivity
between two endpoints. I expect these to be a fairly common pattern in
recipes, which is why I wanted to separate them into "common" classes.
A recipe can inherit from both to combine their functionality; on
their own they don't define a fully functional recipe.
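
To illustrate, a recipe combining the two could look roughly like the
following sketch (the recipe name and test body are only illustrative
and assume the usual BaseRecipe test() entry point, they are not part
of this patch):

    from lnst.RecipeCommon.Perf import PerfTestAndEvaluate
    from lnst.RecipeCommon.Ping import PingTestAndEvaluate

    class ExamplePerfPingRecipe(PerfTestAndEvaluate, PingTestAndEvaluate):
        def test(self):
            # build a PingConf/PerfConf describing the two endpoints
            # and call the ping_test/perf_test and
            # *_evaluate_and_report methods provided by the common
            # classes
            ...
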
Each module also defines a "*Conf" class specifying the configuration
that should be used by the Perf or Ping class. The configurations
define the endpoints that should be used and the specific
configuration of the Perf or Ping tool. This serves as an abstraction
of the configuration that is later translated into the specific
parameters of the test module used by the PerfTestAndEvaluate and
PingTestAndEvaluate classes.
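
For example, with the PerfConf defined below, a recipe method could
describe and run a measurement roughly like this (the hosts, addresses
and parameter values are purely illustrative):

    perf_conf = PerfConf(perf_tool=measurement_tool,
                         client=host1, client_bind=host1_addr,
                         server=host2, server_bind=host2_addr,
                         test_type="tcp_stream",
                         msg_size=16384, duration=60,
                         iterations=5, streams=2)
    results = self.perf_test(perf_conf)
    self.perf_evaluate_and_report(perf_conf, results, baseline=None)
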
The Perf module also defines a PerfMeasurementTool abstract class that
describes the interface of a class performing the actual perf test
with a specific measurement tool, e.g. Iperf or Netperf. The goal is
to be able to choose which measurement tool should be used.
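
A specific tool is plugged in by implementing the static perf_measure
method, e.g. something along these lines (the IperfMeasurementTool
class is only a sketch, it is not part of this patch):

    class IperfMeasurementTool(PerfMeasurementTool):
        @staticmethod
        def perf_measure(perf_conf):
            # translate perf_conf into parameters of the iperf test
            # module, run the client and server jobs and return the
            # measured client and server results
            ...
            return client_results, server_results
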
The Ping module doesn't need such an abstraction at this time because
there's just one ping test module, but we could extend the code later
if required.

Signed-off-by: Ondrej Lichtner <olichtne(a)redhat.com>
---
lnst/RecipeCommon/Perf.py | 114 ++++++++++++++++++++++++++++++++++++++
lnst/RecipeCommon/Ping.py | 45 +++++++++++++++
2 files changed, 159 insertions(+)
create mode 100644 lnst/RecipeCommon/Perf.py
create mode 100644 lnst/RecipeCommon/Ping.py
diff --git a/lnst/RecipeCommon/Perf.py b/lnst/RecipeCommon/Perf.py
new file mode 100644
index 0000000..49fa81f
--- /dev/null
+++ b/lnst/RecipeCommon/Perf.py
@@ -0,0 +1,114 @@
+from lnst.Controller.Recipe import BaseRecipe
+from lnst.RecipeCommon.PerfResult import MultiRunPerf
+
+class PerfConf(object):
+    def __init__(self,
+                 perf_tool,
+                 client, client_bind,
+                 server, server_bind,
+                 test_type,
+                 msg_size, duration, iterations, streams):
+        self._perf_tool = perf_tool
+        self._client = client
+        self._client_bind = client_bind
+        self._server = server
+        self._server_bind = server_bind
+
+        self._test_type = test_type
+
+        self._msg_size = msg_size
+        self._duration = duration
+        self._iterations = iterations
+        self._streams = streams
+
+    @property
+    def perf_tool(self):
+        return self._perf_tool
+
+    @property
+    def client(self):
+        return self._client
+
+    @property
+    def client_bind(self):
+        return self._client_bind
+
+    @property
+    def server(self):
+        return self._server
+
+    @property
+    def server_bind(self):
+        return self._server_bind
+
+    @property
+    def test_type(self):
+        return self._test_type
+
+    @property
+    def msg_size(self):
+        return self._msg_size
+
+    @property
+    def duration(self):
+        return self._duration
+
+    @property
+    def iterations(self):
+        return self._iterations
+
+    @property
+    def streams(self):
+        return self._streams
+
+class PerfMeasurementTool(object):
+    @staticmethod
+    def perf_measure(perf_conf):
+        raise NotImplementedError
+
+class PerfTestAndEvaluate(BaseRecipe):
+    def perf_test(self, perf_conf):
+        client_measurements = MultiRunPerf()
+        server_measurements = MultiRunPerf()
+        for i in range(perf_conf.iterations):
+            client, server = perf_conf.perf_tool.perf_measure(perf_conf)
+
+            client_measurements.append(client)
+            server_measurements.append(server)
+
+        return client_measurements, server_measurements
+
+    def perf_evaluate_and_report(self, perf_conf, results, baseline):
+        self.perf_evaluate(perf_conf, results, baseline)
+
+        self.perf_report(perf_conf, results, baseline)
+
+    def perf_evaluate(self, perf_conf, results, baseline):
+        client, server = results
+
+        if client.average > 0:
+            self.add_result(True, "Client reported non-zero throughput")
+        else:
+            self.add_result(False, "Client reported zero throughput")
+
+        if server.average > 0:
+            self.add_result(True, "Server reported non-zero throughput")
+        else:
+            self.add_result(False, "Server reported zero throughput")
+
+
+    def perf_report(self, perf_conf, results, baseline):
+        client, server = results
+
+        self.add_result(True,
+                "Client measured throughput: {tput} +-{deviation} {unit} per second"
+                .format(tput=client.average,
+                        deviation=client.std_deviation,
+                        unit=client.unit),
+                data = client)
+        self.add_result(True,
+                "Server measured throughput: {tput} +-{deviation} {unit} per second"
+                .format(tput=server.average,
+                        deviation=server.std_deviation,
+                        unit=server.unit),
+                data = server)
diff --git a/lnst/RecipeCommon/Ping.py b/lnst/RecipeCommon/Ping.py
new file mode 100644
index 0000000..0f9f800
--- /dev/null
+++ b/lnst/RecipeCommon/Ping.py
@@ -0,0 +1,45 @@
+from lnst.Controller.Recipe import BaseRecipe
+from lnst.Tests import Ping
+
+class PingConf(object):
+    def __init__(self,
+                 client, client_bind,
+                 destination, destination_address):
+        self._client = client
+        self._client_bind = client_bind
+        self._destination = destination
+        self._destination_address = destination_address
+
+    @property
+    def client(self):
+        return self._client
+
+    @property
+    def client_bind(self):
+        return self._client_bind
+
+    @property
+    def destination(self):
+        return self._destination
+
+    @property
+    def destination_address(self):
+        return self._destination_address
+
+class PingTestAndEvaluate(BaseRecipe):
+    def ping_test(self, ping_config):
+        client = ping_config.client
+        destination = ping_config.destination
+
+        ping = Ping(dst = ping_config.destination_address,
+                    interface = ping_config.client_bind)
+
+        ping_job = client.run(ping)
+        return ping_job.result
+
+    def ping_evaluate_and_report(self, ping_config, results):
+        # do we want to use the "perf" measurements (store a baseline etc...) as well?
+        if results["rate"] > 50:
+            self.add_result(True, "Ping successful", results)
+        else:
+            self.add_result(False, "Ping unsuccessful", results)
--
2.17.0