This patchset is another attempt to implement parallel iperf testing. In comparison to the first RFC the changes are less intrusive and reuse the original Flow concept.
Motivation:
The current parallel implementation that can be achieved by specifying the perf_parallel_streams recipe parameter works correctly; however, the limitation is that there's only one iperf process that creates multiple connections, and that process (and all the connections) can be handled by only a single CPU at the same time. In our internal testing this proved to report very variable CPU utilization numbers.
This patchset extends the IperfFlowMeasurementGenerator with additional recipe parameters: * perf_parallel_processes * perf_tool_cpu_policy
Additionally some of the parameters were modified to support parallelism: * perf_tool_cpu is now a ListParam * dev_intr_cpu is now a ListParam
The patch set includes also update of DevInterruptHWConfigMixin that is required for this test scenario to provide reproducible results.
Jan Tluka (13): RecipeCommon.Perf.Measurements.BaseFlowMeasurement.Flow: add receiver_port RecipeCommon.Perf.Measurements.IperfFlowMeasurement: configure receiver_port Recipes.ENRT.MeasurementGenerators.IperfMeasurementGenerator: adapt to Flow port changes Recipes.ENRT.ConfigMixins.Reversible: adapt to Flow port changes Recipes.ENRT.MeasurementGenerators.IperfMeasurementGenerator: add _create_perf_flows Recipes.ENRT.MeasurementGenerators.IperfMeasurementGenerator: add perf_parallel_processes parameter IperfMeasurementGenerator: adjust cpu parameters for parallel iperf support RecipeCommon.Perf.Measurements.IperfFlowMeasurement: adjust cpupin for both server and client RecipeCommon.Perf.Measurements.IperfFlowMeasurement: add aggregate_multi_flow_results() RecipeCommon.Perf.Evaluators.BaselineFlowAverageEvaluator: override group_results RecipeCommon.Perf.Measurements.BaseFlowMeasurement: report also aggregated results Recipes.ENRT.MeasurementGenerators.IperfMeasurementGenerator: fix issue with unspecified perf_tool_cpu param Recipes.ENRT.ConfigMixins.DevInterruptHWConfigMixin: change dev_intr_cpu to ListParam
.../BaselineFlowAverageEvaluator.py | 10 ++ .../Perf/Measurements/BaseFlowMeasurement.py | 16 +++- .../Perf/Measurements/IperfFlowMeasurement.py | 93 +++++++++++++++---- .../ConfigMixins/DevInterruptHWConfigMixin.py | 26 +++--- .../ConfigMixins/PerfReversibleFlowMixin.py | 34 +++++-- .../IperfMeasurementGenerator.py | 83 +++++++++++++---- 6 files changed, 208 insertions(+), 54 deletions(-)
Signed-off-by: Jan Tluka jtluka@redhat.com --- .../Perf/Measurements/BaseFlowMeasurement.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py index a798ca5f..d0ceffcf 100644 --- a/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py +++ b/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py @@ -10,7 +10,7 @@ class Flow(object): def __init__(self, type, generator, generator_bind, generator_nic, - receiver, receiver_bind, receiver_nic, + receiver, receiver_bind, receiver_nic, receiver_port, msg_size, duration, parallel_streams, cpupin): self._type = type
@@ -20,6 +20,7 @@ class Flow(object): self._receiver = receiver self._receiver_bind = receiver_bind self._receiver_nic = receiver_nic + self._receiver_port = receiver_port
self._msg_size = msg_size self._duration = duration @@ -54,6 +55,10 @@ class Flow(object): def receiver_nic(self): return self._receiver_nic
+ @property + def receiver_port(self): + return self._receiver_port + @property def msg_size(self): return self._msg_size @@ -80,6 +85,7 @@ class Flow(object): receiver={receiver}, receiver_bind={receiver_bind}, receiver_nic={receiver_nic}, + receiver_port={receiver_port}, msg_size={msg_size}, duration={duration}, parallel_streams={parallel_streams}, @@ -92,6 +98,7 @@ class Flow(object): receiver=str(self.receiver), receiver_bind=self.receiver_bind, receiver_nic=self.receiver_nic, + receiver_port=self.receiver_port, msg_size=self.msg_size, duration=self.duration, parallel_streams=self.parallel_streams,
The iperf server and client params are extended with the Flow receiver_port property.
Signed-off-by: Jan Tluka jtluka@redhat.com --- lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py | 6 ++++++ 1 file changed, 6 insertions(+)
diff --git a/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py index 722b49ed..6e6fd64f 100644 --- a/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py +++ b/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py @@ -128,6 +128,9 @@ class IperfFlowMeasurement(BaseFlowMeasurement): elif flow.cpupin is not None: raise RecipeError("Negative perf cpupin value provided.")
+ if flow.receiver_port is not None: + server_params["port"] = flow.receiver_port + return host.prepare_job(IperfServer(**server_params), job_level=ResultLevel.NORMAL)
@@ -161,6 +164,9 @@ class IperfFlowMeasurement(BaseFlowMeasurement): if flow.msg_size: client_params["blksize"] = flow.msg_size
+ if flow.receiver_port is not None: + client_params["port"] = flow.receiver_port + return host.prepare_job(IperfClient(**client_params), job_level=ResultLevel.NORMAL)
is this also needed for other FlowMeasurement classes (Trex?) or will they still work fine, just ignoring the parameter?
-Ondrej
On Thu, Feb 18, 2021 at 01:58:34PM +0100, Jan Tluka wrote:
The iperf server and client params is extended with the Flow receiver_port property.
Signed-off-by: Jan Tluka jtluka@redhat.com
lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py | 6 ++++++ 1 file changed, 6 insertions(+)
diff --git a/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py index 722b49ed..6e6fd64f 100644 --- a/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py +++ b/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py @@ -128,6 +128,9 @@ class IperfFlowMeasurement(BaseFlowMeasurement): elif flow.cpupin is not None: raise RecipeError("Negative perf cpupin value provided.")
if flow.receiver_port is not None:server_params["port"] = flow.receiver_portreturn host.prepare_job(IperfServer(**server_params), job_level=ResultLevel.NORMAL)@@ -161,6 +164,9 @@ class IperfFlowMeasurement(BaseFlowMeasurement): if flow.msg_size: client_params["blksize"] = flow.msg_size
if flow.receiver_port is not None:client_params["port"] = flow.receiver_portreturn host.prepare_job(IperfClient(**client_params), job_level=ResultLevel.NORMAL)-- 2.26.2 _______________________________________________ LNST-developers mailing list -- lnst-developers@lists.fedorahosted.org To unsubscribe send an email to lnst-developers-leave@lists.fedorahosted.org Fedora Code of Conduct: https://docs.fedoraproject.org/en-US/project/code-of-conduct/ List Guidelines: https://fedoraproject.org/wiki/Mailing_list_guidelines List Archives: https://lists.fedorahosted.org/archives/list/lnst-developers@lists.fedorahos... Do not reply to spam on the list, report it: https://pagure.io/fedora-infrastructure
Tue, Feb 23, 2021 at 01:51:49PM CET, olichtne@redhat.com wrote:
is this also needed for other FlowMeasurement classes (Trex?) or will they still work fine, just ignoring the parameter?
-Ondrej
Good point. I don't think this is an issue of TrexFlowMeasurement as it does not use Flow directly, however a test run of OvS_DPDK_PvP recipe failed with a traceback.
Traceback (most recent call last): File "./do-my-test", line 38, in main ctl.run(recipe) File "/usr/lib/python3.6/site-packages/lnst/Controller/Controller.py", line 160, in run recipe.test() File "/usr/lib/python3.6/site-packages/lnst/RHExtensions/RHRecipeMixin.py", line 73, in test super(RHRecipeMixin, self).test() File "/usr/lib/python3.6/site-packages/lnst/Recipes/ENRT/OvS_DPDK_PvP.py", line 77, in test self.pvp_test(config) File "/usr/lib/python3.6/site-packages/lnst/Recipes/ENRT/BasePvPRecipe.py", line 255, in pvp_test perf_config = self.generate_perf_config(config) File "/usr/lib/python3.6/site-packages/lnst/Recipes/ENRT/OvS_DPDK_PvP.py", line 151, in generate_perf_config cpupin=None)) TypeError: __init__() missing 1 required positional argument: 'receiver_port'
I'll fix this and also take a look at other code that instantiates the Flow objects.
-Jan
On Thu, Feb 18, 2021 at 01:58:34PM +0100, Jan Tluka wrote:
The iperf server and client params is extended with the Flow receiver_port property.
Signed-off-by: Jan Tluka jtluka@redhat.com
lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py | 6 ++++++ 1 file changed, 6 insertions(+)
diff --git a/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py index 722b49ed..6e6fd64f 100644 --- a/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py +++ b/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py @@ -128,6 +128,9 @@ class IperfFlowMeasurement(BaseFlowMeasurement): elif flow.cpupin is not None: raise RecipeError("Negative perf cpupin value provided.")
if flow.receiver_port is not None:server_params["port"] = flow.receiver_portreturn host.prepare_job(IperfServer(**server_params), job_level=ResultLevel.NORMAL)@@ -161,6 +164,9 @@ class IperfFlowMeasurement(BaseFlowMeasurement): if flow.msg_size: client_params["blksize"] = flow.msg_size
if flow.receiver_port is not None:client_params["port"] = flow.receiver_portreturn host.prepare_job(IperfClient(**client_params), job_level=ResultLevel.NORMAL)-- 2.26.2 _______________________________________________ LNST-developers mailing list -- lnst-developers@lists.fedorahosted.org To unsubscribe send an email to lnst-developers-leave@lists.fedorahosted.org Fedora Code of Conduct: https://docs.fedoraproject.org/en-US/project/code-of-conduct/ List Guidelines: https://fedoraproject.org/wiki/Mailing_list_guidelines List Archives: https://lists.fedorahosted.org/archives/list/lnst-developers@lists.fedorahos... Do not reply to spam on the list, report it: https://pagure.io/fedora-infrastructure
Signed-off-by: Jan Tluka jtluka@redhat.com --- .../ENRT/MeasurementGenerators/IperfMeasurementGenerator.py | 3 +++ 1 file changed, 3 insertions(+)
diff --git a/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py b/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py index 4ccf9227..324fb5b3 100644 --- a/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py +++ b/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py @@ -100,6 +100,7 @@ class IperfMeasurementGenerator(BaseMeasurementGenerator): client_bind, server_nic, server_bind, + None, size, ) ] @@ -121,6 +122,7 @@ class IperfMeasurementGenerator(BaseMeasurementGenerator): client_bind, server_nic, server_bind, + server_port, msg_size, ) -> PerfFlow: """ @@ -135,6 +137,7 @@ class IperfMeasurementGenerator(BaseMeasurementGenerator): receiver=server_nic.netns, receiver_bind=server_bind, receiver_nic=server_nic, + receiver_port=server_port, msg_size=msg_size, duration=self.params.perf_duration, parallel_streams=self.params.perf_parallel_streams,
Signed-off-by: Jan Tluka jtluka@redhat.com --- .../ConfigMixins/PerfReversibleFlowMixin.py | 34 +++++++++++++++---- 1 file changed, 28 insertions(+), 6 deletions(-)
diff --git a/lnst/Recipes/ENRT/ConfigMixins/PerfReversibleFlowMixin.py b/lnst/Recipes/ENRT/ConfigMixins/PerfReversibleFlowMixin.py index 48210d07..6a157811 100644 --- a/lnst/Recipes/ENRT/ConfigMixins/PerfReversibleFlowMixin.py +++ b/lnst/Recipes/ENRT/ConfigMixins/PerfReversibleFlowMixin.py @@ -24,11 +24,33 @@ class PerfReversibleFlowMixin(object): """ perf_reverse = BoolParam(default=False)
- def _create_perf_flow(self, perf_test, client_nic, client_bind, server_nic, - server_bind, msg_size) -> PerfFlow: + def _create_perf_flow( + self, + perf_test, + client_nic, + client_bind, + server_nic, + server_bind, + server_port, + msg_size, + ) -> PerfFlow: if self.params.perf_reverse: - return super()._create_perf_flow(perf_test, server_nic, server_bind, - client_nic, client_bind, msg_size) + return super()._create_perf_flow( + perf_test, + server_nic, + server_bind, + client_nic, + client_bind, + server_port, + msg_size, + ) else: - return super()._create_perf_flow(perf_test, client_nic, client_bind, - server_nic, server_bind, msg_size) + return super()._create_perf_flow( + perf_test, + client_nic, + client_bind, + server_nic, + server_bind, + server_port, + msg_size, + )
Currently the IperfMeasurementGenerator yields a list containing a single Flow for a test configuration. This patch adds a new method that can generate multiple Flows in the list.
This will be used by parallel iperf measurement in the following patches.
Signed-off-by: Jan Tluka jtluka@redhat.com --- .../IperfMeasurementGenerator.py | 45 ++++++++++++++----- 1 file changed, 34 insertions(+), 11 deletions(-)
diff --git a/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py b/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py index 324fb5b3..904efa34 100644 --- a/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py +++ b/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py @@ -10,6 +10,8 @@ from lnst.RecipeCommon.Perf.Measurements import IperfFlowMeasurement
from lnst.Recipes.ENRT.MeasurementGenerators.BaseMeasurementGenerator import BaseMeasurementGenerator
+from typing import List + class IperfMeasurementGenerator(BaseMeasurementGenerator): """ :param perf_tests: @@ -93,17 +95,14 @@ class IperfMeasurementGenerator(BaseMeasurementGenerator):
for perf_test in self.params.perf_tests: for size in self.params.perf_msg_sizes: - yield [ - self._create_perf_flow( - perf_test, - client_nic, - client_bind, - server_nic, - server_bind, - None, - size, - ) - ] + yield self._create_perf_flows( + perf_test, + client_nic, + client_bind, + server_nic, + server_bind, + size, + )
def generate_perf_endpoints(self, config): """Generator for perf endpoints @@ -115,6 +114,30 @@ class IperfMeasurementGenerator(BaseMeasurementGenerator): """ return []
+ def _create_perf_flows( + self, + perf_test, + client_nic, + client_bind, + server_nic, + server_bind, + msg_size, + ) -> List[PerfFlow]: + flows = [] + flows.append( + self._create_perf_flow( + perf_test, + client_nic, + client_bind, + server_nic, + server_bind, + None, + msg_size, + ) + ) + + return flows + def _create_perf_flow( self, perf_test,
This enables parallel iperf testing for the ENRT recipes. To run multiple iperf measurements at the same time, specify the new parameter perf_parallel_processes with a value greater than one. If set to one or unspecified, the behaviour is the same as before.
From the implementation point of view there is one additional change to the previous behaviour where the iperf server port is specified for each generated Flow. The port value starts at 12000 and is increased with each Flow.
Signed-off-by: Jan Tluka jtluka@redhat.com --- .../IperfMeasurementGenerator.py | 29 ++++++++++++------- 1 file changed, 19 insertions(+), 10 deletions(-)
diff --git a/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py b/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py index 904efa34..5570f9e4 100644 --- a/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py +++ b/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py @@ -44,6 +44,12 @@ class IperfMeasurementGenerator(BaseMeasurementGenerator): measured at the same time. :type perf_parallel_streams: :any:`IntParam` (default 1)
+ :param perf_parallel_processes: + Parameter used by the :any:`generate_flow_combinations` generator. To + specify how many parallel net_perf_tool processes of the same network flow + should be measured at the same time. + :type perf_parallel_processes: :any:`IntParam` (default 1) + :param perf_msg_sizes: Parameter used by the :any:`generate_flow_combinations` generator. To specify what different message sizes (in bytes) used generated for the @@ -58,6 +64,7 @@ class IperfMeasurementGenerator(BaseMeasurementGenerator): perf_duration = IntParam(default=60) perf_iterations = IntParam(default=5) perf_parallel_streams = IntParam(default=1) + perf_parallel_processes = IntParam(default=1) perf_msg_sizes = ListParam(default=[123])
net_perf_tool = Param(default=IperfFlowMeasurement) @@ -124,17 +131,19 @@ class IperfMeasurementGenerator(BaseMeasurementGenerator): msg_size, ) -> List[PerfFlow]: flows = [] - flows.append( - self._create_perf_flow( - perf_test, - client_nic, - client_bind, - server_nic, - server_bind, - None, - msg_size, + port_offset=12000 + for i in range(self.params.perf_parallel_processes): + flows.append( + self._create_perf_flow( + perf_test, + client_nic, + client_bind, + server_nic, + server_bind, + port_offset + i, + msg_size, + ) ) - )
return flows
This patch introduces two significant changes.
First one is a change of perf_tool_cpu parameter to ListParam type. That enables two new test scenarios: * using multiple cpus for an iperf process - this however cannot be used due to a limitation of iperf * specifying a set of multiple cpus that can be assigned to individual iperf processes based on a "policy"
For the second scenario the patch adds a new optional recipe parameter perf_tool_cpu_policy, that currently supports two values: * 'round-robin' - each of the iperf processes will be pinned to exactly ONE cpu from the list defined by perf_tool_cpu in a round-robin fashion * 'all' - each of the iperf processes will be pinned to all cpus in the list defined by perf_tool_cpu - due to limitations of iperf this is not possible at the moment * if the recipe parameter is not set, this defaults to 'all'
To match the original behaviour, the user needs to modify: perf_tool_cpu=0 to perf_tool_cpu=[0]
Signed-off-by: Jan Tluka jtluka@redhat.com --- .../IperfMeasurementGenerator.py | 26 ++++++++++++++----- 1 file changed, 20 insertions(+), 6 deletions(-)
diff --git a/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py b/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py index 5570f9e4..adac62a2 100644 --- a/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py +++ b/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py @@ -2,6 +2,7 @@ from lnst.Common.Parameters import ( Param, IntParam, ListParam, + StrParam, ) from lnst.Common.IpAddress import AF_INET, AF_INET6
@@ -60,7 +61,8 @@ class IperfMeasurementGenerator(BaseMeasurementGenerator):
# common perf test params perf_tests = Param(default=("tcp_stream", "udp_stream", "sctp_stream")) - perf_tool_cpu = IntParam(mandatory=False) + perf_tool_cpu = ListParam(mandatory=False) + perf_tool_cpu_policy = StrParam(mandatory=False) perf_duration = IntParam(default=60) perf_iterations = IntParam(default=5) perf_parallel_streams = IntParam(default=1) @@ -142,11 +144,26 @@ class IperfMeasurementGenerator(BaseMeasurementGenerator): server_bind, port_offset + i, msg_size, + self._cpupin_based_on_policy(i), ) )
return flows
+ def _cpupin_based_on_policy(self, process_no=None): + if process_no is None: + return None + + # TODO: what if cpu related params are None, return None? + + cpus = self.params.perf_tool_cpu + if self.params.perf_tool_cpu_policy == 'round-robin': + return [cpus[process_no % len(cpus)]] + elif self.params.perf_tool_cpu_policy == 'all': + return cpus + else: + return None + def _create_perf_flow( self, perf_test, @@ -156,6 +173,7 @@ class IperfMeasurementGenerator(BaseMeasurementGenerator): server_bind, server_port, msg_size, + cpupin, ) -> PerfFlow: """ Wrapper to create a PerfFlow. Mixins that want to change this behavior (for example, to reverse the direction) @@ -173,9 +191,5 @@ class IperfMeasurementGenerator(BaseMeasurementGenerator): msg_size=msg_size, duration=self.params.perf_duration, parallel_streams=self.params.perf_parallel_streams, - cpupin=( - self.params.perf_tool_cpu - if "perf_tool_cpu" in self.params - else None - ), + cpupin=cpupin, )
This patch unifies configuration of cpupin parameter for iperf to reduce code duplication.
Additionally, the limitation of specifying the cpupin parameter together with perf_parallel_streams has been removed as this should not limit the user from using such a configuration.
Signed-off-by: Jan Tluka jtluka@redhat.com --- .../Perf/Measurements/IperfFlowMeasurement.py | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py index 6e6fd64f..3b24aa58 100644 --- a/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py +++ b/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py @@ -119,14 +119,7 @@ class IperfFlowMeasurement(BaseFlowMeasurement): server_params = dict(bind = ipaddress(flow.receiver_bind), oneoff = True)
- if flow.cpupin is not None and flow.cpupin >= 0: - if flow.parallel_streams == 1: - server_params["cpu_bind"] = flow.cpupin - else: - raise RecipeError("Unsupported combination of single cpupin " - "with parallel perf streams.") - elif flow.cpupin is not None: - raise RecipeError("Negative perf cpupin value provided.") + self._set_cpupin_params(server_params, flow.cpupin)
if flow.receiver_port is not None: server_params["port"] = flow.receiver_port @@ -149,14 +142,7 @@ class IperfFlowMeasurement(BaseFlowMeasurement): else: raise RecipeError("Unsupported flow type '{}'".format(flow.type))
- if flow.cpupin is not None and flow.cpupin >= 0: - if flow.parallel_streams == 1: - client_params["cpu_bind"] = flow.cpupin - else: - raise RecipeError("Unsupported combination of single cpupin " - "with parallel perf streams.") - elif flow.cpupin is not None: - raise RecipeError("Negative perf cpupin value provided.") + self._set_cpupin_params(client_params, flow.cpupin)
if flow.parallel_streams > 1: client_params["parallel"] = flow.parallel_streams @@ -170,6 +156,20 @@ class IperfFlowMeasurement(BaseFlowMeasurement): return host.prepare_job(IperfClient(**client_params), job_level=ResultLevel.NORMAL)
+ def _set_cpupin_params(self, params, cpupin): + if cpupin is not None: + for cpu in cpupin: + if cpu < 0: + raise RecipeError("Negative perf cpupin value provided.") + + # at the moment iperf does not support pinning to multiple cpus + # so pin to the first cpu specified in the list + if len(cpupin) > 1: + raise RecipeError("Cannot pin iperf to the specified list "\ + "of cpus due to missing support in iperf.") + + params["cpu_bind"] = cpupin[0] + def _parse_job_streams(self, job): result = ParallelPerfResult() if not job.passed:
IperfFlowMeasurement can measure multiple Flows at the same time and will produce multiple FlowMeasurementResults.
To be able to report and evaluate sum of the flows a new method aggregate_multi_flow_results is added to support these actions. The method creates a new AggregatedFlowMeasurementResults object that will collect iteration data results of each Flow and transform the data into additional ParallelPerfResult layer.
Signed-off-by: Jan Tluka jtluka@redhat.com --- .../Perf/Measurements/IperfFlowMeasurement.py | 55 ++++++++++++++++++- 1 file changed, 52 insertions(+), 3 deletions(-)
diff --git a/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py index 3b24aa58..0e1dff5e 100644 --- a/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py +++ b/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py @@ -9,9 +9,13 @@ from lnst.Controller.RecipeResults import ResultLevel from lnst.RecipeCommon.Perf.Results import PerfInterval from lnst.RecipeCommon.Perf.Results import SequentialPerfResult from lnst.RecipeCommon.Perf.Results import ParallelPerfResult -from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import NetworkFlowTest -from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import BaseFlowMeasurement -from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import FlowMeasurementResults +from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import ( + NetworkFlowTest, + BaseFlowMeasurement, + FlowMeasurementResults, + AggregatedFlowMeasurementResults, + Flow, +)
from lnst.RecipeCommon.Perf.Measurements.MeasurementError import MeasurementError
@@ -105,6 +109,51 @@ class IperfFlowMeasurement(BaseFlowMeasurement):
return results
+ def aggregate_multi_flow_results(self, results): + if len(results) == 1: + return results + + sample_result = results[0] + sample_flow = sample_result.flow + dummy_flow = Flow( + type=sample_flow.type, + generator=sample_flow.generator, + generator_bind=sample_flow.generator_bind, + generator_nic=sample_flow.generator_nic, + receiver=sample_flow.receiver, + receiver_bind=sample_flow.receiver_bind, + receiver_nic=sample_flow.receiver_nic, + receiver_port=None, + msg_size=sample_flow.msg_size, + duration=sample_flow.duration, + parallel_streams=sample_flow.parallel_streams, + cpupin=None + ) + + aggregated_result = AggregatedFlowMeasurementResults( + sample_result.measurement, dummy_flow) + + nr_iterations = len(sample_result.individual_results) + for i in range(nr_iterations): + parallel_result = FlowMeasurementResults( + measurement=sample_result.measurement, + flow=dummy_flow) + parallel_result.generator_results = ParallelPerfResult() + parallel_result.generator_cpu_stats = ParallelPerfResult() + parallel_result.receiver_results = ParallelPerfResult() + parallel_result.receiver_cpu_stats = ParallelPerfResult() + + for result in results: + flow_result = result.individual_results[i] + parallel_result.generator_results.append(flow_result.generator_results) + parallel_result.receiver_results.append(flow_result.receiver_results) + parallel_result.generator_cpu_stats.append(flow_result.generator_cpu_stats) + parallel_result.receiver_cpu_stats.append(flow_result.receiver_cpu_stats) + + aggregated_result.add_results(parallel_result) + + return [aggregated_result] + def _prepare_test_flows(self, flows): test_flows = [] for flow in flows:
On Thu, Feb 18, 2021 at 01:58:41PM +0100, Jan Tluka wrote:
IperfFlowMeasurement can measure multiple Flows at the same time and will produce multiple FlowMeasurementResults.
To be able to report and evaluate sum of the flows a new method aggregate_multi_flow_results is added to support these actions. The method creates a new AggregatedFlowMeasurementResults object that will collect iteration data results of each Flow and transform the data into additional ParallelPerfResult layer.
Signed-off-by: Jan Tluka jtluka@redhat.com
.../Perf/Measurements/IperfFlowMeasurement.py | 55 ++++++++++++++++++- 1 file changed, 52 insertions(+), 3 deletions(-)
diff --git a/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py index 3b24aa58..0e1dff5e 100644 --- a/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py +++ b/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py @@ -9,9 +9,13 @@ from lnst.Controller.RecipeResults import ResultLevel from lnst.RecipeCommon.Perf.Results import PerfInterval from lnst.RecipeCommon.Perf.Results import SequentialPerfResult from lnst.RecipeCommon.Perf.Results import ParallelPerfResult -from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import NetworkFlowTest -from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import BaseFlowMeasurement -from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import FlowMeasurementResults +from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import (
- NetworkFlowTest,
- BaseFlowMeasurement,
- FlowMeasurementResults,
- AggregatedFlowMeasurementResults,
- Flow,
+)
from lnst.RecipeCommon.Perf.Measurements.MeasurementError import MeasurementError
@@ -105,6 +109,51 @@ class IperfFlowMeasurement(BaseFlowMeasurement):
return results
I'm not sure about the placement of the "aggregate_multi_flow_results" method into the IperfFlowMeasurement class because of how you call it in the following two patches.
The BaselineFlowAverageEvaluator class is *generic* - it can be used for results generated by any *FlowMeasurement class (including Neper, Netperf, Trex, whatever else). So if you decide to call "measurement.aggregate_multi_flow_results" here, it means that the "aggregate_multi_flow_results" method should be defined as part of the generic interface for any FlowMeasurement class - so at least an empty method should be part of the BaseFlowMeasurement class.
At the same time, I see that the method doesn't actually do anything specific to Iperf at all - so again, it's probably more generic than what the current placement implies.
One more comment on this is that the method accepts a "self" parameter that isn't used anywhere, and later you call the method with "cls" as the value for it. This again implies that the parameter shouldn't be here at all and the method should be a "staticmethod" and probably defined in a more generic class than here.
-Ondrej
- def aggregate_multi_flow_results(self, results):
if len(results) == 1:return resultssample_result = results[0]sample_flow = sample_result.flowdummy_flow = Flow(type=sample_flow.type,generator=sample_flow.generator,generator_bind=sample_flow.generator_bind,generator_nic=sample_flow.generator_nic,receiver=sample_flow.receiver,receiver_bind=sample_flow.receiver_bind,receiver_nic=sample_flow.receiver_nic,receiver_port=None,msg_size=sample_flow.msg_size,duration=sample_flow.duration,parallel_streams=sample_flow.parallel_streams,cpupin=None)aggregated_result = AggregatedFlowMeasurementResults(sample_result.measurement, dummy_flow)nr_iterations = len(sample_result.individual_results)for i in range(nr_iterations):parallel_result = FlowMeasurementResults(measurement=sample_result.measurement,flow=dummy_flow)parallel_result.generator_results = ParallelPerfResult()parallel_result.generator_cpu_stats = ParallelPerfResult()parallel_result.receiver_results = ParallelPerfResult()parallel_result.receiver_cpu_stats = ParallelPerfResult()for result in results:flow_result = result.individual_results[i]parallel_result.generator_results.append(flow_result.generator_results)parallel_result.receiver_results.append(flow_result.receiver_results)parallel_result.generator_cpu_stats.append(flow_result.generator_cpu_stats)parallel_result.receiver_cpu_stats.append(flow_result.receiver_cpu_stats)aggregated_result.add_results(parallel_result)return [aggregated_result]- def _prepare_test_flows(self, flows): test_flows = [] for flow in flows:
-- 2.26.2 _______________________________________________ LNST-developers mailing list -- lnst-developers@lists.fedorahosted.org To unsubscribe send an email to lnst-developers-leave@lists.fedorahosted.org Fedora Code of Conduct: https://docs.fedoraproject.org/en-US/project/code-of-conduct/ List Guidelines: https://fedoraproject.org/wiki/Mailing_list_guidelines List Archives: https://lists.fedorahosted.org/archives/list/lnst-developers@lists.fedorahos... Do not reply to spam on the list, report it: https://pagure.io/fedora-infrastructure
Tue, Feb 23, 2021 at 01:47:51PM CET, olichtne@redhat.com wrote:
On Thu, Feb 18, 2021 at 01:58:41PM +0100, Jan Tluka wrote:
IperfFlowMeasurement can measure multiple Flows at the same time and will produce multiple FlowMeasurementResults.
To be able to report and evaluate sum of the flows a new method aggregate_multi_flow_results is added to support these actions. The method creates a new AggregatedFlowMeasurementResults object that will collect iteration data results of each Flow and transform the data into additional ParallelPerfResult layer.
Signed-off-by: Jan Tluka jtluka@redhat.com
.../Perf/Measurements/IperfFlowMeasurement.py | 55 ++++++++++++++++++- 1 file changed, 52 insertions(+), 3 deletions(-)
diff --git a/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py index 3b24aa58..0e1dff5e 100644 --- a/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py +++ b/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py @@ -9,9 +9,13 @@ from lnst.Controller.RecipeResults import ResultLevel from lnst.RecipeCommon.Perf.Results import PerfInterval from lnst.RecipeCommon.Perf.Results import SequentialPerfResult from lnst.RecipeCommon.Perf.Results import ParallelPerfResult -from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import NetworkFlowTest -from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import BaseFlowMeasurement -from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import FlowMeasurementResults +from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import (
- NetworkFlowTest,
- BaseFlowMeasurement,
- FlowMeasurementResults,
- AggregatedFlowMeasurementResults,
- Flow,
+)
from lnst.RecipeCommon.Perf.Measurements.MeasurementError import MeasurementError
@@ -105,6 +109,51 @@ class IperfFlowMeasurement(BaseFlowMeasurement):
return results

I'm not sure about the placement of the "aggregate_multi_flow_results" method into the IperfFlowMeasurement class because of how you call it in the following two patches.
The BaselineFlowAverageEvaluator class is *generic* - it can be used for results generated by any *FlowMeasurement class (including Neper, Netperf, Trex, whatever else). So if you decide to call "measurement.aggregate_multi_flow_results" here, it means that the "aggregate_multi_flow_results" method should be defined as part of the generic interface for any FlowMeasurement class - so at least an empty method should be part of the BaseFlowMeasurement class.
At the same time, I see that the method doesn't actually do anything specific to Iperf at all - so again, it's probably more generic than what the current placement implies.
One more comment on this is that the method accepts a "self" parameter that isn't used anywhere, and later you call the method with "cls" as the value for it. This again implies that the parameter shouldn't be here at all and the method should be a "staticmethod" and probably defined in a more generic class than here.
-Ondrej
Makes sense. I will update the patch.
- def aggregate_multi_flow_results(self, results):
if len(results) == 1:return resultssample_result = results[0]sample_flow = sample_result.flowdummy_flow = Flow(type=sample_flow.type,generator=sample_flow.generator,generator_bind=sample_flow.generator_bind,generator_nic=sample_flow.generator_nic,receiver=sample_flow.receiver,receiver_bind=sample_flow.receiver_bind,receiver_nic=sample_flow.receiver_nic,receiver_port=None,msg_size=sample_flow.msg_size,duration=sample_flow.duration,parallel_streams=sample_flow.parallel_streams,cpupin=None)aggregated_result = AggregatedFlowMeasurementResults(sample_result.measurement, dummy_flow)nr_iterations = len(sample_result.individual_results)for i in range(nr_iterations):parallel_result = FlowMeasurementResults(measurement=sample_result.measurement,flow=dummy_flow)parallel_result.generator_results = ParallelPerfResult()parallel_result.generator_cpu_stats = ParallelPerfResult()parallel_result.receiver_results = ParallelPerfResult()parallel_result.receiver_cpu_stats = ParallelPerfResult()for result in results:flow_result = result.individual_results[i]parallel_result.generator_results.append(flow_result.generator_results)parallel_result.receiver_results.append(flow_result.receiver_results)parallel_result.generator_cpu_stats.append(flow_result.generator_cpu_stats)parallel_result.receiver_cpu_stats.append(flow_result.receiver_cpu_stats)aggregated_result.add_results(parallel_result)return [aggregated_result]- def _prepare_test_flows(self, flows): test_flows = [] for flow in flows:
-- 2.26.2 _______________________________________________ LNST-developers mailing list -- lnst-developers@lists.fedorahosted.org To unsubscribe send an email to lnst-developers-leave@lists.fedorahosted.org Fedora Code of Conduct: https://docs.fedoraproject.org/en-US/project/code-of-conduct/ List Guidelines: https://fedoraproject.org/wiki/Mailing_list_guidelines List Archives: https://lists.fedorahosted.org/archives/list/lnst-developers@lists.fedorahos... Do not reply to spam on the list, report it: https://pagure.io/fedora-infrastructure
This will ensure that BaselineFlowAverageEvaluator evaluates the aggregated flow data instead of individual flow data when parallel flows are used.
Signed-off-by: Jan Tluka jtluka@redhat.com --- .../Perf/Evaluators/BaselineFlowAverageEvaluator.py | 10 ++++++++++ 1 file changed, 10 insertions(+)
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py index 5d2c014d..26b6f740 100644 --- a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py +++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py @@ -30,6 +30,16 @@ class BaselineFlowAverageEvaluator(BaselineEvaluator): "receiver_cpu_stats", ]
+ def group_results( + self, + recipe: BaseRecipe, + recipe_conf: PerfRecipeConf, + results: List[PerfMeasurementResults], + ) -> List[List[PerfMeasurementResults]]: + new_results = results[0].measurement.aggregate_multi_flow_results(results) + + return [new_results] + def describe_group_results( self, recipe: BaseRecipe,
When parallel flows are used at the same time the results are reported for individual flows. It's useful to see also the aggregated flow results and this patch updates the code to report such results.
Signed-off-by: Jan Tluka jtluka@redhat.com --- lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py | 7 +++++++ 1 file changed, 7 insertions(+)
diff --git a/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py index d0ceffcf..435e8004 100644 --- a/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py +++ b/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py @@ -246,6 +246,13 @@ class BaseFlowMeasurement(BaseMeasurement): for flow_results in results: cls._report_flow_results(recipe, flow_results)
+ # report aggregated results + if len(results) > 1: + aggregated_flow_results = cls.aggregate_multi_flow_results(cls, results) + for flow_results in aggregated_flow_results: + cls._report_flow_results(recipe, flow_results) + + @classmethod def _report_flow_results(cls, recipe, flow_results): generator = flow_results.generator_results
This fixes the recipe failure when the perf_tool_cpu parameter is not specified.
Signed-off-by: Jan Tluka jtluka@redhat.com --- .../ENRT/MeasurementGenerators/IperfMeasurementGenerator.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py b/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py index adac62a2..2a09b70f 100644 --- a/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py +++ b/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py @@ -154,9 +154,11 @@ class IperfMeasurementGenerator(BaseMeasurementGenerator): if process_no is None: return None
- # TODO: what if cpu related params are None, return None? + try: + cpus = self.params.perf_tool_cpu + except: + return None
- cpus = self.params.perf_tool_cpu if self.params.perf_tool_cpu_policy == 'round-robin': return [cpus[process_no % len(cpus)]] elif self.params.perf_tool_cpu_policy == 'all':
This can be squashed into the patch #7 that introduced this method and #TODO
-Ondrej
On Thu, Feb 18, 2021 at 01:58:44PM +0100, Jan Tluka wrote:
This fixes the recipe failure when the perf_tool_cpu parameter is not specified.
Signed-off-by: Jan Tluka jtluka@redhat.com
.../ENRT/MeasurementGenerators/IperfMeasurementGenerator.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py b/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py index adac62a2..2a09b70f 100644 --- a/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py +++ b/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py @@ -154,9 +154,11 @@ class IperfMeasurementGenerator(BaseMeasurementGenerator): if process_no is None: return None
# TODO: what if cpu related params are None, return None?
try:
    cpus = self.params.perf_tool_cpu
except:
    return None
cpus = self.params.perf_tool_cpu if self.params.perf_tool_cpu_policy == 'round-robin': return [cpus[process_no % len(cpus)]] elif self.params.perf_tool_cpu_policy == 'all':-- 2.26.2 _______________________________________________ LNST-developers mailing list -- lnst-developers@lists.fedorahosted.org To unsubscribe send an email to lnst-developers-leave@lists.fedorahosted.org Fedora Code of Conduct: https://docs.fedoraproject.org/en-US/project/code-of-conduct/ List Guidelines: https://fedoraproject.org/wiki/Mailing_list_guidelines List Archives: https://lists.fedorahosted.org/archives/list/lnst-developers@lists.fedorahos... Do not reply to spam on the list, report it: https://pagure.io/fedora-infrastructure
Tue, Feb 23, 2021 at 01:49:25PM CET, olichtne@redhat.com wrote:
This can be squashed into the patch #7 that introduced this method and #TODO
-Ondrej
Since I'll be updating the patch set I will do that.
On Thu, Feb 18, 2021 at 01:58:44PM +0100, Jan Tluka wrote:
This fixes the recipe failure when the perf_tool_cpu parameter is not specified.
Signed-off-by: Jan Tluka jtluka@redhat.com
.../ENRT/MeasurementGenerators/IperfMeasurementGenerator.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py b/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py index adac62a2..2a09b70f 100644 --- a/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py +++ b/lnst/Recipes/ENRT/MeasurementGenerators/IperfMeasurementGenerator.py @@ -154,9 +154,11 @@ class IperfMeasurementGenerator(BaseMeasurementGenerator): if process_no is None: return None
# TODO: what if cpu related params are None, return None?
try:
    cpus = self.params.perf_tool_cpu
except:
    return None
cpus = self.params.perf_tool_cpu if self.params.perf_tool_cpu_policy == 'round-robin': return [cpus[process_no % len(cpus)]] elif self.params.perf_tool_cpu_policy == 'all':-- 2.26.2 _______________________________________________ LNST-developers mailing list -- lnst-developers@lists.fedorahosted.org To unsubscribe send an email to lnst-developers-leave@lists.fedorahosted.org Fedora Code of Conduct: https://docs.fedoraproject.org/en-US/project/code-of-conduct/ List Guidelines: https://fedoraproject.org/wiki/Mailing_list_guidelines List Archives: https://lists.fedorahosted.org/archives/list/lnst-developers@lists.fedorahos... Do not reply to spam on the list, report it: https://pagure.io/fedora-infrastructure
This is a change required by parallel iperf test. To achieve reproducible measurements it's required to turn off irqbalance and spread the NIC IRQs evenly on the available cpus.
The current recipe parameter dev_intr_cpu can specify only one cpu.
This patch changes the type of dev_intr_cpu parameter to ListParam. The value is a list of cpus that will be used for pinning the NIC IRQs.
For the original behaviour a user simply needs to change dev_intr_cpu=0 to dev_intr_cpu=[0] and the recipe will run as before.
Signed-off-by: Jan Tluka jtluka@redhat.com --- .../ConfigMixins/DevInterruptHWConfigMixin.py | 26 ++++++++++--------- 1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/lnst/Recipes/ENRT/ConfigMixins/DevInterruptHWConfigMixin.py b/lnst/Recipes/ENRT/ConfigMixins/DevInterruptHWConfigMixin.py index 1f8f5e8e..d5b9bfc7 100644 --- a/lnst/Recipes/ENRT/ConfigMixins/DevInterruptHWConfigMixin.py +++ b/lnst/Recipes/ENRT/ConfigMixins/DevInterruptHWConfigMixin.py @@ -1,6 +1,6 @@ import re
-from lnst.Common.Parameters import IntParam +from lnst.Common.Parameters import ListParam from lnst.Controller.Recipe import RecipeError from lnst.Controller.RecipeResults import ResultLevel from lnst.Recipes.ENRT.ConfigMixins.BaseHWConfigMixin import BaseHWConfigMixin @@ -16,10 +16,10 @@ class DevInterruptHWConfigMixin(BaseHWConfigMixin): Note that this Mixin also stops the irqbalance service.
:param dev_intr_cpu: - (optional test parameter) CPU id to which the device IRQs should be pinned + (optional test parameter) CPU ids to which the device IRQs should be pinned """
- dev_intr_cpu = IntParam(mandatory=False) + dev_intr_cpu = ListParam(mandatory=False)
@property def dev_interrupt_hw_config_dev_list(self): @@ -80,19 +80,20 @@ class DevInterruptHWConfigMixin(BaseHWConfigMixin): desc.append("Device irq configuration skipped.") return desc
- def _pin_dev_interrupts(self, dev, cpu): + def _pin_dev_interrupts(self, dev, cpus): netns = dev.netns cpu_info = netns.run("lscpu", job_level=ResultLevel.DEBUG).stdout regex = "CPU(s): *([0-9]*)" num_cpus = int(re.search(regex, cpu_info).groups()[0]) - if cpu < 0 or cpu > num_cpus - 1: - raise RecipeError( - "Invalid CPU value given: %d. Accepted value %s." - % ( - cpu, - "is: 0" if num_cpus == 1 else "are: 0..%d" % (num_cpus - 1), + for cpu in cpus: + if cpu < 0 or cpu > num_cpus - 1: + raise RecipeError( + "Invalid CPU value given: %d. Accepted value %s." + % ( + cpu, + "is: 0" if num_cpus == 1 else "are: 0..%d" % (num_cpus - 1), + ) ) - )
res = netns.run( "grep {} /proc/interrupts | cut -f1 -d: | sed 's/ //'".format( @@ -110,9 +111,10 @@ class DevInterruptHWConfigMixin(BaseHWConfigMixin): ) intrs = res.stdout
- for intr in intrs.split("\n"): + for i, intr in enumerate(intrs.split("\n")): try: int(intr) + cpu = cpus[i % len(cpus)] netns.run( "echo -n {} > /proc/irq/{}/smp_affinity_list".format( cpu, intr.strip()
lnst-developers@lists.fedorahosted.org