[PATCH-next] lnst.Common: edit IpAddress
by csfakian@redhat.com
From: Christos Sfakianakis <csfakian(a)redhat.com>
Add a "link_local" attribute in Ip6Address to be used for filtering
out IPv6 link-local addresses when this is desirable (e.g. by using
the "ips_filter" method of the Device module). The value of this
attribute is determined by the "is_link_local" method.
Signed-off-by: Christos Sfakianakis <csfakian(a)redhat.com>
---
lnst/Common/IpAddress.py | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/lnst/Common/IpAddress.py b/lnst/Common/IpAddress.py
index e54553d..53dc047 100644
--- a/lnst/Common/IpAddress.py
+++ b/lnst/Common/IpAddress.py
@@ -12,6 +12,8 @@ olichtne(a)redhat.com (Ondrej Lichtner)
import re
from socket import inet_pton, inet_ntop, AF_INET, AF_INET6
+from binascii import hexlify
+import socket
from lnst.Common.LnstError import LnstError
#TODO create various generators for IPNetworks and IPaddresses in the same
@@ -77,6 +79,7 @@ class Ip6Address(BaseIpAddress):
super(Ip6Address, self).__init__(addr)
self.family = AF_INET6
+ self.link_local = self.is_link_local()
@staticmethod
def _parse_addr(addr):
@@ -97,6 +100,10 @@ class Ip6Address(BaseIpAddress):
return addr, prefixlen
+ def is_link_local(self):
+ left_half = hexlify(socket.inet_pton(socket.AF_INET6, str(self)))[:16]
+ return left_half == 'fe80000000000000'
+
def ipaddress(addr):
"""Factory method to create a BaseIpAddress object"""
if isinstance(addr, BaseIpAddress):
--
2.17.1
5 years, 2 months
[PATCH-next] lnst.RecipeCommon.Ping: redesign to handle parallel scenarios
by csfakian@redhat.com
From: Christos Sfakianakis <csfakian(a)redhat.com>
Add/edit methods in PingTestAndEvaluate to handle parallel scenarios:
a) added "ping_init" to initiate a Tests.Ping instance for all
scenarios
b) edited "ping_test" to split between default and parallel
scenarios
c) added "parallel_ping_evaluate_and_report" as the analogous of
"ping_evaluate_and_report" for the parallel case
d) added "single_ping_evaluate_and_report" to report which IPs
are used each time in the parallel case
In lnst.Recipes.BaseEnrtRecipe, allow the user to specify the ping
interval, count, and packet size. Include additional parameters for
specifying parallel scenarios, as well as bidirectional cases.
Modify the "generate_ping_configurations" method to account for parallel
scenarios. Assume "ip_versions" and the two endpoints are compatible
in that the latter share equal numbers of corresponding IPs, and
raise an error otherwise. Filter out link-local IPv6 addresses.
Generate a list of ping configurations for each IP version specified
in a parallel scenario, or the content of a single-element list in the
default, non-parallel case.
Signed-off-by: Christos Sfakianakis <csfakian(a)redhat.com>
---
lnst/RecipeCommon/Ping.py | 57 ++++++++++++++++++++++++--
lnst/Recipes/ENRT/BaseEnrtRecipe.py | 62 +++++++++++++++++++++++------
2 files changed, 103 insertions(+), 16 deletions(-)
diff --git a/lnst/RecipeCommon/Ping.py b/lnst/RecipeCommon/Ping.py
index f5cd652..194d984 100644
--- a/lnst/RecipeCommon/Ping.py
+++ b/lnst/RecipeCommon/Ping.py
@@ -1,5 +1,8 @@
+from copy import copy
+
from lnst.Controller.Recipe import BaseRecipe
from lnst.Tests import Ping
+from lnst.Controller.RecipeResults import ResultLevel
class PingConf(object):
def __init__(self,
@@ -44,22 +47,68 @@ class PingConf(object):
class PingTestAndEvaluate(BaseRecipe):
def ping_test(self, ping_config):
+ #parallel scenario
+ if isinstance(ping_config, list):
+ results = {}
+
+ running_ping_array = []
+ for pingconf in ping_config:
+ ping, client = self.ping_init(pingconf)
+ running_ping = client.prepare_job(ping)
+ running_ping.start(bg = True)
+ running_ping_array.append((pingconf, running_ping))
+
+ for _, pingjob in running_ping_array:
+ try:
+ pingjob.wait()
+ finally:
+ pingjob.kill()
+
+ for pingconf, pingjob in running_ping_array:
+ result = pingjob.result
+ results[pingconf] = result
+
+ return results
+
+ #non-parallel scenario
+ ping, client = self.ping_init(ping_config)
+ ping_job = client.run(ping)
+ return ping_job.result
+
+ def ping_init(self, ping_config):
client = ping_config.client
destination = ping_config.destination
-
kwargs = self._generate_ping_kwargs(ping_config)
ping = Ping(**kwargs)
-
- ping_job = client.run(ping)
- return ping_job.result
+ return (ping, client)
def ping_evaluate_and_report(self, ping_config, results):
+ if isinstance(ping_config, list):
+ self.parallel_ping_evaluate_and_report(results)
+ return
# do we want to use the "perf" measurements (store a baseline etc...) as well?
if results["rate"] > 50:
self.add_result(True, "Ping succesful", results)
else:
self.add_result(False, "Ping unsuccesful", results)
+ #parallel version of ping_evaluate_and_report
+ def parallel_ping_evaluate_and_report(self, results):
+ for pingconf, result in results.items():
+ self.single_ping_evaluate_and_report(pingconf, result)
+
+ #clarify source/destination in reporting for parallel scenarios
+ def single_ping_evaluate_and_report(self, ping_config, results):
+ fmt = "From: <{0.client.hostid} ({0.client_bind})> To: " \
+ "<{0.destination.hostid} ({0.destination_address})>"
+ description = fmt.format(ping_config)
+ if results["rate"] > 50:
+ message = "Ping successful --- " + description
+ self.add_result(True, message, results)
+ else:
+ message = "Ping unsuccessful --- " + description
+ self.add_result(False, message, results)
+
def _generate_ping_kwargs(self, ping_config):
kwargs = dict(dst=ping_config.destination_address,
interface=ping_config.client_bind)
diff --git a/lnst/Recipes/ENRT/BaseEnrtRecipe.py b/lnst/Recipes/ENRT/BaseEnrtRecipe.py
index d7d1aec..cbab341 100644
--- a/lnst/Recipes/ENRT/BaseEnrtRecipe.py
+++ b/lnst/Recipes/ENRT/BaseEnrtRecipe.py
@@ -65,6 +65,13 @@ class EnrtSubConfiguration(object):
class BaseEnrtRecipe(PingTestAndEvaluate, PerfRecipe):
ip_versions = Param(default=("ipv4", "ipv6"))
+
+ ping_parallel = BoolParam(default=False)
+ ping_bidirect = BoolParam(default=False)
+ ping_count = IntParam(default = 100)
+ ping_interval = StrParam(default = 0.2)
+ ping_psize = IntParam(default = None)
+
perf_tests = Param(default=("tcp_stream", "udp_stream", "sctp_stream"))
offload_combinations = Param(default=(
@@ -158,22 +165,53 @@ class BaseEnrtRecipe(PingTestAndEvaluate, PerfRecipe):
def generate_ping_configurations(self, main_config, sub_config):
client_nic = main_config.endpoint1
server_nic = main_config.endpoint2
- client_netns = client_nic.netns
- server_netns = server_nic.netns
+
+ count = self.params.ping_count
+ interval = self.params.ping_interval
+ size = self.params.ping_psize
+ common_args = {'count' : count, 'interval' : interval, 'size' : size}
for ipv in self.params.ip_versions:
+ kwargs = {}
if ipv == "ipv4":
- family = AF_INET
+ kwargs.update(family = AF_INET)
elif ipv == "ipv6":
- family = AF_INET6
-
- client_bind = client_nic.ips_filter(family=family)[0]
- server_bind = server_nic.ips_filter(family=family)[0]
-
- yield PingConf(client = client_netns,
- client_bind = client_bind,
- destination = server_netns,
- destination_address = server_bind)
+ kwargs.update(family = AF_INET6)
+ kwargs.update(link_local = False)
+
+ client_ips = client_nic.ips_filter(**kwargs)
+ server_ips = server_nic.ips_filter(**kwargs)
+
+ if len(client_ips) != len(server_ips) or len(client_ips) * len(server_ips) == 0:
+ raise LnstError("Source/destination ip lists are of different size or empty.")
+
+ number_of_ips = len(client_ips)
+ ping_conf_list = []
+ client_nic.valid_ips = client_ips
+ server_nic.valid_ips = server_ips
+ for n in range(number_of_ips):
+ for client_nic, server_nic in [(client_nic, server_nic), (server_nic, client_nic)]:
+ client_bind = client_nic.valid_ips[n]
+ server_bind = server_nic.valid_ips[n]
+
+ pconf = PingConf(client = client_nic.netns,
+ client_bind = client_bind,
+ destination = server_nic.netns,
+ destination_address = server_bind,
+ **common_args)
+
+ ping_conf_list.append(pconf)
+
+ if not self.params.ping_bidirect:
+ break
+
+ if not self.params.ping_parallel:
+ break
+
+ if not self.params.ping_bidirect and not self.params.ping_parallel:
+ yield ping_conf_list[0]
+ else:
+ yield ping_conf_list
def generate_perf_configurations(self, main_config, sub_config):
client_nic = main_config.endpoint1
--
2.17.1
5 years, 2 months
[PATCH-next] lnst.Recipes.ENRT.TeamVsBondRecipe: remove erroneous line
by csfakian@redhat.com
From: Christos Sfakianakis <csfakian(a)redhat.com>
Remove line 56 (there is no team for m2).
Signed-off-by: Christos Sfakianakis <csfakian(a)redhat.com>
---
lnst/Recipes/ENRT/TeamVsBondRecipe.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/lnst/Recipes/ENRT/TeamVsBondRecipe.py b/lnst/Recipes/ENRT/TeamVsBondRecipe.py
index 597579b..23bac2d 100644
--- a/lnst/Recipes/ENRT/TeamVsBondRecipe.py
+++ b/lnst/Recipes/ENRT/TeamVsBondRecipe.py
@@ -53,7 +53,6 @@ class TeamVsBondRecipe(BaseEnrtRecipe):
if "mtu" in self.params:
m1.team.mtu = self.params.mtu
- m2.team.mtu = self.params.mtu
net_addr_1 = "192.168.10"
net_addr6_1 = "fc00:0:0:1"
--
2.17.1
5 years, 2 months
[PATCH 1/3] regression_tests: ovs-dpdk-pvp: add guest_nr_hugepages alias
by olichtne@redhat.com
From: Ondrej Lichtner <olichtne(a)redhat.com>
The guest only has 16G ram in our lab, using 13000x2MB=26GB hugepage
memory that can lead to the OOM killer causing problems, e.g. killing
testpmd which will break the setup and then cause deconfiguration
hangs...
Signed-off-by: Ondrej Lichtner <olichtne(a)redhat.com>
---
recipes/regression_tests/phase3/ovs-dpdk-pvp.py | 3 ++-
recipes/regression_tests/phase3/ovs-dpdk-pvp.xml | 1 +
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/recipes/regression_tests/phase3/ovs-dpdk-pvp.py b/recipes/regression_tests/phase3/ovs-dpdk-pvp.py
index b17227c..ab2639c 100644
--- a/recipes/regression_tests/phase3/ovs-dpdk-pvp.py
+++ b/recipes/regression_tests/phase3/ovs-dpdk-pvp.py
@@ -117,6 +117,7 @@ host2_dpdk_cores = ctl.get_alias("host2_dpdk_cores")
guest_testpmd_cores = ctl.get_alias("guest_testpmd_cores")
guest_dpdk_cores = ctl.get_alias("guest_dpdk_cores")
nr_hugepages = int(ctl.get_alias("nr_hugepages"))
+guest_nr_hugepages = int(ctl.get_alias("guest_nr_hugepages"))
socket_mem = int(ctl.get_alias("socket_mem"))
guest_mem_amount = ctl.get_alias("guest_mem_amount")
guest_virtname = ctl.get_alias("guest_virtname")
@@ -318,7 +319,7 @@ g_nic2_out = run_ssh_command_on_guest("ethtool -i %s" % g_nic2_name, guest, h2,
g_nic1_pci = re.search("^bus-info: (\S+)$", g_nic1_out, re.MULTILINE).group(1)
g_nic2_pci = re.search("^bus-info: (\S+)$", g_nic2_out, re.MULTILINE).group(1)
-run_ssh_command_on_guest("echo -n %d >/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages" % nr_hugepages, guest, h2, guest_virtname)
+run_ssh_command_on_guest("echo -n %d >/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages" % guest_nr_hugepages, guest, h2, guest_virtname)
run_ssh_command_on_guest("modprobe -r vfio_iommu_type1", guest, h2, guest_virtname)
run_ssh_command_on_guest("modprobe -r vfio", guest, h2, guest_virtname)
run_ssh_command_on_guest("modprobe vfio enable_unsafe_noiommu_mode=1", guest, h2, guest_virtname)
diff --git a/recipes/regression_tests/phase3/ovs-dpdk-pvp.xml b/recipes/regression_tests/phase3/ovs-dpdk-pvp.xml
index 4968f7d..1e69d43 100644
--- a/recipes/regression_tests/phase3/ovs-dpdk-pvp.xml
+++ b/recipes/regression_tests/phase3/ovs-dpdk-pvp.xml
@@ -9,6 +9,7 @@
<alias name="guest_testpmd_cores" value="0x7" />
<alias name="guest_dpdk_cores" value="0x6" />
<alias name="nr_hugepages" value="13000" />
+ <alias name="guest_nr_hugepages" value="3000" />
<alias name="socket_mem" value="2048" />
<alias name="trex_dir" value="" />
<alias name="guest_mem_amount" value="16777216" />
--
2.20.1
5 years, 2 months