[PATCH v4] regression_tests: add ipsec_esp_aead test
by Kamil Jerabek
This patch adds a new ipsec_esp_aead test to our phase3 regression_tests. It is
similar to the ipsec_esp_ah_comp test, differing only in configuration.
The test configures only esp aead with the algorithm rfc4106(gcm(aes)),
key length 160bit and IV length 96bit.
Signed-off-by: Kamil Jerabek <kjerabek(a)redhat.com>
---
changes in:
*v2:
* add official_result handling for PerfRepo
*v3:
* add accidentally removed result parameters
* remove comment about compression check
*v4:
* removed unused imports
---
.../regression_tests/phase3/ipsec_esp_aead.README | 90 +++++
recipes/regression_tests/phase3/ipsec_esp_aead.py | 433 +++++++++++++++++++++
recipes/regression_tests/phase3/ipsec_esp_aead.xml | 51 +++
3 files changed, 574 insertions(+)
create mode 100644 recipes/regression_tests/phase3/ipsec_esp_aead.README
create mode 100644 recipes/regression_tests/phase3/ipsec_esp_aead.py
create mode 100644 recipes/regression_tests/phase3/ipsec_esp_aead.xml
diff --git a/recipes/regression_tests/phase3/ipsec_esp_aead.README b/recipes/regression_tests/phase3/ipsec_esp_aead.README
new file mode 100644
index 0000000..8738da6
--- /dev/null
+++ b/recipes/regression_tests/phase3/ipsec_esp_aead.README
@@ -0,0 +1,90 @@
+Topology:
+
+ switch
+ +------+
+ | |
+ | |
+ +-------------+ +-------------+
+ | | | |
+ | | | |
+ | +------+ |
+ | |
+ | |
+ +-+--+ +-+--+
++-------|eth1|------+ +-------|eth1|------+
+| +-+--+ | | +-+--+ |
+| | | |
+| | | |
+| | | |
+| | | |
+| | | |
+| host1 | | host2 |
+| | | |
+| | | |
+| | | |
++-------------------+ +-------------------+
+
+Number of hosts: 2
+Host #1 description:
+ One ethernet device configured with ip addresses:
+ 192.168.99.1/24
+ fc00:1::1/64
+
+Host #2 description:
+ One ethernet device configured with ip addresses:
+ 192.168.100.1/24
+ fc00:2::1/64
+
+Test name:
+ ipsec_esp_aead.py
+Test description:
+ Ping:
+ + count: 10
+ + interval: 0.1s
+ + between ipsec encrypted ethernet interfaces headers check expecting PASS
+ + between ipsec encrypted ethernet interfaces compression check expecting PASS
+ Ping6:
+ + count: 10
+ + interval: 0.1s
+ + between ipsec encrypted ethernet interfaces headers check expecting PASS
+ + between ipsec encrypted ethernet interfaces compression check expecting PASS
+ Netperf:
+ + duration: 60s
+ + TCP_STREAM and UDP_STREAM
+ + ipv4 and ipv6
+ + between ipsec encrypted ethernet interfaces
+ IPsec
+ + tested with esp AEAD
+ + tested with algorithm rfc4106(gcm(aes))
+ + key length - 160bit
+ + IV length - 96bit
+
+PerfRepo integration:
+ First, preparation in PerfRepo is required - you need to create Test objects
+ through the web interface that properly describe the individual Netperf
+ tests that this recipe runs. Don't forget to also add appropriate metrics.
+ For these Netperf tests it's always:
+ * throughput
+ * throughput_min
+ * throughput_max
+ * throughput_deviation
+
+ After that, to enable support for PerfRepo you need to create the file
+ ipsec_esp_aead.mapping and define the following id mappings:
+ tcp_ipv4_id -> to store ipv4 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ tcp_ipv6_id -> to store ipv6 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv4_id -> to store ipv4 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv6_id -> to store ipv6 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+
+ To enable result comparison against baselines you need to create a Report in
+ PerfRepo that will store the baseline. Set up the Report to only contain results
+ with the same hash tag and then add a new mapping to the mapping file, with
+ this format:
+ <some_hash> = <report_id>
+
+ The hash value is automatically generated during test execution and added
+ to each result stored in PerfRepo. To get the Report id you need to open
+ that report in your browser and find it in the URL.
+
+ When running this recipe you should also define the 'product_name' alias
+ (e.g. RHEL7) in order to tag the result object in PerfRepo.
diff --git a/recipes/regression_tests/phase3/ipsec_esp_aead.py b/recipes/regression_tests/phase3/ipsec_esp_aead.py
new file mode 100644
index 0000000..a6cc67c
--- /dev/null
+++ b/recipes/regression_tests/phase3/ipsec_esp_aead.py
@@ -0,0 +1,433 @@
+from lnst.Common.Utils import bool_it
+from lnst.Controller.Task import ctl
+from lnst.Controller.PerfRepoUtils import perfrepo_baseline_to_dict
+from lnst.Controller.PerfRepoUtils import netperf_result_template
+
+from lnst.RecipeCommon.ModuleWrap import netperf
+from lnst.RecipeCommon.IRQ import pin_dev_irqs
+from lnst.RecipeCommon.PerfRepo import generate_perfrepo_comment
+
+# ---------------------------
+# ALGORITHM AND CIPHER CONFIG
+# ---------------------------
+
+# length param is in bits
+def generate_key(length):
+ key = "0x"
+ key = key + (length/8) * "0b"
+ return key
+
+algorithm = []
+
+algorithm.append(('rfc4106(gcm(aes))', 160, 96))
+
+# ------
+# SETUP
+# ------
+
+mapping_file = ctl.get_alias("mapping_file")
+perf_api = ctl.connect_PerfRepo(mapping_file)
+
+product_name = ctl.get_alias("product_name")
+
+m1 = ctl.get_host("machine1")
+m2 = ctl.get_host("machine2")
+
+m1.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf", "Custom"])
+m2.sync_resources(modules=["PacketAssert", "IcmpPing", "Icmp6Ping", "Netperf"])
+
+# ------
+# TESTS
+# ------
+
+ipv = ctl.get_alias("ipv")
+mtu = ctl.get_alias("mtu")
+netperf_duration = int(ctl.get_alias("netperf_duration"))
+nperf_reserve = int(ctl.get_alias("nperf_reserve"))
+nperf_confidence = ctl.get_alias("nperf_confidence")
+nperf_max_runs = int(ctl.get_alias("nperf_max_runs"))
+nperf_cpupin = ctl.get_alias("nperf_cpupin")
+nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
+nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
+nperf_debug = ctl.get_alias("nperf_debug")
+nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_msg_size = ctl.get_alias("nperf_msg_size")
+pr_user_comment = ctl.get_alias("perfrepo_comment")
+ipsec_mode = ctl.get_alias("ipsec_mode")
+official_result = bool_it(ctl.get_alias("official_result"))
+
+pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
+
+m1_if = m1.get_interface("eth")
+m2_if = m2.get_interface("eth")
+
+m1_if.set_mtu(mtu)
+m2_if.set_mtu(mtu)
+
+m1_if_name = m1_if.get_devname()
+m2_if_name = m2_if.get_devname()
+
+m1_if_addr = m1_if.get_ip()
+m2_if_addr = m2_if.get_ip()
+
+m1_if_addr6 = m1_if.get_ip(1)
+m2_if_addr6 = m2_if.get_ip(1)
+
+
+# add routing rules ipv4
+# so the host knows where to send traffic destined to remote site
+m1.run("ip route add %s dev %s" % (m2_if_addr, m1_if_name))
+
+# so the host knows where to send traffic destined to remote site
+m2.run("ip route add %s dev %s" % (m1_if_addr, m2_if_name))
+
+# add routing rules ipv6
+# so the host knows where to send traffic destined to remote site
+m1.run("ip route add %s dev %s" % (m2_if_addr6, m1_if_name))
+
+# so the host knows where to send traffic destined to remote site
+m2.run("ip route add %s dev %s" % (m1_if_addr6, m2_if_name))
+
+if nperf_msg_size is None:
+ nperf_msg_size = 16000
+
+if ipsec_mode is None:
+ ipsec_mode = "transport"
+
+res = m1.run("rpm -qa iproute", save_output=True)
+if (res.get_result()["res_data"]["stdout"].find("iproute-2") != -1):
+ m1_key="0x"
+else:
+ m1_key=""
+
+res = m2.run("rpm -qa iproute", save_output=True)
+if (res.get_result()["res_data"]["stdout"].find("iproute-2") != -1):
+ m2_key="0x"
+else:
+ m2_key=""
+
+if nperf_cpupin:
+ m1.run("service irqbalance stop")
+ m2.run("service irqbalance stop")
+
+ dev_list = [(m1, m1_if), (m2, m2_if)]
+
+ # this will pin devices irqs to cpu #0
+ for m, d in dev_list:
+ pin_dev_irqs(m, d, 0)
+
+nperf_opts = ""
+if nperf_cpupin and nperf_num_parallel == 1:
+ nperf_opts = " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
+
+ctl.wait(15)
+
+def configure_ipsec(algo, algo_key, icv_len, ip_version):
+ if ip_version == "ipv4":
+ m1_addr = m1_if_addr
+ m2_addr = m2_if_addr
+ else:
+ m1_addr = m1_if_addr6
+ m2_addr = m2_if_addr6
+
+ # configure policy and state
+ m1.run("ip xfrm policy flush")
+ m1.run("ip xfrm state flush")
+ m2.run("ip xfrm policy flush")
+ m2.run("ip xfrm state flush")
+
+ m1.run("ip xfrm state add src %s dst %s proto esp spi 0x1001 "\
+ "aead '%s' %s %s mode %s "\
+ "sel src %s dst %s"\
+ % (m2_addr, m1_addr,
+ algo, algo_key, icv_len, ipsec_mode,
+ m2_addr, m1_addr))
+
+ m1.run("ip xfrm policy add src %s dst %s dir in tmpl "\
+ "src %s dst %s proto esp mode %s action allow"\
+ % (m2_addr, m1_addr,
+ m2_addr, m1_addr, ipsec_mode))
+
+ m1.run("ip xfrm state add src %s dst %s proto esp spi 0x1000 "\
+ "aead '%s' %s %s mode %s "\
+ "sel src %s dst %s"\
+ % (m1_addr, m2_addr,
+ algo, algo_key, icv_len, ipsec_mode,
+ m1_addr, m2_addr))
+
+ m1.run("ip xfrm policy add src %s dst %s dir out tmpl "\
+ "src %s dst %s proto esp mode %s action allow"\
+ % (m1_addr, m2_addr,
+ m1_addr, m2_addr, ipsec_mode))
+
+
+
+
+ m2.run("ip xfrm state add src %s dst %s proto esp spi 0x1000 "\
+ "aead '%s' %s %s mode %s "\
+ "sel src %s dst %s"\
+ % (m1_addr, m2_addr,
+ algo, algo_key, icv_len, ipsec_mode,
+ m1_addr, m2_addr))
+
+ m2.run("ip xfrm policy add src %s dst %s dir in tmpl "\
+ "src %s dst %s proto esp mode %s action allow"\
+ % (m1_addr, m2_addr,
+ m1_addr, m2_addr, ipsec_mode))
+
+ m2.run("ip xfrm state add src %s dst %s proto esp spi 0x1001 "\
+ "aead '%s' %s %s mode %s sel "\
+ "src %s dst %s"\
+ % (m2_addr, m1_addr,
+ algo, algo_key, icv_len, ipsec_mode,
+ m2_addr, m1_addr))
+
+ m2.run("ip xfrm policy add src %s dst %s dir out tmpl "\
+ "src %s dst %s proto esp mode %s action allow"\
+ % (m2_addr, m1_addr,
+ m2_addr, m1_addr, ipsec_mode))
+
+
+
+for algo, key_len, icv_len in algorithm:
+ # test: TCP netperf, UDP netperf
+ if ipv in [ 'ipv4', 'both']:
+ configure_ipsec(algo,
+ generate_key(key_len),
+ icv_len,
+ "ipv4")
+
+ dump = m1.run("tcpdump -i %s -nn -vv" % m1_if_name, bg=True)
+
+ # ping + PacketAssert
+ assert_mod = ctl.get_module("PacketAssert",
+ options={
+ "interface": m2_if_name,
+ "filter": "esp",
+ "grep_for": [ "ESP\(spi=0x00001001" ],
+ "min": 10
+ })
+
+ assert_proc = m2.run(assert_mod, bg=True)
+
+ ping_mod = ctl.get_module("IcmpPing",
+ options={
+ "addr": m2_if_addr,
+ "count": 10,
+ "interval": 0.1})
+
+ ctl.wait(2)
+
+ m1.run(ping_mod)
+
+ ctl.wait(2)
+
+ assert_proc.intr()
+
+ dump.intr()
+
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_tcp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ result_tcp.set_parameter('ipsec_algorithm', algo)
+ result_tcp.set_parameter('key_length', key_len)
+ result_tcp.set_parameter('iv_length', icv_len)
+ result_tcp.set_parameter('msg_size', nperf_msg_size)
+ result_tcp.set_parameter('ipsec_mode', ipsec_mode)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+
+ tcp_res_data = netperf((m1, m1_if, 0, {"scope": 0}),
+ (m2, m2_if, 0, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+ "netperf_opts": nperf_opts},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp, official_result)
+
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_udp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ result_udp.set_parameter('ipsec_algorithm', algo)
+ result_udp.set_parameter('key_length', key_len)
+ result_udp.set_parameter('iv_length', icv_len)
+ result_udp.set_parameter('msg_size', nperf_msg_size)
+ result_udp.set_parameter('ipsec_mode', ipsec_mode)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+ udp_res_data = netperf((m1, m1_if, 0, {"scope": 0}),
+ (m2, m2_if, 0, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+ "netperf_opts": nperf_opts},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp, official_result)
+
+ if ipv in [ 'ipv6', 'both']:
+ configure_ipsec(algo,
+ generate_key(key_len),
+ icv_len,
+ "ipv6")
+
+ dump = m1.run("tcpdump -i %s -nn -vv" % m1_if_name, bg=True)
+
+ # ping + PacketAssert
+ assert_mod = ctl.get_module("PacketAssert",
+ options={
+ "interface": m2_if_name,
+ "filter": "esp",
+ "grep_for": [ "ESP\(spi=0x00001001" ],
+ "min": 10
+ })
+
+ assert_proc = m2.run(assert_mod, bg=True)
+
+ ping_mod = ctl.get_module("Icmp6Ping",
+ options={
+ "addr": m2_if_addr6,
+ "count": 10,
+ "interval": 0.1})
+
+ ctl.wait(2)
+
+ m1.run(ping_mod)
+
+ ctl.wait(2)
+
+ assert_proc.intr()
+
+ dump.intr()
+
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_tcp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ result_tcp.set_parameter('ipsec_algorithm', algo)
+ result_tcp.set_parameter('key_length', key_len)
+ result_tcp.set_parameter('iv_length', icv_len)
+ result_tcp.set_parameter('msg_size', nperf_msg_size)
+ result_tcp.set_parameter('ipsec_mode', ipsec_mode)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+
+ tcp_res_data = netperf((m1, m1_if, 1, {"scope": 0}),
+ (m2, m2_if, 1, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+ "netperf_opts" : nperf_opts + " -6"},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp, official_result)
+
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_udp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ result_udp.set_parameter('ipsec_algorithm', algo)
+ result_udp.set_parameter('key_length', key_len)
+ result_udp.set_parameter('iv_length', icv_len)
+ result_udp.set_parameter('msg_size', nperf_msg_size)
+ result_udp.set_parameter('ipsec_mode', ipsec_mode)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+ udp_res_data = netperf((m1, m1_if, 1, {"scope": 0}),
+ (m2, m2_if, 1, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+ "netperf_opts" : nperf_opts + " -6"},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp, official_result)
+
+m1.run("ip xfrm policy flush")
+m1.run("ip xfrm state flush")
+m2.run("ip xfrm policy flush")
+m2.run("ip xfrm state flush")
+
+if nperf_cpupin:
+ m1.run("service irqbalance start")
+ m2.run("service irqbalance start")
diff --git a/recipes/regression_tests/phase3/ipsec_esp_aead.xml b/recipes/regression_tests/phase3/ipsec_esp_aead.xml
new file mode 100644
index 0000000..c37010e
--- /dev/null
+++ b/recipes/regression_tests/phase3/ipsec_esp_aead.xml
@@ -0,0 +1,51 @@
+<lnstrecipe>
+ <define>
+ <alias name="ipv" value="both" />
+ <alias name="mtu" value="1500" />
+ <alias name="netperf_duration" value="60" />
+ <alias name="nperf_reserve" value="20" />
+ <alias name="nperf_confidence" value="99,5" />
+ <alias name="nperf_max_runs" value="5"/>
+ <alias name="nperf_num_parallel" value="1"/>
+ <alias name="nperf_debug" value="0"/>
+ <alias name="nperf_max_dev" value="20%"/>
+ <alias name="mapping_file" value="ipsec_esp_aead.mapping"/>
+ <alias name="net_1" value="192.168.99"/>
+ <alias name="net6_1" value="fc00:1::"/>
+ <alias name="net_2" value="192.168.100"/>
+ <alias name="net6_2" value="fc00:2::"/>
+ <alias name="driver" value=""/>
+ <alias name="official_result" value="no" />
+ </define>
+ <network>
+ <host id="machine1">
+ <interfaces>
+ <eth id="eth" label="localnet">
+ <params>
+ <param name="driver" value="{$driver}"/>
+ </params>
+ <addresses>
+ <address value="{$net_1}.1/24"/>
+ <address value="{$net6_1}1/64"/>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ <host id="machine2">
+ <interfaces>
+ <eth id="eth" label="localnet">
+ <params>
+ <param name="driver" value="{$driver}"/>
+ </params>
+ <addresses>
+ <address value="{$net_2}.1/24"/>
+ <address value="{$net6_2}1/64"/>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ </network>
+
+ <task python="ipsec_esp_aead.py"/>
+
+</lnstrecipe>
--
2.5.5
5 years, 12 months
[patch lnst-next] Machine: reduce initial resource synchronization and load phase
by Jiri Pirko
From: Jiri Pirko <jiri(a)mellanox.com>
So far, each resource is checked and loaded one by one. That results
in many rpc calls. On my 20ms line, this makes ~4secs for one slave.
Bulk the checks and loads together which allows to be done in ~1sec.
Signed-off-by: Jiri Pirko <jiri(a)mellanox.com>
---
rfc->v1:
- avoid duplicates
- have rpc methods accept either list or a single entry
- store has_resource return value and sync accordingly
- compute res hash in get module info
---
lnst/Controller/Machine.py | 78 +++++++++++++++++++++++++++++-----------------
lnst/Slave/NetTestSlave.py | 33 +++++++++++++++-----
2 files changed, 75 insertions(+), 36 deletions(-)
diff --git a/lnst/Controller/Machine.py b/lnst/Controller/Machine.py
index 38a3646..e8e74d5 100644
--- a/lnst/Controller/Machine.py
+++ b/lnst/Controller/Machine.py
@@ -257,18 +257,17 @@ class Machine(object):
for cls_name, cls in device_classes:
classes.extend(reversed(self._get_base_classes(cls)))
+ minfos = []
for cls in classes:
if cls is object:
continue
- module_name = cls.__module__
- module = sys.modules[module_name]
- filename = module.__file__
-
- if filename[-3:] == "pyc":
- filename = filename[:-1]
+ name = cls.__module__
+ if any(minfo['name'] == name for minfo in minfos):
+ continue
+ minfos.append(self._get_module_info(name))
- res_hash = self.sync_resource(module_name, filename)
- self.rpc_call("load_cached_module", module_name, res_hash)
+ self.sync_resources(minfos)
+ self.rpc_call("load_cached_module", minfos)
for cls_name, cls in device_classes:
module_name = cls.__module__
@@ -348,15 +347,9 @@ class Machine(object):
for cls in reversed(classes):
if cls is object or cls is BaseTestModule:
continue
- m_name = cls.__module__
- m = sys.modules[m_name]
- filename = m.__file__
- if filename[-3:] == "pyc":
- filename = filename[:-1]
-
- res_hash = self.sync_resource(m_name, filename)
-
- self.rpc_call("load_cached_module", m_name, res_hash)
+ minfo = self._get_module_info(cls.__module__)
+ self.sync_resource(minfo)
+ self.rpc_call("load_cached_module", minfo)
logging.info("Host %s executing job %d: %s" %
(self._id, job.id, str(job)))
@@ -518,19 +511,48 @@ class Machine(object):
local_file.close()
self.rpc_call("finish_copy_from", remote_path)
- def sync_resource(self, res_name, file_path):
- digest = sha256sum(file_path)
+ def _get_module_info(self, name):
+ module = sys.modules[name]
+ filepath = module.__file__
+
+ if filepath[-3:] == "pyc":
+ filepath = filepath[:-1]
- if not self.rpc_call("has_resource", digest):
- msg = "Transfering %s to machine %s as '%s'" % (file_path,
- self.get_id(),
- res_name)
- logging.debug(msg)
+ res_hash = sha256sum(filepath)
- remote_path = self.copy_file_to_machine(file_path)
- self.rpc_call("add_resource_to_cache",
- "file", remote_path, res_name)
- return digest
+ return {"name": name, "filepath": filepath, "res_hash": res_hash}
+
+ def _sync_resource(self, minfo):
+ if minfo["has_resource"]:
+ return
+ msg = "Transfering %s to machine %s as '%s'" % (minfo["filepath"],
+ self.get_id(),
+ minfo["name"])
+ logging.debug(msg)
+ remote_path = self.copy_file_to_machine(minfo["filepath"])
+ self.rpc_call("add_resource_to_cache", "file",
+ remote_path, minfo["name"])
+
+ def sync_resource(self, minfo):
+ minfo["has_resource"] = self.rpc_call("has_resource",
+ minfo["res_hash"])
+ self._sync_resource(minfo)
+
+ def sync_resources(self, minfos):
+ res_hashes = list(map(lambda minfo: minfo["res_hash"], minfos))
+
+ # To avoid asking per-resource, ask for all resources in one call.
+ has_resources = self.rpc_call("has_resource", res_hashes)
+
+ from pprint import pprint
+ pprint(has_resources)
+ minfos = list(map(lambda minfo, has_resource:
+ dict(minfo.items() + {"has_resource": has_resource}.items()),
+ minfos, has_resources))
+ pprint(minfos)
+
+ for minfo in minfos:
+ self._sync_resource(minfo)
# def enable_nm(self):
# return self._rpc_call("enable_nm")
diff --git a/lnst/Slave/NetTestSlave.py b/lnst/Slave/NetTestSlave.py
index 908e85c..02a9d18 100644
--- a/lnst/Slave/NetTestSlave.py
+++ b/lnst/Slave/NetTestSlave.py
@@ -148,13 +148,21 @@ class SlaveMethods:
setattr(Devices, cls_name, cls)
- def load_cached_module(self, module_name, res_hash):
- self._cache.renew_entry(res_hash)
- if module_name in self._dynamic_modules:
+ def _load_cached_module(self, minfo):
+ self._cache.renew_entry(minfo["res_hash"])
+ if minfo["name"] in self._dynamic_modules:
return
- module_path = self._cache.get_path(res_hash)
- module = imp.load_source(module_name, module_path)
- self._dynamic_modules[module_name] = module
+ module_path = self._cache.get_path(minfo["res_hash"])
+ module = imp.load_source(minfo["name"], module_path)
+ self._dynamic_modules[minfo["name"]] = module
+
+ def load_cached_module(self, minfos):
+ if isinstance(minfos, list):
+ for minfo in minfos:
+ self._load_cached_module(minfo)
+ elif isinstance(minfos, dict):
+ minfo = minfos
+ self._load_cached_module(minfo)
def init_if_manager(self):
self._if_manager = InterfaceManager(self._server_handler)
@@ -401,12 +409,21 @@ class SlaveMethods:
self._remove_capture_files()
return True
- def has_resource(self, res_hash):
+ def _has_resource(self, res_hash):
if self._cache.query(res_hash):
return True
-
return False
+ def has_resource(self, res_hashes):
+ if isinstance(res_hashes, list):
+ retvals = []
+ for res_hash in res_hashes:
+ retvals.append(self._has_resource(res_hash))
+ return retvals
+ elif isinstance(res_hashes, str):
+ res_hash = res_hashes
+ return self._has_resource(res_hash)
+
def add_resource_to_cache(self, res_type, local_path, name):
if res_type == "file":
self._cache.add_file_entry(local_path, name)
--
2.9.5
6 years
[PATCH v3] regression_tests: add ipsec_esp_aead test
by Kamil Jerabek
This patch adds a new ipsec_esp_aead test to our phase3 regression_tests. It is
similar to the ipsec_esp_ah_comp test, differing only in configuration.
The test configures only esp aead with the algorithm rfc4106(gcm(aes)),
key length 160bit and IV length 96bit.
Signed-off-by: Kamil Jerabek <kjerabek(a)redhat.com>
---
changes in:
*v2:
* add official_result handling for PerfRepo
*v3:
* add accidentally removed result parameters
* remove comment about compression check
---
.../regression_tests/phase3/ipsec_esp_aead.README | 90 +++++
recipes/regression_tests/phase3/ipsec_esp_aead.py | 434 +++++++++++++++++++++
recipes/regression_tests/phase3/ipsec_esp_aead.xml | 51 +++
3 files changed, 575 insertions(+)
create mode 100644 recipes/regression_tests/phase3/ipsec_esp_aead.README
create mode 100644 recipes/regression_tests/phase3/ipsec_esp_aead.py
create mode 100644 recipes/regression_tests/phase3/ipsec_esp_aead.xml
diff --git a/recipes/regression_tests/phase3/ipsec_esp_aead.README b/recipes/regression_tests/phase3/ipsec_esp_aead.README
new file mode 100644
index 0000000..8738da6
--- /dev/null
+++ b/recipes/regression_tests/phase3/ipsec_esp_aead.README
@@ -0,0 +1,90 @@
+Topology:
+
+ switch
+ +------+
+ | |
+ | |
+ +-------------+ +-------------+
+ | | | |
+ | | | |
+ | +------+ |
+ | |
+ | |
+ +-+--+ +-+--+
++-------|eth1|------+ +-------|eth1|------+
+| +-+--+ | | +-+--+ |
+| | | |
+| | | |
+| | | |
+| | | |
+| | | |
+| host1 | | host2 |
+| | | |
+| | | |
+| | | |
++-------------------+ +-------------------+
+
+Number of hosts: 2
+Host #1 description:
+ One ethernet device configured with ip addresses:
+ 192.168.99.1/24
+ fc00:1::1/64
+
+Host #2 description:
+ One ethernet device configured with ip addresses:
+ 192.168.100.1/24
+ fc00:2::1/64
+
+Test name:
+ ipsec_esp_aead.py
+Test description:
+ Ping:
+ + count: 10
+ + interval: 0.1s
+ + between ipsec encrypted ethernet interfaces headers check expecting PASS
+ + between ipsec encrypted ethernet interfaces compression check expecting PASS
+ Ping6:
+ + count: 10
+ + interval: 0.1s
+ + between ipsec encrypted ethernet interfaces headers check expecting PASS
+ + between ipsec encrypted ethernet interfaces compression check expecting PASS
+ Netperf:
+ + duration: 60s
+ + TCP_STREAM and UDP_STREAM
+ + ipv4 and ipv6
+ + between ipsec encrypted ethernet interfaces
+ IPsec
+ + tested with esp AEAD
+ + tested with algorithm rfc4106(gcm(aes))
+ + key length - 160bit
+ + IV length - 96bit
+
+PerfRepo integration:
+ First, preparation in PerfRepo is required - you need to create Test objects
+ through the web interface that properly describe the individual Netperf
+ tests that this recipe runs. Don't forget to also add appropriate metrics.
+ For these Netperf tests it's always:
+ * throughput
+ * throughput_min
+ * throughput_max
+ * throughput_deviation
+
+ After that, to enable support for PerfRepo you need to create the file
+ ipsec_esp_aead.mapping and define the following id mappings:
+ tcp_ipv4_id -> to store ipv4 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ tcp_ipv6_id -> to store ipv6 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv4_id -> to store ipv4 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv6_id -> to store ipv6 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+
+ To enable result comparison against baselines you need to create a Report in
+ PerfRepo that will store the baseline. Set up the Report to only contain results
+ with the same hash tag and then add a new mapping to the mapping file, with
+ this format:
+ <some_hash> = <report_id>
+
+ The hash value is automatically generated during test execution and added
+ to each result stored in PerfRepo. To get the Report id you need to open
+ that report in your browser and find it in the URL.
+
+ When running this recipe you should also define the 'product_name' alias
+ (e.g. RHEL7) in order to tag the result object in PerfRepo.
diff --git a/recipes/regression_tests/phase3/ipsec_esp_aead.py b/recipes/regression_tests/phase3/ipsec_esp_aead.py
new file mode 100644
index 0000000..5094a09
--- /dev/null
+++ b/recipes/regression_tests/phase3/ipsec_esp_aead.py
@@ -0,0 +1,434 @@
+from lnst.Common.Utils import bool_it
+from lnst.Controller.Task import ctl
+from lnst.Controller.PerfRepoUtils import perfrepo_baseline_to_dict
+from lnst.Controller.PerfRepoUtils import netperf_result_template
+
+from lnst.RecipeCommon.ModuleWrap import ping, ping6, netperf
+from lnst.RecipeCommon.IRQ import pin_dev_irqs
+from lnst.RecipeCommon.PerfRepo import generate_perfrepo_comment
+import re
+
+# ---------------------------
+# ALGORITHM AND CIPHER CONFIG
+# ---------------------------
+
+#length param is in bits
+def generate_key(length):
+ key = "0x"
+ key = key + (length/8) * "0b"
+ return key
+
+algorithm = []
+
+algorithm.append(('rfc4106(gcm(aes))', 160, 96))
+
+# ------
+# SETUP
+# ------
+
+mapping_file = ctl.get_alias("mapping_file")
+perf_api = ctl.connect_PerfRepo(mapping_file)
+
+product_name = ctl.get_alias("product_name")
+
+m1 = ctl.get_host("machine1")
+m2 = ctl.get_host("machine2")
+
+m1.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf", "Custom"])
+m2.sync_resources(modules=["PacketAssert", "IcmpPing", "Icmp6Ping", "Netperf"])
+
+# ------
+# TESTS
+# ------
+
+ipv = ctl.get_alias("ipv")
+mtu = ctl.get_alias("mtu")
+netperf_duration = int(ctl.get_alias("netperf_duration"))
+nperf_reserve = int(ctl.get_alias("nperf_reserve"))
+nperf_confidence = ctl.get_alias("nperf_confidence")
+nperf_max_runs = int(ctl.get_alias("nperf_max_runs"))
+nperf_cpupin = ctl.get_alias("nperf_cpupin")
+nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
+nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
+nperf_debug = ctl.get_alias("nperf_debug")
+nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_msg_size = ctl.get_alias("nperf_msg_size")
+pr_user_comment = ctl.get_alias("perfrepo_comment")
+ipsec_mode = ctl.get_alias("ipsec_mode")
+official_result = bool_it(ctl.get_alias("official_result"))
+
+pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
+
+m1_if = m1.get_interface("eth")
+m2_if = m2.get_interface("eth")
+
+m1_if.set_mtu(mtu)
+m2_if.set_mtu(mtu)
+
+m1_if_name = m1_if.get_devname()
+m2_if_name = m2_if.get_devname()
+
+m1_if_addr = m1_if.get_ip()
+m2_if_addr = m2_if.get_ip()
+
+m1_if_addr6 = m1_if.get_ip(1)
+m2_if_addr6 = m2_if.get_ip(1)
+
+
+# add routing rulez ipv4
+# so the host knows where to send traffic destined to remote site
+m1.run("ip route add %s dev %s" % (m2_if_addr, m1_if_name))
+
+# so the host knows where to send traffic destined to remote site
+m2.run("ip route add %s dev %s" % (m1_if_addr, m2_if_name))
+
+# add routing rulez ipv6
+# so the host knows where to send traffic destined to remote site
+m1.run("ip route add %s dev %s" % (m2_if_addr6, m1_if_name))
+
+# so the host knows where to send traffic destined to remote site
+m2.run("ip route add %s dev %s" % (m1_if_addr6, m2_if_name))
+
+if nperf_msg_size is None:
+ nperf_msg_size = 16000
+
+if ipsec_mode is None:
+ ipsec_mode = "transport"
+
+res = m1.run("rpm -qa iproute", save_output=True)
+if (res.get_result()["res_data"]["stdout"].find("iproute-2") != -1):
+ m1_key="0x"
+else:
+ m1_key=""
+
+res = m2.run("rpm -qa iproute", save_output=True)
+if (res.get_result()["res_data"]["stdout"].find("iproute-2") != -1):
+ m2_key="0x"
+else:
+ m2_key=""
+
+if nperf_cpupin:
+ m1.run("service irqbalance stop")
+ m2.run("service irqbalance stop")
+
+ dev_list = [(m1, m1_if), (m2, m2_if)]
+
+ # this will pin devices irqs to cpu #0
+ for m, d in dev_list:
+ pin_dev_irqs(m, d, 0)
+
+nperf_opts = ""
+if nperf_cpupin and nperf_num_parallel == 1:
+ nperf_opts = " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
+
+ctl.wait(15)
+
+def configure_ipsec(algo, algo_key, icv_len, ip_version):
+ if ip_version == "ipv4":
+ m1_addr = m1_if_addr
+ m2_addr = m2_if_addr
+ else:
+ m1_addr = m1_if_addr6
+ m2_addr = m2_if_addr6
+
+ # configure policy and state
+ m1.run("ip xfrm policy flush")
+ m1.run("ip xfrm state flush")
+ m2.run("ip xfrm policy flush")
+ m2.run("ip xfrm state flush")
+
+ m1.run("ip xfrm state add src %s dst %s proto esp spi 0x1001 "\
+ "aead '%s' %s %s mode %s "\
+ "sel src %s dst %s"\
+ % (m2_addr, m1_addr,
+ algo, algo_key, icv_len, ipsec_mode,
+ m2_addr, m1_addr))
+
+ m1.run("ip xfrm policy add src %s dst %s dir in tmpl "\
+ "src %s dst %s proto esp mode %s action allow"\
+ % (m2_addr, m1_addr,
+ m2_addr, m1_addr, ipsec_mode))
+
+ m1.run("ip xfrm state add src %s dst %s proto esp spi 0x1000 "\
+ "aead '%s' %s %s mode %s "\
+ "sel src %s dst %s"\
+ % (m1_addr, m2_addr,
+ algo, algo_key, icv_len, ipsec_mode,
+ m1_addr, m2_addr))
+
+ m1.run("ip xfrm policy add src %s dst %s dir out tmpl "\
+ "src %s dst %s proto esp mode %s action allow"\
+ % (m1_addr, m2_addr,
+ m1_addr, m2_addr, ipsec_mode))
+
+
+
+
+ m2.run("ip xfrm state add src %s dst %s proto esp spi 0x1000 "\
+ "aead '%s' %s %s mode %s "\
+ "sel src %s dst %s"\
+ % (m1_addr, m2_addr,
+ algo, algo_key, icv_len, ipsec_mode,
+ m1_addr, m2_addr))
+
+ m2.run("ip xfrm policy add src %s dst %s dir in tmpl "\
+ "src %s dst %s proto esp mode %s action allow"\
+ % (m1_addr, m2_addr,
+ m1_addr, m2_addr, ipsec_mode))
+
+ m2.run("ip xfrm state add src %s dst %s proto esp spi 0x1001 "\
+ "aead '%s' %s %s mode %s sel "\
+ "src %s dst %s"\
+ % (m2_addr, m1_addr,
+ algo, algo_key, icv_len, ipsec_mode,
+ m2_addr, m1_addr))
+
+ m2.run("ip xfrm policy add src %s dst %s dir out tmpl "\
+ "src %s dst %s proto esp mode %s action allow"\
+ % (m2_addr, m1_addr,
+ m2_addr, m1_addr, ipsec_mode))
+
+
+
+for algo, key_len, icv_len in algorithm:
+ # test: TCP netperf, UDP netperf
+ if ipv in [ 'ipv4', 'both']:
+ configure_ipsec(algo,
+ generate_key(key_len),
+ icv_len,
+ "ipv4")
+
+ dump = m1.run("tcpdump -i %s -nn -vv" % m1_if_name, bg=True)
+
+ # ping + PacketAssert
+ assert_mod = ctl.get_module("PacketAssert",
+ options={
+ "interface": m2_if_name,
+ "filter": "esp",
+ "grep_for": [ "ESP\(spi=0x00001001" ],
+ "min": 10
+ })
+
+ assert_proc = m2.run(assert_mod, bg=True)
+
+ ping_mod = ctl.get_module("IcmpPing",
+ options={
+ "addr": m2_if_addr,
+ "count": 10,
+ "interval": 0.1})
+
+ ctl.wait(2)
+
+ m1.run(ping_mod)
+
+ ctl.wait(2)
+
+ assert_proc.intr()
+
+ dump.intr()
+
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_tcp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ result_tcp.set_parameter('ipsec_algorithm', algo)
+ result_tcp.set_parameter('key_length', key_len)
+ result_tcp.set_parameter('iv_length', icv_len)
+ result_tcp.set_parameter('msg_size', nperf_msg_size)
+ result_tcp.set_parameter('ipsec_mode', ipsec_mode)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+
+ tcp_res_data = netperf((m1, m1_if, 0, {"scope": 0}),
+ (m2, m2_if, 0, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+ "netperf_opts": nperf_opts},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp, official_result)
+
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_udp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ result_udp.set_parameter('ipsec_algorithm', algo)
+ result_udp.set_parameter('key_length', key_len)
+ result_udp.set_parameter('iv_length', icv_len)
+ result_udp.set_parameter('msg_size', nperf_msg_size)
+ result_udp.set_parameter('ipsec_mode', ipsec_mode)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+ udp_res_data = netperf((m1, m1_if, 0, {"scope": 0}),
+ (m2, m2_if, 0, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+ "netperf_opts": nperf_opts},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp, official_result)
+
+ if ipv in [ 'ipv6', 'both']:
+ configure_ipsec(algo,
+ generate_key(key_len),
+ icv_len,
+ "ipv6")
+
+ dump = m1.run("tcpdump -i %s -nn -vv" % m1_if_name, bg=True)
+
+ # ping + PacketAssert
+ assert_mod = ctl.get_module("PacketAssert",
+ options={
+ "interface": m2_if_name,
+ "filter": "esp",
+ "grep_for": [ "ESP\(spi=0x00001001" ],
+ "min": 10
+ })
+
+ assert_proc = m2.run(assert_mod, bg=True)
+
+ ping_mod = ctl.get_module("Icmp6Ping",
+ options={
+ "addr": m2_if_addr6,
+ "count": 10,
+ "interval": 0.1})
+
+ ctl.wait(2)
+
+ m1.run(ping_mod)
+
+ ctl.wait(2)
+
+ assert_proc.intr()
+
+ dump.intr()
+
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_tcp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ result_tcp.set_parameter('ipsec_algorithm', algo)
+ result_tcp.set_parameter('key_length', key_len)
+ result_tcp.set_parameter('iv_length', icv_len)
+ result_tcp.set_parameter('msg_size', nperf_msg_size)
+ result_tcp.set_parameter('ipsec_mode', ipsec_mode)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+
+ tcp_res_data = netperf((m1, m1_if, 1, {"scope": 0}),
+ (m2, m2_if, 1, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+ "netperf_opts" : nperf_opts + " -6"},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp, official_result)
+
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_udp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ result_udp.set_parameter('ipsec_algorithm', algo)
+ result_udp.set_parameter('key_length', key_len)
+ result_udp.set_parameter('iv_length', icv_len)
+ result_udp.set_parameter('msg_size', nperf_msg_size)
+ result_udp.set_parameter('ipsec_mode', ipsec_mode)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+ udp_res_data = netperf((m1, m1_if, 1, {"scope": 0}),
+ (m2, m2_if, 1, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+ "netperf_opts" : nperf_opts + " -6"},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp, official_result)
+
+m1.run("ip xfrm policy flush")
+m1.run("ip xfrm state flush")
+m2.run("ip xfrm policy flush")
+m2.run("ip xfrm state flush")
+
+if nperf_cpupin:
+ m1.run("service irqbalance start")
+ m2.run("service irqbalance start")
diff --git a/recipes/regression_tests/phase3/ipsec_esp_aead.xml b/recipes/regression_tests/phase3/ipsec_esp_aead.xml
new file mode 100644
index 0000000..c37010e
--- /dev/null
+++ b/recipes/regression_tests/phase3/ipsec_esp_aead.xml
@@ -0,0 +1,51 @@
+<lnstrecipe>
+ <define>
+ <alias name="ipv" value="both" />
+ <alias name="mtu" value="1500" />
+ <alias name="netperf_duration" value="60" />
+ <alias name="nperf_reserve" value="20" />
+ <alias name="nperf_confidence" value="99,5" />
+ <alias name="nperf_max_runs" value="5"/>
+ <alias name="nperf_num_parallel" value="1"/>
+ <alias name="nperf_debug" value="0"/>
+ <alias name="nperf_max_dev" value="20%"/>
+ <alias name="mapping_file" value="ipsec_esp_aead.mapping"/>
+ <alias name="net_1" value="192.168.99"/>
+ <alias name="net6_1" value="fc00:1::"/>
+ <alias name="net_2" value="192.168.100"/>
+ <alias name="net6_2" value="fc00:2::"/>
+ <alias name="driver" value=""/>
+ <alias name="official_result" value="no" />
+ </define>
+ <network>
+ <host id="machine1">
+ <interfaces>
+ <eth id="eth" label="localnet">
+ <params>
+ <param name="driver" value="{$driver}"/>
+ </params>
+ <addresses>
+ <address value="{$net_1}.1/24"/>
+ <address value="{$net6_1}1/64"/>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ <host id="machine2">
+ <interfaces>
+ <eth id="eth" label="localnet">
+ <params>
+ <param name="driver" value="{$driver}"/>
+ </params>
+ <addresses>
+ <address value="{$net_2}.1/24"/>
+ <address value="{$net6_2}1/64"/>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ </network>
+
+ <task python="ipsec_esp_aead.py"/>
+
+</lnstrecipe>
--
2.5.5
6 years
Re: [PATCH v2] regression_tests: add ipsec_esp_aead test
by Jan Tluka
Thu, Nov 30, 2017 at 08:50:26AM CET, kjerabek(a)redhat.com wrote:
>
>
>----- Original Message -----
>> Tue, Nov 28, 2017 at 01:13:27PM CET, kjerabek(a)redhat.com wrote:
>> >This patch adds new ipsec_esp_aead test to our phase3 regression_tests. It
>> >is
>> >similar to ipsec_esp_ah_comp test. There is difference in configuration.
>> >In the test is configured only esp aead with algorithm rfc4106(gcm(aes)),
>> >key length 160bit and IV length 96bit.
>> >
>> >Signed-off-by: Kamil Jerabek <kjerabek(a)redhat.com>
>> >
>> >---
>> >changes in:
>> >*v2:
>> > * add official_result handling for PerfRepo
>>
>> When I compared first and second version of the patch there are some removals
>> that should not be there IMO. I see that you removed all of these ones (both
>> for tcp and udp, ipv4 and ipv6):
>>
>> - result_tcp.set_parameter('ipsec_algorithm', algo)
>> - result_tcp.set_parameter('key_length', key_len)
>> - result_tcp.set_parameter('iv_length', icv_len)
>> - result_tcp.set_parameter('msg_size', nperf_msg_size)
>> - result_tcp.set_parameter('ipsec_mode', ipsec_mode)
>> -
>>
>Hi,
>this has purpose here, because there is used different configuration.
>So it is not devided into hash and crypto algorithm with key length.
>
>Because of that I used these.
>
>I can use the same parameters as in ipsec_esp_ah_comp test with hash algo,
>length set to None and without iv_len.
>
The loop starts with:
for algo, key_len, icv_len in algorithm:
So how you're going to handle the situation when we add different values
to algorithm? Also how you're going to differentiate the results with
different msg size if it's not set as parameter?
Further I noticed that the mode is always transport, so why you provide
an alias there? Please clean up the code.
While you're at it please remove comment about "compressed check"
because this is not tested.
-Jan
>> Please fix. Thanks,
>> -Jan
>> _______________________________________________
>> LNST-developers mailing list -- lnst-developers(a)lists.fedorahosted.org
>> To unsubscribe send an email to lnst-developers-leave(a)lists.fedorahosted.org
>>
6 years
Fwd: [PATCH v2] regression_tests: add ipsec_esp_aead test
by Kamil Jerabek
----- Forwarded Message -----
>
>
> ----- Original Message -----
> > Tue, Nov 28, 2017 at 01:13:27PM CET, kjerabek(a)redhat.com wrote:
> > >This patch adds new ipsec_esp_aead test to our phase3 regression_tests. It
> > >is
> > >similar to ipsec_esp_ah_comp test. There is difference in configuration.
> > >In the test is configured only esp aead with algorithm rfc4106(gcm(aes)),
> > >key length 160bit and IV length 96bit.
> > >
> > >Signed-off-by: Kamil Jerabek <kjerabek(a)redhat.com>
> > >
> > >---
> > >changes in:
> > >*v2:
> > > * add official_result handling for PerfRepo
> >
> > When I compared first and second version of the patch there are some
> > removals
> > that should not be there IMO. I see that you removed all of these ones
> > (both
> > for tcp and udp, ipv4 and ipv6):
> >
> > - result_tcp.set_parameter('ipsec_algorithm', algo)
> > - result_tcp.set_parameter('key_length', key_len)
> > - result_tcp.set_parameter('iv_length', icv_len)
> > - result_tcp.set_parameter('msg_size', nperf_msg_size)
> > - result_tcp.set_parameter('ipsec_mode', ipsec_mode)
> > -
> >
> Hi,
> this has purpose here, because there is used different configuration.
> So it is not devided into hash and crypto algorithm with key length.
>
> Because of that I used these.
>
> I can use the same parameters as in ipsec_esp_ah_comp test with hash algo,
> length set to None and without iv_len.
>
> > Please fix. Thanks,
> > -Jan
> > _______________________________________________
> > LNST-developers mailing list -- lnst-developers(a)lists.fedorahosted.org
> > To unsubscribe send an email to
> > lnst-developers-leave(a)lists.fedorahosted.org
> >
>
6 years
[PATCH v2] regression_tests: add ipsec_esp_aead test
by Kamil Jerabek
This patch adds new ipsec_esp_aead test to our phase3 regression_tests. It is
similar to ipsec_esp_ah_comp test. There is difference in configuration.
In the test is configured only esp aead with algorithm rfc4106(gcm(aes)),
key length 160bit and IV length 96bit.
Signed-off-by: Kamil Jerabek <kjerabek(a)redhat.com>
---
changes in:
*v2:
* add official_result handling for PerfRepo
---
.../regression_tests/phase3/ipsec_esp_aead.README | 90 +++++
recipes/regression_tests/phase3/ipsec_esp_aead.py | 410 +++++++++++++++++++++
recipes/regression_tests/phase3/ipsec_esp_aead.xml | 51 +++
3 files changed, 551 insertions(+)
create mode 100644 recipes/regression_tests/phase3/ipsec_esp_aead.README
create mode 100644 recipes/regression_tests/phase3/ipsec_esp_aead.py
create mode 100644 recipes/regression_tests/phase3/ipsec_esp_aead.xml
diff --git a/recipes/regression_tests/phase3/ipsec_esp_aead.README b/recipes/regression_tests/phase3/ipsec_esp_aead.README
new file mode 100644
index 0000000..8738da6
--- /dev/null
+++ b/recipes/regression_tests/phase3/ipsec_esp_aead.README
@@ -0,0 +1,90 @@
+Topology:
+
+ switch
+ +------+
+ | |
+ | |
+ +-------------+ +-------------+
+ | | | |
+ | | | |
+ | +------+ |
+ | |
+ | |
+ +-+--+ +-+--+
++-------|eth1|------+ +-------|eth1|------+
+| +-+--+ | | +-+--+ |
+| | | |
+| | | |
+| | | |
+| | | |
+| | | |
+| host1 | | host2 |
+| | | |
+| | | |
+| | | |
++-------------------+ +-------------------+
+
+Number of hosts: 2
+Host #1 description:
+ One ethernet device configured with ip addresses:
+ 192.168.99.1/24
+ fc00:1::1/64
+
+Host #2 description:
+ One ethernet device configured with ip addresses:
+ 192.168.100.1/24
+ fc00:2::1/64
+
+Test name:
+ ipsec_esp_aead.py
+Test description:
+ Ping:
+ + count: 10
+ + interval: 0.1s
+ + between ipsec encrypted ethernet interfaces headers check expecting PASS
+ + between ipsec encrypted ethernet interfaces compression check expecting PASS
+ Ping6:
+ + count: 10
+ + interval: 0.1s
+ + between ipsec encrypted ethernet interfaces headers check expecting PASS
+ + between ipsec encrypted ethernet interfaces compression check expecting PASS
+ Netperf:
+ + duration: 60s
+ + TCP_STREAM and UDP_STREAM
+ + ipv4 and ipv6
+ + between ipsec encrypted ethernet interfaces
+ IPsec
+ + tested with esp AEAD
+ + tested with algorithm rfc4106(gcm(aes))
+ + key length - 160bit
+ + IV length - 96bit
+
+PerfRepo integration:
+ First, preparation in PerfRepo is required - you need to create Test objects
+ through the web interface that properly describe the individual Netperf
+ tests that this recipe runs. Don't forget to also add appropriate metrics.
+ For these Netperf tests it's always:
+ * throughput
+ * throughput_min
+ * throughput_max
+ * throughput_deviation
+
+ After that, to enable support for PerfRepo you need to create the file
+ ipsec_esp_aead.mapping and define the following id mappings:
+ tcp_ipv4_id -> to store ipv4 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ tcp_ipv6_id -> to store ipv6 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv4_id -> to store ipv4 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv6_id -> to store ipv6 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+
+ To enable result comparison against baselines you need to create a Report in
+ PerfRepo that will store the baseline. Set up the Report to only contain results
+ with the same hash tag and then add a new mapping to the mapping file, with
+ this format:
+ <some_hash> = <report_id>
+
+ The hash value is automatically generated during test execution and added
+ to each result stored in PerfRepo. To get the Report id you need to open
+ that report in your browser and find it in the URL.
+
+ When running this recipe you should also define the 'product_name' alias
+ (e.g. RHEL7) in order to tag the result object in PerfRepo.
diff --git a/recipes/regression_tests/phase3/ipsec_esp_aead.py b/recipes/regression_tests/phase3/ipsec_esp_aead.py
new file mode 100644
index 0000000..bd08d91
--- /dev/null
+++ b/recipes/regression_tests/phase3/ipsec_esp_aead.py
@@ -0,0 +1,410 @@
+from lnst.Common.Utils import bool_it
+from lnst.Controller.Task import ctl
+from lnst.Controller.PerfRepoUtils import perfrepo_baseline_to_dict
+from lnst.Controller.PerfRepoUtils import netperf_result_template
+
+from lnst.RecipeCommon.ModuleWrap import ping, ping6, netperf
+from lnst.RecipeCommon.IRQ import pin_dev_irqs
+from lnst.RecipeCommon.PerfRepo import generate_perfrepo_comment
+import re
+
+# ---------------------------
+# ALGORITHM AND CIPHER CONFIG
+# ---------------------------
+
+#length param is in bits
+def generate_key(length):
+ key = "0x"
+ key = key + (length/8) * "0b"
+ return key
+
+algorithm = []
+
+algorithm.append(('rfc4106(gcm(aes))', 160, 96))
+
+# ------
+# SETUP
+# ------
+
+mapping_file = ctl.get_alias("mapping_file")
+perf_api = ctl.connect_PerfRepo(mapping_file)
+
+product_name = ctl.get_alias("product_name")
+
+m1 = ctl.get_host("machine1")
+m2 = ctl.get_host("machine2")
+
+m1.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf", "Custom"])
+m2.sync_resources(modules=["PacketAssert", "IcmpPing", "Icmp6Ping", "Netperf"])
+
+# ------
+# TESTS
+# ------
+
+ipv = ctl.get_alias("ipv")
+mtu = ctl.get_alias("mtu")
+netperf_duration = int(ctl.get_alias("netperf_duration"))
+nperf_reserve = int(ctl.get_alias("nperf_reserve"))
+nperf_confidence = ctl.get_alias("nperf_confidence")
+nperf_max_runs = int(ctl.get_alias("nperf_max_runs"))
+nperf_cpupin = ctl.get_alias("nperf_cpupin")
+nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
+nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
+nperf_debug = ctl.get_alias("nperf_debug")
+nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_msg_size = ctl.get_alias("nperf_msg_size")
+pr_user_comment = ctl.get_alias("perfrepo_comment")
+ipsec_mode = ctl.get_alias("ipsec_mode")
+official_result = bool_it(ctl.get_alias("official_result"))
+
+pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
+
+m1_if = m1.get_interface("eth")
+m2_if = m2.get_interface("eth")
+
+m1_if.set_mtu(mtu)
+m2_if.set_mtu(mtu)
+
+m1_if_name = m1_if.get_devname()
+m2_if_name = m2_if.get_devname()
+
+m1_if_addr = m1_if.get_ip()
+m2_if_addr = m2_if.get_ip()
+
+m1_if_addr6 = m1_if.get_ip(1)
+m2_if_addr6 = m2_if.get_ip(1)
+
+
+# add routing rulez ipv4
+# so the host knows where to send traffic destined to remote site
+m1.run("ip route add %s dev %s" % (m2_if_addr, m1_if_name))
+
+# so the host knows where to send traffic destined to remote site
+m2.run("ip route add %s dev %s" % (m1_if_addr, m2_if_name))
+
+# add routing rulez ipv6
+# so the host knows where to send traffic destined to remote site
+m1.run("ip route add %s dev %s" % (m2_if_addr6, m1_if_name))
+
+# so the host knows where to send traffic destined to remote site
+m2.run("ip route add %s dev %s" % (m1_if_addr6, m2_if_name))
+
+if nperf_msg_size is None:
+ nperf_msg_size = 16000
+
+if ipsec_mode is None:
+ ipsec_mode = "transport"
+
+res = m1.run("rpm -qa iproute", save_output=True)
+if (res.get_result()["res_data"]["stdout"].find("iproute-2") != -1):
+ m1_key="0x"
+else:
+ m1_key=""
+
+res = m2.run("rpm -qa iproute", save_output=True)
+if (res.get_result()["res_data"]["stdout"].find("iproute-2") != -1):
+ m2_key="0x"
+else:
+ m2_key=""
+
+if nperf_cpupin:
+ m1.run("service irqbalance stop")
+ m2.run("service irqbalance stop")
+
+ dev_list = [(m1, m1_if), (m2, m2_if)]
+
+ # this will pin devices irqs to cpu #0
+ for m, d in dev_list:
+ pin_dev_irqs(m, d, 0)
+
+nperf_opts = ""
+if nperf_cpupin and nperf_num_parallel == 1:
+ nperf_opts = " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
+
+ctl.wait(15)
+
+def configure_ipsec(algo, algo_key, icv_len, ip_version):
+ if ip_version == "ipv4":
+ m1_addr = m1_if_addr
+ m2_addr = m2_if_addr
+ else:
+ m1_addr = m1_if_addr6
+ m2_addr = m2_if_addr6
+
+ # configure policy and state
+ m1.run("ip xfrm policy flush")
+ m1.run("ip xfrm state flush")
+ m2.run("ip xfrm policy flush")
+ m2.run("ip xfrm state flush")
+
+ m1.run("ip xfrm state add src %s dst %s proto esp spi 0x1001 "\
+ "aead '%s' %s %s mode %s "\
+ "sel src %s dst %s"\
+ % (m2_addr, m1_addr,
+ algo, algo_key, icv_len, ipsec_mode,
+ m2_addr, m1_addr))
+
+ m1.run("ip xfrm policy add src %s dst %s dir in tmpl "\
+ "src %s dst %s proto esp mode %s action allow"\
+ % (m2_addr, m1_addr,
+ m2_addr, m1_addr, ipsec_mode))
+
+ m1.run("ip xfrm state add src %s dst %s proto esp spi 0x1000 "\
+ "aead '%s' %s %s mode %s "\
+ "sel src %s dst %s"\
+ % (m1_addr, m2_addr,
+ algo, algo_key, icv_len, ipsec_mode,
+ m1_addr, m2_addr))
+
+ m1.run("ip xfrm policy add src %s dst %s dir out tmpl "\
+ "src %s dst %s proto esp mode %s action allow"\
+ % (m1_addr, m2_addr,
+ m1_addr, m2_addr, ipsec_mode))
+
+
+
+
+ m2.run("ip xfrm state add src %s dst %s proto esp spi 0x1000 "\
+ "aead '%s' %s %s mode %s "\
+ "sel src %s dst %s"\
+ % (m1_addr, m2_addr,
+ algo, algo_key, icv_len, ipsec_mode,
+ m1_addr, m2_addr))
+
+ m2.run("ip xfrm policy add src %s dst %s dir in tmpl "\
+ "src %s dst %s proto esp mode %s action allow"\
+ % (m1_addr, m2_addr,
+ m1_addr, m2_addr, ipsec_mode))
+
+ m2.run("ip xfrm state add src %s dst %s proto esp spi 0x1001 "\
+ "aead '%s' %s %s mode %s sel "\
+ "src %s dst %s"\
+ % (m2_addr, m1_addr,
+ algo, algo_key, icv_len, ipsec_mode,
+ m2_addr, m1_addr))
+
+ m2.run("ip xfrm policy add src %s dst %s dir out tmpl "\
+ "src %s dst %s proto esp mode %s action allow"\
+ % (m2_addr, m1_addr,
+ m2_addr, m1_addr, ipsec_mode))
+
+
+
+for algo, key_len, icv_len in algorithm:
+ # test: compressed check, TCP netperf, UDP netperf
+ if ipv in [ 'ipv4', 'both']:
+ configure_ipsec(algo,
+ generate_key(key_len),
+ icv_len,
+ "ipv4")
+
+ dump = m1.run("tcpdump -i %s -nn -vv" % m1_if_name, bg=True)
+
+ # ping + PacketAssert
+ assert_mod = ctl.get_module("PacketAssert",
+ options={
+ "interface": m2_if_name,
+ "filter": "esp",
+ "grep_for": [ "ESP\(spi=0x00001001" ],
+ "min": 10
+ })
+
+ assert_proc = m2.run(assert_mod, bg=True)
+
+ ping_mod = ctl.get_module("IcmpPing",
+ options={
+ "addr": m2_if_addr,
+ "count": 10,
+ "interval": 0.1})
+
+ ctl.wait(2)
+
+ m1.run(ping_mod)
+
+ ctl.wait(2)
+
+ assert_proc.intr()
+
+ dump.intr()
+
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_tcp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+
+ tcp_res_data = netperf((m1, m1_if, 0, {"scope": 0}),
+ (m2, m2_if, 0, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+ "netperf_opts": nperf_opts},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp, official_result)
+
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_udp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+ udp_res_data = netperf((m1, m1_if, 0, {"scope": 0}),
+ (m2, m2_if, 0, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+ "netperf_opts": nperf_opts},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp, official_result)
+
+ if ipv in [ 'ipv6', 'both']:
+ configure_ipsec(algo,
+ generate_key(key_len),
+ icv_len,
+ "ipv6")
+
+ dump = m1.run("tcpdump -i %s -nn -vv" % m1_if_name, bg=True)
+
+ # ping + PacketAssert
+ assert_mod = ctl.get_module("PacketAssert",
+ options={
+ "interface": m2_if_name,
+ "filter": "esp",
+ "grep_for": [ "ESP\(spi=0x00001001" ],
+ "min": 10
+ })
+
+ assert_proc = m2.run(assert_mod, bg=True)
+
+ ping_mod = ctl.get_module("Icmp6Ping",
+ options={
+ "addr": m2_if_addr6,
+ "count": 10,
+ "interval": 0.1})
+
+ ctl.wait(2)
+
+ m1.run(ping_mod)
+
+ ctl.wait(2)
+
+ assert_proc.intr()
+
+ dump.intr()
+
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_tcp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+
+ tcp_res_data = netperf((m1, m1_if, 1, {"scope": 0}),
+ (m2, m2_if, 1, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+ "netperf_opts" : nperf_opts + " -6"},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp, official_result)
+
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_udp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+ udp_res_data = netperf((m1, m1_if, 1, {"scope": 0}),
+ (m2, m2_if, 1, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation": nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+ "netperf_opts" : nperf_opts + " -6"},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp, official_result)
+
+m1.run("ip xfrm policy flush")
+m1.run("ip xfrm state flush")
+m2.run("ip xfrm policy flush")
+m2.run("ip xfrm state flush")
+
+if nperf_cpupin:
+ m1.run("service irqbalance start")
+ m2.run("service irqbalance start")
diff --git a/recipes/regression_tests/phase3/ipsec_esp_aead.xml b/recipes/regression_tests/phase3/ipsec_esp_aead.xml
new file mode 100644
index 0000000..c37010e
--- /dev/null
+++ b/recipes/regression_tests/phase3/ipsec_esp_aead.xml
@@ -0,0 +1,51 @@
+<lnstrecipe>
+ <define>
+ <alias name="ipv" value="both" />
+ <alias name="mtu" value="1500" />
+ <alias name="netperf_duration" value="60" />
+ <alias name="nperf_reserve" value="20" />
+ <alias name="nperf_confidence" value="99,5" />
+ <alias name="nperf_max_runs" value="5"/>
+ <alias name="nperf_num_parallel" value="1"/>
+ <alias name="nperf_debug" value="0"/>
+ <alias name="nperf_max_dev" value="20%"/>
+ <alias name="mapping_file" value="ipsec_esp_aead.mapping"/>
+ <alias name="net_1" value="192.168.99"/>
+ <alias name="net6_1" value="fc00:1::"/>
+ <alias name="net_2" value="192.168.100"/>
+ <alias name="net6_2" value="fc00:2::"/>
+ <alias name="driver" value=""/>
+ <alias name="official_result" value="no" />
+ </define>
+ <network>
+ <host id="machine1">
+ <interfaces>
+ <eth id="eth" label="localnet">
+ <params>
+ <param name="driver" value="{$driver}"/>
+ </params>
+ <addresses>
+ <address value="{$net_1}.1/24"/>
+ <address value="{$net6_1}1/64"/>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ <host id="machine2">
+ <interfaces>
+ <eth id="eth" label="localnet">
+ <params>
+ <param name="driver" value="{$driver}"/>
+ </params>
+ <addresses>
+ <address value="{$net_2}.1/24"/>
+ <address value="{$net6_2}1/64"/>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ </network>
+
+ <task python="ipsec_esp_aead.py"/>
+
+</lnstrecipe>
--
2.5.5
6 years
[patch lnst-next RFC] Machine: reduce initial resource synchronization and load phase
by Jiri Pirko
From: Jiri Pirko <jiri(a)mellanox.com>
So far, each resource is checked and loaded one by one. That results
in many rpc calls. On my 20ms line, this takes ~4secs for one slave.
Bulk the checks and loads together, which allows this to be done in ~1sec.
Signed-off-by: Jiri Pirko <jiri(a)mellanox.com>
---
lnst/Controller/Machine.py | 68 +++++++++++++++++++++++++++++-----------------
lnst/Slave/NetTestSlave.py | 10 +++++++
2 files changed, 53 insertions(+), 25 deletions(-)
diff --git a/lnst/Controller/Machine.py b/lnst/Controller/Machine.py
index 38a3646..2290e2f 100644
--- a/lnst/Controller/Machine.py
+++ b/lnst/Controller/Machine.py
@@ -257,18 +257,15 @@ class Machine(object):
for cls_name, cls in device_classes:
classes.extend(reversed(self._get_base_classes(cls)))
+ minfos = []
for cls in classes:
if cls is object:
continue
- module_name = cls.__module__
- module = sys.modules[module_name]
- filename = module.__file__
+ minfos.append(self._get_module_info(cls.__module__))
- if filename[-3:] == "pyc":
- filename = filename[:-1]
+ self.sync_resources(minfos)
- res_hash = self.sync_resource(module_name, filename)
- self.rpc_call("load_cached_module", module_name, res_hash)
+ self.rpc_call("load_cached_modules", minfos)
for cls_name, cls in device_classes:
module_name = cls.__module__
@@ -348,15 +345,10 @@ class Machine(object):
for cls in reversed(classes):
if cls is object or cls is BaseTestModule:
continue
- m_name = cls.__module__
- m = sys.modules[m_name]
- filename = m.__file__
- if filename[-3:] == "pyc":
- filename = filename[:-1]
-
- res_hash = self.sync_resource(m_name, filename)
-
- self.rpc_call("load_cached_module", m_name, res_hash)
+ minfo = self._get_module_info(cls.__module__)
+ self.sync_resource(minfo)
+ self.rpc_call("load_cached_module",
+ minfo["name"], minfo["res_hash"])
logging.info("Host %s executing job %d: %s" %
(self._id, job.id, str(job)))
@@ -518,19 +510,45 @@ class Machine(object):
local_file.close()
self.rpc_call("finish_copy_from", remote_path)
- def sync_resource(self, res_name, file_path):
- digest = sha256sum(file_path)
+ def _get_module_info(self, name):
+ module = sys.modules[name]
+ filepath = module.__file__
+
+ if filepath[-3:] == "pyc":
+ filepath = filepath[:-1]
+
+ return {"name": name, "filepath": filepath}
+
+ def _module_info_res_hash(self, minfo):
+ if not "res_hash" in minfo:
+ minfo["res_hash"] = sha256sum(minfo["filepath"])
- if not self.rpc_call("has_resource", digest):
- msg = "Transfering %s to machine %s as '%s'" % (file_path,
+ def sync_resource(self, minfo):
+ self._module_info_res_hash(minfo)
+
+ if not self.rpc_call("has_resource", minfo["res_hash"]):
+ msg = "Transfering %s to machine %s as '%s'" % (minfo["filepath"],
self.get_id(),
- res_name)
+ minfo["name"])
logging.debug(msg)
- remote_path = self.copy_file_to_machine(file_path)
- self.rpc_call("add_resource_to_cache",
- "file", remote_path, res_name)
- return digest
+ remote_path = self.copy_file_to_machine(minfo["filepath"])
+ self.rpc_call("add_resource_to_cache", "file",
+ remote_path, minfo["name"])
+
+ def sync_resources(self, minfos):
+ for minfo in minfos:
+ self._module_info_res_hash(minfo)
+
+ res_hashes = list(map(lambda minfo: minfo["res_hash"], minfos))
+
+ # To avoid asking per-resource, ask for all resources in one call.
+ # If at least one resource is out of sync,
+ # try to sync them individually.
+ if self.rpc_call("has_resources", res_hashes):
+ return
+ for minfo in minfos:
+ self.sync_resource(minfo)
# def enable_nm(self):
# return self._rpc_call("enable_nm")
diff --git a/lnst/Slave/NetTestSlave.py b/lnst/Slave/NetTestSlave.py
index 908e85c..5623c70 100644
--- a/lnst/Slave/NetTestSlave.py
+++ b/lnst/Slave/NetTestSlave.py
@@ -156,6 +156,10 @@ class SlaveMethods:
module = imp.load_source(module_name, module_path)
self._dynamic_modules[module_name] = module
+ def load_cached_modules(self, minfos):
+ for minfo in minfos:
+ self.load_cached_module(minfo["name"], minfo["res_hash"])
+
def init_if_manager(self):
self._if_manager = InterfaceManager(self._server_handler)
for cls_name in dir(Devices):
@@ -407,6 +411,12 @@ class SlaveMethods:
return False
+ def has_resources(self, res_hashes):
+ for res_hash in res_hashes:
+ if not self.has_resource(res_hash):
+ return False
+ return True
+
def add_resource_to_cache(self, res_type, local_path, name):
if res_type == "file":
self._cache.add_file_entry(local_path, name)
--
2.9.5
6 years
[PATCH 1/2] regression_tests ovs-dpdk-pvp: update README
by olichtne@redhat.com
From: Ondrej Lichtner <olichtne(a)redhat.com>
* small update of the ASCII art - distinguish eth nics on the guest
* updating the full recipe description to be more in-depth
Signed-off-by: Ondrej Lichtner <olichtne(a)redhat.com>
---
.../regression_tests/phase3/ovs-dpdk-pvp.README | 168 +++++++++++++++++----
1 file changed, 140 insertions(+), 28 deletions(-)
diff --git a/recipes/regression_tests/phase3/ovs-dpdk-pvp.README b/recipes/regression_tests/phase3/ovs-dpdk-pvp.README
index edf3e72..2cbbc77 100644
--- a/recipes/regression_tests/phase3/ovs-dpdk-pvp.README
+++ b/recipes/regression_tests/phase3/ovs-dpdk-pvp.README
@@ -30,9 +30,9 @@ Topology:
+------------------------+ +--|vhost1|----|vhost2|--+
+--+---+ +--+---+
| |
- +-+-+ +-+-+
- +---+eth+-------|eth|---+
- | +---+ +---+ |
+ +--+--+ +--+--+
+ +---+ eth1+-----| eth2|-+
+ | +-----+ +-----+ |
| <-----------> |
| testpmd |
| |
@@ -58,9 +58,35 @@ Recipe parameters:
<test_duration> = how long each stream generation is in seconds, default 60
Host #1 description:
- Two ethernet devices bound to the vfio-pci driver for dpdk use
- The TRex generator is configured to generate 2 streams
- The streams are created with scapy as UDP datagrams:
+ Provisioning requirements before recipe execution:
+ * Test was designed for RHEL7 x86_64 Server version >= 7.4 GA release
+ * after installation, the following options are added to the
+ kernel command line:
+ isolcpus=1,2,3,4 intel_iommu=on default_hugepagesz=2M hugepagesz=2M hugepages=2048
+ * packages installed on top of the default installation:
+ wget gcc make vim tcpdump pciutils glibc-headers tar bzip2 git numactl-devel gzip PyYAML tmux NetworkManager-team python-paramiko python-netifaces driverctl
+ * dpdk version 17.08 is installed
+ * trex version 2.28 is installed to <trex_dir>
+
+ * After matching the selected nics are configured with ipv4 addresses:
+ 192.168.1.1/24 to eth1
+ 192.168.1.3/24 to eth2
+ * And a parallel ping is sent from each nic to addresses of Host #2:
+ 192.168.1.2/24 from eth1
+ 192.168.1.4/24 from eth2
+
+ count=100, interval=0.1 and we only check if at least 20% of
+ packets passed.
+ This should teach the lab switch between the two hosts the proper mac
+ address-port mapping
+
+ * irqbalance service is stopped and all irqs are boud to CPU0
+ * The number of hugepages is set to <nr_hugepages> using the sysfs interface
+ * eth1 and eth2 are bound to the vfio-pci driver using driverctl for dpdk use
+ * when Host2 and Guest configuration is finished we configure the TRex
+ server
+ * The TRex generator is configured to generate 2 streams
+ the streams are created with scapy as UDP datagrams:
src_mac = host1.{eth1, eth2}.mac
dst_mac = host2.{eth1, eth2}.mac
src_ip = 192.168.1.{1, 3}
@@ -68,44 +94,130 @@ Host #1 description:
src_port = any
dst_port = any
data = padding so that the entire length of the datagram == <pkt_size>
- TRex then generates 2 streams using 100% on each port and measures the rx
- rate in pps on both ports.
- The measured rx rates for each ports are added together and a standard
- deviation and average from <runs> iterations is calculated.
- In PerfRepo we store the result as:
- rx_rate = average summed rx rate of both ports in pps
- rx_rate_min = rx_rate - 2*std_deviation
- rx_rate_max = rx_rate + 2*std_deviation
- rx_rate_deviation = 2*std_deviation
- port0_rate = average rx rate of the first port in pps
- port1_rate = average rx rate of the second port in pps
+ * TRex then generates 2 streams using 100% on each port and measures the rx
+ rate in pps on both ports.
+ The measured rx rates for each ports are added together and a standard
+ deviation and average from <runs> iterations is calculated.
+ In PerfRepo we store the result as:
+ rx_rate = average summed rx rate of both ports in pps
+ rx_rate_min = rx_rate - 2*std_deviation
+ rx_rate_max = rx_rate + 2*std_deviation
+ rx_rate_deviation = 2*std_deviation
+ port0_rate = average rx rate of the first port in pps
+ port1_rate = average rx rate of the second port in pps
Host #2 description:
- Two ethernet devices bound to the vfio-pci driver for dpdk use
- Two vhostuser ports created by the guest host - the guest being the
- vhostuser server and ovs as the vhostuser client
- OvS bridge br0 configured with 4 ports:
+ Provisioning requirements before recipe execution:
+ * Test was designed for RHEL7 x86_64 Server version >= 7.4 GA release
+ * after installation, the following options are added to the kernel command
+ line:
+ isolcpus=1,2,3,4,5,6,7,8 intel_iommu=on default_hugepagesz=2M hugepagesz=2M hugepages=2048
+ * packages installed on top of the default installation:
+ wget gcc make yum-utils autoconf automake libtool vim pciutils rpmdevtools glibc-headers numactl-devel gzip libhugetlbfs-utils tmux qemu-kvm-rhev python-paramiko python-netifaces driverctl
+
+ qemu version must be >=2.3.0, in our setup this is provided by
+ qemu-kv-rhev which available in RHV-4.0 repositories
+ * guest is installed via libvirt (handled by beaker), using 16G of ram and
+ 4CPUs, mapped to host cpus 5,6,7,8, more info in Guest provisioning
+ * openvswitch is installed as software under test
+ * dpdk version 17.08 is installed
+
+ * After matching the selected nics are configured with ipv4 addresses:
+ 192.168.1.2/24 to eth1
+ 192.168.1.4/24 to eth2
+ * And a parallel ping is sent from each nic to addresses of Host #1:
+ 192.168.1.1/24 from eth1
+ 192.168.1.3/24 from eth2
+
+ count=100, interval=0.1 and we only check if at least 20% of
+ packets passed.
+ This should teach the lab switch between the two hosts the proper mac
+ address-port mapping
+
+ * irqbalance service is stopped and all irqs are boud to CPU0
+ * The number of hugepages is set to <nr_hugepages> using the sysfs interface
+ * openvswitch service is started and configured to enable/use dpdk
+
+ * eth1 and eth2 are bound to the vfio-pci driver using driverctl for dpdk
+ use (in an ovs bridge)
+ * an ovs bridge is created and eth1 and eth2 nics are added as dpdk ports
+ * Guest1 is managed using virsh and the guest XML is edited:
+ * we add 2 vhostuser ports where qemu is in server mode
+ * these nics use the original hw addresses of eth1 and eth2 Host2 nics
+ * additionally under <cpu> we add:
+ <numa>
+ <cell id="0" cpus="0" memory=guest_mem_amount unit="KiB" memAccess="shared"/>
+ </numa>
+ where guest_mem_amount is a parameter <guest_mem_amount>
+ It's important to note that the hosts in our lab don't use numa, adding
+ this is only required because of the memAccess="shared" attribute for
+ some reason the vhostuser nics don't work without it.
+ * Finally we add:
+ <cputune>
+ <vcpupin vcpu="0" cpuset="5"/>
+ <vcpupin vcpu="1" cpuset="6"/>
+ <vcpupin vcpu="2" cpuset="7"/>
+ <vcpupin vcpu="3" cpuset="7"/>
+ </cputune>
+ To permanently pin the guest cpus to the specific host cpus.
+
+ * the two vhostuser ports are added to the ovs bridge as ports in vhostuser
+ client mode
+ * ovs bridge br0 is therefore configured with 4 ports:
eth1 == port 11, named "nic1"
eth2 == port 12, named "nic2"
vhost1 == port 21, named "guest_nic1"
vhost2 == port 22, named "guest_nic2"
- and following flows:
+ * Guest is started
+ * The following flows are added to the ovs bridge
in_port=11,action=21
in_port=21,action=11
in_port=12,action=22
in_port=22,action=12
Guest description:
- Configured with 2 vhostuser nics in server mode. These are created to
- mirror the mac addresses of the eth1 and eth2 nics of Host2. This is to
- ensure that the generated traffic goes through the specified path on the
- lab switch.
- Runs a single testpmd process with the following configuration:
+ Provisioning requirements before recipe execution:
+ * Test was designed for RHEL7 x86_64 Server version >= 7.4 GA release
+ installation is handled by beaker and the default kickstart is used,
+ except for setting a root password that will be provided to LNST via
+ <guest_password>
+ * guest args passed to beaker are:
+ --ram=16384 --vcpus=4 --cpuset=5,6,7,8 --file-size=8 --hvm --kvm
+ * after installation, the following options are added to the kernel command
+ line:
+ default_hugepagesz=2M hugepagesz=2M hugepages=2048 intel_iommu=on iommu=pt
+ * packages installed on top of the default installation:
+ wget gcc make vim tcpdump pciutils glibc-headers tar bzip2 numactl-devel gzip libhugetlbfs-utils tmux python-paramiko python-netifaces driverctl
+ * dpdk version 17.08 is installed
+ * after this point the guest is managed by lnst, this includes changes to
+ the libvirt guest XML description
+
+ * irqbalance service is stopped and all irqs are boud to CPU0
+ * The 2 vhostuser nics are identified by their mac addresses - copied from
+ the Host #2 nics that are currently dpdk ports in an ovs bridge. This is
+ to ensure that the generated traffic goes through the specified path on
+ the lab switch.
+ * The number of hugepages is set to <nr_hugepages> using the sysfs interface
+ * eth1 and eth2 (vhostuser nics) are bound to the vfio-pci driver using
+ driverctl for dpdk use (testpmd)
+ * this is slightly different in the guest compared to the Hosts:
+ modprobe -r vfio_iommu_type1
+ modprobe -r vfio
+ modprobe vfio enable_unsafe_noiommu_mode=1
+ modprobe vfio-pci
+ driverctl set-override vfio-pci <nic_pci>
+ * Runs a single testpmd process with the following configuration:
-c <guest_dpdk_cores>
+ -w {g_nic1_pci} -w {g_nic2_pci}
-n 4 --socket-mem 1024,0 --
-i --eth-peer=0,{hw1} --eth-peer=1,{hw2}
--forward-mode=mac
- where hw1 == host1.eth1.hw_address and hw2 == host1.eth2.hw_address
+ where hw1 == host1.eth1.hw_address and hw2 == host1.eth2.hw_address
+ and g_nic1_pci, g_nic2_pci are the pci addresses of the two vhostuser
+ nics
+ * "start tx_first" is sent to testpmd to send some initial packets into the
+ whole pvp setup
+
Test name:
ovs-dpdk-pvp.py
--
2.15.0
6 years
[PATCH] recipes: fix missing msg size parameter in perfrepo result
for SCTP
by Jan Tluka
The netperf packet size was not set in the PerfRepo result for
SCTP_STREAM test.
Signed-off-by: Jan Tluka <jtluka(a)redhat.com>
---
recipes/regression_tests/phase1/3_vlans.py | 6 ++++++
recipes/regression_tests/phase1/3_vlans_over_bond.py | 6 ++++++
recipes/regression_tests/phase1/bonding_test.py | 6 ++++++
recipes/regression_tests/phase1/simple_netperf.py | 6 ++++++
recipes/regression_tests/phase2/3_vlans_over_team.py | 6 ++++++
recipes/regression_tests/phase2/team_test.py | 12 ++++++++++++
6 files changed, 42 insertions(+)
diff --git a/recipes/regression_tests/phase1/3_vlans.py b/recipes/regression_tests/phase1/3_vlans.py
index 2b72f4e..d7783df 100644
--- a/recipes/regression_tests/phase1/3_vlans.py
+++ b/recipes/regression_tests/phase1/3_vlans.py
@@ -364,6 +364,9 @@ for setting in offload_settings:
for offload in setting:
result_sctp.set_parameter(offload[0], offload[1])
+ if nperf_msg_size is not None:
+ result_sctp.set_parameter("nperf_msg_size", nperf_msg_size)
+
result_sctp.set_parameter('netperf_server_on_vlan', vlans[0])
result_sctp.set_parameter('netperf_client_on_vlan', vlans[0])
result_sctp.add_tag(product_name)
@@ -458,6 +461,9 @@ for setting in offload_settings:
for offload in setting:
result_sctp.set_parameter(offload[0], offload[1])
+ if nperf_msg_size is not None:
+ result_sctp.set_parameter("nperf_msg_size", nperf_msg_size)
+
result_sctp.set_parameter('netperf_server_on_vlan', vlans[0])
result_sctp.set_parameter('netperf_client_on_vlan', vlans[0])
result_sctp.add_tag(product_name)
diff --git a/recipes/regression_tests/phase1/3_vlans_over_bond.py b/recipes/regression_tests/phase1/3_vlans_over_bond.py
index 8cf0e7e..3d453df 100644
--- a/recipes/regression_tests/phase1/3_vlans_over_bond.py
+++ b/recipes/regression_tests/phase1/3_vlans_over_bond.py
@@ -364,6 +364,9 @@ for setting in offload_settings:
for offload in setting:
result_sctp.set_parameter(offload[0], offload[1])
+ if nperf_msg_size is not None:
+ result_sctp.set_parameter("nperf_msg_size", nperf_msg_size)
+
result_sctp.set_parameter('netperf_server_on_vlan', vlans[0])
result_sctp.set_parameter('netperf_client_on_vlan', vlans[0])
result_sctp.add_tag(product_name)
@@ -458,6 +461,9 @@ for setting in offload_settings:
for offload in setting:
result_sctp.set_parameter(offload[0], offload[1])
+ if nperf_msg_size is not None:
+ result_sctp.set_parameter("nperf_msg_size", nperf_msg_size)
+
result_sctp.set_parameter('netperf_server_on_vlan', vlans[0])
result_sctp.set_parameter('netperf_client_on_vlan', vlans[0])
result_sctp.add_tag(product_name)
diff --git a/recipes/regression_tests/phase1/bonding_test.py b/recipes/regression_tests/phase1/bonding_test.py
index 324a455..cb3b85f 100644
--- a/recipes/regression_tests/phase1/bonding_test.py
+++ b/recipes/regression_tests/phase1/bonding_test.py
@@ -348,6 +348,9 @@ for setting in offload_settings:
for offload in setting:
result_sctp.set_parameter(offload[0], offload[1])
+ if nperf_msg_size is not None:
+ result_sctp.set_parameter("nperf_msg_size", nperf_msg_size)
+
result_sctp.add_tag(product_name)
if nperf_mode == "multi":
result_sctp.add_tag("multithreaded")
@@ -438,6 +441,9 @@ for setting in offload_settings:
for offload in setting:
result_sctp.set_parameter(offload[0], offload[1])
+ if nperf_msg_size is not None:
+ result_sctp.set_parameter("nperf_msg_size", nperf_msg_size)
+
result_sctp.add_tag(product_name)
if nperf_mode == "multi":
result_sctp.add_tag("multithreaded")
diff --git a/recipes/regression_tests/phase1/simple_netperf.py b/recipes/regression_tests/phase1/simple_netperf.py
index e159953..1aad6f9 100644
--- a/recipes/regression_tests/phase1/simple_netperf.py
+++ b/recipes/regression_tests/phase1/simple_netperf.py
@@ -310,6 +310,9 @@ for setting in offload_settings:
for offload in setting:
result_sctp.set_parameter(offload[0], offload[1])
+ if nperf_msg_size is not None:
+ result_sctp.set_parameter("nperf_msg_size", nperf_msg_size)
+
result_sctp.add_tag(product_name)
if nperf_mode == "multi":
result_sctp.add_tag("multithreaded")
@@ -396,6 +399,9 @@ for setting in offload_settings:
for offload in setting:
result_sctp.set_parameter(offload[0], offload[1])
+ if nperf_msg_size is not None:
+ result_sctp.set_parameter("nperf_msg_size", nperf_msg_size)
+
result_sctp.add_tag(product_name)
if nperf_mode == "multi":
result_sctp.add_tag("multithreaded")
diff --git a/recipes/regression_tests/phase2/3_vlans_over_team.py b/recipes/regression_tests/phase2/3_vlans_over_team.py
index c4ea8e8..bad0563 100644
--- a/recipes/regression_tests/phase2/3_vlans_over_team.py
+++ b/recipes/regression_tests/phase2/3_vlans_over_team.py
@@ -364,6 +364,9 @@ for setting in offload_settings:
for offload in setting:
result_sctp.set_parameter(offload[0], offload[1])
+ if nperf_msg_size is not None:
+ result_sctp.set_parameter("nperf_msg_size", nperf_msg_size)
+
result_sctp.set_parameter('netperf_server_on_vlan', vlans[0])
result_sctp.set_parameter('netperf_client_on_vlan', vlans[0])
result_sctp.add_tag(product_name)
@@ -458,6 +461,9 @@ for setting in offload_settings:
for offload in setting:
result_sctp.set_parameter(offload[0], offload[1])
+ if nperf_msg_size is not None:
+ result_sctp.set_parameter("nperf_msg_size", nperf_msg_size)
+
result_sctp.set_parameter('netperf_server_on_vlan', vlans[0])
result_sctp.set_parameter('netperf_client_on_vlan', vlans[0])
result_sctp.add_tag(product_name)
diff --git a/recipes/regression_tests/phase2/team_test.py b/recipes/regression_tests/phase2/team_test.py
index 2fa03dd..51da678 100644
--- a/recipes/regression_tests/phase2/team_test.py
+++ b/recipes/regression_tests/phase2/team_test.py
@@ -353,6 +353,9 @@ for setting in offload_settings:
for offload in setting:
result_sctp.set_parameter(offload[0], offload[1])
+ if nperf_msg_size is not None:
+ result_sctp.set_parameter("nperf_msg_size", nperf_msg_size)
+
result_sctp.set_parameter('netperf_server', "testmachine1")
result_sctp.set_parameter('netperf_client', "testmachine2")
result_sctp.add_tag(product_name)
@@ -449,6 +452,9 @@ for setting in offload_settings:
for offload in setting:
result_sctp.set_parameter(offload[0], offload[1])
+ if nperf_msg_size is not None:
+ result_sctp.set_parameter("nperf_msg_size", nperf_msg_size)
+
result_sctp.set_parameter('netperf_server', "testmachine1")
result_sctp.set_parameter('netperf_client', "testmachine2")
result_sctp.add_tag(product_name)
@@ -596,6 +602,9 @@ for setting in offload_settings:
for offload in setting:
result_sctp.set_parameter(offload[0], offload[1])
+ if nperf_msg_size is not None:
+ result_sctp.set_parameter("nperf_msg_size", nperf_msg_size)
+
result_sctp.set_parameter('netperf_server', "testmachine2")
result_sctp.set_parameter('netperf_client', "testmachine1")
result_sctp.add_tag(product_name)
@@ -692,6 +701,9 @@ for setting in offload_settings:
for offload in setting:
result_sctp.set_parameter(offload[0], offload[1])
+ if nperf_msg_size is not None:
+ result_sctp.set_parameter("nperf_msg_size", nperf_msg_size)
+
result_sctp.set_parameter('netperf_server', "testmachine2")
result_sctp.set_parameter('netperf_client', "testmachine1")
result_sctp.add_tag(product_name)
--
2.9.5
6 years
[PATCH v2] pyrecipes: add TestRecipe base class and simple_netperf
by Kamil Jerabek
This commit adds the TestRecipe class that contains all setup and methods
that are common to all our recipes.
It also contains the simple_netperf test ported from recipes/regression_tests/phase1.
Signed-off-by: Kamil Jerabek <kjerabek(a)redhat.com>
---
* changes v2
* initial setup through physical interfaces
---
lnst/RecipeCommon/TestRecipe.py | 158 +++++++++++++++++++++++
recipes/examples/python_recipe_simple_netperf.py | 104 +++++++++++++++
2 files changed, 262 insertions(+)
create mode 100755 lnst/RecipeCommon/TestRecipe.py
create mode 100755 recipes/examples/python_recipe_simple_netperf.py
diff --git a/lnst/RecipeCommon/TestRecipe.py b/lnst/RecipeCommon/TestRecipe.py
new file mode 100755
index 0000000..05fc683
--- /dev/null
+++ b/lnst/RecipeCommon/TestRecipe.py
@@ -0,0 +1,158 @@
+#!/bin/python2
+
+import time
+import re
+
+from lnst.Common.Parameters import StrParam, IntParam, Param
+from lnst.Controller import BaseRecipe
+from lnst.Controller.Recipe import RecipeError
+
+from lnst.Tests.Netperf import Netperf
+
+from lnst.RecipeCommon.PerfRepo.PerfRepo import PerfRepoAPI
+from lnst.RecipeCommon.PerfRepo.PerfRepo import generate_perfrepo_comment
+from lnst.RecipeCommon.PerfRepo.PerfRepoUtils import netperf_baseline_template
+from lnst.RecipeCommon.PerfRepo.PerfRepoUtils import netperf_result_template
+
+from lnst.RecipeCommon.IRQ import pin_dev_irqs
+
+class TestRecipe(BaseRecipe):
+ ipv = StrParam(default="both")
+ mtu = IntParam(default=1500)
+
+ mapping_file = StrParam()
+ pr_user_comment = StrParam()
+
+ nperf_cpupin = IntParam()
+ nperf_reserve = IntParam(default=20)
+ nperf_mode = StrParam(default="default")
+
+ netperf_duration = IntParam(default=1)
+ netperf_confidence = StrParam(default="99,5")
+ netperf_runs = IntParam(default=5)
+ netperf_cpu_util = IntParam()
+ netperf_num_parallel = IntParam(default=2)
+ netperf_debug = IntParam(default=0)
+ netperf_max_deviation = Param(default={
+ 'type': 'percent',
+ 'value': 20})
+
+ test_if1 = Param()
+ test_if2 = Param()
+
+ def __init__(self, **kwargs):
+ super(TestRecipe, self).__init__(**kwargs)
+
+ if "mapping_file" in self.params:
+ self.perf_api = PerfRepoAPI(self.params.mapping_file)
+ self.perf_api.connect_PerfRepo()
+
+
+ def initial_setup(self):
+ machines = []
+
+ if "pr_user_comment" in self.params:
+ machines = [machines.append(m) for m in self.matched]
+
+ self.pr_comment = generate_perfrepo_comment(machines,
+ self.params.pr_user_comment)
+
+ if "nperf_cpupin" in self.params:
+ for m in self.matched:
+ m.run("service irqbalance stop")
+
+ for m in self.matched:
+ for d in m.devices:
+ if re.match(r'^eth[0-9]+$', d.name):
+ pin_dev_irqs(m, d, 0)
+
+
+ self.nperf_opts = ""
+
+ if "test_if2" in self.params:
+ self.nperf_opts = "-L %s" % (self.params.test_if2.ips[0])
+
+ if "nperf_cpupin" in self.params and self.params.netperf_mode != "multi":
+ self.nperf_opts += " -T%s,%s" % (self.params.netperf_cpupin,
+ self.params.netperf_cpupin)
+
+ self.nperf_opts6 = ""
+
+ if "test_if2" in self.params:
+ self.nperf_opts6 = "-L %s" % (self.params.test_if2.ips[1])
+
+ self.nperf_opts6 += " -6"
+
+ if "nperf_cpupin" in self.params and self.params.netperf_mode != "multi":
+ self.nperf_popts6 += " -T%s,%s" % (self.params.netperf_cpupin,
+ self.params.netperf_cpupin)
+
+ time.sleep(15)
+
+ def clean_setup(self):
+ if "nperf_cpupin" in self.params:
+ for m in self.matched:
+ m.run("service irqbalance start")
+
+ def generate_netperf_cli(self, dst_addr, testname):
+ kwargs = {}
+
+ for key, val in self.params:
+ param_name = re.split(r'netperf_', key)
+ if len(param_name) > 1:
+ kwargs[param_name[1]] = val
+
+ kwargs['server'] = dst_addr
+ kwargs['testname'] = testname
+
+ if str(dst_addr).find(":") is -1:
+ kwargs['opts'] = self.nperf_opts
+ else:
+ kwargs['opts'] = self.nperf_opts6
+
+ return Netperf(**kwargs)
+
+
+ def netperf_run(self, netserver, netperf, perfrepo_result=None):
+ srv_proc = self.matched.m1.run(netserver, bg=True)
+
+ if perfrepo_result:
+ if not hasattr(self, 'perf_api'):
+ raise RecipeError("no class variable called perf_api")
+
+
+ baseline = self.perf_api.get_baseline_of_result(perfrepo_result)
+ #TODO:
+ #netperf_baseline_template(netperf, baseline)
+
+ time.sleep(2)
+
+ res_data = self.matched.m2.run(netperf,
+ timeout = (
+ self.params.netperf_duration +
+ self.params.nperf_reserve) *
+ self.params.netperf_runs)
+
+ if perfrepo_result:
+ netperf_result_template(perfrepo_result, res_data)
+
+ if hasattr(self, 'pr_comment'):
+ perfrepo_result.set_comment(self.params.pr_comment)
+
+ self.perf_api.save_result(perfrepo_result)
+
+ srv_proc.kill(2)
+
+ return res_data, srv_proc
+
+ def network_setup(self):
+ pass
+
+ def core_test(self):
+ pass
+
+ def test(self):
+ self.network_setup()
+ self.initial_setup()
+ self.core_test()
+ self.clean_setup()
diff --git a/recipes/examples/python_recipe_simple_netperf.py b/recipes/examples/python_recipe_simple_netperf.py
new file mode 100755
index 0000000..aab1c5b
--- /dev/null
+++ b/recipes/examples/python_recipe_simple_netperf.py
@@ -0,0 +1,104 @@
+#!/bin/python2
+
+import time
+
+from lnst.Common.Parameters import StrParam
+from lnst.Common.IpAddress import ipaddress
+from lnst.Controller import Controller
+from lnst.Controller import HostReq, DeviceReq
+
+from lnst.Tests.Netperf import Netserver
+from lnst.RecipeCommon.TestRecipe import TestRecipe
+
+class SimpleNetperfRecipe(TestRecipe):
+ offloads = ["gro", "gso", "tso", "tx"]
+ offload_settings = [[("gro", "on"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "on")],
+ [("gro", "off"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "on")],
+ [("gro", "on"), ("gso", "off"), ("tso", "off"), ("tx", "on"), ("rx", "on")],
+ [("gro", "on"), ("gso", "on"), ("tso", "off"), ("tx", "off"), ("rx", "on")],
+ [("gro", "on"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "off")]]
+
+ mapping_file = StrParam(default="simple_netperf.mapping")
+ product_name = StrParam(default="RHEL7")
+
+ m1 = HostReq()
+ m1.eth0 = DeviceReq(label="net1")
+
+ m2 = HostReq()
+ m2.eth0 = DeviceReq(label="net1")
+
+ def network_setup(self):
+ self.matched.m1.eth0.ip_add(ipaddress("192.168.101.1/24"))
+ self.matched.m1.eth0.ip_add(ipaddress("fc00::1/64"))
+ self.matched.m1.eth0.up()
+
+ self.matched.m2.eth0.ip_add(ipaddress("192.168.101.2/24"))
+ self.matched.m2.eth0.ip_add(ipaddress("fc00::2/64"))
+ self.matched.m2.eth0.up()
+
+ def clean_setup(self):
+ super(SimpleNetperfRecipe, self).clean_setup()
+ #reset offload states
+ dev_features = ""
+ for offload in self.offloads:
+ dev_features += " %s %s" % (offload, "on")
+
+
+ self.matched.m1.run("ethtool -K %s %s" % (self.matched.m1.eth0.name,
+ dev_features))
+ self.matched.m2.run("ethtool -K %s %s" % (self.matched.m2.eth0.name,
+ dev_features))
+
+ def core_test(self):
+ ipv = ('ipv4', 'ipv6')
+ transport_type = [('tcp', 'TCP_STREAM'), ('udp', 'UDP_STREAM')]
+
+ for setting in self.offload_settings:
+ dev_features = ""
+
+ for offload in setting:
+ dev_features += " %s %s" % (offload[0], offload[1])
+
+ self.matched.m1.run("ethtool -K %s %s" % (self.matched.m1.eth0.name,
+ dev_features))
+ self.matched.m2.run("ethtool -K %s %s" % (self.matched.m2.eth0.name,
+ dev_features))
+
+ if ("rx", "off") in setting:
+ # when rx offload is turned off some of the cards might get reset
+ # and link goes down, so wait a few seconds until NIC is ready
+ time.sleep(15)
+
+
+ for ipver in ipv:
+ if self.params.ipv in [ipver, 'both']:
+ for ttype, ttype_name in transport_type:
+ result = self.perf_api.new_result(ttype+"_"+ipver+"_id",
+ ttype+"_"+ipver+"_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+
+ for offload in setting:
+ result.set_parameter(offload[0], offload[1])
+
+ result.add_tag(self.params.product_name)
+ if self.params.nperf_mode == "multi":
+ result.add_tag("multithreaded")
+ result.set_parameter("num_parallel",
+ self.params.nperf_num_parallel)
+
+ ip_num = 0 if ipver is 'ipv4' else 1
+ netperf = self.generate_netperf_cli(self.matched.m1.eth0.ips[ip_num],
+ ttype_name)
+
+ self.netperf_run(Netserver(bind=self.matched.m1.eth0.ips[ip_num]),
+ netperf,
+ result)
+ time.sleep(5)
+
+
+ctl = Controller(debug=1)
+
+r = SimpleNetperfRecipe()
+ctl.run(r, allow_virt=True)
--
2.5.5
6 years