This patch adds a new test to phase1 of ENRT. The topology is
the same as in the ping_flood test, but instead of ping, only
netperf is run.
The test covers the raw performance of an Ethernet device, without
any extra layers such as VLAN, bond or team on top of it.
Signed-off-by: Jiri Prochazka <jprochaz(a)redhat.com>
---
.../regression_tests/phase1/simple_netperf.README | 33 +++
recipes/regression_tests/phase1/simple_netperf.py | 246 +++++++++++++++++++++
recipes/regression_tests/phase1/simple_netperf.xml | 37 ++++
3 files changed, 316 insertions(+)
create mode 100644 recipes/regression_tests/phase1/simple_netperf.README
create mode 100644 recipes/regression_tests/phase1/simple_netperf.py
create mode 100644 recipes/regression_tests/phase1/simple_netperf.xml
diff --git a/recipes/regression_tests/phase1/simple_netperf.README b/recipes/regression_tests/phase1/simple_netperf.README
new file mode 100644
index 0000000..6d04821
--- /dev/null
+++ b/recipes/regression_tests/phase1/simple_netperf.README
@@ -0,0 +1,33 @@
+Topology:
+ +--------+
+ | |
+ +----------------------+ switch +----------------------+
+ | | | |
+ | +--------+ |
+ | |
+ | |
+ | |
+ | |
+ +--+-+ +-+--+
++-------|eth1|-------+ +-------|eth1|-------+
+| +----+ | | +----+ |
+| | | |
+| | | |
+| | | |
+| | | |
+| host1 | | host2 |
+| | | |
+| | | |
+| | | |
+| | | |
+| | | |
++--------------------+ +--------------------+
+
+
+Number of hosts: 2
+Host #1 description:
+ One ethernet device
+Host #2 description:
+ One ethernet device
+Test name:
+ simple_netperf.py
diff --git a/recipes/regression_tests/phase1/simple_netperf.py b/recipes/regression_tests/phase1/simple_netperf.py
new file mode 100644
index 0000000..a8ca07b
--- /dev/null
+++ b/recipes/regression_tests/phase1/simple_netperf.py
@@ -0,0 +1,246 @@
+from lnst.Controller.Task import ctl
+from lnst.Controller.PerfRepoUtils import netperf_baseline_template
+from lnst.Controller.PerfRepoUtils import netperf_result_template
+
+from lnst.RecipeCommon.IRQ import pin_dev_irqs
+
+# ------
+# SETUP
+# ------
+
+mapping_file = ctl.get_alias("mapping_file")
+perf_api = ctl.connect_PerfRepo(mapping_file)
+
+product_name = ctl.get_alias("product_name")
+
+m1 = ctl.get_host("machine1")
+m2 = ctl.get_host("machine2")
+
+m1.sync_resources(modules=["Netperf"])
+m2.sync_resources(modules=["Netperf"])
+
+# ------
+# TESTS
+# ------
+
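+# Each row of the matrix below is one combination of offload features to
+# pass to ethtool -K; every combination is measured with netperf separately.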
+offloads = ["gro", "gso", "tso", "tx"]
+offload_settings = [ [("gro", "on"), ("gso", "on"), ("tso", "on"), ("tx", "on")],
+ [("gro", "off"), ("gso", "on"), ("tso", "on"), ("tx", "on")],
+ [("gro", "on"), ("gso", "off"), ("tso", "off"), ("tx", "on")],
+ [("gro", "on"), ("gso", "on"), ("tso", "off"), ("tx", "off")]]
+
+ipv = ctl.get_alias("ipv")
+mtu = ctl.get_alias("mtu")
+netperf_duration = int(ctl.get_alias("netperf_duration"))
+nperf_reserve = int(ctl.get_alias("nperf_reserve"))
+nperf_confidence = ctl.get_alias("nperf_confidence")
+nperf_max_runs = int(ctl.get_alias("nperf_max_runs"))
+nperf_cpupin = ctl.get_alias("nperf_cpupin")
+nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
+nperf_mode = ctl.get_alias("nperf_mode")
+nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
+
+m1_testiface = m1.get_interface("testiface")
+m2_testiface = m2.get_interface("testiface")
+
+m1_testiface.set_mtu(mtu)
+m2_testiface.set_mtu(mtu)
+
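+# When CPU pinning is requested, stop irqbalance and pin the test device
+# IRQs to CPU 0 so the measurement is not skewed by IRQ migration.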
+if nperf_cpupin:
+ m1.run("service irqbalance stop")
+ m2.run("service irqbalance stop")
+
+ for m, d in [ (m1, m1_testiface), (m2, m2_testiface) ]:
+ pin_dev_irqs(m, d, 0)
+
+ctl.wait(15)
+
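+# -L binds the local end of the netperf connection to the client's test
+# address; -T pins the local and remote netperf threads to the given CPU.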
+p_opts = "-L %s" % (m2_testiface.get_ip(0))
+if nperf_cpupin and nperf_mode != "multi":
+ p_opts += " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
+
+netperf_cli_tcp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" : m1_testiface.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs" : nperf_max_runs,
+ "netperf_opts" : p_opts
+ })
+netperf_cli_tcp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" : m1_testiface.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs" : nperf_max_runs,
+ "netperf_opts" :
+ "-L %s -6" % (m2_testiface.get_ip(1))
+ })
+netperf_cli_udp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" : m1_testiface.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs" : nperf_max_runs,
+ "netperf_opts" : p_opts
+ })
+netperf_cli_udp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" : m1_testiface.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs" : nperf_max_runs,
+ "netperf_opts" :
+ "-L %s -6" % (m2_testiface.get_ip(1))
+ })
+
+netperf_srv = ctl.get_module("Netperf",
+ options={
+ "role" : "server",
+ "bind" : m1_testiface.get_ip(0)
+ })
+
+netperf_srv6 = ctl.get_module("Netperf",
+ options={
+ "role" : "server",
+ "bind" : m1_testiface.get_ip(1)
+ })
+
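+# In multi mode several netperf instances run in parallel; the confidence
+# interval option is dropped since it is not meaningful when the results
+# of parallel instances are aggregated.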
+if nperf_mode == "multi":
+ netperf_cli_tcp.unset_option("confidence")
+ netperf_cli_udp.unset_option("confidence")
+ netperf_cli_tcp6.unset_option("confidence")
+ netperf_cli_udp6.unset_option("confidence")
+
+ netperf_cli_tcp.update_options({"num_parallel" : nperf_num_parallel})
+ netperf_cli_udp.update_options({"num_parallel" : nperf_num_parallel})
+ netperf_cli_tcp6.update_options({"num_parallel" : nperf_num_parallel})
+ netperf_cli_udp6.update_options({"num_parallel" : nperf_num_parallel})
+
+ctl.wait(15)
+
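+# Main test loop: apply each offload combination on both hosts, then run
+# TCP and UDP netperf streams for the requested IP versions.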
+for setting in offload_settings:
+ dev_features = ""
+ for offload in setting:
+ dev_features += " %s %s" % (offload[0], offload[1])
+ m1.run("ethtool -K %s %s" % (m1_testiface.get_devname(), dev_features))
+ m2.run("ethtool -K %s %s" % (m2_testiface.get_devname(), dev_features))
+
+ # Netperf test
+ if ipv in [ 'ipv4', 'both' ]:
+ srv_proc = m1.run(netperf_srv, bg=True)
+
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter("num_parallel", nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp, baseline)
+ tcp_res_data = m2.run(netperf_cli_tcp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ perf_api.save_result(result_tcp)
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter("num_parallel", nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ perf_api.save_result(result_udp)
+ srv_proc.intr()
+
+ if ipv in [ 'ipv6', 'both' ]:
+ srv_proc = m1.run(netperf_srv6, bg=True)
+
+ # prepare PerfRepo result for tcp ipv6
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter("num_parallel", nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp6, baseline)
+ tcp_res_data = m2.run(netperf_cli_tcp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ perf_api.save_result(result_tcp)
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter("num_parallel", nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+        udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ perf_api.save_result(result_udp)
+ srv_proc.intr()
+
+# reset offload states
+dev_features = ""
+for offload in offloads:
+ dev_features += " %s %s" % (offload, "on")
+
+m1.run("ethtool -K %s %s" % (m1_testiface.get_devname(), dev_features))
+m2.run("ethtool -K %s %s" % (m2_testiface.get_devname(), dev_features))
+
+if nperf_cpupin:
+ m1.run("service irqbalance start")
+ m2.run("service irqbalance start")
diff --git a/recipes/regression_tests/phase1/simple_netperf.xml b/recipes/regression_tests/phase1/simple_netperf.xml
new file mode 100644
index 0000000..2123b3e
--- /dev/null
+++ b/recipes/regression_tests/phase1/simple_netperf.xml
@@ -0,0 +1,37 @@
+<lnstrecipe>
+ <define>
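+        <!-- Default recipe parameters; each alias can be overridden
+             when the recipe is run. -->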
+ <alias name="ipv" value="both" />
+ <alias name="mtu" value="1500" />
+ <alias name="netperf_duration" value="60" />
+ <alias name="nperf_reserve" value="20" />
+ <alias name="nperf_confidence" value="99,5" />
+ <alias name="nperf_max_runs" value="5" />
+ <alias name="nperf_mode" value="default" />
+ <alias name="nperf_num_parallel" value="2" />
+ <alias name="mapping_file" value="simple_netperf.mapping" />
+ <alias name="net" value="192.168.101" />
+ </define>
+ <network>
+ <host id="machine1">
+ <interfaces>
+ <eth id="testiface" label="testnet">
+ <addresses>
+ <address>{$net}.10/24</address>
+ <address>2002::1/64</address>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ <host id="machine2">
+ <interfaces>
+ <eth id="testiface" label="testnet">
+ <addresses>
+ <address>{$net}.11/24</address>
+ <address>2002::2/64</address>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ </network>
+ <task python="simple_netperf.py"/>
+</lnstrecipe>
--
2.4.3