Thu, Jun 01, 2017 at 04:16:02PM CEST, jprochaz(a)redhat.com wrote:
This patch introduces a new test for testing short-lived connections. The
topology is the same as in the regression-tests/phase1/simple_netperf test.
It's two baremetal hosts, each with one NIC. Measurement tool is
netperf, test types are TCP_RR and TCP_CRR. For both test types, the
test is run with these Request/Response sizes: 1K, 5K, 7K, 10K, 12K.
The test does have working integration with PerfRepo and uses all
aliases used in regression-tests/.
Signed-off-by: Jiri Prochazka <jprochaz(a)redhat.com>
---
.../short_lived_connections.py | 169 +++++++++++++++++++++
.../short_lived_connections.xml | 43 ++++++
2 files changed, 212 insertions(+)
create mode 100644 recipes/short-lived-connections/short_lived_connections.py
create mode 100644 recipes/short-lived-connections/short_lived_connections.xml
diff --git a/recipes/short-lived-connections/short_lived_connections.py
b/recipes/short-lived-connections/short_lived_connections.py
new file mode 100644
index 0000000..0b07f61
--- /dev/null
+++ b/recipes/short-lived-connections/short_lived_connections.py
As we discussed in private mail, move this to one of the
regression_tests/phaseX directories.
@@ -0,0 +1,169 @@
+from lnst.Controller.Task import ctl
+from lnst.Controller.PerfRepoUtils import netperf_baseline_template
+from lnst.Controller.PerfRepoUtils import netperf_result_template
+
+from lnst.RecipeCommon.IRQ import pin_dev_irqs
+from lnst.RecipeCommon.PerfRepo import generate_perfrepo_comment
+
+# ------
+# SETUP
+# ------
+
+mapping_file = ctl.get_alias("mapping_file")
+perf_api = ctl.connect_PerfRepo(mapping_file)
+
+product_name = ctl.get_alias("product_name")
+
+m1 = ctl.get_host("machine1")
+m2 = ctl.get_host("machine2")
+
+m1.sync_resources(modules=["Netperf"])
+m2.sync_resources(modules=["Netperf"])
+
+# ------
+# TESTS
+# ------
+
+mtu = ctl.get_alias("mtu")
+netperf_duration = int(ctl.get_alias("netperf_duration"))
+nperf_reserve = int(ctl.get_alias("nperf_reserve"))
+nperf_confidence = ctl.get_alias("nperf_confidence")
+nperf_max_runs = int(ctl.get_alias("nperf_max_runs"))
+nperf_cpupin = ctl.get_alias("nperf_cpupin")
+nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
+nperf_mode = ctl.get_alias("nperf_mode")
+nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
+nperf_debug = ctl.get_alias("nperf_debug")
+nperf_max_dev = ctl.get_alias("nperf_max_dev")
+pr_user_comment = ctl.get_alias("perfrepo_comment")
+
+m1_testiface = m1.get_interface("testiface")
+m2_testiface = m2.get_interface("testiface")
+
+m1_testiface.set_mtu(mtu)
+m2_testiface.set_mtu(mtu)
+
+pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
+
+if nperf_cpupin:
+ m1.run("service irqbalance stop")
+ m2.run("service irqbalance stop")
+
+ for m, d in [ (m1, m1_testiface), (m2, m2_testiface) ]:
+ pin_dev_irqs(m, d, 0)
+
+p_opts = "-L %s" % (m2_testiface.get_ip(0))
+
+if nperf_cpupin and nperf_mode != "multi":
+ p_opts += " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
+
+for size in ["1K,1K", "5K,5K", "7K,7K", "10K,10K", "12K,12K"]:
+
+ n_p_opts = p_opts
+
+ netperf_cli_tcp_rr = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+                                    "netperf_server" : m1_testiface.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "TCP_RR",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs" : nperf_max_runs,
+ "netperf_opts" : n_p_opts,
+ "max_deviation" : nperf_max_dev,
+ "debug" : nperf_debug,
+                                    "testoptions" : "-r %s" % size
+ })
Please use the following:
mod = ctl.get_module()  # before the loop
for size in ...:
    mod.update_options({"testoptions": ...})
+
+
+ netperf_cli_tcp_crr = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+                                    "netperf_server" : m1_testiface.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "TCP_CRR",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs" : nperf_max_runs,
+ "netperf_opts" : n_p_opts,
+ "max_deviation" : nperf_max_dev,
+ "debug" : nperf_debug,
+                                    "testoptions" : "-r %s" % size
+ })
+
+
+ netperf_srv = ctl.get_module("Netperf",
+ options={
+ "role" : "server",
+ "bind" : m1_testiface.get_ip(0)
+ })
+
+ if nperf_mode == "multi":
+ netperf_cli_tcp_rr.unset_option("confidence")
+ netperf_cli_tcp_crr.unset_option("confidence")
+
+        netperf_cli_tcp_rr.update_options({"num_parallel": nperf_num_parallel})
+        netperf_cli_tcp_crr.update_options({"num_parallel": nperf_num_parallel})
+
+ # we have to use multiqueue qdisc to get appropriate data
+ m1.run("tc qdisc replace dev %s root mq" % m1_phy1.get_devname())
+ m2.run("tc qdisc replace dev %s root mq" % m2_phy1.get_devname())
m1_phy1 and m2_phy1 undefined! use pylint, Luke! ;-)
$ pylint -E recipes/short-lived-connections/short_lived_connections.py
No config file found, using default configuration
************* Module short_lived_connections
E:110,51: Undefined variable 'm1_phy1' (undefined-variable)
E:111,51: Undefined variable 'm2_phy1' (undefined-variable)
+
+ ctl.wait(15)
Why here? This will add 15 seconds to every iteration.
>+
>+ # Netperf test
>+ srv_proc = m1.run(netperf_srv, bg=True)
>+ ctl.wait(2)
>+
>+ # prepare PerfRepo result for tcp_rr
>+ result_tcp_rr = perf_api.new_result("tcp_rr_id",
>+ "tcp_rr_result",
>+ hash_ignore=[
>+ 'kernel_release',
>+ 'redhat_release'])
>+ result_tcp_rr.add_tag(product_name)
>+ if nperf_mode == "multi":
>+ result_tcp_rr.add_tag("multithreaded")
>+ result_tcp_rr.set_parameter('num_parallel', nperf_num_parallel)
>+
>+ result_tcp_rr.set_parameter("rr_size", size)
>+
>+ baseline = perf_api.get_baseline_of_result(result_tcp_rr)
>+    netperf_baseline_template(netperf_cli_tcp_rr, baseline, test_type="RR")
>+
>+ tcp_rr_res_data = m2.run(netperf_cli_tcp_rr,
>+                             timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
>+
>+    netperf_result_template(result_tcp_rr, tcp_rr_res_data, test_type="RR")
>+ result_tcp_rr.set_comment(pr_comment)
>+ perf_api.save_result(result_tcp_rr)
>+
>+ # prepare PerfRepo result for tcp_crr
>+ result_tcp_crr = perf_api.new_result("tcp_crr_id",
>+ "tcp_crr_result",
>+ hash_ignore=[
>+ 'kernel_release',
>+ 'redhat_release'])
>+ result_tcp_crr.add_tag(product_name)
>+ if nperf_mode == "multi":
>+ result_tcp_crr.add_tag("multithreaded")
>+ result_tcp_crr.set_parameter('num_parallel', nperf_num_parallel)
>+
>+ result_tcp_crr.set_parameter("rr_size", size)
>+
>+ baseline = perf_api.get_baseline_of_result(result_tcp_crr)
>+    netperf_baseline_template(netperf_cli_tcp_crr, baseline, test_type="RR")
>+
>+ tcp_crr_res_data = m2.run(netperf_cli_tcp_crr,
>+                              timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
>+
>+    netperf_result_template(result_tcp_crr, tcp_crr_res_data, test_type="RR")
>+ result_tcp_crr.set_comment(pr_comment)
>+ perf_api.save_result(result_tcp_crr)
>+
>+ srv_proc.intr()
>+
>+if nperf_cpupin:
>+ m1.run("service irqbalance start")
>+ m2.run("service irqbalance start")
>diff --git a/recipes/short-lived-connections/short_lived_connections.xml
b/recipes/short-lived-connections/short_lived_connections.xml
>new file mode 100644
>index 0000000..289205e
>--- /dev/null
>+++ b/recipes/short-lived-connections/short_lived_connections.xml
>@@ -0,0 +1,43 @@
>+<lnstrecipe>
>+ <define>
>+ <alias name="mtu" value="1500" />
>+ <alias name="netperf_duration" value="60" />
>+ <alias name="nperf_reserve" value="20" />
>+ <alias name="nperf_confidence" value="99,5" />
>+ <alias name="nperf_max_runs" value="5" />
>+ <alias name="nperf_mode" value="default"/>
>+ <alias name="nperf_num_parallel" value="2"/>
>+ <alias name="nperf_debug" value="0"/>
>+ <alias name="nperf_max_dev" value="20%"/>
>+        <alias name="mapping_file" value="short_lived_connections.mapping" />
>+ <alias name="net" value="192.168.101" />
>+ <alias name="driver" value="ixgbe" />
>+ </define>
>+ <network>
>+ <host id="machine1">
>+ <interfaces>
>+ <eth id="testiface" label="testnet">
>+ <params>
>+                                <param name="driver" value="{$driver}"/>
>+ </params>
>+ <addresses>
>+ <address>{$net}.10/24</address>
>+ </addresses>
>+ </eth>
>+ </interfaces>
>+ </host>
>+ <host id="machine2">
>+ <interfaces>
>+ <eth id="testiface" label="testnet">
>+ <params>
>+                                <param name="driver" value="{$driver}"/>
>+ </params>
>+ <addresses>
>+ <address>{$net}.11/24</address>
>+ </addresses>
>+ </eth>
>+ </interfaces>
>+ </host>
>+ </network>
>+ <task python="short_lived_connections.py"/>
>+</lnstrecipe>
>--
>2.9.4
>_______________________________________________
>LNST-developers mailing list -- lnst-developers(a)lists.fedorahosted.org
>To unsubscribe send an email to lnst-developers-leave(a)lists.fedorahosted.org