This patch introduces a new test for short-lived connections. The topology is the same as in the regression-tests/phase1/simple_netperf test: two bare-metal hosts, each with one NIC. The measurement tool is netperf, and the test types are TCP_RR and TCP_CRR. Both test types are run with these request/response sizes: 1K, 5K, 7K, 10K, 12K.
The test has a working PerfRepo integration and uses all of the aliases used in regression-tests/.
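For reference, each test case roughly corresponds to a standalone netperf invocation like the following (the server address shown is just the recipe's default; -r sets the request/response sizes):

    netperf -H 192.168.101.10 -t TCP_RR -l 60 -- -r 10K,10K
    netperf -H 192.168.101.10 -t TCP_CRR -l 60 -- -r 10K,10K

TCP_RR measures request/response transactions over a single persistent connection, while TCP_CRR opens and closes a new connection for every transaction, which is what makes it representative of short-lived connections.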
--
Changes in v2:
* Moved the test to regression-tests/phase3/.
* Added README file
* Fixed undefined variables typo
* Prettier module update
* Removed redundant 15s wait
Signed-off-by: Jiri Prochazka <jprochaz@redhat.com>
---
 .../phase3/short_lived_connections.README |  69 +++++++++
 .../phase3/short_lived_connections.py     | 165 +++++++++++++++++++++
 .../phase3/short_lived_connections.xml    |  43 ++++++
 3 files changed, 277 insertions(+)
 create mode 100644 recipes/regression_tests/phase3/short_lived_connections.README
 create mode 100644 recipes/regression_tests/phase3/short_lived_connections.py
 create mode 100644 recipes/regression_tests/phase3/short_lived_connections.xml
diff --git a/recipes/regression_tests/phase3/short_lived_connections.README b/recipes/regression_tests/phase3/short_lived_connections.README
new file mode 100644
index 0000000..594a348
--- /dev/null
+++ b/recipes/regression_tests/phase3/short_lived_connections.README
@@ -0,0 +1,69 @@
+Topology:
+
+                                  +--------+
+                                  |        |
+           +----------------------+ switch +----------------------+
+           |                      |        |                      |
+           |                      +--------+                      |
+           |                                                      |
+           |                                                      |
+           |                                                      |
+           |                                                      |
+        +--+-+                                                +-+--+
++-------|eth1|-------+                              +-------|eth1|-------+
+|       +----+       |                              |       +----+       |
+|                    |                              |                    |
+|                    |                              |                    |
+|                    |                              |                    |
+|                    |                              |                    |
+|       host1        |                              |       host2        |
+|                    |                              |                    |
+|                    |                              |                    |
+|                    |                              |                    |
+|                    |                              |                    |
+|                    |                              |                    |
++--------------------+                              +--------------------+
+
+
+Number of hosts: 2
+Host #1 description:
+    One ethernet device
+Host #2 description:
+    One ethernet device
+Test name:
+    short_lived_connections.py
+
+Test description:
+    Netperf:
+        + duration: 60s
+        + TCP_RR and TCP_CRR
+        + between physical interfaces
+    Request/Response sizes:
+        + 1K, 5K, 7K, 10K, 12K
+
+PerfRepo integration:
+    First, preparation in PerfRepo is required - you need to create Test objects
+    through the web interface that properly describe the individual Netperf
+    tests that this recipe runs. Don't forget to also add appropriate metrics.
+    For these Netperf tests it's always:
+    * rr_rate
+    * rr_rate_min
+    * rr_rate_max
+    * rr_rate_deviation
+
+    After that, to enable support for PerfRepo you need to create the file
+    short_lived_connections.mapping and define the following id mappings:
+    tcp_rr_id -> to store TCP_RR Netperf test results, maps to TestUid of a PerfRepo Test object
+    tcp_crr_id -> to store TCP_CRR Netperf test results, maps to TestUid of a PerfRepo Test object
+
+    To enable result comparison against baselines you need to create a Report in
+    PerfRepo that will store the baseline. Set up the Report to only contain results
+    with the same hash tag and then add a new mapping to the mapping file, with
+    this format:
+    <some_hash> = <report_id>
+
+    The hash value is automatically generated during test execution and added
+    to each result stored in PerfRepo. To get the Report id you need to open
+    that report in your browser and find it in the URL.
+
+    When running this recipe you should also define the 'product_name' alias
+    (e.g. RHEL7) in order to tag the result object in PerfRepo.
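Building on the README above, a minimal short_lived_connections.mapping could then look like this, assuming the same key = value form for the test id mappings as for the report mapping (the TestUids, hash, and report id below are purely illustrative placeholders):

    tcp_rr_id = short_lived_conn_tcp_rr
    tcp_crr_id = short_lived_conn_tcp_crr
    a1b2c3d4e5f6 = 42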
diff --git a/recipes/regression_tests/phase3/short_lived_connections.py b/recipes/regression_tests/phase3/short_lived_connections.py
new file mode 100644
index 0000000..781fa5e
--- /dev/null
+++ b/recipes/regression_tests/phase3/short_lived_connections.py
@@ -0,0 +1,165 @@
+from lnst.Controller.Task import ctl
+from lnst.Controller.PerfRepoUtils import netperf_baseline_template
+from lnst.Controller.PerfRepoUtils import netperf_result_template
+
+from lnst.RecipeCommon.IRQ import pin_dev_irqs
+from lnst.RecipeCommon.PerfRepo import generate_perfrepo_comment
+
+# ------
+# SETUP
+# ------
+
+mapping_file = ctl.get_alias("mapping_file")
+perf_api = ctl.connect_PerfRepo(mapping_file)
+
+product_name = ctl.get_alias("product_name")
+
+m1 = ctl.get_host("machine1")
+m2 = ctl.get_host("machine2")
+
+m1.sync_resources(modules=["Netperf"])
+m2.sync_resources(modules=["Netperf"])
+
+# ------
+# TESTS
+# ------
+
+mtu = ctl.get_alias("mtu")
+netperf_duration = int(ctl.get_alias("netperf_duration"))
+nperf_reserve = int(ctl.get_alias("nperf_reserve"))
+nperf_confidence = ctl.get_alias("nperf_confidence")
+nperf_max_runs = int(ctl.get_alias("nperf_max_runs"))
+nperf_cpupin = ctl.get_alias("nperf_cpupin")
+nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
+nperf_mode = ctl.get_alias("nperf_mode")
+nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
+nperf_debug = ctl.get_alias("nperf_debug")
+nperf_max_dev = ctl.get_alias("nperf_max_dev")
+pr_user_comment = ctl.get_alias("perfrepo_comment")
+
+m1_testiface = m1.get_interface("testiface")
+m2_testiface = m2.get_interface("testiface")
+
+m1_testiface.set_mtu(mtu)
+m2_testiface.set_mtu(mtu)
+
+pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
+
+if nperf_cpupin:
+    m1.run("service irqbalance stop")
+    m2.run("service irqbalance stop")
+
+    for m, d in [ (m1, m1_testiface), (m2, m2_testiface) ]:
+        pin_dev_irqs(m, d, 0)
+
+p_opts = "-L %s" % (m2_testiface.get_ip(0))
+
+if nperf_cpupin and nperf_mode != "multi":
+    p_opts += " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
+
+netperf_cli_tcp_rr = ctl.get_module("Netperf",
+                                    options={
+                                        "role" : "client",
+                                        "netperf_server" : m1_testiface.get_ip(0),
+                                        "duration" : netperf_duration,
+                                        "testname" : "TCP_RR",
+                                        "confidence" : nperf_confidence,
+                                        "cpu_util" : nperf_cpu_util,
+                                        "runs" : nperf_max_runs,
+                                        "netperf_opts" : p_opts,
+                                        "max_deviation" : nperf_max_dev,
+                                        "debug" : nperf_debug,
+                                    })
+
+netperf_cli_tcp_crr = ctl.get_module("Netperf",
+                                     options={
+                                         "role" : "client",
+                                         "netperf_server" : m1_testiface.get_ip(0),
+                                         "duration" : netperf_duration,
+                                         "testname" : "TCP_CRR",
+                                         "confidence" : nperf_confidence,
+                                         "cpu_util" : nperf_cpu_util,
+                                         "runs" : nperf_max_runs,
+                                         "netperf_opts" : p_opts,
+                                         "max_deviation" : nperf_max_dev,
+                                         "debug" : nperf_debug,
+                                     })
+
+netperf_srv = ctl.get_module("Netperf",
+                             options={
+                                 "role" : "server",
+                                 "bind" : m1_testiface.get_ip(0)
+                             })
+
+for size in ["1K,1K", "5K,5K", "7K,7K", "10K,10K", "12K,12K"]:
+
+    netperf_cli_tcp_rr.update_options({"testoptions": "-r %s" % size})
+    netperf_cli_tcp_crr.update_options({"testoptions": "-r %s" % size})
+
+    if nperf_mode == "multi":
+        netperf_cli_tcp_rr.unset_option("confidence")
+        netperf_cli_tcp_crr.unset_option("confidence")
+
+        netperf_cli_tcp_rr.update_options({"num_parallel": nperf_num_parallel})
+        netperf_cli_tcp_crr.update_options({"num_parallel": nperf_num_parallel})
+
+        # we have to use multiqueue qdisc to get appropriate data
+        m1.run("tc qdisc replace dev %s root mq" % m1_testiface.get_devname())
+        m2.run("tc qdisc replace dev %s root mq" % m2_testiface.get_devname())
+
+    # Netperf test
+    srv_proc = m1.run(netperf_srv, bg=True)
+    ctl.wait(2)
+
+    # prepare PerfRepo result for tcp_rr
+    result_tcp_rr = perf_api.new_result("tcp_rr_id",
+                                        "tcp_rr_result",
+                                        hash_ignore=[
+                                            'kernel_release',
+                                            'redhat_release'])
+    result_tcp_rr.add_tag(product_name)
+    if nperf_mode == "multi":
+        result_tcp_rr.add_tag("multithreaded")
+        result_tcp_rr.set_parameter('num_parallel', nperf_num_parallel)
+
+    result_tcp_rr.set_parameter("rr_size", size)
+
+    baseline = perf_api.get_baseline_of_result(result_tcp_rr)
+    netperf_baseline_template(netperf_cli_tcp_rr, baseline, test_type="RR")
+
+    tcp_rr_res_data = m2.run(netperf_cli_tcp_rr,
+                             timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+    netperf_result_template(result_tcp_rr, tcp_rr_res_data, test_type="RR")
+    result_tcp_rr.set_comment(pr_comment)
+    perf_api.save_result(result_tcp_rr)
+
+    # prepare PerfRepo result for tcp_crr
+    result_tcp_crr = perf_api.new_result("tcp_crr_id",
+                                         "tcp_crr_result",
+                                         hash_ignore=[
+                                             'kernel_release',
+                                             'redhat_release'])
+    result_tcp_crr.add_tag(product_name)
+    if nperf_mode == "multi":
+        result_tcp_crr.add_tag("multithreaded")
+        result_tcp_crr.set_parameter('num_parallel', nperf_num_parallel)
+
+    result_tcp_crr.set_parameter("rr_size", size)
+
+    baseline = perf_api.get_baseline_of_result(result_tcp_crr)
+    netperf_baseline_template(netperf_cli_tcp_crr, baseline, test_type="RR")
+
+    tcp_crr_res_data = m2.run(netperf_cli_tcp_crr,
+                              timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+    netperf_result_template(result_tcp_crr, tcp_crr_res_data, test_type="RR")
+    result_tcp_crr.set_comment(pr_comment)
+    perf_api.save_result(result_tcp_crr)
+
+    srv_proc.intr()
+
+if nperf_cpupin:
+    m1.run("service irqbalance start")
+    m2.run("service irqbalance start")
diff --git a/recipes/regression_tests/phase3/short_lived_connections.xml b/recipes/regression_tests/phase3/short_lived_connections.xml
new file mode 100644
index 0000000..289205e
--- /dev/null
+++ b/recipes/regression_tests/phase3/short_lived_connections.xml
@@ -0,0 +1,43 @@
+<lnstrecipe>
+    <define>
+        <alias name="mtu" value="1500" />
+        <alias name="netperf_duration" value="60" />
+        <alias name="nperf_reserve" value="20" />
+        <alias name="nperf_confidence" value="99,5" />
+        <alias name="nperf_max_runs" value="5" />
+        <alias name="nperf_mode" value="default"/>
+        <alias name="nperf_num_parallel" value="2"/>
+        <alias name="nperf_debug" value="0"/>
+        <alias name="nperf_max_dev" value="20%"/>
+        <alias name="mapping_file" value="short_lived_connections.mapping" />
+        <alias name="net" value="192.168.101" />
+        <alias name="driver" value="ixgbe" />
+    </define>
+    <network>
+        <host id="machine1">
+            <interfaces>
+                <eth id="testiface" label="testnet">
+                    <params>
+                        <param name="driver" value="{$driver}"/>
+                    </params>
+                    <addresses>
+                        <address>{$net}.10/24</address>
+                    </addresses>
+                </eth>
+            </interfaces>
+        </host>
+        <host id="machine2">
+            <interfaces>
+                <eth id="testiface" label="testnet">
+                    <params>
+                        <param name="driver" value="{$driver}"/>
+                    </params>
+                    <addresses>
+                        <address>{$net}.11/24</address>
+                    </addresses>
+                </eth>
+            </interfaces>
+        </host>
+    </network>
+    <task python="short_lived_connections.py"/>
+</lnstrecipe>
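One usage note on the recipe above: when the nperf_mode alias is set to "multi", the recipe replaces the root qdisc with mq before running the parallel netperf instances, so each hardware TX queue gets its own qdisc instead of all streams serializing on one. If you want to inspect or reproduce that state on the hosts by hand, the standard tc commands apply (the device name here is illustrative):

    tc qdisc replace dev eth1 root mq
    tc qdisc show dev eth1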