There's a typo, I'll send v2 next week and run the tests again
2017-01-05 19:24 GMT+01:00 Jiri Prochazka <jprochaz(a)redhat.com>:
> This patch was sent here for review first, I'll run the tests in
> Beaker to verify nothing is broken before I ask for push upstream.
>
> Thanks,
> Jiri
>
> 2017-01-05 19:23 GMT+01:00 Jiri Prochazka <jprochaz(a)redhat.com>:
>> Alias nperf_msg_size was missing in all of phase3 recipes, this patch adds it
and
>> also reduces repeated code a bit - client_opts is defined once and only
>> updated before every netperf execution.
>>
>> Signed-off-by: Jiri Prochazka <jprochaz(a)redhat.com>
>> ---
>> .../regression_tests/phase3/2_virt_ovs_vxlan.py | 80 +++++++++++-----------
>> .../regression_tests/phase3/novirt_ovs_vxlan.py | 80 +++++++++++-----------
>> recipes/regression_tests/phase3/simple_macsec.py | 80 +++++++++++-----------
>> recipes/regression_tests/phase3/vxlan_multicast.py | 80 +++++++++++-----------
>> recipes/regression_tests/phase3/vxlan_remote.py | 80 +++++++++++-----------
>> 5 files changed, 200 insertions(+), 200 deletions(-)
>>
>> diff --git a/recipes/regression_tests/phase3/2_virt_ovs_vxlan.py
b/recipes/regression_tests/phase3/2_virt_ovs_vxlan.py
>> index 5ebaacc..cc1a0a8 100644
>> --- a/recipes/regression_tests/phase3/2_virt_ovs_vxlan.py
>> +++ b/recipes/regression_tests/phase3/2_virt_ovs_vxlan.py
>> @@ -43,6 +43,7 @@ nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
>> nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
>> nperf_debug = ctl.get_alias("nperf_debug")
>> nperf_max_dev = ctl.get_alias("nperf_max_dev")
>> +nperf_msg_size = ctl.get_alias("nperf_msg_size")
>> pr_user_comment = ctl.get_alias("perfrepo_comment")
>>
>> pr_comment = generate_perfrepo_comment([guest1, guest2, guest3, guest4],
>> @@ -124,6 +125,19 @@ if ipv in ['ipv6', 'both']:
>> (guest4, g4_nic, 1, {"scope": 0}),
>> options=ping_opts, expect="fail")
>>
>> +client_opts = {"duration" : netperf_duration,
>> + "testname" : "TCP_STREAM",
>> + "confidence" : nperf_confidence,
>> + "num_parallel" : nperf_num_parallel,
>> + "cpu_util" : nperf_cpu_util,
>> + "runs": nperf_max_runs,
>> + "netperf_opts": nperf_opts,
>> + "debug": nperf_debug,
>> + "max_deviation": nperf_max_dev}
>> +
>> +if nperf_msg_size is not None:
>> + client_opts["msg_size"] = nperf_msg_size
>> +
>>
>> if ipv in [ 'ipv4', 'both' ]:
>> # prepare PerfRepo result for tcp
>> @@ -142,20 +156,16 @@ if ipv in [ 'ipv4', 'both' ]:
>> result_tcp.add_tag("multithreaded")
>> result_tcp.set_parameter('num_parallel', nperf_num_parallel)
>>
>> + if nperf_msg_size is not None:
>> + result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
>> +
>> baseline = perf_api.get_baseline_of_result(result_tcp)
>> baseline = perfrepo_baseline_to_dict(baseline)
>>
>> + client_opts["testname"] = "TCP_STREAM"
>> +
>> tcp_res_data = netperf((guest1, g1_nic, 0), (guest3, g3_nic, 0),
>> - client_opts={"duration" :
netperf_duration,
>> - "testname" :
"TCP_STREAM",
>> - "confidence" :
nperf_confidence,
>> - "num_parallel" :
nperf_num_parallel,
>> - "cpu_util" : nperf_cpu_util,
>> - "runs": nperf_max_runs,
>> - "netperf_opts": nperf_opts,
>> - "debug": nperf_debug,
>> - "max_deviation":
nperf_max_dev},
>> - baseline = baseline,
>> + client_opts, baseline = baseline,
>> timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>>
>> netperf_result_template(result_tcp, tcp_res_data)
>> @@ -178,20 +188,16 @@ if ipv in [ 'ipv4', 'both' ]:
>> result_udp.add_tag("multithreaded")
>> result_udp.set_parameter('num_parallel', nperf_num_parallel)
>>
>> + if nperf_msg_size is not None:
>> + result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
>> +
>> baseline = perf_api.get_baseline_of_result(result_udp)
>> baseline = perfrepo_baseline_to_dict(baseline)
>>
>> + client_opts["testname"] = "UDP_STREAM"
>> +
>> udp_res_data = netperf((guest1, g1_nic, 0), (guest3, g3_nic, 0),
>> - client_opts={"duration" :
netperf_duration,
>> - "testname" :
"UDP_STREAM",
>> - "confidence" :
nperf_confidence,
>> - "num_parallel" :
nperf_num_parallel,
>> - "cpu_util" : nperf_cpu_util,
>> - "runs": nperf_max_runs,
>> - "netperf_opts": nperf_opts,
>> - "debug": nperf_debug,
>> - "max_deviation":
nperf_max_dev},
>> - baseline = baseline,
>> + client_opts, baseline = baseline,
>> timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>>
>> netperf_result_template(result_udp, udp_res_data)
>> @@ -214,20 +220,17 @@ if ipv in [ 'ipv6', 'both' ]:
>> result_tcp.add_tag("multithreaded")
>> result_tcp.set_parameter('num_parallel', nperf_num_parallel)
>>
>> + if nperf_msg_size is not None:
>> + result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
>> +
>> baseline = perf_api.get_baseline_of_result(result_tcp)
>> baseline = perfrepo_baseline_to_dict(baseline)
>>
>> + client_opts["testname"] = "TCP_STREAM"
>> +    client_opts["netperf_opts"] = nperf_opts + " -6"
>> +
>> tcp_res_data = netperf((guest1, g1_nic, 1), (guest3, g3_nic, 1),
>> - client_opts={"duration" :
netperf_duration,
>> - "testname" :
"TCP_STREAM",
>> - "confidence" :
nperf_confidence,
>> - "num_parallel" :
nperf_num_parallel,
>> - "cpu_util" : nperf_cpu_util,
>> - "runs": nperf_max_runs,
>> - "netperf_opts" : nperf_opts +
"-6",
>> - "debug": nperf_debug,
>> - "max_deviation":
nperf_max_dev},
>> - baseline = baseline,
>> + client_opts, baseline = baseline,
>> timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>>
>> netperf_result_template(result_tcp, tcp_res_data)
>> @@ -250,20 +253,17 @@ if ipv in [ 'ipv6', 'both' ]:
>> result_udp.add_tag("multithreaded")
>> result_udp.set_parameter('num_parallel', nperf_num_parallel)
>>
>> + if nperf_msg_size is not None:
>> + result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
>> +
>> baseline = perf_api.get_baseline_of_result(result_udp)
>> baseline = perfrepo_baseline_to_dict(baseline)
>>
>> + client_opts["testname"] = "UDP_STREAM"
>> +    client_opts["netperf_opts"] = nperf_opts + " -6"
>> +
>> udp_res_data = netperf((guest1, g1_nic, 1), (guest3, g3_nic, 1),
>> - client_opts={"duration" :
netperf_duration,
>> - "testname" :
"UDP_STREAM",
>> - "confidence" :
nperf_confidence,
>> - "num_parallel" :
nperf_num_parallel,
>> - "cpu_util" : nperf_cpu_util,
>> - "runs": nperf_max_runs,
>> - "netperf_opts" : nperf_opts +
"-6",
>> - "debug": nperf_debug,
>> - "max_deviation":
nperf_max_dev},
>> - baseline = baseline,
>> + client_opts, baseline = baseline,
>> timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>>
>> netperf_result_template(result_udp, udp_res_data)
>> diff --git a/recipes/regression_tests/phase3/novirt_ovs_vxlan.py
b/recipes/regression_tests/phase3/novirt_ovs_vxlan.py
>> index 5419ad9..b283543 100644
>> --- a/recipes/regression_tests/phase3/novirt_ovs_vxlan.py
>> +++ b/recipes/regression_tests/phase3/novirt_ovs_vxlan.py
>> @@ -37,6 +37,7 @@ nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
>> nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
>> nperf_debug = ctl.get_alias("nperf_debug")
>> nperf_max_dev = ctl.get_alias("nperf_max_dev")
>> +nperf_msg_size = ctl.get_alias("nperf_msg_size")
>> pr_user_comment = ctl.get_alias("perfrepo_comment")
>>
>> pr_comment = generate_perfrepo_comment([h1, h2], pr_user_comment)
>> @@ -73,6 +74,19 @@ if ipv in [ 'ipv6', 'both' ]:
>> (h2, h2_nic, 1, {"scope": 0}),
>> options=ping_opts)
>>
>> +client_opts = {"duration" : netperf_duration,
>> + "testname" : "TCP_STREAM",
>> + "confidence" : nperf_confidence,
>> + "num_parallel" : nperf_num_parallel,
>> + "cpu_util" : nperf_cpu_util,
>> + "runs": nperf_max_runs,
>> + "netperf_opts": nperf_opts,
>> + "debug": nperf_debug,
>> + "max_deviation": nperf_max_dev}
>> +
>> +if nperf_msg_size is not None:
>> + client_opts["msg_size"] = nperf_msg_size
>> +
>> #netperfs
>> if ipv in [ 'ipv4', 'both' ]:
>> ctl.wait(2)
>> @@ -89,21 +103,17 @@ if ipv in [ 'ipv4', 'both' ]:
>> result_tcp.add_tag("multithreaded")
>> result_tcp.set_parameter('num_parallel', nperf_num_parallel)
>>
>> + if nperf_msg_size is not None:
>> + result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
>> +
>> baseline = perf_api.get_baseline_of_result(result_tcp)
>> baseline = perfrepo_baseline_to_dict(baseline)
>>
>> + client_opts["testname"] = "TCP_STREAM"
>> +
>> tcp_res_data = netperf((h1, h1_nic, 0, {"scope": 0}),
>> (h2, h2_nic, 0, {"scope": 0}),
>> - client_opts={"duration" :
netperf_duration,
>> - "testname" :
"TCP_STREAM",
>> - "confidence" :
nperf_confidence,
>> - "num_parallel" :
nperf_num_parallel,
>> - "cpu_util" : nperf_cpu_util,
>> - "runs": nperf_max_runs,
>> - "debug": nperf_debug,
>> - "max_deviation":
nperf_max_dev,
>> - "netperf_opts": nperf_opts},
>> - baseline = baseline,
>> + client_opts, baseline = baseline,
>> timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>>
>> netperf_result_template(result_tcp, tcp_res_data)
>> @@ -122,21 +132,17 @@ if ipv in [ 'ipv4', 'both' ]:
>> result_udp.add_tag("multithreaded")
>> result_udp.set_parameter('num_parallel', nperf_num_parallel)
>>
>> + if nperf_msg_size is not None:
>> +        result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
>> +
>> baseline = perf_api.get_baseline_of_result(result_udp)
>> baseline = perfrepo_baseline_to_dict(baseline)
>>
>> +    client_opts["testname"] = "UDP_STREAM"
>> +
>> udp_res_data = netperf((h1, h1_nic, 0, {"scope": 0}),
>> (h2, h2_nic, 0, {"scope": 0}),
>> - client_opts={"duration" :
netperf_duration,
>> - "testname" :
"UDP_STREAM",
>> - "confidence" :
nperf_confidence,
>> - "num_parallel" :
nperf_num_parallel,
>> - "cpu_util" : nperf_cpu_util,
>> - "runs": nperf_max_runs,
>> - "debug": nperf_debug,
>> - "max_deviation":
nperf_max_dev,
>> - "netperf_opts": nperf_opts},
>> - baseline = baseline,
>> + client_opts, baseline = baseline,
>> timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>>
>> netperf_result_template(result_udp, udp_res_data)
>> @@ -157,21 +163,18 @@ if ipv in [ 'ipv6', 'both' ]:
>> result_tcp.add_tag("multithreaded")
>> result_tcp.set_parameter('num_parallel', nperf_num_parallel)
>>
>> + if nperf_msg_size is not None:
>> + result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
>> +
>> baseline = perf_api.get_baseline_of_result(result_tcp)
>> baseline = perfrepo_baseline_to_dict(baseline)
>>
>> + client_opts["testname"] = "TCP_STREAM"
>> +    client_opts["netperf_opts"] = nperf_opts + " -6"
>> +
>> tcp_res_data = netperf((h1, h1_nic, 1, {"scope": 0}),
>> (h2, h2_nic, 1, {"scope": 0}),
>> - client_opts={"duration" :
netperf_duration,
>> - "testname" :
"TCP_STREAM",
>> - "confidence" :
nperf_confidence,
>> - "num_parallel" :
nperf_num_parallel,
>> - "cpu_util" : nperf_cpu_util,
>> - "runs": nperf_max_runs,
>> - "debug": nperf_debug,
>> - "max_deviation":
nperf_max_dev,
>> - "netperf_opts" : nperf_opts +
"-6"},
>> - baseline = baseline,
>> + client_opts, baseline = baseline,
>> timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>>
>> netperf_result_template(result_tcp, tcp_res_data)
>> @@ -190,21 +193,18 @@ if ipv in [ 'ipv6', 'both' ]:
>> result_udp.add_tag("multithreaded")
>> result_udp.set_parameter('num_parallel', nperf_num_parallel)
>>
>> + if nperf_msg_size is not None:
>> +        result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
>> +
>> baseline = perf_api.get_baseline_of_result(result_udp)
>> baseline = perfrepo_baseline_to_dict(baseline)
>>
>> + client_opts["testname"] = "UDP_STREAM"
>> +    client_opts["netperf_opts"] = nperf_opts + " -6"
>> +
>> udp_res_data = netperf((h1, h1_nic, 1, {"scope": 0}),
>> (h2, h2_nic, 1, {"scope": 0}),
>> - client_opts={"duration" :
netperf_duration,
>> - "testname" :
"UDP_STREAM",
>> - "confidence" :
nperf_confidence,
>> - "num_parallel" :
nperf_num_parallel,
>> - "cpu_util" : nperf_cpu_util,
>> - "runs": nperf_max_runs,
>> - "debug": nperf_debug,
>> - "max_deviation":
nperf_max_dev,
>> - "netperf_opts" : nperf_opts +
"-6"},
>> - baseline = baseline,
>> + client_opts, baseline = baseline,
>> timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>>
>> netperf_result_template(result_udp, udp_res_data)
>> diff --git a/recipes/regression_tests/phase3/simple_macsec.py
b/recipes/regression_tests/phase3/simple_macsec.py
>> index b16214b..440e976 100644
>> --- a/recipes/regression_tests/phase3/simple_macsec.py
>> +++ b/recipes/regression_tests/phase3/simple_macsec.py
>> @@ -36,6 +36,7 @@ nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
>> nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
>> nperf_debug = ctl.get_alias("nperf_debug")
>> nperf_max_dev = ctl.get_alias("nperf_max_dev")
>> +nperf_msg_size = ctl.get_alias("nperf_msg_size")
>> pr_user_comment = ctl.get_alias("perfrepo_comment")
>>
>> pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
>> @@ -118,6 +119,19 @@ ping((m1, m1_phy, 0, {"scope": 0}),
>>
>> ctl.wait(2)
>>
>> +client_opts = {"duration" : netperf_duration,
>> + "testname" : "TCP_STREAM",
>> + "confidence" : nperf_confidence,
>> + "num_parallel" : nperf_num_parallel,
>> + "cpu_util" : nperf_cpu_util,
>> + "runs": nperf_max_runs,
>> + "netperf_opts": nperf_opts,
>> + "debug": nperf_debug,
>> + "max_deviation": nperf_max_dev}
>> +
>> +if nperf_msg_size is not None:
>> + client_opts["msg_size"] = nperf_msg_size
>> +
>> for setting in encryption_settings:
>> #macsec setup
>> macsecSetup(setting)
>> @@ -145,21 +159,17 @@ for setting in encryption_settings:
>>
>> result_tcp.set_parameter('encryption', setting)
>>
>> + if nperf_msg_size is not None:
>> + result_tcp.set_parameter("nperf_msg_size",
nperf_msg_size)
>> +
>> baseline = perf_api.get_baseline_of_result(result_tcp)
>> baseline = perfrepo_baseline_to_dict(baseline)
>>
>> + client_opts["testname"] = "TCP_STREAM"
>> +
>> tcp_res_data = netperf((m1, m1_tif, 0, {"scope": 0}),
>> (m2, m2_tif, 0, {"scope": 0}),
>> - client_opts={"duration" :
netperf_duration,
>> - "testname" :
"TCP_STREAM",
>> - "confidence" :
nperf_confidence,
>> - "num_parallel" :
nperf_num_parallel,
>> - "cpu_util" :
nperf_cpu_util,
>> - "runs": nperf_max_runs,
>> - "debug": nperf_debug,
>> - "max_deviation":
nperf_max_dev,
>> - "netperf_opts":
nperf_opts},
>> - baseline = baseline,
>> + client_opts, baseline = baseline,
>> timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>>
>> netperf_result_template(result_tcp, tcp_res_data)
>> @@ -179,21 +189,17 @@ for setting in encryption_settings:
>>
>> result_udp.set_parameter('encryption', setting)
>>
>> + if nperf_msg_size is not None:
>> +            result_udp.set_parameter("nperf_msg_size",
nperf_msg_size)
>> +
>> baseline = perf_api.get_baseline_of_result(result_udp)
>> baseline = perfrepo_baseline_to_dict(baseline)
>>
>> + client_opts["testname"] = "UDP_STREAM"
>> +
>> udp_res_data = netperf((m1, m1_tif, 0, {"scope": 0}),
>> (m2, m2_tif, 0, {"scope": 0}),
>> - client_opts={"duration" :
netperf_duration,
>> - "testname" :
"UDP_STREAM",
>> - "confidence" :
nperf_confidence,
>> - "num_parallel" :
nperf_num_parallel,
>> - "cpu_util" :
nperf_cpu_util,
>> - "runs": nperf_max_runs,
>> - "debug": nperf_debug,
>> - "max_deviation":
nperf_max_dev,
>> - "netperf_opts":
nperf_opts},
>> - baseline = baseline,
>> + client_opts, baseline = baseline,
>> timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>>
>> netperf_result_template(result_udp, udp_res_data)
>> @@ -221,21 +227,18 @@ for setting in encryption_settings:
>>
>> result_tcp.set_parameter('encryption', setting)
>>
>> + if nperf_msg_size is not None:
>> + result_tcp.set_parameter("nperf_msg_size",
nperf_msg_size)
>> +
>> baseline = perf_api.get_baseline_of_result(result_tcp)
>> baseline = perfrepo_baseline_to_dict(baseline)
>>
>> + client_opts["testname"] = "TCP_STREAM"
>> +        client_opts["netperf_opts"] = nperf_opts + " -6"
>> +
>> tcp_res_data = netperf((m1, m1_tif, 1, {"scope": 0}),
>> (m2, m2_tif, 1, {"scope": 0}),
>> - client_opts={"duration" :
netperf_duration,
>> - "testname" :
"TCP_STREAM",
>> - "confidence" :
nperf_confidence,
>> - "num_parallel" :
nperf_num_parallel,
>> - "cpu_util" :
nperf_cpu_util,
>> - "runs": nperf_max_runs,
>> - "debug": nperf_debug,
>> - "max_deviation":
nperf_max_dev,
>> - "netperf_opts" :
nperf_opts + " -6"},
>> - baseline = baseline,
>> + client_opts, baseline = baseline,
>> timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>>
>> netperf_result_template(result_tcp, tcp_res_data)
>> @@ -255,21 +258,18 @@ for setting in encryption_settings:
>>
>> result_udp.set_parameter('encryption', setting)
>>
>> + if nperf_msg_size is not None:
>> +            result_udp.set_parameter("nperf_msg_size",
nperf_msg_size)
>> +
>> baseline = perf_api.get_baseline_of_result(result_udp)
>> baseline = perfrepo_baseline_to_dict(baseline)
>>
>> + client_opts["testname"] = "UDP_STREAM"
>> +        client_opts["netperf_opts"] = nperf_opts + " -6"
>> +
>> udp_res_data = netperf((m1, m1_tif, 1, {"scope": 0}),
>> (m2, m2_tif, 1, {"scope": 0}),
>> - client_opts={"duration" :
netperf_duration,
>> - "testname" :
"UDP_STREAM",
>> - "confidence" :
nperf_confidence,
>> - "num_parallel" :
nperf_num_parallel,
>> - "cpu_util" :
nperf_cpu_util,
>> - "runs": nperf_max_runs,
>> - "debug": nperf_debug,
>> - "max_deviation":
nperf_max_dev,
>> - "netperf_opts" :
nperf_opts + "-6"},
>> - baseline = baseline,
>> + client_opts, baseline = baseline,
>> timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>>
>> netperf_result_template(result_udp, udp_res_data)
>> diff --git a/recipes/regression_tests/phase3/vxlan_multicast.py
b/recipes/regression_tests/phase3/vxlan_multicast.py
>> index 0c29977..d333480 100644
>> --- a/recipes/regression_tests/phase3/vxlan_multicast.py
>> +++ b/recipes/regression_tests/phase3/vxlan_multicast.py
>> @@ -38,6 +38,7 @@ nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
>> nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
>> nperf_debug = ctl.get_alias("nperf_debug")
>> nperf_max_dev = ctl.get_alias("nperf_max_dev")
>> +nperf_msg_size = ctl.get_alias("nperf_msg_size")
>> pr_user_comment = ctl.get_alias("perfrepo_comment")
>>
>> pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
>> @@ -97,6 +98,19 @@ for x in ipv6_endpoints:
>> for i in ipv6_pings:
>> i.wait()
>>
>> +client_opts = {"duration" : netperf_duration,
>> + "testname" : "TCP_STREAM",
>> + "confidence" : nperf_confidence,
>> + "num_parallel" : nperf_num_parallel,
>> + "cpu_util" : nperf_cpu_util,
>> + "runs": nperf_max_runs,
>> + "netperf_opts": nperf_opts,
>> + "debug": nperf_debug,
>> + "max_deviation": nperf_max_dev}
>> +
>> +if nperf_msg_size is not None:
>> + client_opts["msg_size"] = nperf_msg_size
>> +
>> ctl.wait(2)
>> if ipv in [ 'ipv4', 'both' ]:
>> # prepare PerfRepo result for tcp
>> @@ -114,21 +128,17 @@ if ipv in [ 'ipv4', 'both' ]:
>> result_tcp.add_tag("multithreaded")
>> result_tcp.set_parameter('num_parallel', nperf_num_parallel)
>>
>> + if nperf_msg_size is not None:
>> + result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
>> +
>> baseline = perf_api.get_baseline_of_result(result_tcp)
>> baseline = perfrepo_baseline_to_dict(baseline)
>>
>> + client_opts["testname"] = "TCP_STREAM"
>> +
>> tcp_res_data = netperf((m1, test_if1, 0, {"scope": 0}),
>> (m2, test_if2, 0, {"scope": 0}),
>> - client_opts={"duration" :
netperf_duration,
>> - "testname" :
"TCP_STREAM",
>> - "confidence" :
nperf_confidence,
>> - "num_parallel" :
nperf_num_parallel,
>> - "cpu_util" : nperf_cpu_util,
>> - "runs": nperf_max_runs,
>> - "debug": nperf_debug,
>> - "max_deviation":
nperf_max_dev,
>> - "netperf_opts": nperf_opts},
>> - baseline = baseline,
>> + client_opts, baseline = baseline,
>> timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>>
>> netperf_result_template(result_tcp, tcp_res_data)
>> @@ -150,21 +160,17 @@ if ipv in [ 'ipv4', 'both' ]:
>> result_udp.add_tag("multithreaded")
>> result_udp.set_parameter('num_parallel', nperf_num_parallel)
>>
>> + if nperf_msg_size is not None:
>> +        result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
>> +
>> baseline = perf_api.get_baseline_of_result(result_udp)
>> baseline = perfrepo_baseline_to_dict(baseline)
>>
>> + client_opts["testname"] = "UDP_STREAM"
>> +
>> udp_res_data = netperf((m1, test_if1, 0, {"scope": 0}),
>> (m2, test_if2, 0, {"scope": 0}),
>> - client_opts={"duration" :
netperf_duration,
>> - "testname" :
"UDP_STREAM",
>> - "confidence" :
nperf_confidence,
>> - "num_parallel" :
nperf_num_parallel,
>> - "cpu_util" : nperf_cpu_util,
>> - "runs": nperf_max_runs,
>> - "debug": nperf_debug,
>> - "max_deviation":
nperf_max_dev,
>> - "netperf_opts": nperf_opts},
>> - baseline = baseline,
>> + client_opts, baseline = baseline,
>> timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>>
>> netperf_result_template(result_udp, udp_res_data)
>> @@ -187,21 +193,18 @@ if ipv in [ 'ipv6', 'both' ]:
>> result_tcp.add_tag("multithreaded")
>> result_tcp.set_parameter('num_parallel', nperf_num_parallel)
>>
>> + if nperf_msg_size is not None:
>> + result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
>> +
>> baseline = perf_api.get_baseline_of_result(result_tcp)
>> baseline = perfrepo_baseline_to_dict(baseline)
>>
>> + client_opts["testname"] = "TCP_STREAM"
>> +    client_opts["netperf_opts"] = nperf_opts + " -6"
>> +
>> tcp_res_data = netperf((m1, test_if1, 1, {"scope": 0}),
>> (m2, test_if2, 1, {"scope": 0}),
>> - client_opts={"duration" :
netperf_duration,
>> - "testname" :
"TCP_STREAM",
>> - "confidence" :
nperf_confidence,
>> - "num_parallel" :
nperf_num_parallel,
>> - "cpu_util" : nperf_cpu_util,
>> - "runs": nperf_max_runs,
>> - "debug": nperf_debug,
>> - "max_deviation":
nperf_max_dev,
>> - "netperf_opts" : nperf_opts +
" -6"},
>> - baseline = baseline,
>> + client_opts, baseline = baseline,
>> timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>>
>> netperf_result_template(result_tcp, tcp_res_data)
>> @@ -223,21 +226,18 @@ if ipv in [ 'ipv6', 'both' ]:
>> result_udp.add_tag("multithreaded")
>> result_udp.set_parameter('num_parallel', nperf_num_parallel)
>>
>> + if nperf_msg_size is not None:
>> +        result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
>> +
>> baseline = perf_api.get_baseline_of_result(result_udp)
>> baseline = perfrepo_baseline_to_dict(baseline)
>>
>> + client_opts["testname"] = "UDP_STREAM"
>> +    client_opts["netperf_opts"] = nperf_opts + " -6"
>> +
>> udp_res_data = netperf((m1, test_if1, 1, {"scope": 0}),
>> (m2, test_if2, 1, {"scope": 0}),
>> - client_opts={"duration" :
netperf_duration,
>> - "testname" :
"UDP_STREAM",
>> - "confidence" :
nperf_confidence,
>> - "num_parallel" :
nperf_num_parallel,
>> - "cpu_util" : nperf_cpu_util,
>> - "runs": nperf_max_runs,
>> - "debug": nperf_debug,
>> - "max_deviation":
nperf_max_dev,
>> - "netperf_opts" : nperf_opts +
"-6"},
>> - baseline = baseline,
>> + client_opts, baseline = baseline,
>> timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>>
>> netperf_result_template(result_udp, udp_res_data)
>> diff --git a/recipes/regression_tests/phase3/vxlan_remote.py
b/recipes/regression_tests/phase3/vxlan_remote.py
>> index 12050d9..cf88444 100644
>> --- a/recipes/regression_tests/phase3/vxlan_remote.py
>> +++ b/recipes/regression_tests/phase3/vxlan_remote.py
>> @@ -37,6 +37,7 @@ nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
>> nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
>> nperf_debug = ctl.get_alias("nperf_debug")
>> nperf_max_dev = ctl.get_alias("nperf_max_dev")
>> +nperf_msg_size = ctl.get_alias("nperf_msg_size")
>> pr_user_comment = ctl.get_alias("perfrepo_comment")
>>
>> pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
>> @@ -66,6 +67,19 @@ ctl.wait(15)
>>
>> ping_opts = {"count": 100, "interval": 0.1}
>>
>> +client_opts = {"duration" : netperf_duration,
>> + "testname" : "TCP_STREAM",
>> + "confidence" : nperf_confidence,
>> + "num_parallel" : nperf_num_parallel,
>> + "cpu_util" : nperf_cpu_util,
>> + "runs": nperf_max_runs,
>> + "netperf_opts": nperf_opts,
>> + "debug": nperf_debug,
>> + "max_deviation": nperf_max_dev}
>> +
>> +if nperf_msg_size is not None:
>> + client_opts["msg_size"] = nperf_msg_size
>> +
>> if ipv in [ 'ipv4', 'both' ]:
>> ping((m1, test_if1, 0, {"scope": 0}),
>> (m2, test_if2, 0, {"scope": 0}),
>> @@ -85,21 +99,17 @@ if ipv in [ 'ipv4', 'both' ]:
>> result_tcp.add_tag("multithreaded")
>> result_tcp.set_parameter('num_parallel', nperf_num_parallel)
>>
>> + if nperf_msg_size is not None:
>> + result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
>> +
>> baseline = perf_api.get_baseline_of_result(result_tcp)
>> baseline = perfrepo_baseline_to_dict(baseline)
>>
>> + client_opts["testname"] = "TCP_STREAM"
>> +
>> tcp_res_data = netperf((m1, test_if1, 0, {"scope": 0}),
>> (m2, test_if2, 0, {"scope": 0}),
>> - client_opts={"duration" :
netperf_duration,
>> - "testname" :
"TCP_STREAM",
>> - "confidence" :
nperf_confidence,
>> - "num_parallel" :
nperf_num_parallel,
>> - "cpu_util" : nperf_cpu_util,
>> - "runs": nperf_max_runs,
>> - "debug": nperf_debug,
>> - "max_deviation":
nperf_max_dev,
>> - "netperf_opts": nperf_opts},
>> - baseline = baseline,
>> + client_opts, baseline = baseline,
>> timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>>
>> netperf_result_template(result_tcp, tcp_res_data)
>> @@ -118,21 +128,17 @@ if ipv in [ 'ipv4', 'both' ]:
>> result_udp.add_tag("multithreaded")
>> result_udp.set_parameter('num_parallel', nperf_num_parallel)
>>
>> + if nperf_msg_size is not None:
>> +        result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
>> +
>> baseline = perf_api.get_baseline_of_result(result_udp)
>> baseline = perfrepo_baseline_to_dict(baseline)
>>
>> + client_opts["testname"] = "UDP_STREAM"
>> +
>> udp_res_data = netperf((m1, test_if1, 0, {"scope": 0}),
>> (m2, test_if2, 0, {"scope": 0}),
>> - client_opts={"duration" :
netperf_duration,
>> - "testname" :
"UDP_STREAM",
>> - "confidence" :
nperf_confidence,
>> - "num_parallel" :
nperf_num_parallel,
>> - "cpu_util" : nperf_cpu_util,
>> - "runs": nperf_max_runs,
>> - "debug": nperf_debug,
>> - "max_deviation":
nperf_max_dev,
>> - "netperf_opts": nperf_opts},
>> - baseline = baseline,
>> + client_opts, baseline = baseline,
>> timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>>
>> netperf_result_template(result_udp, udp_res_data)
>> @@ -156,21 +162,18 @@ if ipv in [ 'ipv6', 'both' ]:
>> result_tcp.add_tag("multithreaded")
>> result_tcp.set_parameter('num_parallel', nperf_num_parallel)
>>
>> + if nperf_msg_size is not None:
>> + result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
>> +
>> baseline = perf_api.get_baseline_of_result(result_tcp)
>> baseline = perfrepo_baseline_to_dict(baseline)
>>
>> + client_opts["testname"] = "TCP_STREAM"
>> +    client_opts["netperf_opts"] = nperf_opts + " -6"
>> +
>> tcp_res_data = netperf((m1, test_if1, 1, {"scope": 0}),
>> (m2, test_if2, 1, {"scope": 0}),
>> - client_opts={"duration" :
netperf_duration,
>> - "testname" :
"TCP_STREAM",
>> - "confidence" :
nperf_confidence,
>> - "num_parallel" :
nperf_num_parallel,
>> - "cpu_util" : nperf_cpu_util,
>> - "runs": nperf_max_runs,
>> - "debug": nperf_debug,
>> - "max_deviation":
nperf_max_dev,
>> - "netperf_opts" : nperf_opts +
" -6"},
>> - baseline = baseline,
>> + client_opts, baseline = baseline,
>> timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>>
>> netperf_result_template(result_tcp, tcp_res_data)
>> @@ -189,21 +192,18 @@ if ipv in [ 'ipv6', 'both' ]:
>> result_udp.add_tag("multithreaded")
>> result_udp.set_parameter('num_parallel', nperf_num_parallel)
>>
>> + if nperf_msg_size is not None:
>> + result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
>> +
>> baseline = perf_api.get_baseline_of_result(result_udp)
>> baseline = perfrepo_baseline_to_dict(baseline)
>>
>> + client_opts["testname"] = "UDP_STREAM"
>> +    client_opts["netperf_opts"] = nperf_opts + " -6"
>> +
>> udp_res_data = netperf((m1, test_if1, 1, {"scope": 0}),
>> (m2, test_if2, 1, {"scope": 0}),
>> - client_opts={"duration" :
netperf_duration,
>> - "testname" :
"UDP_STREAM",
>> - "confidence" :
nperf_confidence,
>> - "num_parallel" :
nperf_num_parallel,
>> - "cpu_util" : nperf_cpu_util,
>> - "runs": nperf_max_runs,
>> - "debug": nperf_debug,
>> - "max_deviation":
nperf_max_dev,
>> - "netperf_opts" : nperf_opts +
"-6"},
>> - baseline = baseline,
>> + client_opts, baseline = baseline,
>> timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>>
>> netperf_result_template(result_udp, udp_res_data)
>> --
>> 2.9.3
>>