This patch modifies all tests in phase1 and phase2 that involve 3 VLANs.
The netperf test is now run on only one of the VLANs instead of on all of
them, which reduces the time the tests take.
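
For review, the restructuring in all three recipes roughly follows the sketch
below. It is illustrative only: ctl, m1, m2, vlans and offload_settings are
the objects each recipe already defines, and the PerfRepo bookkeeping as well
as the UDP and IPv6 netperf variants are omitted for brevity.

    # illustrative sketch; ctl/m1/m2/vlans/offload_settings come from the recipe
    # ICMP still iterates over every VLAN pair to verify isolation
    ping_mod = ctl.get_module("IcmpPing",
                              options={"count": 100, "interval": 0.1})
    for vlan1 in vlans:
        m1_if = m1.get_interface(vlan1)
        for vlan2 in vlans:
            m2_if = m2.get_interface(vlan2)
            ping_mod.update_options({"addr": m2_if.get_ip(0),
                                     "iface": m1_if.get_devname()})
            if vlan1 == vlan2:
                m1.run(ping_mod)                  # same VLAN: must pass
            else:
                m1.run(ping_mod, expect="fail")   # different VLANs: must fail

    # netperf is configured once, bound to the first VLAN only ...
    m1_vlan1 = m1.get_interface(vlans[0])
    m2_vlan1 = m2.get_interface(vlans[0])
    netperf_srv = ctl.get_module("Netperf",
                                 options={"role": "server",
                                          "bind": m1_vlan1.get_ip(0)})
    netperf_cli_tcp = ctl.get_module("Netperf",
                                     options={"role": "client",
                                              "netperf_server": m1_vlan1.get_ip(0),
                                              "testname": "TCP_STREAM",
                                              "netperf_opts": "-L %s" % m2_vlan1.get_ip(0)})

    # ... and measured once per offload setting instead of once per VLAN pair
    for setting in offload_settings:
        srv_proc = m1.run(netperf_srv, bg=True)
        ctl.wait(2)
        m2.run(netperf_cli_tcp)
        srv_proc.intr()
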
Signed-off-by: Jan Tluka <jtluka(a)redhat.com>
---
recipes/regression_tests/phase1/3_vlans.py | 345 ++++++++++----------
.../regression_tests/phase1/3_vlans_over_bond.py | 350 +++++++++++----------
.../regression_tests/phase2/3_vlans_over_team.py | 347 ++++++++++----------
3 files changed, 525 insertions(+), 517 deletions(-)
diff --git a/recipes/regression_tests/phase1/3_vlans.py b/recipes/regression_tests/phase1/3_vlans.py
index 907ad3c..fdb58e0 100644
--- a/recipes/regression_tests/phase1/3_vlans.py
+++ b/recipes/regression_tests/phase1/3_vlans.py
@@ -54,6 +54,7 @@ for vlan in vlans:
ctl.wait(15)
+# ICMP/ICMP6 tests
ping_mod = ctl.get_module("IcmpPing",
options={
"count" : 100,
@@ -64,50 +65,99 @@ ping_mod6 = ctl.get_module("Icmp6Ping",
"count" : 100,
"interval" : 0.1
})
+
+for vlan1 in vlans:
+ m1_vlan1 = m1.get_interface(vlan1)
+ for vlan2 in vlans:
+ m2_vlan2 = m2.get_interface(vlan2)
+
+ ping_mod.update_options({"addr": m2_vlan2.get_ip(0),
+ "iface": m1_vlan1.get_devname()})
+
+ ping_mod6.update_options({"addr": m2_vlan2.get_ip(1),
+ "iface": m1_vlan1.get_ip(1)})
+
+ if vlan1 == vlan2:
+ # These tests should pass
+ # Ping between same VLANs
+ if ipv in [ 'ipv4', 'both' ]:
+ m1.run(ping_mod)
+
+ if ipv in [ 'ipv6', 'both' ]:
+ m1.run(ping_mod6)
+ else:
+ # These tests should fail
+ # Ping across different VLAN
+ if ipv in [ 'ipv4', 'both' ]:
+ m1.run(ping_mod, expect="fail")
+
+ if ipv in [ 'ipv6', 'both' ]:
+ m1.run(ping_mod6, expect="fail")
+
+# Netperf tests
+# performance is measured only on one of the vlans
+m1_vlan1 = m1.get_interface(vlans[0])
+m2_vlan1 = m2.get_interface(vlans[0])
+
netperf_srv = ctl.get_module("Netperf",
options={
- "role" : "server"
- })
+ "role" : "server",
+ "bind": m1_vlan1.get_ip(0)
+ })
netperf_srv6 = ctl.get_module("Netperf",
options={
"role" : "server",
+ "bind": m1_vlan1.get_ip(1),
"netperf_opts" : " -6"
})
+
+p_opts = "-L %s" % (m2_vlan1.get_ip(0))
+if nperf_cpupin and nperf_mode != "multi":
+ p_opts += " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
+
netperf_cli_tcp = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(0),
"duration" : netperf_duration,
"testname" : "TCP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": p_opts
})
netperf_cli_udp = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(0),
"duration" : netperf_duration,
"testname" : "UDP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": p_opts
})
netperf_cli_tcp6 = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(1),
"duration" : netperf_duration,
"testname" : "TCP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": "-L %s -6" % (m2_vlan1.get_ip(1))
})
netperf_cli_udp6 = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(1),
"duration" : netperf_duration,
"testname" : "UDP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": "-L %s -6" % (m2_vlan1.get_ip(1))
})
if nperf_mode == "multi":
@@ -121,171 +171,124 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
-for vlan1 in vlans:
- m1_vlan1 = m1.get_interface(vlan1)
- for vlan2 in vlans:
- m2_vlan2 = m2.get_interface(vlan2)
-
- ping_mod.update_options({"addr": m2_vlan2.get_ip(0),
- "iface": m1_vlan1.get_devname()})
-
- ping_mod6.update_options({"addr": m2_vlan2.get_ip(1),
- "iface": m1_vlan1.get_ip(1)})
-
- netperf_srv.update_options({"bind": m1_vlan1.get_ip(0)})
- netperf_srv6.update_options({"bind": m1_vlan1.get_ip(1)})
-
- p_opts = "-L %s" % (m2_vlan2.get_ip(0))
- if nperf_cpupin and nperf_mode != "multi":
- p_opts += " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
-
- netperf_cli_tcp.update_options({"netperf_server": m1_vlan1.get_ip(0),
- "netperf_opts": p_opts })
-
- netperf_cli_udp.update_options({"netperf_server": m1_vlan1.get_ip(0),
- "netperf_opts": p_opts })
-
- netperf_cli_tcp6.update_options({"netperf_server": m1_vlan1.get_ip(1),
- "netperf_opts": "-L %s -6" % (m2_vlan2.get_ip(1))})
-
- netperf_cli_udp6.update_options({"netperf_server": m1_vlan1.get_ip(1),
- "netperf_opts": "-L %s -6" % (m2_vlan2.get_ip(1))})
-
- if vlan1 == vlan2:
- # These tests should pass
- # Ping between same VLANs
- for setting in offload_settings:
- dev_features = ""
- for offload in setting:
- dev_features += " %s %s" % (offload[0], offload[1])
-
- m1.run("ethtool -K %s %s" % (m1_phy1.get_devname(),
- dev_features))
- m2.run("ethtool -K %s %s" % (m2_phy1.get_devname(),
- dev_features))
-
- if ipv in [ 'ipv4', 'both' ]:
- # Ping test
- m1.run(ping_mod)
-
- # Netperf test (both TCP and UDP)
- srv_proc = m1.run(netperf_srv, bg=True)
- ctl.wait(2)
-
- # prepare PerfRepo result for tcp
- result_tcp = perf_api.new_result("tcp_ipv4_id",
- "tcp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_tcp.set_parameter(offload[0], offload[1])
- result_tcp.set_parameter('netperf_server_on_vlan', vlan1)
- result_tcp.set_parameter('netperf_client_on_vlan', vlan2)
- result_tcp.add_tag(product_name)
- if nperf_mode == "multi":
- result_tcp.add_tag("multithreaded")
- result_tcp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_tcp)
- netperf_baseline_template(netperf_cli_tcp, baseline)
-
- tcp_res_data = m2.run(netperf_cli_tcp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_tcp, tcp_res_data)
- perf_api.save_result(result_tcp)
-
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
- result_udp.set_parameter('netperf_server_on_vlan', vlan1)
- result_udp.set_parameter('netperf_client_on_vlan', vlan2)
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- perf_api.save_result(result_udp)
-
- srv_proc.intr()
-
- if ipv in [ 'ipv6', 'both' ]:
- # Ping test
- m1.run(ping_mod6)
-
- # Netperf test (both TCP and UDP)
- srv_proc = m1.run(netperf_srv6, bg=True)
- ctl.wait(2)
-
- # prepare PerfRepo result for tcp ipv6
- result_tcp = perf_api.new_result("tcp_ipv6_id",
- "tcp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_tcp.set_parameter(offload[0], offload[1])
- result_tcp.set_parameter('netperf_server_on_vlan', vlan1)
- result_tcp.set_parameter('netperf_client_on_vlan', vlan2)
- result_tcp.set_tag(product_name)
- if nperf_mode == "multi":
- result_tcp.add_tag("multithreaded")
- result_tcp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_tcp)
- netperf_baseline_template(netperf_cli_tcp6, baseline)
-
- tcp_res_data = m2.run(netperf_cli_tcp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_tcp, tcp_res_data)
- perf_api.save_result(result_tcp)
-
- # prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
- result_udp.set_parameter('netperf_server_on_vlan', vlan1)
- result_udp.set_parameter('netperf_client_on_vlan', vlan2)
- result_udp.set_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- perf_api.save_result(result_udp)
-
- srv_proc.intr()
- # These tests should fail
- # Ping across different VLAN
- else:
- if ipv in [ 'ipv4', 'both' ]:
- m1.run(ping_mod, expect="fail")
-
- if ipv in [ 'ipv6', 'both' ]:
- m1.run(ping_mod6, expect="fail")
+for setting in offload_settings:
+ dev_features = ""
+ for offload in setting:
+ dev_features += " %s %s" % (offload[0], offload[1])
+
+ m1.run("ethtool -K %s %s" % (m1_phy1.get_devname(),
+ dev_features))
+ m2.run("ethtool -K %s %s" % (m2_phy1.get_devname(),
+ dev_features))
+
+ if ipv in [ 'ipv4', 'both' ]:
+ # Netperf test (both TCP and UDP)
+ srv_proc = m1.run(netperf_srv, bg=True)
+ ctl.wait(2)
+
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+ result_tcp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_tcp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp, baseline)
+
+ tcp_res_data = m2.run(netperf_cli_tcp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ perf_api.save_result(result_tcp)
+
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ perf_api.save_result(result_udp)
+
+ srv_proc.intr()
+
+ if ipv in [ 'ipv6', 'both' ]:
+ # Netperf test (both TCP and UDP)
+ srv_proc = m1.run(netperf_srv6, bg=True)
+ ctl.wait(2)
+
+ # prepare PerfRepo result for tcp ipv6
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+ result_tcp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_tcp.set_parameter('netperf_client_on_vlan', vlans[0])
+        result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp6, baseline)
+
+ tcp_res_data = m2.run(netperf_cli_tcp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+        result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ perf_api.save_result(result_udp)
+
+ srv_proc.intr()
#reset offload states
dev_features = ""
diff --git a/recipes/regression_tests/phase1/3_vlans_over_bond.py b/recipes/regression_tests/phase1/3_vlans_over_bond.py
index c14f1f0..71c1777 100644
--- a/recipes/regression_tests/phase1/3_vlans_over_bond.py
+++ b/recipes/regression_tests/phase1/3_vlans_over_bond.py
@@ -55,6 +55,7 @@ for vlan in vlans:
ctl.wait(15)
+# ICMP/ICMP6 tests
ping_mod = ctl.get_module("IcmpPing",
options={
"count" : 100,
@@ -65,50 +66,99 @@ ping_mod6 = ctl.get_module("Icmp6Ping",
"count" : 100,
"interval" : 0.1
})
+
+for vlan1 in vlans:
+ m1_vlan1 = m1.get_interface(vlan1)
+ for vlan2 in vlans:
+ m2_vlan2 = m2.get_interface(vlan2)
+
+ ping_mod.update_options({"addr": m2_vlan2.get_ip(0),
+ "iface": m1_vlan1.get_devname()})
+
+ ping_mod6.update_options({"addr": m2_vlan2.get_ip(1),
+ "iface": m1_vlan1.get_ip(1)})
+
+ if vlan1 == vlan2:
+ # These tests should pass
+ # Ping between same VLANs
+ if ipv in [ 'ipv4', 'both' ]:
+ m1.run(ping_mod)
+
+ if ipv in [ 'ipv6', 'both' ]:
+ m1.run(ping_mod6)
+ else:
+ # These tests should fail
+ # Ping across different VLAN
+ if ipv in [ 'ipv4', 'both' ]:
+ m1.run(ping_mod, expect="fail")
+
+ if ipv in [ 'ipv6', 'both' ]:
+ m1.run(ping_mod6, expect="fail")
+
+# Netperf tests
+# performance is measured only on one of the vlans
+m1_vlan1 = m1.get_interface(vlans[0])
+m2_vlan1 = m2.get_interface(vlans[0])
+
netperf_srv = ctl.get_module("Netperf",
options={
- "role" : "server"
+ "role" : "server",
+ "bind": m1_vlan1.get_ip(0)
})
netperf_srv6 = ctl.get_module("Netperf",
options={
"role" : "server",
+ "bind": m1_vlan1.get_ip(1),
"netperf_opts" : " -6"
})
+
+p_opts = "-L %s" % (m2_vlan1.get_ip(0))
+if nperf_cpupin and nperf_mode != "multi":
+ p_opts += " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
+
netperf_cli_tcp = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(0),
"duration" : netperf_duration,
"testname" : "TCP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": p_opts
})
netperf_cli_udp = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(0),
"duration" : netperf_duration,
"testname" : "UDP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": p_opts
})
netperf_cli_tcp6 = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(1),
"duration" : netperf_duration,
"testname" : "TCP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": "-L %s -6" % (m2_vlan1.get_ip(1))
})
netperf_cli_udp6 = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(1),
"duration" : netperf_duration,
"testname" : "UDP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": "-L %s -6" % (m2_vlan1.get_ip(1))
})
if nperf_mode == "multi":
@@ -122,175 +172,127 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
-for vlan1 in vlans:
- m1_vlan1 = m1.get_interface(vlan1)
- for vlan2 in vlans:
- m2_vlan2 = m2.get_interface(vlan2)
-
- ping_mod.update_options({"addr": m2_vlan2.get_ip(0),
- "iface": m1_vlan1.get_devname()})
-
- ping_mod6.update_options({"addr": m2_vlan2.get_ip(1),
- "iface": m1_vlan1.get_ip(1)})
-
- netperf_srv.update_options({"bind": m1_vlan1.get_ip(0)})
-
- netperf_srv6.update_options({"bind": m1_vlan1.get_ip(1)})
-
- p_opts = "-L %s" % (m2_vlan2.get_ip(0))
- if nperf_cpupin and nperf_mode != "multi":
- p_opts += " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
-
- netperf_cli_tcp.update_options({"netperf_server": m1_vlan1.get_ip(0),
- "netperf_opts": p_opts })
-
- netperf_cli_udp.update_options({"netperf_server": m1_vlan1.get_ip(0),
- "netperf_opts": p_opts })
-
- netperf_cli_tcp6.update_options({"netperf_server": m1_vlan1.get_ip(1),
- "netperf_opts": "-L %s -6" % (m2_vlan2.get_ip(1))})
-
- netperf_cli_udp6.update_options({"netperf_server": m1_vlan1.get_ip(1),
- "netperf_opts": "-L %s -6" % (m2_vlan2.get_ip(1))})
-
- if vlan1 == vlan2:
- # These tests should pass
- # Ping between same VLANs
- for setting in offload_settings:
- #apply offload setting
- dev_features = ""
- for offload in setting:
- dev_features += " %s %s" % (offload[0], offload[1])
- m1.run("ethtool -K %s %s" % (m1_phy1.get_devname(),
- dev_features))
- m1.run("ethtool -K %s %s" % (m1_phy2.get_devname(),
- dev_features))
- m2.run("ethtool -K %s %s" % (m2_phy1.get_devname(),
- dev_features))
-
- if ipv in [ 'ipv4', 'both' ]:
- # Ping test
- m1.run(ping_mod)
-
- # Netperf test (both TCP and UDP)
- srv_proc = m1.run(netperf_srv, bg=True)
- ctl.wait(2)
-
- # prepare PerfRepo result for tcp
- result_tcp = perf_api.new_result("tcp_ipv4_id",
- "tcp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_tcp.set_parameter(offload[0], offload[1])
- result_tcp.set_parameter('netperf_server_on_vlan', vlan1)
- result_tcp.set_parameter('netperf_client_on_vlan', vlan2)
- result_tcp.add_tag(product_name)
- if nperf_mode == "multi":
- result_tcp.add_tag("multithreaded")
- result_tcp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_tcp)
- netperf_baseline_template(netperf_cli_tcp, baseline)
-
- tcp_res_data = m2.run(netperf_cli_tcp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_tcp, tcp_res_data)
- perf_api.save_result(result_tcp)
-
- # prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
- result_udp.set_parameter('netperf_server_on_vlan', vlan1)
- result_udp.set_parameter('netperf_client_on_vlan', vlan2)
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- perf_api.save_result(result_udp)
-
- srv_proc.intr()
-
- if ipv in [ 'ipv6', 'both' ]:
- # Ping test
- m1.run(ping_mod6)
-
- # Netperf test (both TCP and UDP)
- srv_proc = m1.run(netperf_srv6, bg=True)
- ctl.wait(2)
-
- # prepare PerfRepo result for tcp ipv6
- result_tcp = perf_api.new_result("tcp_ipv6_id",
- "tcp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_tcp.set_parameter(offload[0], offload[1])
- result_tcp.set_parameter('netperf_server_on_vlan', vlan1)
- result_tcp.set_parameter('netperf_client_on_vlan', vlan2)
- result_tcp.add_tag(product_name)
- if nperf_mode == "multi":
- result_tcp.add_tag("multithreaded")
- result_tcp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_tcp)
- netperf_baseline_template(netperf_cli_tcp6, baseline)
-
- tcp_res_data = m2.run(netperf_cli_tcp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_tcp, tcp_res_data)
- perf_api.save_result(result_tcp)
-
- # prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
- result_udp.set_parameter('netperf_server_on_vlan', vlan1)
- result_udp.set_parameter('netperf_client_on_vlan', vlan2)
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- perf_api.save_result(result_udp)
-
- srv_proc.intr()
- # These tests should fail
- # Ping across different VLAN
- else:
- if ipv in [ 'ipv4', 'both' ]:
- m1.run(ping_mod, expect="fail")
-
- if ipv in [ 'ipv6', 'both' ]:
- m1.run(ping_mod6, expect="fail")
+for setting in offload_settings:
+ #apply offload setting
+ dev_features = ""
+ for offload in setting:
+ dev_features += " %s %s" % (offload[0], offload[1])
+ m1.run("ethtool -K %s %s" % (m1_phy1.get_devname(),
+ dev_features))
+ m1.run("ethtool -K %s %s" % (m1_phy2.get_devname(),
+ dev_features))
+ m2.run("ethtool -K %s %s" % (m2_phy1.get_devname(),
+ dev_features))
+
+ if ipv in [ 'ipv4', 'both' ]:
+ # Netperf test (both TCP and UDP)
+ srv_proc = m1.run(netperf_srv, bg=True)
+ ctl.wait(2)
+
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+        result_tcp.set_parameter('netperf_server_on_vlan', vlans[0])
+        result_tcp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp, baseline)
+
+ tcp_res_data = m2.run(netperf_cli_tcp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+        result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+        result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ perf_api.save_result(result_udp)
+
+ srv_proc.intr()
+
+ if ipv in [ 'ipv6', 'both' ]:
+ # Netperf test (both TCP and UDP)
+ srv_proc = m1.run(netperf_srv6, bg=True)
+ ctl.wait(2)
+
+ # prepare PerfRepo result for tcp ipv6
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+        result_tcp.set_parameter('netperf_server_on_vlan', vlans[0])
+        result_tcp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp6, baseline)
+
+ tcp_res_data = m2.run(netperf_cli_tcp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+        result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+        result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ perf_api.save_result(result_udp)
+
+ srv_proc.intr()
#reset offload states
dev_features = ""
diff --git a/recipes/regression_tests/phase2/3_vlans_over_team.py b/recipes/regression_tests/phase2/3_vlans_over_team.py
index dc163c0..4c0bc87 100644
--- a/recipes/regression_tests/phase2/3_vlans_over_team.py
+++ b/recipes/regression_tests/phase2/3_vlans_over_team.py
@@ -56,6 +56,7 @@ for vlan in vlans:
ctl.wait(15)
+# ICMP/ICMP6 tests
ping_mod = ctl.get_module("IcmpPing",
options={
"count" : 100,
@@ -66,50 +67,99 @@ ping_mod6 = ctl.get_module("Icmp6Ping",
"count" : 100,
"interval" : 0.1
})
+
+for vlan1 in vlans:
+ m1_vlan1 = m1.get_interface(vlan1)
+ for vlan2 in vlans:
+ m2_vlan2 = m2.get_interface(vlan2)
+
+ ping_mod.update_options({"addr": m2_vlan2.get_ip(0),
+ "iface": m1_vlan1.get_devname()})
+
+ ping_mod6.update_options({"addr": m2_vlan2.get_ip(1),
+ "iface": m1_vlan1.get_ip(1)})
+
+ if vlan1 == vlan2:
+ # These tests should pass
+ # Ping between same VLANs
+ if ipv in [ 'ipv4', 'both' ]:
+ m1.run(ping_mod)
+
+ if ipv in [ 'ipv6', 'both' ]:
+ m1.run(ping_mod6)
+ else:
+ # These tests should fail
+ # Ping across different VLAN
+ if ipv in [ 'ipv4', 'both' ]:
+ m1.run(ping_mod, expect="fail")
+
+ if ipv in [ 'ipv6', 'both' ]:
+ m1.run(ping_mod6, expect="fail")
+
+# Netperf tests
+# performance is measured only on one of the vlans
+m1_vlan1 = m1.get_interface(vlans[0])
+m2_vlan1 = m2.get_interface(vlans[0])
+
netperf_srv = ctl.get_module("Netperf",
options={
- "role" : "server"
+ "role" : "server",
+ "bind": m1_vlan1.get_ip(0)
})
netperf_srv6 = ctl.get_module("Netperf",
options={
"role" : "server",
+ "bind": m1_vlan1.get_ip(1),
"netperf_opts" : " -6"
})
+
+p_opts = "-L %s" % (m2_vlan1.get_ip(0))
+if nperf_cpupin and nperf_mode != "multi":
+ p_opts += " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
+
netperf_cli_tcp = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(0),
"duration" : netperf_duration,
"testname" : "TCP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": p_opts
})
netperf_cli_udp = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(0),
"duration" : netperf_duration,
"testname" : "UDP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": p_opts
})
netperf_cli_tcp6 = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(1),
"duration" : netperf_duration,
"testname" : "TCP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": "-L %s -6" % (m2_vlan1.get_ip(1))
})
netperf_cli_udp6 = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(1),
"duration" : netperf_duration,
"testname" : "UDP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": "-L %s -6" % (m2_vlan1.get_ip(1))
})
if nperf_mode == "multi":
@@ -123,173 +173,126 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
-for vlan1 in vlans:
- m1_vlan1 = m1.get_interface(vlan1)
- for vlan2 in vlans:
- m2_vlan2 = m2.get_interface(vlan2)
-
- ping_mod.update_options({"addr": m2_vlan2.get_ip(0),
- "iface": m1_vlan1.get_devname()})
-
- ping_mod6.update_options({"addr": m2_vlan2.get_ip(1),
- "iface": m1_vlan1.get_ip(1)})
-
- netperf_srv.update_options({"bind": m1_vlan1.get_ip(0)})
-
- netperf_srv6.update_options({"bind": m1_vlan1.get_ip(1)})
-
- p_opts = "-L %s" % (m2_vlan2.get_ip(0))
- if nperf_cpupin and nperf_mode != "multi":
- p_opts += " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
-
- netperf_cli_tcp.update_options({"netperf_server": m1_vlan1.get_ip(0),
- "netperf_opts": p_opts})
-
- netperf_cli_udp.update_options({"netperf_server": m1_vlan1.get_ip(0),
- "netperf_opts": p_opts})
-
- netperf_cli_tcp6.update_options({"netperf_server": m1_vlan1.get_ip(1),
- "netperf_opts": "-L %s -6" % (m2_vlan2.get_ip(1))})
-
- netperf_cli_udp6.update_options({"netperf_server": m1_vlan1.get_ip(1),
- "netperf_opts": "-L %s -6" % (m2_vlan2.get_ip(1))})
-
- if vlan1 == vlan2:
- # These tests should pass
- # Ping between same VLANs
- for setting in offload_settings:
- dev_features = ""
- for offload in setting:
- dev_features += " %s %s" % (offload[0], offload[1])
- m1.run("ethtool -K %s %s" % (m1_phy1.get_devname(),
- dev_features))
- m1.run("ethtool -K %s %s" % (m1_phy2.get_devname(),
- dev_features))
- m2.run("ethtool -K %s %s" % (m2_phy1.get_devname(),
- dev_features))
-
- if ipv in [ 'ipv4', 'both' ]:
- # Ping test
- m1.run(ping_mod)
-
- # Netperf test (both TCP and UDP)
- srv_proc = m1.run(netperf_srv, bg=True)
- ctl.wait(2)
-
- # prepare PerfRepo result for tcp
- result_tcp = perf_api.new_result("tcp_ipv4_id",
- "tcp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_tcp.set_parameter(offload[0], offload[1])
- result_tcp.set_parameter('netperf_server_on_vlan', vlan1)
- result_tcp.set_parameter('netperf_client_on_vlan', vlan2)
- result_tcp.add_tag(product_name)
- if nperf_mode == "multi":
- result_tcp.add_tag("multithreaded")
- result_tcp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_tcp)
- netperf_baseline_template(netperf_cli_tcp, baseline)
-
- tcp_res_data = m2.run(netperf_cli_tcp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_tcp, tcp_res_data)
- perf_api.save_result(result_tcp)
-
- # prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
- result_udp.set_parameter('netperf_server_on_vlan', vlan1)
- result_udp.set_parameter('netperf_client_on_vlan', vlan2)
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- perf_api.save_result(result_udp)
-
- srv_proc.intr()
-
- if ipv in [ 'ipv6', 'both' ]:
- m1.run(ping_mod6)
-
- # Netperf test (both TCP and UDP)
- srv_proc = m1.run(netperf_srv6, bg=True)
- ctl.wait(2)
-
- # prepare PerfRepo result for tcp ipv6
- result_tcp = perf_api.new_result("tcp_ipv6_id",
- "tcp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_tcp.set_parameter(offload[0], offload[1])
- result_tcp.set_parameter('netperf_server_on_vlan', vlan1)
- result_tcp.set_parameter('netperf_client_on_vlan', vlan2)
- result_tcp.add_tag(product_name)
- if nperf_mode == "multi":
- result_tcp.add_tag("multithreaded")
- result_tcp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_tcp)
- netperf_baseline_template(netperf_cli_tcp6, baseline)
-
- tcp_res_data = m2.run(netperf_cli_tcp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_tcp, tcp_res_data)
- perf_api.save_result(result_tcp)
-
- # prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
- result_udp.set_parameter('netperf_server_on_vlan', vlan1)
- result_udp.set_parameter('netperf_client_on_vlan', vlan2)
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- perf_api.save_result(result_udp)
-
- srv_proc.intr()
- # These tests should fail
- # Ping across different VLAN
- else:
- if ipv in [ 'ipv4', 'both' ]:
- m1.run(ping_mod, expect="fail")
-
- if ipv in [ 'ipv6', 'both' ]:
- m1.run(ping_mod6, expect="fail")
+for setting in offload_settings:
+ dev_features = ""
+ for offload in setting:
+ dev_features += " %s %s" % (offload[0], offload[1])
+ m1.run("ethtool -K %s %s" % (m1_phy1.get_devname(),
+ dev_features))
+ m1.run("ethtool -K %s %s" % (m1_phy2.get_devname(),
+ dev_features))
+ m2.run("ethtool -K %s %s" % (m2_phy1.get_devname(),
+ dev_features))
+
+ if ipv in [ 'ipv4', 'both' ]:
+ # Netperf test (both TCP and UDP)
+ srv_proc = m1.run(netperf_srv, bg=True)
+ ctl.wait(2)
+
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+        result_tcp.set_parameter('netperf_server_on_vlan', vlans[0])
+        result_tcp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp, baseline)
+
+ tcp_res_data = m2.run(netperf_cli_tcp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+        result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+        result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ perf_api.save_result(result_udp)
+
+ srv_proc.intr()
+
+ if ipv in [ 'ipv6', 'both' ]:
+ # Netperf test (both TCP and UDP)
+ srv_proc = m1.run(netperf_srv6, bg=True)
+ ctl.wait(2)
+
+ # prepare PerfRepo result for tcp ipv6
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+        result_tcp.set_parameter('netperf_server_on_vlan', vlans[0])
+        result_tcp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp6, baseline)
+
+ tcp_res_data = m2.run(netperf_cli_tcp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+        result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+        result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ perf_api.save_result(result_udp)
+
+ srv_proc.intr()
#reset offload states
dev_features = ""
--
2.4.3