First, I want to check: does this work in both modes, and for both IPv4 and IPv6?
You mentioned that you had trouble getting tunnel mode to work.
More comments inline.
-Jan
Fri, Dec 16, 2016 at 04:44:36PM CET, kjerabek(a)redhat.com wrote:
diff --git a/recipes/regression_tests/phase3/ipsec_esp_ah_comp.py
b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.py
new file mode 100644
index 0000000..b99dc41
--- /dev/null
+++ b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.py
@@ -0,0 +1,586 @@
+from lnst.Controller.Task import ctl
+from lnst.Controller.PerfRepoUtils import perfrepo_baseline_to_dict
+from lnst.Controller.PerfRepoUtils import netperf_result_template
+
+from lnst.RecipeCommon.ModuleWrap import ping, ping6, netperf
+from lnst.RecipeCommon.IRQ import pin_dev_irqs
+from lnst.RecipeCommon.PerfRepo import generate_perfrepo_comment
+import re
+
+# ---------------------------
+# ALGORITHM AND CIPHER CONFIG
+# ---------------------------
+
+ciphers = {}
+
+def generate_key(length):
+ key = "0x"
+ key = key + length * "0b"
+ return key
+
+ciphers['aes'] = generate_key(16)
+ciphers['des'] = generate_key(8)
+ciphers['des3_ede'] = generate_key(24)
+ciphers['cast5'] = generate_key(16)
+ciphers['blowfish'] = generate_key(56)
+ciphers['serpent'] = generate_key(32)
+ciphers['twofish'] = generate_key(16)
+
+hashes = {}
+
+hashes['hmac(md5)'] = generate_key(16)
+hashes['sha1'] = generate_key(16)
+hashes['sha256'] = generate_key(16)
+
+# these do not work on RHEL 6.6
+#hashes['sha384'] = generate_key(16)
+#hashes['sha512'] = generate_key(16)
+
+thresholds = {
+ 'aes': [ 100, 200 ],
+ 'des': [ 50, 80 ],
+ 'des3_ede': [ 80, 120 ],
+ 'cast5': [ 100, 150 ],
+ 'blowfish': [ 120, 200 ],
+ 'serpent': [ 120, 200 ],
+ 'twofish': [ 100, 250 ]
+}
The hardcoded thresholds should go away. We want to use PerfRepo for baselines.
+
+# ------
+# SETUP
+# ------
+
+mapping_file = ctl.get_alias("mapping_file")
+perf_api = ctl.connect_PerfRepo(mapping_file)
+
+product_name = ctl.get_alias("product_name")
+
+m1 = ctl.get_host("machine1")
+m2 = ctl.get_host("machine2")
+
+m1.sync_resources(modules=["IcmpPing", "Icmp6Ping",
"Netperf", "Custom"])
+m2.sync_resources(modules=["PacketAssert", "IcmpPing",
"Icmp6Ping", "Netperf"])
+
+# ------
+# TESTS
+# ------
+
+ipv = ctl.get_alias("ipv")
+mtu = ctl.get_alias("mtu")
+netperf_duration = int(ctl.get_alias("netperf_duration"))
+nperf_reserve = int(ctl.get_alias("nperf_reserve"))
+nperf_confidence = ctl.get_alias("nperf_confidence")
+nperf_max_runs = int(ctl.get_alias("nperf_max_runs"))
+nperf_cpupin = ctl.get_alias("nperf_cpupin")
+nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
+nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
+nperf_debug = ctl.get_alias("nperf_debug")
+nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_msg_size = ctl.get_alias("nperf_msg_size")
+pr_user_comment = ctl.get_alias("perfrepo_comment")
+ipsec_mode = ctl.get_alias("ipsec_mode")
+
+pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
+
+m1_if = m1.get_interface("eth")
+m2_if = m2.get_interface("eth")
+
+m1_if_name = m1_if.get_devname()
+m2_if_name = m2_if.get_devname()
+
+m1_if_addr = m1_if.get_ip()
+m2_if_addr = m2_if.get_ip()
+
+m1_if_addr6 = m1_if.get_ip(1)
+m2_if_addr6 = m2_if.get_ip(1)
+
+
+# add routing rulez ipv4
+# so the rtr knows where to send traffic destined to remote site
+m1.run("ip route add %s dev %s" % (m2_if_addr, m1_if_name))
+
+# so the rtr knows where to send traffic destined to remote site
+m2.run("ip route add %s dev %s" % (m1_if_addr, m2_if_name))
+
+# add routing rulez ipv6
+# so the rtr knows where to send traffic destined to remote site
+m1.run("ip route add %s dev %s" % (m2_if_addr6, m1_if_name))
+
+# so the rtr knows where to send traffic destined to remote site
+m2.run("ip route add %s dev %s" % (m1_if_addr6, m2_if_name))
+
+if nperf_msg_size is None:
+ nperf_msg_size = 1400
+
+if ipsec_mode is None:
+ ipsec_mode = "transport"
+
+res = m1.run("rpm -qa iproute", save_output=True)
+if
(res.get_result()["res_data"]["stdout"].find("iproute-2") !=
-1):
+ m1_key="0x"
+else:
+ m1_key=""
+
+res = m2.run("rpm -qa iproute", save_output=True)
+if
(res.get_result()["res_data"]["stdout"].find("iproute-2") !=
-1):
+ m2_key="0x"
+else:
+ m2_key=""
+
+if nperf_cpupin:
+ m1.run("service irqbalance stop")
+ m2.run("service irqbalance stop")
+
+ dev_list = [(m1, m1_phy), (m2, m2_phy)]
+
+ # this will pin devices irqs to cpu #0
+ for m, d in dev_list:
+ pin_dev_irqs(m, d, 0)
+
+nperf_opts = ""
+if nperf_cpupin and nperf_num_parallel == 1:
+ nperf_opts = " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
+
+ctl.wait(15)
+
+def configure_ipsec(ciph_alg, ciph_key, hash_alg, hash_key, ip_version):
+ if ip_version == "ipv4":
+ m1_addr = m1_if_addr
+ m2_addr = m2_if_addr
+ else:
+ m1_addr = m1_if_addr6
+ m2_addr = m2_if_addr6
+
+ # configure policy and state
+ m1.run("ip xfrm policy flush")
+ m1.run("ip xfrm state flush")
+ m2.run("ip xfrm policy flush")
+ m2.run("ip xfrm state flush")
+
+ m1.run("ip xfrm policy add src %s dst %s dir out "\
+ "tmpl src %s dst %s proto comp spi 4 mode %s "\
+ "tmpl src %s dst %s proto esp spi 2 mode %s "\
+ "tmpl src %s dst %s proto ah spi 3 mode %s"
+ % (m1_addr, m2_addr,
+ m1_addr, m2_addr, ipsec_mode,
+ m1_addr, m2_addr, ipsec_mode,
+ m1_addr, m2_addr, ipsec_mode))
+ m1.run("ip xfrm policy add src %s dst %s dir in "\
+ "tmpl src %s dst %s proto comp spi 1 mode %s level use "\
+ "tmpl src %s dst %s proto esp spi 2 mode %s "\
+ "tmpl src %s dst %s proto ah spi 3 mode %s"
+ % (m2_addr, m1_addr,
+ m2_addr, m1_addr, ipsec_mode,
+ m2_addr, m1_addr, ipsec_mode,
+ m2_addr, m1_addr, ipsec_mode))
+
^^^^
Trailing whitespace here — please remove it.
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto comp spi 4 mode %s "\
+ "comp deflate"\
+ % (m1_addr, m2_addr, ipsec_mode))
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto comp spi 1 mode %s "\
+ "comp deflate"\
+ % (m2_addr, m1_addr, ipsec_mode))
+
^^^^
Trailing whitespace here as well.
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto esp spi 2 mode %s "\
+ "enc '%s' %s"\
+ % (m1_addr, m2_addr, ipsec_mode,
+ ciph_alg, ciph_key))
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto esp spi 2 mode %s "\
+ "enc '%s' %s"\
+ % (m2_addr, m1_addr, ipsec_mode,
+ ciph_alg, ciph_key))
+
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto ah spi 3 mode %s "\
+ "auth '%s' %s"
+ % (m1_addr, m2_addr, ipsec_mode,
+ hash_alg, hash_key))
+ m1.run("ip xfrm state add "\
+ "src %s dst %s proto ah spi 3 mode %s "\
+ "auth '%s' %s"
+ % (m2_addr, m1_addr, ipsec_mode,
+ hash_alg, hash_key))
+
+
+ # second machine
+ m2.run("ip xfrm policy add src %s dst %s dir out "\
+ "tmpl src %s dst %s proto comp spi 1 mode %s "\
+ "tmpl src %s dst %s proto esp spi 2 mode %s "\
+ "tmpl src %s dst %s proto ah spi 3 mode %s"
+ % (m2_addr, m1_addr,
+ m2_addr, m1_addr, ipsec_mode,
+ m2_addr, m1_addr, ipsec_mode,
+ m2_addr, m1_addr, ipsec_mode))
+ m2.run("ip xfrm policy add src %s dst %s dir in "\
+ "tmpl src %s dst %s proto comp spi 4 mode %s level use "\
+ "tmpl src %s dst %s proto esp spi 2 mode %s "\
+ "tmpl src %s dst %s proto ah spi 3 mode %s"
+ % (m1_addr, m2_addr,
+ m1_addr, m2_addr, ipsec_mode,
+ m1_addr, m2_addr, ipsec_mode,
+ m1_addr, m2_addr, ipsec_mode))
+
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto comp spi 4 mode %s "\
+ "comp deflate"\
+ % (m1_addr, m2_addr, ipsec_mode))
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto comp spi 1 mode %s "\
+ "comp deflate"\
+ % (m2_addr, m1_addr, ipsec_mode))
+
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto esp spi 2 mode %s "\
+ "enc '%s' %s"\
+ % (m1_addr, m2_addr, ipsec_mode,
+ ciph_alg, ciph_key))
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto esp spi 2 mode %s "\
+ "enc '%s' %s"\
+ % (m2_addr, m1_addr, ipsec_mode,
+ ciph_alg, ciph_key))
+
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto ah spi 3 mode %s "\
+ "auth '%s' %s"\
+ % (m1_addr, m2_addr, ipsec_mode,
+ hash_alg, hash_key))
+ m2.run("ip xfrm state add "\
+ "src %s dst %s proto ah spi 3 mode %s "\
+ "auth '%s' %s"\
+ % (m2_addr, m1_addr, ipsec_mode,
+ hash_alg, hash_key))
+
+
+for ciph_alg, ciph_key in ciphers.iteritems():
+ for hash_alg, hash_key in hashes.iteritems():
+ if ipv in [ 'ipv4', 'both']:
+ configure_ipsec(ciph_alg, ciph_key, hash_alg, hash_key, "ipv4")
+ # ------
+ # TESTS
+ # ------
+ dump = m1.run("tcpdump -i %s -nn -vv" % m1_if_name, bg=True)
+
+ # ping + PacketAssert
+ assert_mod = ctl.get_module("PacketAssert",
+ options={
+ "interface": m2_if_name,
+ "filter": "ah",
+ "grep_for": [
"AH\(spi=0x00000003",
+ "ESP\(spi=0x00000002"
],
+ "min": 10
+ })
+
+ assert_proc = m2.run(assert_mod, bg=True)
+
+ ping_mod = ctl.get_module("IcmpPing",
+ options={
+ "addr": m2_if_addr,
+ "count": 10,
+ "interval": 0.1})
+
+ ctl.wait(2)
+
+ m1.run(ping_mod)
+
+ ctl.wait(2)
+
+ assert_proc.intr()
+
+ dump.intr()
+
+ m1.run("ip -s xfrm pol")
+ m1.run("ip -s xfrm state")
+
+ # ping test with bigger size to check compression is used
+ pkt_capture = m2.run("tcpdump -i %s ah" % m2_if_name,
+ save_output=True,
+ bg=True)
+ ctl.wait(3)
+ ping_mod.update_options({ "size": int(mtu) - 28 })
+ ping_mod.update_options({ "count": 1 })
+ m1.run(ping_mod)
+ ctl.wait(3)
+
+ pkt_capture.intr()
+
+ stdout = pkt_capture.get_result()["res_data"]["stdout"]
+
+ small_ping_fail=0
+ re_length = ".*length ([0-9]*)"
+ m = re.match(re_length, stdout)
+ if m:
+ pkt_len = int(m.group(1))
+ if pkt_len > (int(mtu) - 28)/2:
+ # failed
+ small_ping_fail=1
+ else:
+ small_ping_fail=1
+
+ comp_mod = ctl.get_module("Custom")
+ if small_ping_fail != 0:
+ comp_mod.update_options({ "fail": "Check of compression
for bigger packets failed" })
+ else:
+ comp_mod.update_options({ "passed": "Check of compression
for bigger packets passed" })
+
+ m1.run(comp_mod)
+
+ # fragmentation of packets bigger than mtu
+ dump = m1.run("tcpdump -i %s -nn -vv" % m1_if_name, bg=True)
+
+ ping_mod.update_options({ "size": 2*mtu })
+ ping_mod.update_options({ "count": 10 })
+
+ m1.run(ping_mod)
+ dump.intr()
+
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=[
+ r'kernel_release',
+ r'redhat_release'])
+ result_tcp.add_tag(product_name)
+
+ if nperf_num_parallel > 1:
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ result_tcp.set_parameter('cipher_alg', ciph_alg)
+ result_tcp.set_parameter('hash_alg', hash_alg)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+
+ tcp_res_data = netperf((m1, m1_if, 0, {"scope": 0}),
+ (m2, m2_if, 0, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" :
"TCP_STREAM",
+ "confidence" :
nperf_confidence,
+ "num_parallel" :
nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "max_deviation":
nperf_max_dev,
+ "msg_size" : nperf_msg_size,
+ "netperf_opts": nperf_opts,
+ "threshold": "%s
Mbits/sec"
+ % thresholds[ciph_alg][0]},
^^^^
I don't understand this. Why do you set both threshold and baseline?
>+ baseline = baseline,
>+ timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>+
>+ netperf_result_template(result_tcp, tcp_res_data)
>+ result_tcp.set_comment(pr_comment)
>+ perf_api.save_result(result_tcp)
>+
>+ # prepare PerfRepo result for udp
>+ result_udp = perf_api.new_result("udp_ipv4_id",
>+ "udp_ipv4_result",
>+ hash_ignore=[
>+ r'kernel_release',
>+ r'redhat_release'])
>+ result_udp.add_tag(product_name)
>+
>+ if nperf_num_parallel > 1:
>+ result_udp.add_tag("multithreaded")
>+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
>+
>+ result_udp.set_parameter('cipher_alg', ciph_alg)
>+ result_udp.set_parameter('hash_alg', hash_alg)
>+
>+ baseline = perf_api.get_baseline_of_result(result_udp)
>+ baseline = perfrepo_baseline_to_dict(baseline)
>+
>+ udp_res_data = netperf((m1, m1_if, 0, {"scope": 0}),
>+ (m2, m2_if, 0, {"scope": 0}),
>+ client_opts={"duration" :
netperf_duration,
>+ "testname" :
"UDP_STREAM",
>+ "confidence" :
nperf_confidence,
>+ "num_parallel" :
nperf_num_parallel,
>+ "cpu_util" :
nperf_cpu_util,
>+ "runs": nperf_max_runs,
>+ "debug": nperf_debug,
>+ "max_deviation":
nperf_max_dev,
>+ "msg_size" :
nperf_msg_size,
>+ "netperf_opts":
nperf_opts,
>+ "threshold": "%s
Mbits/sec"
>+ % thresholds[ciph_alg][1]},
>+ baseline = baseline,
>+ timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>+
>+ netperf_result_template(result_udp, udp_res_data)
>+ result_udp.set_comment(pr_comment)
>+ perf_api.save_result(result_udp)
>+
>+ if ipv in [ 'ipv6', 'both']:
>+ configure_ipsec(ciph_alg, ciph_key, hash_alg, hash_key,
"ipv6")
>+ # ------
>+ # TESTS
>+ # ------
>+ dump = m1.run("tcpdump -i %s -nn -vv" % m1_if_name, bg=True)
>+
>+ # ping + PacketAssert
>+ assert_mod = ctl.get_module("PacketAssert",
>+ options={
>+ "interface": m2_if_name,
>+ "filter": "ah",
>+ "grep_for": [
"AH\(spi=0x00000003",
>+
"ESP\(spi=0x00000002" ],
>+ "min": 10
>+ })
>+
>+ assert_proc = m2.run(assert_mod, bg=True)
>+
>+ ping_mod = ctl.get_module("Icmp6Ping",
>+ options={
>+ "addr": m2_if_addr6,
>+ "count": 10,
>+ "interval": 0.1})
>+
>+ ctl.wait(2)
>+
>+ m1.run(ping_mod)
>+
>+ ctl.wait(2)
>+
>+ assert_proc.intr()
>+
>+ dump.intr()
>+
>+ m1.run("ip -s xfrm pol")
>+ m1.run("ip -s xfrm state")
>+
>+ # ping test with bigger size to check compression is used
>+ pkt_capture = m2.run("tcpdump -i %s ah" % m2_if_name,
>+ save_output=True,
>+ bg=True)
>+ ctl.wait(3)
>+ ping_mod.update_options({ "size": int(mtu) - 28 })
>+ ping_mod.update_options({ "count": 1 })
>+ m1.run(ping_mod)
>+ ctl.wait(3)
>+
>+ pkt_capture.intr()
>+
>+ stdout =
pkt_capture.get_result()["res_data"]["stdout"]
>+
>+ small_ping_fail=0
>+ re_length = ".*length ([0-9]*)"
>+ m = re.match(re_length, stdout)
>+ if m:
>+ pkt_len = int(m.group(1))
>+ if pkt_len > (int(mtu) - 28)/2:
>+ # failed
>+ small_ping_fail=1
>+ else:
>+ small_ping_fail=1
>+
>+ comp_mod = ctl.get_module("Custom")
>+ if small_ping_fail != 0:
>+ comp_mod.update_options({ "fail": "Check of
compression for bigger packets failed" })
>+ else:
>+ comp_mod.update_options({ "passed": "Check of
compression for bigger packets passed" })
>+
>+ m1.run(comp_mod)
>+
>+ # fragmentation of packets bigger than mtu
>+ dump = m1.run("tcpdump -i %s -nn -vv" % m1_if_name, bg=True)
>+
>+ ping_mod.update_options({ "size": 2*mtu })
>+ ping_mod.update_options({ "count": 10 })
>+
>+ m1.run(ping_mod)
>+ dump.intr()
>+
>+ # prepare PerfRepo result for tcp
>+ result_tcp = perf_api.new_result("tcp_ipv6_id",
>+ "tcp_ipv6_result",
>+ hash_ignore=[
>+ r'kernel_release',
>+ r'redhat_release'])
>+ result_tcp.add_tag(product_name)
>+
>+ if nperf_num_parallel > 1:
>+ result_tcp.add_tag("multithreaded")
>+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
>+
>+ result_tcp.set_parameter('cipher_alg', ciph_alg)
>+ result_tcp.set_parameter('hash_alg', hash_alg)
>+
>+ baseline = perf_api.get_baseline_of_result(result_tcp)
>+ baseline = perfrepo_baseline_to_dict(baseline)
>+
>+
>+ tcp_res_data = netperf((m1, m1_if, 1, {"scope": 0}),
>+ (m2, m2_if, 1, {"scope": 0}),
>+ client_opts={"duration" :
netperf_duration,
>+ "testname" :
"TCP_STREAM",
>+ "confidence" :
nperf_confidence,
>+ "num_parallel" :
nperf_num_parallel,
>+ "cpu_util" :
nperf_cpu_util,
>+ "runs": nperf_max_runs,
>+ "debug": nperf_debug,
>+ "max_deviation":
nperf_max_dev,
>+ "msg_size" :
nperf_msg_size,
>+ "threshold": "%s
Mbits/sec"
>+ % thresholds[ciph_alg][0],
>+ "netperf_opts" : nperf_opts
+ "-6"},
>+ baseline = baseline,
>+ timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>+
>+ netperf_result_template(result_tcp, tcp_res_data)
>+ result_tcp.set_comment(pr_comment)
>+ perf_api.save_result(result_tcp)
>+
>+ # prepare PerfRepo result for udp
>+ result_udp = perf_api.new_result("udp_ipv6_id",
>+ "udp_ipv6_result",
>+ hash_ignore=[
>+ r'kernel_release',
>+ r'redhat_release'])
>+ result_udp.add_tag(product_name)
>+
>+ if nperf_num_parallel > 1:
>+ result_udp.add_tag("multithreaded")
>+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
>+
>+ result_udp.set_parameter('cipher_alg', ciph_alg)
>+ result_udp.set_parameter('hash_alg', hash_alg)
>+
>+ baseline = perf_api.get_baseline_of_result(result_udp)
>+ baseline = perfrepo_baseline_to_dict(baseline)
>+
>+ udp_res_data = netperf((m1, m1_if, 1, {"scope": 0}),
>+ (m2, m2_if, 1, {"scope": 0}),
>+ client_opts={"duration" :
netperf_duration,
>+ "testname" :
"UDP_STREAM",
>+ "confidence" :
nperf_confidence,
>+ "num_parallel" :
nperf_num_parallel,
>+ "cpu_util" :
nperf_cpu_util,
>+ "runs": nperf_max_runs,
>+ "debug": nperf_debug,
>+ "max_deviation":
nperf_max_dev,
>+ "msg_size" :
nperf_msg_size,
>+ "threshold": "%s
Mbits/sec"
>+ % thresholds[ciph_alg][1],
>+ "netperf_opts" : nperf_opts
+ "-6"},
>+ baseline = baseline,
>+ timeout = (netperf_duration +
nperf_reserve)*nperf_max_runs)
>+
>+ netperf_result_template(result_udp, udp_res_data)
>+ result_udp.set_comment(pr_comment)
>+ perf_api.save_result(result_udp)
>+
>+m1.run("ip xfrm policy flush")
>+m1.run("ip xfrm state flush")
>+m2.run("ip xfrm policy flush")
>+m2.run("ip xfrm state flush")
>+
>+if nperf_cpupin:
>+ m1.run("service irqbalance start")
>+ m2.run("service irqbalance start")
>diff --git a/recipes/regression_tests/phase3/ipsec_esp_ah_comp.xml
b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.xml
>new file mode 100644
>index 0000000..c7c8e68
>--- /dev/null
>+++ b/recipes/regression_tests/phase3/ipsec_esp_ah_comp.xml
>@@ -0,0 +1,50 @@
>+<lnstrecipe>
>+ <define>
>+ <alias name="ipv" value="both" />
>+ <alias name="mtu" value="1450" />
>+ <alias name="netperf_duration" value="60" />
>+ <alias name="nperf_reserve" value="20" />
>+ <alias name="nperf_confidence" value="99,5" />
>+ <alias name="nperf_max_runs" value="5"/>
>+ <alias name="nperf_num_parallel" value="1"/>
>+ <alias name="nperf_debug" value="0"/>
>+ <alias name="nperf_max_dev" value="20%"/>
>+ <alias name="mapping_file"
value="ipsec_transport_esp_ah_comp.mapping"/>
>+ <alias name="net_1" value="192.168.99"/>
>+ <alias name="net6_1" value="fc00:1::"/>
>+ <alias name="net_2" value="192.168.100"/>
>+ <alias name="net6_2" value="fc00:2::"/>
>+ <alias name="driver" value=""/>
>+ </define>
>+ <network>
>+ <host id="machine1">
>+ <interfaces>
>+ <eth id="eth" label="localnet">
>+ <params>
>+ <param name="driver" value="{$driver}"/>
>+ </params>
>+ <addresses>
>+ <address value="{$net_1}.1/24"/>
>+ <address value="{$net6_1}1/64"/>
>+ </addresses>
>+ </eth>
>+ </interfaces>
>+ </host>
>+ <host id="machine2">
>+ <interfaces>
>+ <eth id="eth" label="localnet">
>+ <params>
>+ <param name="driver" value="{$driver}"/>
>+ </params>
>+ <addresses>
>+ <address value="{$net_2}.1/24"/>
>+ <address value="{$net6_2}1/64"/>
>+ </addresses>
>+ </eth>
>+ </interfaces>
>+ </host>
>+ </network>
>+
>+ <task python="ipsec_esp_ah_comp.py"/>
>+
>+</lnstrecipe>
>--
>2.5.5
>_______________________________________________
>LNST-developers mailing list -- lnst-developers(a)lists.fedorahosted.org
>To unsubscribe send an email to lnst-developers-leave(a)lists.fedorahosted.org