This patch adds a test_type parameter to all PerfRepo template functions. When this parameter is not specified, the test is expected to be *_STREAM and the metric labels are throughput, throughput_deviation, etc. When the parameter is set to RR, the metric labels used are rr_rate, rr_rate_deviation, etc.
This is needed for the new short-lived connections test, where the unit we measure is not throughput but request/response rate.
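For example, a recipe measuring request/response rate is expected to call the templates with the parameter set explicitly (a minimal sketch; the module, baseline and result objects come from the usual recipe setup):

    # thresholds are taken from the rr_rate/rr_rate_deviation baseline values
    netperf_baseline_template(netperf_cli_tcp_rr, baseline, test_type="RR")
    # results are stored as rr_rate, rr_rate_min, rr_rate_max and rr_rate_deviation
    netperf_result_template(result_tcp_rr, tcp_rr_res_data, test_type="RR")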
Signed-off-by: Jiri Prochazka <jprochaz@redhat.com>
---
 lnst/Controller/PerfRepoUtils.py | 52 ++++++++++++++++++++++++++++------------
 1 file changed, 37 insertions(+), 15 deletions(-)
diff --git a/lnst/Controller/PerfRepoUtils.py b/lnst/Controller/PerfRepoUtils.py
index 4c545e4..8d5a29d 100644
--- a/lnst/Controller/PerfRepoUtils.py
+++ b/lnst/Controller/PerfRepoUtils.py
@@ -14,7 +14,7 @@ olichtne@redhat.com (Ondrej Lichtner)
 import logging
 from lnst.Common.Utils import Noop

-def netperf_baseline_template(module, baseline):
+def netperf_baseline_template(module, baseline, test_type="STREAM"):
     module.unset_option('threshold')
     module.unset_option('threshold_deviation')

@@ -22,35 +22,51 @@ def netperf_baseline_template(module, baseline):
         return module

     try:
-        throughput = baseline.get_value('throughput')
-        deviation = baseline.get_value('throughput_deviation')
+        if test_type == "RR":
+            throughput = baseline.get_value('rr_rate')
+            deviation = baseline.get_value('rr_rate_deviation')
+        else:
+            throughput = baseline.get_value('throughput')
+            deviation = baseline.get_value('throughput_deviation')
     except:
         logging.error("Invalid baseline TestExecution passed.")
         return module

     logging.debug("Setting Netperf threshold.")
     if throughput is not None and deviation is not None:
-        module.update_options({'threshold': '%s bits/sec' % throughput,
-                               'threshold_deviation': '%s bits/sec' % deviation})
+        if test_type == "RR":
+            module.update_options({'threshold': '%s Trans/sec' % throughput,
+                                   'threshold_deviation': '%s Trans/sec' % deviation})
+        else:
+            module.update_options({'threshold': '%s bits/sec' % throughput,
+                                   'threshold_deviation': '%s bits/sec' % deviation})
     return module

-def perfrepo_baseline_to_dict(baseline):
+def perfrepo_baseline_to_dict(baseline, test_type="STREAM"):
     if baseline.get_texec() is None:
         return {}

     try:
-        throughput = baseline.get_value('throughput')
-        deviation = baseline.get_value('throughput_deviation')
+        if test_type == "RR":
+            throughput = baseline.get_value('rr_rate')
+            deviation = baseline.get_value('rr_rate_deviation')
+        else:
+            throughput = baseline.get_value('throughput')
+            deviation = baseline.get_value('throughput_deviation')
     except:
         logging.error("Invalid baseline TestExecution passed.")
         return {}

     if throughput is not None and deviation is not None:
-        return {'threshold': '%s bits/sec' % throughput,
-                'threshold_deviation': '%s bits/sec' % deviation}
+        if test_type == "RR":
+            return {'threshold': '%s Trans/sec' % throughput,
+                    'threshold_deviation': '%s Trans/sec' % deviation}
+        else:
+            return {'threshold': '%s bits/sec' % throughput,
+                    'threshold_deviation': '%s bits/sec' % deviation}
     return {}

-def netperf_result_template(perfrepo_result, netperf_result):
+def netperf_result_template(perfrepo_result, netperf_result, test_type="STREAM"):
     if isinstance(perfrepo_result, Noop):
         return perfrepo_result

@@ -64,9 +80,15 @@ def netperf_result_template(perfrepo_result, netperf_result):
         return perfrepo_result

     logging.debug("Adding Netperf results to PerfRepo object.")
-    perfrepo_result.add_value('throughput', rate)
-    perfrepo_result.add_value('throughput_min', rate - deviation)
-    perfrepo_result.add_value('throughput_max', rate + deviation)
-    perfrepo_result.add_value('throughput_deviation', deviation)
+    if test_type == "RR":
+        perfrepo_result.add_value('rr_rate', rate)
+        perfrepo_result.add_value('rr_rate_min', rate - deviation)
+        perfrepo_result.add_value('rr_rate_max', rate + deviation)
+        perfrepo_result.add_value('rr_rate_deviation', deviation)
+    else:
+        perfrepo_result.add_value('throughput', rate)
+        perfrepo_result.add_value('throughput_min', rate - deviation)
+        perfrepo_result.add_value('throughput_max', rate + deviation)
+        perfrepo_result.add_value('throughput_deviation', deviation)

     return perfrepo_result
There is an extra dot in the regex used for parsing the RR threshold value. This patch removes it, so the parsing now works correctly.
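As a quick illustration of the corrected pattern (the threshold string "1200.5 Trans/sec" is only a made-up example value):

    >>> import re
    >>> re.search("(\d*(.\d*)?)\s*Trans/sec", "1200.5 Trans/sec").group(1)
    '1200.5'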
Signed-off-by: Jiri Prochazka <jprochaz@redhat.com>
---
 test_modules/Netperf.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/test_modules/Netperf.py b/test_modules/Netperf.py
index 6cbd504..43dab42 100644
--- a/test_modules/Netperf.py
+++ b/test_modules/Netperf.py
@@ -337,8 +337,8 @@ class Netperf(TestGeneric):
                 threshold_unit_type = "bps"
         elif (self._testname == "TCP_RR" or self._testname == "UDP_RR" or
               self._testname == "SCTP_RR"):
-            pattern_rr = "(\d*(.\d*)?)\s*trans./sec"
-            r1 = re.search(pattern_rr, threshold.lower())
+            pattern_rr = "(\d*(.\d*)?)\s*Trans/sec"
+            r1 = re.search(pattern_rr, threshold)
             if r1 is None:
                 res_data["msg"] = "Invalid unit type in the "\
                                   "throughput option"
In _parse_threshold() the test type (STREAM or RR) is recognized via a long if statement, when we only need to check for the STREAM or RR substring in the test type name. This also fixes TCP_CRR threshold parsing, as that test type was not present in the if condition.
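The substring check also covers the CRR and STREAM_MANY variants, for example:

    >>> "STREAM" in "SCTP_STREAM_MANY"
    True
    >>> "RR" in "TCP_CRR"
    True
    >>> "RR" in "TCP_STREAM"
    False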
Signed-off-by: Jiri Prochazka <jprochaz@redhat.com>
---
 test_modules/Netperf.py | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/test_modules/Netperf.py b/test_modules/Netperf.py
index 43dab42..28e12b9 100644
--- a/test_modules/Netperf.py
+++ b/test_modules/Netperf.py
@@ -303,10 +303,7 @@ class Netperf(TestGeneric):
         # group(1) ... threshold value
         # group(3) ... threshold units
         # group(4) ... bytes/bits
-        if (self._testname == "TCP_STREAM" or
-            self._testname == "UDP_STREAM" or
-            self._testname == "SCTP_STREAM" or
-            self._testname == "SCTP_STREAM_MANY"):
+        if "STREAM" in self._testname:
             pattern_stream = "(\d*(.\d*)?)\s*([ kmgtKMGT])(bits|bytes)/sec"
             r1 = re.search(pattern_stream, threshold)
             if r1 is None:
@@ -335,8 +332,7 @@ class Netperf(TestGeneric):
                 if threshold_unit_type == "bytes":
                     threshold_rate *= 8
                     threshold_unit_type = "bps"
-        elif (self._testname == "TCP_RR" or self._testname == "UDP_RR" or
-              self._testname == "SCTP_RR"):
+        elif "RR" in self._testname:
             pattern_rr = "(\d*(.\d*)?)\s*Trans/sec"
             r1 = re.search(pattern_rr, threshold)
             if r1 is None:
This patch introduces a new test for short-lived connections. The topology is the same as in the regression-tests/phase1/simple_netperf test: two bare-metal hosts, each with one NIC. The measurement tool is netperf and the test types are TCP_RR and TCP_CRR. Both test types are run with these request/response sizes: 1K, 5K, 7K, 10K and 12K.
The test has working PerfRepo integration and uses all aliases used in regression-tests/.
Signed-off-by: Jiri Prochazka <jprochaz@redhat.com>
---
 .../short_lived_connections.py | 169 +++++++++++++++++++++
 .../short_lived_connections.xml | 43 ++++++
 2 files changed, 212 insertions(+)
 create mode 100644 recipes/short-lived-connections/short_lived_connections.py
 create mode 100644 recipes/short-lived-connections/short_lived_connections.xml
diff --git a/recipes/short-lived-connections/short_lived_connections.py b/recipes/short-lived-connections/short_lived_connections.py
new file mode 100644
index 0000000..0b07f61
--- /dev/null
+++ b/recipes/short-lived-connections/short_lived_connections.py
@@ -0,0 +1,169 @@
+from lnst.Controller.Task import ctl
+from lnst.Controller.PerfRepoUtils import netperf_baseline_template
+from lnst.Controller.PerfRepoUtils import netperf_result_template
+
+from lnst.RecipeCommon.IRQ import pin_dev_irqs
+from lnst.RecipeCommon.PerfRepo import generate_perfrepo_comment
+
+# ------
+# SETUP
+# ------
+
+mapping_file = ctl.get_alias("mapping_file")
+perf_api = ctl.connect_PerfRepo(mapping_file)
+
+product_name = ctl.get_alias("product_name")
+
+m1 = ctl.get_host("machine1")
+m2 = ctl.get_host("machine2")
+
+m1.sync_resources(modules=["Netperf"])
+m2.sync_resources(modules=["Netperf"])
+
+# ------
+# TESTS
+# ------
+
+mtu = ctl.get_alias("mtu")
+netperf_duration = int(ctl.get_alias("netperf_duration"))
+nperf_reserve = int(ctl.get_alias("nperf_reserve"))
+nperf_confidence = ctl.get_alias("nperf_confidence")
+nperf_max_runs = int(ctl.get_alias("nperf_max_runs"))
+nperf_cpupin = ctl.get_alias("nperf_cpupin")
+nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
+nperf_mode = ctl.get_alias("nperf_mode")
+nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
+nperf_debug = ctl.get_alias("nperf_debug")
+nperf_max_dev = ctl.get_alias("nperf_max_dev")
+pr_user_comment = ctl.get_alias("perfrepo_comment")
+
+m1_testiface = m1.get_interface("testiface")
+m2_testiface = m2.get_interface("testiface")
+
+m1_testiface.set_mtu(mtu)
+m2_testiface.set_mtu(mtu)
+
+pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
+
+if nperf_cpupin:
+    m1.run("service irqbalance stop")
+    m2.run("service irqbalance stop")
+
+    for m, d in [ (m1, m1_testiface), (m2, m2_testiface) ]:
+        pin_dev_irqs(m, d, 0)
+
+p_opts = "-L %s" % (m2_testiface.get_ip(0))
+
+if nperf_cpupin and nperf_mode != "multi":
+    p_opts += " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
+
+for size in ["1K,1K", "5K,5K", "7K,7K", "10K,10K", "12K,12K"]:
+
+    n_p_opts = p_opts
+
+    netperf_cli_tcp_rr = ctl.get_module("Netperf",
+                                        options={
+                                            "role" : "client",
+                                            "netperf_server" : m1_testiface.get_ip(0),
+                                            "duration" : netperf_duration,
+                                            "testname" : "TCP_RR",
+                                            "confidence" : nperf_confidence,
+                                            "cpu_util" : nperf_cpu_util,
+                                            "runs" : nperf_max_runs,
+                                            "netperf_opts" : n_p_opts,
+                                            "max_deviation" : nperf_max_dev,
+                                            "debug" : nperf_debug,
+                                            "testoptions" : "-r %s" % size
+                                        })
+
+    netperf_cli_tcp_crr = ctl.get_module("Netperf",
+                                         options={
+                                             "role" : "client",
+                                             "netperf_server" : m1_testiface.get_ip(0),
+                                             "duration" : netperf_duration,
+                                             "testname" : "TCP_CRR",
+                                             "confidence" : nperf_confidence,
+                                             "cpu_util" : nperf_cpu_util,
+                                             "runs" : nperf_max_runs,
+                                             "netperf_opts" : n_p_opts,
+                                             "max_deviation" : nperf_max_dev,
+                                             "debug" : nperf_debug,
+                                             "testoptions" : "-r %s" % size
+                                         })
+
+    netperf_srv = ctl.get_module("Netperf",
+                                 options={
+                                     "role" : "server",
+                                     "bind" : m1_testiface.get_ip(0)
+                                 })
+
+    if nperf_mode == "multi":
+        netperf_cli_tcp_rr.unset_option("confidence")
+        netperf_cli_tcp_crr.unset_option("confidence")
+
+        netperf_cli_tcp_rr.update_options({"num_parallel": nperf_num_parallel})
+        netperf_cli_tcp_crr.update_options({"num_parallel": nperf_num_parallel})
+
+        # we have to use multiqueue qdisc to get appropriate data
+        m1.run("tc qdisc replace dev %s root mq" % m1_phy1.get_devname())
+        m2.run("tc qdisc replace dev %s root mq" % m2_phy1.get_devname())
+
+    ctl.wait(15)
+
+    # Netperf test
+    srv_proc = m1.run(netperf_srv, bg=True)
+    ctl.wait(2)
+
+    # prepare PerfRepo result for tcp_rr
+    result_tcp_rr = perf_api.new_result("tcp_rr_id",
+                                        "tcp_rr_result",
+                                        hash_ignore=[
+                                            'kernel_release',
+                                            'redhat_release'])
+    result_tcp_rr.add_tag(product_name)
+    if nperf_mode == "multi":
+        result_tcp_rr.add_tag("multithreaded")
+        result_tcp_rr.set_parameter('num_parallel', nperf_num_parallel)
+
+    result_tcp_rr.set_parameter("rr_size", size)
+
+    baseline = perf_api.get_baseline_of_result(result_tcp_rr)
+    netperf_baseline_template(netperf_cli_tcp_rr, baseline, test_type="RR")
+
+    tcp_rr_res_data = m2.run(netperf_cli_tcp_rr,
+                             timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+    netperf_result_template(result_tcp_rr, tcp_rr_res_data, test_type="RR")
+    result_tcp_rr.set_comment(pr_comment)
+    perf_api.save_result(result_tcp_rr)
+
+    # prepare PerfRepo result for tcp_crr
+    result_tcp_crr = perf_api.new_result("tcp_crr_id",
+                                         "tcp_crr_result",
+                                         hash_ignore=[
+                                             'kernel_release',
+                                             'redhat_release'])
+    result_tcp_crr.add_tag(product_name)
+    if nperf_mode == "multi":
+        result_tcp_crr.add_tag("multithreaded")
+        result_tcp_crr.set_parameter('num_parallel', nperf_num_parallel)
+
+    result_tcp_crr.set_parameter("rr_size", size)
+
+    baseline = perf_api.get_baseline_of_result(result_tcp_crr)
+    netperf_baseline_template(netperf_cli_tcp_crr, baseline, test_type="RR")
+
+    tcp_crr_res_data = m2.run(netperf_cli_tcp_crr,
+                              timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+    netperf_result_template(result_tcp_crr, tcp_crr_res_data, test_type="RR")
+    result_tcp_crr.set_comment(pr_comment)
+    perf_api.save_result(result_tcp_crr)
+
+    srv_proc.intr()
+
+if nperf_cpupin:
+    m1.run("service irqbalance start")
+    m2.run("service irqbalance start")
diff --git a/recipes/short-lived-connections/short_lived_connections.xml b/recipes/short-lived-connections/short_lived_connections.xml
new file mode 100644
index 0000000..289205e
--- /dev/null
+++ b/recipes/short-lived-connections/short_lived_connections.xml
@@ -0,0 +1,43 @@
+<lnstrecipe>
+    <define>
+        <alias name="mtu" value="1500" />
+        <alias name="netperf_duration" value="60" />
+        <alias name="nperf_reserve" value="20" />
+        <alias name="nperf_confidence" value="99,5" />
+        <alias name="nperf_max_runs" value="5" />
+        <alias name="nperf_mode" value="default"/>
+        <alias name="nperf_num_parallel" value="2"/>
+        <alias name="nperf_debug" value="0"/>
+        <alias name="nperf_max_dev" value="20%"/>
+        <alias name="mapping_file" value="short_lived_connections.mapping" />
+        <alias name="net" value="192.168.101" />
+        <alias name="driver" value="ixgbe" />
+    </define>
+    <network>
+        <host id="machine1">
+            <interfaces>
+                <eth id="testiface" label="testnet">
+                    <params>
+                        <param name="driver" value="{$driver}"/>
+                    </params>
+                    <addresses>
+                        <address>{$net}.10/24</address>
+                    </addresses>
+                </eth>
+            </interfaces>
+        </host>
+        <host id="machine2">
+            <interfaces>
+                <eth id="testiface" label="testnet">
+                    <params>
+                        <param name="driver" value="{$driver}"/>
+                    </params>
+                    <addresses>
+                        <address>{$net}.11/24</address>
+                    </addresses>
+                </eth>
+            </interfaces>
+        </host>
+    </network>
+    <task python="short_lived_connections.py"/>
+</lnstrecipe>
Thu, Jun 01, 2017 at 04:16:02PM CEST, jprochaz@redhat.com wrote:
This patch introduces a new test for short-lived connections. The topology is the same as in the regression-tests/phase1/simple_netperf test: two bare-metal hosts, each with one NIC. The measurement tool is netperf and the test types are TCP_RR and TCP_CRR. Both test types are run with these request/response sizes: 1K, 5K, 7K, 10K and 12K.
The test has working PerfRepo integration and uses all aliases used in regression-tests/.
Signed-off-by: Jiri Prochazka <jprochaz@redhat.com>
 .../short_lived_connections.py | 169 +++++++++++++++++++++
 .../short_lived_connections.xml | 43 ++++++
 2 files changed, 212 insertions(+)
 create mode 100644 recipes/short-lived-connections/short_lived_connections.py
 create mode 100644 recipes/short-lived-connections/short_lived_connections.xml
diff --git a/recipes/short-lived-connections/short_lived_connections.py b/recipes/short-lived-connections/short_lived_connections.py
new file mode 100644
index 0000000..0b07f61
--- /dev/null
+++ b/recipes/short-lived-connections/short_lived_connections.py
As we discussed in private mail, move this to one of the regression_tests/phaseX directories.
@@ -0,0 +1,169 @@
+from lnst.Controller.Task import ctl
+from lnst.Controller.PerfRepoUtils import netperf_baseline_template
+from lnst.Controller.PerfRepoUtils import netperf_result_template
+
+from lnst.RecipeCommon.IRQ import pin_dev_irqs
+from lnst.RecipeCommon.PerfRepo import generate_perfrepo_comment
+
+# ------
+# SETUP
+# ------
+
+mapping_file = ctl.get_alias("mapping_file")
+perf_api = ctl.connect_PerfRepo(mapping_file)
+
+product_name = ctl.get_alias("product_name")
+
+m1 = ctl.get_host("machine1")
+m2 = ctl.get_host("machine2")
+
+m1.sync_resources(modules=["Netperf"])
+m2.sync_resources(modules=["Netperf"])
+
+# ------
+# TESTS
+# ------
+
+mtu = ctl.get_alias("mtu")
+netperf_duration = int(ctl.get_alias("netperf_duration"))
+nperf_reserve = int(ctl.get_alias("nperf_reserve"))
+nperf_confidence = ctl.get_alias("nperf_confidence")
+nperf_max_runs = int(ctl.get_alias("nperf_max_runs"))
+nperf_cpupin = ctl.get_alias("nperf_cpupin")
+nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
+nperf_mode = ctl.get_alias("nperf_mode")
+nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
+nperf_debug = ctl.get_alias("nperf_debug")
+nperf_max_dev = ctl.get_alias("nperf_max_dev")
+pr_user_comment = ctl.get_alias("perfrepo_comment")
+
+m1_testiface = m1.get_interface("testiface")
+m2_testiface = m2.get_interface("testiface")
+
+m1_testiface.set_mtu(mtu)
+m2_testiface.set_mtu(mtu)
+
+pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
+
+if nperf_cpupin:
+    m1.run("service irqbalance stop")
+    m2.run("service irqbalance stop")
+
+    for m, d in [ (m1, m1_testiface), (m2, m2_testiface) ]:
+        pin_dev_irqs(m, d, 0)
+
+p_opts = "-L %s" % (m2_testiface.get_ip(0))
+
+if nperf_cpupin and nperf_mode != "multi":
+    p_opts += " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
+
+for size in ["1K,1K", "5K,5K", "7K,7K", "10K,10K", "12K,12K"]:
+
+    n_p_opts = p_opts
+
+    netperf_cli_tcp_rr = ctl.get_module("Netperf",
+                                        options={
+                                            "role" : "client",
+                                            "netperf_server" : m1_testiface.get_ip(0),
+                                            "duration" : netperf_duration,
+                                            "testname" : "TCP_RR",
+                                            "confidence" : nperf_confidence,
+                                            "cpu_util" : nperf_cpu_util,
+                                            "runs" : nperf_max_runs,
+                                            "netperf_opts" : n_p_opts,
+                                            "max_deviation" : nperf_max_dev,
+                                            "debug" : nperf_debug,
+                                            "testoptions" : "-r %s" % size
+                                        })
Please use the following:

    mod = ctl.get_module()  # before the loop
    for size in ...:
        mod.update_options({"testoptions": ...})
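A minimal sketch of that pattern with the names used in this recipe (only the per-iteration option is updated inside the loop; the remaining options are set once before it):

    netperf_cli_tcp_rr = ctl.get_module("Netperf",
                                        options={
                                            "role" : "client",
                                            "netperf_server" : m1_testiface.get_ip(0),
                                            "duration" : netperf_duration,
                                            "testname" : "TCP_RR",
                                            "netperf_opts" : p_opts
                                        })

    for size in ["1K,1K", "5K,5K", "7K,7K", "10K,10K", "12K,12K"]:
        # only the request/response size changes per iteration
        netperf_cli_tcp_rr.update_options({"testoptions": "-r %s" % size})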
+
+    netperf_cli_tcp_crr = ctl.get_module("Netperf",
+                                         options={
+                                             "role" : "client",
+                                             "netperf_server" : m1_testiface.get_ip(0),
+                                             "duration" : netperf_duration,
+                                             "testname" : "TCP_CRR",
+                                             "confidence" : nperf_confidence,
+                                             "cpu_util" : nperf_cpu_util,
+                                             "runs" : nperf_max_runs,
+                                             "netperf_opts" : n_p_opts,
+                                             "max_deviation" : nperf_max_dev,
+                                             "debug" : nperf_debug,
+                                             "testoptions" : "-r %s" % size
+                                         })
+
+    netperf_srv = ctl.get_module("Netperf",
+                                 options={
+                                     "role" : "server",
+                                     "bind" : m1_testiface.get_ip(0)
+                                 })
+
+    if nperf_mode == "multi":
+        netperf_cli_tcp_rr.unset_option("confidence")
+        netperf_cli_tcp_crr.unset_option("confidence")
+
+        netperf_cli_tcp_rr.update_options({"num_parallel": nperf_num_parallel})
+        netperf_cli_tcp_crr.update_options({"num_parallel": nperf_num_parallel})
+
+        # we have to use multiqueue qdisc to get appropriate data
+        m1.run("tc qdisc replace dev %s root mq" % m1_phy1.get_devname())
+        m2.run("tc qdisc replace dev %s root mq" % m2_phy1.get_devname())
m1_phy1 and m2_phy1 undefined! use pylint, Luke! ;-)
$ pylint -E recipes/short-lived-connections/short_lived_connections.py
No config file found, using default configuration
************* Module short_lived_connections
E:110,51: Undefined variable 'm1_phy1' (undefined-variable)
E:111,51: Undefined variable 'm2_phy1' (undefined-variable)
+
+    ctl.wait(15)
why here? this will add 15 secs to every iteration.
+
+    # Netperf test
+    srv_proc = m1.run(netperf_srv, bg=True)
+    ctl.wait(2)
+
+    # prepare PerfRepo result for tcp_rr
+    result_tcp_rr = perf_api.new_result("tcp_rr_id",
+                                        "tcp_rr_result",
+                                        hash_ignore=[
+                                            'kernel_release',
+                                            'redhat_release'])
+    result_tcp_rr.add_tag(product_name)
+    if nperf_mode == "multi":
+        result_tcp_rr.add_tag("multithreaded")
+        result_tcp_rr.set_parameter('num_parallel', nperf_num_parallel)
+
+    result_tcp_rr.set_parameter("rr_size", size)
+
+    baseline = perf_api.get_baseline_of_result(result_tcp_rr)
+    netperf_baseline_template(netperf_cli_tcp_rr, baseline, test_type="RR")
+
+    tcp_rr_res_data = m2.run(netperf_cli_tcp_rr,
+                             timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+    netperf_result_template(result_tcp_rr, tcp_rr_res_data, test_type="RR")
+    result_tcp_rr.set_comment(pr_comment)
+    perf_api.save_result(result_tcp_rr)
+
+    # prepare PerfRepo result for tcp_crr
+    result_tcp_crr = perf_api.new_result("tcp_crr_id",
+                                         "tcp_crr_result",
+                                         hash_ignore=[
+                                             'kernel_release',
+                                             'redhat_release'])
+    result_tcp_crr.add_tag(product_name)
+    if nperf_mode == "multi":
+        result_tcp_crr.add_tag("multithreaded")
+        result_tcp_crr.set_parameter('num_parallel', nperf_num_parallel)
+
+    result_tcp_crr.set_parameter("rr_size", size)
+
+    baseline = perf_api.get_baseline_of_result(result_tcp_crr)
+    netperf_baseline_template(netperf_cli_tcp_crr, baseline, test_type="RR")
+
+    tcp_crr_res_data = m2.run(netperf_cli_tcp_crr,
+                              timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+    netperf_result_template(result_tcp_crr, tcp_crr_res_data, test_type="RR")
+    result_tcp_crr.set_comment(pr_comment)
+    perf_api.save_result(result_tcp_crr)
+
+    srv_proc.intr()
+
+if nperf_cpupin:
+    m1.run("service irqbalance start")
+    m2.run("service irqbalance start")
diff --git a/recipes/short-lived-connections/short_lived_connections.xml b/recipes/short-lived-connections/short_lived_connections.xml
new file mode 100644
index 0000000..289205e
--- /dev/null
+++ b/recipes/short-lived-connections/short_lived_connections.xml
@@ -0,0 +1,43 @@
+<lnstrecipe>
+    <define>
+        <alias name="mtu" value="1500" />
+        <alias name="netperf_duration" value="60" />
+        <alias name="nperf_reserve" value="20" />
+        <alias name="nperf_confidence" value="99,5" />
+        <alias name="nperf_max_runs" value="5" />
+        <alias name="nperf_mode" value="default"/>
+        <alias name="nperf_num_parallel" value="2"/>
+        <alias name="nperf_debug" value="0"/>
+        <alias name="nperf_max_dev" value="20%"/>
+        <alias name="mapping_file" value="short_lived_connections.mapping" />
+        <alias name="net" value="192.168.101" />
+        <alias name="driver" value="ixgbe" />
+    </define>
+    <network>
+        <host id="machine1">
+            <interfaces>
+                <eth id="testiface" label="testnet">
+                    <params>
+                        <param name="driver" value="{$driver}"/>
+                    </params>
+                    <addresses>
+                        <address>{$net}.10/24</address>
+                    </addresses>
+                </eth>
+            </interfaces>
+        </host>
+        <host id="machine2">
+            <interfaces>
+                <eth id="testiface" label="testnet">
+                    <params>
+                        <param name="driver" value="{$driver}"/>
+                    </params>
+                    <addresses>
+                        <address>{$net}.11/24</address>
+                    </addresses>
+                </eth>
+            </interfaces>
+        </host>
+    </network>
+    <task python="short_lived_connections.py"/>
+</lnstrecipe>