[PATCH lnst 00/10] switchdev: various fixes & IPv6
by Yuval Mintz
As part of cleanup we're doing, we've discovered we have several lnst
patches in our internal development tree that were not sent upstream yet.
Some of the content here relate to recent changes in user APIs -
- #1, #2 from Arkadi relate to user indications of whether an FDB
entry was externally learned
- #9 from Ido relates to nexthop flags change
- #10 from Yotam handles mirroring changes - support clsact qdisc
instead of ingress qdisc [in mlxsw].
The rest of the patches in this series are from Ido, and mostly revolve
around adding/improving IPv6 support.
Arkadi Sharshevsky (2):
BridgeTool: Update FDB parsing for new externally learned attribute
recipes: switchdev: Change FDB check routine and bridge tests
Ido Schimmel (7):
recipes: switchdev: Fix incorrect ping6 parameter
recipes: switchdev: Enable multicast querier on bridge
InterfaceManager: Don't flush IPv6 link-local addresses
recipes: switchdev: Reduce ECMP to two links
recipes: switchdev: Execute recipe only for IPv4
recipes: switchdev: Add IPv6 support to existing L3 recipes
HostAPI: Parse nexthop flags in route dump
Yotam Gigi (1):
recipes: switchdev: span: Move to use the clsact qdisc
lnst/Slave/BridgeTool.py | 4 +-
lnst/Slave/InterfaceManager.py | 2 +-
lnst/Slave/NetTestSlave.py | 4 +-
recipes/switchdev/TestLib.py | 25 +++--
recipes/switchdev/ecmp_common.py | 10 ++
recipes/switchdev/l2-000-minimal.py | 3 +-
recipes/switchdev/l2-001-bridge.py | 3 +-
recipes/switchdev/l2-002-bridge_fdb.py | 101 ++++++---------------
recipes/switchdev/l2-003-bridge_stp.py | 25 ++---
recipes/switchdev/l2-004-bridge_bond.py | 3 +-
recipes/switchdev/l2-005-bridge_bond_failover.py | 3 +-
recipes/switchdev/l2-006-bridge_team.py | 3 +-
recipes/switchdev/l2-007-bridge_team_failover.py | 3 +-
recipes/switchdev/l2-008-bridge_vlan1q_sanity.py | 2 +-
recipes/switchdev/l2-009-bridge_vlan1q.py | 2 +-
recipes/switchdev/l2-010-bridge_vlan1d_sanity.py | 8 +-
recipes/switchdev/l2-011-bridge_vlan1d.py | 8 +-
.../switchdev/l2-012-bridge_bond_vlan1d_sanity.py | 8 +-
recipes/switchdev/l2-013-bridge_bond_vlan1d.py | 8 +-
.../switchdev/l2-014-bridge_team_vlan1d_sanity.py | 8 +-
recipes/switchdev/l2-015-bridge_team_vlan1d.py | 8 +-
recipes/switchdev/l2-017-bridge_fdb_vlan1d.py | 100 ++++++--------------
recipes/switchdev/l2-018-bridge_fdb_team.py | 100 ++++++--------------
recipes/switchdev/l2-019-bridge_fdb_team_vlan1d.py | 100 ++++++--------------
recipes/switchdev/l2-021-span.py | 15 ++-
recipes/switchdev/l3-000-minimal.py | 6 ++
recipes/switchdev/l3-001-router-port.py | 6 ++
recipes/switchdev/l3-002-vlan-interface.py | 6 ++
recipes/switchdev/l3-003-bond-interface.py | 6 ++
recipes/switchdev/l3-004-team-interface.py | 6 ++
recipes/switchdev/l3-005-ecmp-basic.py | 23 +----
recipes/switchdev/l3-005-ecmp-basic.xml | 4 -
recipes/switchdev/l3-006-ecmp-linkdown.py | 13 +--
recipes/switchdev/l3-006-ecmp-linkdown.xml | 7 +-
recipes/switchdev/l3-007-ecmp-maxsize.py | 29 ++----
recipes/switchdev/l3-007-ecmp-maxsize.xml | 4 -
recipes/switchdev/l3-008-routes_stress.py | 2 +-
recipes/switchdev/l3-009-1q_bridge_rif.py | 7 ++
recipes/switchdev/l3-010-1d_bridge_rif.py | 7 ++
recipes/switchdev/tc-001-action-vlan-modify.py | 3 +-
recipes/switchdev/tc-002-flower-vlan.py | 3 +-
41 files changed, 280 insertions(+), 408 deletions(-)
--
2.4.3
6 years
[PATCH] lnst.Common.Path: fix file path check
by Kamil Jerabek
Removing unnecessary path checks.
Signed-off-by: Kamil Jerabek <kjerabek(a)redhat.com>
---
lnst/Common/Path.py | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/lnst/Common/Path.py b/lnst/Common/Path.py
index fe7f709..ebbbcf8 100644
--- a/lnst/Common/Path.py
+++ b/lnst/Common/Path.py
@@ -20,10 +20,7 @@ def get_path_class(root, path):
if path.startswith('http'):
return HttpPath(root, path)
else:
- if os.access(path, os.R_OK):
- return FilePath(None, os.path.realpath(path))
- else:
- raise Exception("Path does not exist \"%s\"!" % path)
+ return FilePath(None, os.path.realpath(path))
if root.startswith('http'):
return HttpPath(root, path)
--
2.5.5
6 years, 1 month
[PATCH] RecipeCommon PerfRepo: move, modify perfrepo classes
by Kamil Jerabek
Moving all perfrepo related classes into RecipeCommon/PerfRepo directory.
Classes are modified so that it is usable within new LNST version.
Signed-off-by: Kamil Jerabek <kjerabek(a)redhat.com>
---
lnst/Controller/PerfRepoMapping.py | 74 -----
lnst/Controller/PerfRepoUtils.py | 72 -----
lnst/Controller/Task.py | 277 -------------------
lnst/RecipeCommon/PerfRepo.py | 44 ---
lnst/RecipeCommon/PerfRepo/PerfRepo.py | 373 ++++++++++++++++++++++++++
lnst/RecipeCommon/PerfRepo/PerfRepoMapping.py | 74 +++++
lnst/RecipeCommon/PerfRepo/PerfRepoUtils.py | 72 +++++
lnst/RecipeCommon/PerfRepo/__init__.py | 0
8 files changed, 519 insertions(+), 467 deletions(-)
delete mode 100644 lnst/Controller/PerfRepoMapping.py
delete mode 100644 lnst/Controller/PerfRepoUtils.py
delete mode 100644 lnst/RecipeCommon/PerfRepo.py
create mode 100644 lnst/RecipeCommon/PerfRepo/PerfRepo.py
create mode 100644 lnst/RecipeCommon/PerfRepo/PerfRepoMapping.py
create mode 100644 lnst/RecipeCommon/PerfRepo/PerfRepoUtils.py
create mode 100644 lnst/RecipeCommon/PerfRepo/__init__.py
diff --git a/lnst/Controller/PerfRepoMapping.py b/lnst/Controller/PerfRepoMapping.py
deleted file mode 100644
index 2cec2d8..0000000
--- a/lnst/Controller/PerfRepoMapping.py
+++ /dev/null
@@ -1,74 +0,0 @@
-"""
-This module defines the PerfRepoMapping class that is used as an interface to
-mapping files that map recipe keys to IDs in a PerfRepo instance.
-
-Copyright 2015 Red Hat, Inc.
-Licensed under the GNU General Public License, version 2 as
-published by the Free Software Foundation; see COPYING for details.
-"""
-
-__author__ = """
-olichtne(a)redhat.com (Ondrej Lichtner)
-"""
-
-import re
-import logging
-import pprint
-from lnst.Common.Path import Path
-
-class PerfRepoMapping(object):
- def __init__(self, filepath):
- if not isinstance(filepath, Path):
- filepath = Path(None, filepath)
-
- self._filepath = filepath.resolve()
- self._mapping = {}
- self.load_mapping_file(self._filepath)
-
- def load_mapping_file(self, filename):
- line_re = re.compile(r"^(\w+)\s*=\s*(\w+)\s*$")
- res_dict = {}
-
- lines = []
- try:
- with open(filename) as f:
- lines = f.readlines()
- except:
- self._mapping = {}
- raise
-
- lines = self._preprocess_lines(lines)
-
- for line in lines:
- match = line_re.match(line)
- if match is not None and len(match.groups()) == 2:
- res_dict[match.group(1)] = match.group(2)
- else:
- logging.warn("Skipping mapping line, invalid format:\n%s" %line)
- self._mapping = res_dict
-
- def _preprocess_lines(self, lines):
- comment_re = re.compile(r"^(.*?)#.*$")
- result_lines = []
-
- for line in lines:
- line = line.strip()
- match = comment_re.match(line)
- if match and len(match.groups()) == 1:
- line = match.group(1)
- line = line.strip()
- if line != "":
- result_lines.append(line)
- return result_lines
-
- def get_id(self, key):
- try:
- return self._mapping[key]
- except (KeyError, TypeError):
- return None
-
- def __str__(self):
- if self._mapping is None:
- return ""
- else:
- return pprint.pformat(self._mapping)
diff --git a/lnst/Controller/PerfRepoUtils.py b/lnst/Controller/PerfRepoUtils.py
deleted file mode 100644
index 4c545e4..0000000
--- a/lnst/Controller/PerfRepoUtils.py
+++ /dev/null
@@ -1,72 +0,0 @@
-"""
-This module contains helper functions useful when writing recipes
-that use PerfRepo.
-
-Copyright 2015 Red Hat, Inc.
-Licensed under the GNU General Public License, version 2 as
-published by the Free Software Foundation; see COPYING for details.
-"""
-
-__author__ = """
-olichtne(a)redhat.com (Ondrej Lichtner)
-"""
-
-import logging
-from lnst.Common.Utils import Noop
-
-def netperf_baseline_template(module, baseline):
- module.unset_option('threshold')
- module.unset_option('threshold_deviation')
-
- if baseline.get_texec() is None:
- return module
-
- try:
- throughput = baseline.get_value('throughput')
- deviation = baseline.get_value('throughput_deviation')
- except:
- logging.error("Invalid baseline TestExecution passed.")
- return module
-
- logging.debug("Setting Netperf threshold.")
- if throughput is not None and deviation is not None:
- module.update_options({'threshold': '%s bits/sec' % throughput,
- 'threshold_deviation': '%s bits/sec' % deviation})
- return module
-
-def perfrepo_baseline_to_dict(baseline):
- if baseline.get_texec() is None:
- return {}
-
- try:
- throughput = baseline.get_value('throughput')
- deviation = baseline.get_value('throughput_deviation')
- except:
- logging.error("Invalid baseline TestExecution passed.")
- return {}
-
- if throughput is not None and deviation is not None:
- return {'threshold': '%s bits/sec' % throughput,
- 'threshold_deviation': '%s bits/sec' % deviation}
- return {}
-
-def netperf_result_template(perfrepo_result, netperf_result):
- if isinstance(perfrepo_result, Noop):
- return perfrepo_result
-
- try:
- result = netperf_result.get_result()
- res_data = result['res_data']
- rate = res_data['rate']
- deviation = res_data['rate_deviation']
- except:
- logging.error("Netperf didn't return usable result data.")
- return perfrepo_result
-
- logging.debug("Adding Netperf results to PerfRepo object.")
- perfrepo_result.add_value('throughput', rate)
- perfrepo_result.add_value('throughput_min', rate - deviation)
- perfrepo_result.add_value('throughput_max', rate + deviation)
- perfrepo_result.add_value('throughput_deviation', deviation)
-
- return perfrepo_result
diff --git a/lnst/Controller/Task.py b/lnst/Controller/Task.py
index 5c288cc..fdb1734 100644
--- a/lnst/Controller/Task.py
+++ b/lnst/Controller/Task.py
@@ -822,280 +822,3 @@ class VolatileValue(object):
def __str__(self):
return str(self.get_val())
-class PerfRepoAPI(object):
- def __init__(self):
- self._rest_api = None
- self._mapping = None
-
- def load_mapping(self, file_path):
- try:
- self._mapping = PerfRepoMapping(file_path.resolve())
- except:
- logging.error("Failed to load PerfRepo mapping file '%s'" %\
- file_path.abs_path())
- self._mapping = None
-
- def get_mapping(self):
- return self._mapping
-
- def connected(self):
- if self._rest_api is not None and self._rest_api.connected() and\
- self._mapping is not None:
- return True
- else:
- return False
-
- def connect(self, url, username, password):
- if PerfRepoRESTAPI is not None:
- self._rest_api = PerfRepoRESTAPI(url, username, password)
- if not self._rest_api.connected():
- self._rest_api = None
- else:
- self._rest_api = None
-
- def new_result(self, mapping_key, name, hash_ignore=[]):
- if not self.connected():
- return Noop()
-
- mapping_id = self._mapping.get_id(mapping_key)
- if mapping_id is None:
- logging.debug("Test key '%s' has no mapping defined!" % mapping_key)
- return Noop()
-
- logging.debug("Test key '%s' mapped to id '%s'" % (mapping_key,
- mapping_id))
-
- test = self._rest_api.test_get_by_id(mapping_id, log=False)
- if test is None:
- test = self._rest_api.test_get_by_uid(mapping_id, log=False)
-
- if test is not None:
- test_url = self._rest_api.get_obj_url(test)
- logging.debug("Found Test with id='%s' and uid='%s'! %s" % \
- (test.get_id(), test.get_uid(), test_url))
- else:
- logging.debug("No Test with id or uid '%s' found!" % mapping_id)
- return Noop()
-
- logging.info("Creating a new result object for PerfRepo")
- result = PerfRepoResult(test, name, hash_ignore)
- return result
-
- def save_result(self, result):
- if isinstance(result, Noop):
- return
- elif not self.connected():
- raise TaskError("Not connected to PerfRepo.")
- elif isinstance(result, PerfRepoResult):
- if len(result.get_testExecution().get_values()) < 1:
- logging.debug("PerfRepoResult with no result data, skipping "\
- "send to PerfRepo.")
- return
- h = result.generate_hash()
- logging.debug("Adding hash '%s' as tag to result." % h)
- result.add_tag(h)
- logging.info("Sending TestExecution to PerfRepo.")
- self._rest_api.testExecution_create(result.get_testExecution())
-
- report_id = self._mapping.get_id(h)
- if not report_id and result.get_testExecution().get_id() != None:
- logging.debug("No mapping defined for hash '%s'" % h)
- logging.debug("If you want to create a new report and set "\
- "this result as the baseline run this command:")
- cmd = "perfrepo report create"
- cmd += " name REPORTNAME"
-
- test = result.get_test()
- cmd += " chart CHARTNAME"
- cmd += " testid %s" % test.get_id()
- series_num = 0
- for m in test.get_metrics():
- cmd += " series NAME%d" % series_num
- cmd += " metric %s" % m.get_id()
- cmd += " tags %s" % h
- series_num += 1
- cmd += " baseline BASELINENAME"
- cmd += " execid %s" % result.get_testExecution().get_id()
- cmd += " metric %s" % test.get_metrics()[0].get_id()
- logging.debug(cmd)
- else:
- raise TaskError("Parameter result must be an instance "\
- "of PerfRepoResult")
-
- def get_baseline(self, report_id):
- if report_id is None or not self.connected():
- return Noop()
-
- report = self._rest_api.report_get_by_id(report_id, log=False)
- if report is None:
- logging.debug("No report with id %s found!" % report_id)
- return Noop()
- logging.debug("Report found: %s" %\
- self._rest_api.get_obj_url(report))
-
- baseline = report.get_baseline()
-
- if baseline is None:
- logging.debug("No baseline set for report %s" %\
- self._rest_api.get_obj_url(report))
- return Noop()
-
- baseline_exec_id = baseline["execId"]
- baseline_testExec = self._rest_api.testExecution_get(baseline_exec_id,
- log=False)
-
- logging.debug("TestExecution of baseline: %s" %\
- self._rest_api.get_obj_url(baseline_testExec))
- return PerfRepoBaseline(baseline_testExec)
-
- def get_baseline_of_result(self, result):
- if not isinstance(result, PerfRepoResult) or not self.connected():
- return Noop()
-
- res_hash = result.generate_hash()
- logging.debug("Result hash is: '%s'" % res_hash)
-
- report_id = self._mapping.get_id(res_hash)
- if report_id is not None:
- logging.debug("Hash '%s' maps to report id '%s'" % (res_hash,
- report_id))
- else:
- logging.debug("Hash '%s' has no mapping defined!" % res_hash)
- return Noop()
-
- baseline = self.get_baseline(report_id)
-
- if baseline.get_texec() is None:
- logging.debug("No baseline set for results with hash %s" % res_hash)
- return baseline
-
- def compare_to_baseline(self, result, report_id, metric_name):
- if not self.connected():
- return False
- baseline_testExec = self.get_baseline(report_id)
- result_testExec = result.get_testExecution()
-
- return self.compare_testExecutions(result_testExec,
- baseline_testExec,
- metric_name)
-
- def compare_testExecutions(self, first, second, metric_name):
- first_value = first.get_value(metric_name)
- first_min = first.get_value(metric_name + "_min")
- first_max = first.get_value(metric_name + "_max")
-
- second_value = second.get_value(metric_name)
- second_min = second.get_value(metric_name + "_min")
- second_max = second.get_value(metric_name + "_max")
-
- comp = second_value.get_comparator()
- if comp == "HB":
- if second_min.get_result() > first_max.get_result():
- return False
- return True
- elif comp == "LB":
- if first_min.get_result() > second_max.get_result():
- return False
- return True
- else:
- return False
- return False
-
-class PerfRepoResult(object):
- def __init__(self, test, name, hash_ignore=[]):
- self._test = test
- self._testExecution = PerfRepoTestExecution()
- self._testExecution.set_testId(test.get_id())
- self._testExecution.set_testUid(test.get_uid())
- self._testExecution.set_name(name)
- self.set_configuration(ctl.get_configuration())
- self._hash_ignore = hash_ignore
-
- def add_value(self, val_name, value):
- perf_value = PerfRepoValue()
- perf_value.set_metricName(val_name)
- perf_value.set_result(value)
-
- self._testExecution.add_value(perf_value)
-
- def set_configuration(self, configuration=None):
- if configuration is None:
- configuration = ctl.get_configuration()
- for pair in dict_to_dot(configuration, "configuration."):
- self._testExecution.add_parameter(pair[0], pair[1])
-
- def set_mapping(self, mapping=None):
- if mapping is None:
- mapping = ctl.get_mapping()
- for pair in list_to_dot(mapping, "mapping.", "machine"):
- self._testExecution.add_parameter(pair[0], pair[1])
-
- def set_tag(self, tag):
- self._testExecution.add_tag(tag)
-
- def add_tag(self, tag):
- self.set_tag(tag)
-
- def set_tags(self, tags):
- for tag in tags:
- self.set_tag(tag)
-
- def add_tags(self, tags):
- self.set_tags(tags)
-
- def set_parameter(self, name, value):
- self._testExecution.add_parameter(name, value)
-
- def set_parameters(self, params):
- for name, value in params:
- self.set_parameter(name, value)
-
- def set_hash_ignore(self, hash_ignore):
- self._hash_ignore = hash_ignore
-
- def set_comment(self, comment):
- if comment:
- self._testExecution.set_comment(comment)
-
- def get_hash_ignore(self):
- return self._hash_ignore
-
- def get_testExecution(self):
- return self._testExecution
-
- def get_test(self):
- return self._test
-
- def generate_hash(self, ignore=[]):
- ignore.extend(self._hash_ignore)
- tags = self._testExecution.get_tags()
- params = self._testExecution.get_parameters()
-
- sha1 = hashlib.sha1()
- sha1.update(self._testExecution.get_testUid())
- for i in sorted(tags):
- sha1.update(i)
- for i in sorted(params, key=lambda x: x[0]):
- skip = False
- for j in ignore:
- if re.search(j, i[0]):
- skip = True
- break
- if skip:
- continue
- sha1.update(i[0])
- sha1.update(str(i[1]))
- return sha1.hexdigest()
-
-class PerfRepoBaseline(object):
- def __init__(self, texec):
- self._texec = texec
-
- def get_value(self, name):
- if self._texec is None:
- return None
- perfrepovalue = self._texec.get_value(name)
- return perfrepovalue.get_result()
-
- def get_texec(self):
- return self._texec
diff --git a/lnst/RecipeCommon/PerfRepo.py b/lnst/RecipeCommon/PerfRepo.py
deleted file mode 100644
index 432dad8..0000000
--- a/lnst/RecipeCommon/PerfRepo.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""
-This module defines helper functions for interacting with PerfRepo
-that can be imported directly into LNST Python tasks.
-
-Copyright 2016 Red Hat, Inc.
-Licensed under the GNU General Public License, version 2 as
-published by the Free Software Foundation; see COPYING for details.
-"""
-
-__author__ = """
-jtluka(a)redhat.com (Jan Tluka)
-"""
-
-import os
-
-def generate_perfrepo_comment(hosts=[], user_comment=None):
- """ Prepare the PerfRepo comment.
-
- By default it will include kernel versions used on the hosts and
- Beaker job url.
-
- Keyword arguments:
- hosts -- list of HostAPI objects
- user_comment -- additional user specified comment
- """
-
- comment = ""
-
- for host in hosts:
- host_cfg = host.get_configuration()
- comment += "Kernel (%s): %s<BR>" % \
- (host_cfg['id'], host_cfg['kernel_release'])
-
- # if we're running in Beaker environment, include job url
- if 'BEAKER' in os.environ and 'JOBID' in os.environ:
- bkr_server = os.environ['BEAKER']
- bkr_jobid = os.environ['JOBID']
- bkr_job_url = bkr_server + 'jobs/' + bkr_jobid
- comment += "Beaker job: %s<BR>" % bkr_job_url
-
- if user_comment:
- comment += user_comment
-
- return comment
diff --git a/lnst/RecipeCommon/PerfRepo/PerfRepo.py b/lnst/RecipeCommon/PerfRepo/PerfRepo.py
new file mode 100644
index 0000000..43f9660
--- /dev/null
+++ b/lnst/RecipeCommon/PerfRepo/PerfRepo.py
@@ -0,0 +1,373 @@
+"""
+This module defines helper functions for interacting with PerfRepo
+that can be imported directly into LNST Python tasks.
+
+Copyright 2016 Red Hat, Inc.
+Licensed under the GNU General Public License, version 2 as
+published by the Free Software Foundation; see COPYING for details.
+"""
+
+__author__ = """
+jtluka(a)redhat.com (Jan Tluka)
+"""
+
+import os
+import re
+import logging
+
+from lnst.Common.Path import Path
+from lnst.RecipeCommon.PerfRepo.PerfRepoMapping import PerfRepoMapping
+
+try:
+ from perfrepo import PerfRepoRESTAPI
+ from perfrepo import PerfRepoTestExecution
+ from perfrepo import PerfRepoValue
+except:
+ PerfRepoRESTAPI = None
+ PerfRepoTestExecution = None
+ PerfRepoValue = None
+
+def generate_perfrepo_comment(hosts=[], user_comment=None):
+ """ Prepare the PerfRepo comment.
+
+ By default it will include kernel versions used on the hosts and
+ Beaker job url.
+
+ Keyword arguments:
+ hosts -- list of HostAPI objects
+ user_comment -- additional user specified comment
+ """
+
+ comment = ""
+
+ for host in hosts:
+ host_cfg = host.get_configuration()
+ comment += "Kernel (%s): %s<BR>" % \
+ (host_cfg['id'], host_cfg['kernel_release'])
+
+ # if we're running in Beaker environment, include job url
+ if 'BEAKER' in os.environ and 'JOBID' in os.environ:
+ bkr_server = os.environ['BEAKER']
+ bkr_jobid = os.environ['JOBID']
+ bkr_job_url = bkr_server + 'jobs/' + bkr_jobid
+ comment += "Beaker job: %s<BR>" % bkr_job_url
+
+ if user_comment:
+ comment += user_comment
+
+ return comment
+
+
+
+
+
+class PerfRepoAPI(object):
+ def __init__(self, mapping_file_path, url=None, username=None, password=None):
+ self._mapping_file_path = mapping_file_path
+ self._url = url
+ self._username = username
+ self._password = password
+
+ self._rest_api = None
+ self._mapping = None
+
+ def connect_PerfRepo(self):
+ if not self.connected():
+ #TODO: store credentials in config or not
+ #if self._url is None:
+ # self._url = lnst_config.get_option("perfrepo", "url")
+ #if self._username is None:
+ # self._username = lnst_config.get_option("perfrepo", "username")
+ #if self._password is None:
+ # self._password = lnst_config.get_option("perfrepo", "password")
+
+ if not self._url:
+ logging.warn("No PerfRepo URL specified in config file")
+ if not self._username:
+ logging.warn("No PerfRepo username specified in config file")
+ if not self._password:
+ logging.warn("No PerfRepo password specified in config file")
+ if self._url and self._username and self._password:
+ self.connect(self._url, self._username, self._password)
+
+ path = Path(None, self._mapping_file_path)
+ self.load_mapping(path)
+
+ if not self.connected():
+ if PerfRepoRESTAPI is None:
+ logging.warn("Python PerfRepo library not found.")
+ logging.warn("Connection to PerfRepo incomplete, further "\
+ "PerfRepo commands will be ignored.")
+
+ def load_mapping(self, file_path):
+ try:
+ self._mapping = PerfRepoMapping(file_path.resolve())
+ except:
+ logging.error("Failed to load PerfRepo mapping file '%s'" %\
+ file_path.abs_path())
+ self._mapping = None
+
+ def get_mapping(self):
+ return self._mapping
+
+ def connected(self):
+ if self._rest_api is not None and self._rest_api.connected() and\
+ self._mapping is not None:
+ return True
+ else:
+ return False
+
+ def connect(self, url, username, password):
+ if PerfRepoRESTAPI is not None:
+ self._rest_api = PerfRepoRESTAPI(url, username, password)
+ if not self._rest_api.connected():
+ self._rest_api = None
+ else:
+ self._rest_api = None
+
+ def new_result(self, mapping_key, name, hash_ignore=[]):
+ if not self.connected():
+ return Noop()
+
+ mapping_id = self._mapping.get_id(mapping_key)
+ if mapping_id is None:
+ logging.debug("Test key '%s' has no mapping defined!" % mapping_key)
+ return Noop()
+
+ logging.debug("Test key '%s' mapped to id '%s'" % (mapping_key,
+ mapping_id))
+
+ test = self._rest_api.test_get_by_id(mapping_id, log=False)
+ if test is None:
+ test = self._rest_api.test_get_by_uid(mapping_id, log=False)
+
+ if test is not None:
+ test_url = self._rest_api.get_obj_url(test)
+ logging.debug("Found Test with id='%s' and uid='%s'! %s" % \
+ (test.get_id(), test.get_uid(), test_url))
+ else:
+ logging.debug("No Test with id or uid '%s' found!" % mapping_id)
+ return Noop()
+
+ logging.info("Creating a new result object for PerfRepo")
+ result = PerfRepoResult(test, name, hash_ignore)
+ return result
+
+ def save_result(self, result):
+ if isinstance(result, Noop):
+ return
+ elif not self.connected():
+ raise TaskError("Not connected to PerfRepo.")
+ elif isinstance(result, PerfRepoResult):
+ if len(result.get_testExecution().get_values()) < 1:
+ logging.debug("PerfRepoResult with no result data, skipping "\
+ "send to PerfRepo.")
+ return
+ h = result.generate_hash()
+ logging.debug("Adding hash '%s' as tag to result." % h)
+ result.add_tag(h)
+ logging.info("Sending TestExecution to PerfRepo.")
+ self._rest_api.testExecution_create(result.get_testExecution())
+
+ report_id = self._mapping.get_id(h)
+ if not report_id and result.get_testExecution().get_id() != None:
+ logging.debug("No mapping defined for hash '%s'" % h)
+ logging.debug("If you want to create a new report and set "\
+ "this result as the baseline run this command:")
+ cmd = "perfrepo report create"
+ cmd += " name REPORTNAME"
+
+ test = result.get_test()
+ cmd += " chart CHARTNAME"
+ cmd += " testid %s" % test.get_id()
+ series_num = 0
+ for m in test.get_metrics():
+ cmd += " series NAME%d" % series_num
+ cmd += " metric %s" % m.get_id()
+ cmd += " tags %s" % h
+ series_num += 1
+ cmd += " baseline BASELINENAME"
+ cmd += " execid %s" % result.get_testExecution().get_id()
+ cmd += " metric %s" % test.get_metrics()[0].get_id()
+ logging.debug(cmd)
+ else:
+ raise TaskError("Parameter result must be an instance "\
+ "of PerfRepoResult")
+
+ def get_baseline(self, report_id):
+ if report_id is None or not self.connected():
+ return Noop()
+
+ report = self._rest_api.report_get_by_id(report_id, log=False)
+ if report is None:
+ logging.debug("No report with id %s found!" % report_id)
+ return Noop()
+ logging.debug("Report found: %s" %\
+ self._rest_api.get_obj_url(report))
+
+ baseline = report.get_baseline()
+
+ if baseline is None:
+ logging.debug("No baseline set for report %s" %\
+ self._rest_api.get_obj_url(report))
+ return Noop()
+
+ baseline_exec_id = baseline["execId"]
+ baseline_testExec = self._rest_api.testExecution_get(baseline_exec_id,
+ log=False)
+
+ logging.debug("TestExecution of baseline: %s" %\
+ self._rest_api.get_obj_url(baseline_testExec))
+ return PerfRepoBaseline(baseline_testExec)
+
+ def get_baseline_of_result(self, result):
+ if not isinstance(result, PerfRepoResult) or not self.connected():
+ return Noop()
+
+ res_hash = result.generate_hash()
+ logging.debug("Result hash is: '%s'" % res_hash)
+
+ report_id = self._mapping.get_id(res_hash)
+ if report_id is not None:
+ logging.debug("Hash '%s' maps to report id '%s'" % (res_hash,
+ report_id))
+ else:
+ logging.debug("Hash '%s' has no mapping defined!" % res_hash)
+ return Noop()
+
+ baseline = self.get_baseline(report_id)
+
+ if baseline.get_texec() is None:
+ logging.debug("No baseline set for results with hash %s" % res_hash)
+ return baseline
+
+ def compare_to_baseline(self, result, report_id, metric_name):
+ if not self.connected():
+ return False
+ baseline_testExec = self.get_baseline(report_id)
+ result_testExec = result.get_testExecution()
+
+ return self.compare_testExecutions(result_testExec,
+ baseline_testExec,
+ metric_name)
+
+ def compare_testExecutions(self, first, second, metric_name):
+ first_value = first.get_value(metric_name)
+ first_min = first.get_value(metric_name + "_min")
+ first_max = first.get_value(metric_name + "_max")
+
+ second_value = second.get_value(metric_name)
+ second_min = second.get_value(metric_name + "_min")
+ second_max = second.get_value(metric_name + "_max")
+
+ comp = second_value.get_comparator()
+ if comp == "HB":
+ if second_min.get_result() > first_max.get_result():
+ return False
+ return True
+ elif comp == "LB":
+ if first_min.get_result() > second_max.get_result():
+ return False
+ return True
+ else:
+ return False
+ return False
+
+class PerfRepoResult(object):
+ def __init__(self, test, name, hash_ignore=[]):
+ self._test = test
+ self._testExecution = PerfRepoTestExecution()
+ self._testExecution.set_testId(test.get_id())
+ self._testExecution.set_testUid(test.get_uid())
+ self._testExecution.set_name(name)
+ self.set_configuration(ctl.get_configuration())
+ self._hash_ignore = hash_ignore
+
+ def add_value(self, val_name, value):
+ perf_value = PerfRepoValue()
+ perf_value.set_metricName(val_name)
+ perf_value.set_result(value)
+
+ self._testExecution.add_value(perf_value)
+
+ def set_configuration(self, configuration=None):
+ if configuration is None:
+ configuration = ctl.get_configuration()
+ for pair in dict_to_dot(configuration, "configuration."):
+ self._testExecution.add_parameter(pair[0], pair[1])
+
+ def set_mapping(self, mapping=None):
+ if mapping is None:
+ mapping = ctl.get_mapping()
+ for pair in list_to_dot(mapping, "mapping.", "machine"):
+ self._testExecution.add_parameter(pair[0], pair[1])
+
+ def set_tag(self, tag):
+ self._testExecution.add_tag(tag)
+
+ def add_tag(self, tag):
+ self.set_tag(tag)
+
+ def set_tags(self, tags):
+ for tag in tags:
+ self.set_tag(tag)
+
+ def add_tags(self, tags):
+ self.set_tags(tags)
+
+ def set_parameter(self, name, value):
+ self._testExecution.add_parameter(name, value)
+
+ def set_parameters(self, params):
+ for name, value in params:
+ self.set_parameter(name, value)
+
+ def set_hash_ignore(self, hash_ignore):
+ self._hash_ignore = hash_ignore
+
+ def set_comment(self, comment):
+ if comment:
+ self._testExecution.set_comment(comment)
+
+ def get_hash_ignore(self):
+ return self._hash_ignore
+
+ def get_testExecution(self):
+ return self._testExecution
+
+ def get_test(self):
+ return self._test
+
+ def generate_hash(self, ignore=[]):
+ ignore.extend(self._hash_ignore)
+ tags = self._testExecution.get_tags()
+ params = self._testExecution.get_parameters()
+
+ sha1 = hashlib.sha1()
+ sha1.update(self._testExecution.get_testUid())
+ for i in sorted(tags):
+ sha1.update(i)
+ for i in sorted(params, key=lambda x: x[0]):
+ skip = False
+ for j in ignore:
+ if re.search(j, i[0]):
+ skip = True
+ break
+ if skip:
+ continue
+ sha1.update(i[0])
+ sha1.update(str(i[1]))
+ return sha1.hexdigest()
+
+class PerfRepoBaseline(object):
+ def __init__(self, texec):
+ self._texec = texec
+
+ def get_value(self, name):
+ if self._texec is None:
+ return None
+ perfrepovalue = self._texec.get_value(name)
+ return perfrepovalue.get_result()
+
+ def get_texec(self):
+ return self._texec
diff --git a/lnst/RecipeCommon/PerfRepo/PerfRepoMapping.py b/lnst/RecipeCommon/PerfRepo/PerfRepoMapping.py
new file mode 100644
index 0000000..2cec2d8
--- /dev/null
+++ b/lnst/RecipeCommon/PerfRepo/PerfRepoMapping.py
@@ -0,0 +1,74 @@
+"""
+This module defines the PerfRepoMapping class that is used as an interface to
+mapping files that map recipe keys to IDs in a PerfRepo instance.
+
+Copyright 2015 Red Hat, Inc.
+Licensed under the GNU General Public License, version 2 as
+published by the Free Software Foundation; see COPYING for details.
+"""
+
+__author__ = """
+olichtne(a)redhat.com (Ondrej Lichtner)
+"""
+
+import re
+import logging
+import pprint
+from lnst.Common.Path import Path
+
+class PerfRepoMapping(object):
+ def __init__(self, filepath):
+ if not isinstance(filepath, Path):
+ filepath = Path(None, filepath)
+
+ self._filepath = filepath.resolve()
+ self._mapping = {}
+ self.load_mapping_file(self._filepath)
+
+ def load_mapping_file(self, filename):
+ line_re = re.compile(r"^(\w+)\s*=\s*(\w+)\s*$")
+ res_dict = {}
+
+ lines = []
+ try:
+ with open(filename) as f:
+ lines = f.readlines()
+ except:
+ self._mapping = {}
+ raise
+
+ lines = self._preprocess_lines(lines)
+
+ for line in lines:
+ match = line_re.match(line)
+ if match is not None and len(match.groups()) == 2:
+ res_dict[match.group(1)] = match.group(2)
+ else:
+ logging.warn("Skipping mapping line, invalid format:\n%s" %line)
+ self._mapping = res_dict
+
+ def _preprocess_lines(self, lines):
+ comment_re = re.compile(r"^(.*?)#.*$")
+ result_lines = []
+
+ for line in lines:
+ line = line.strip()
+ match = comment_re.match(line)
+ if match and len(match.groups()) == 1:
+ line = match.group(1)
+ line = line.strip()
+ if line != "":
+ result_lines.append(line)
+ return result_lines
+
+ def get_id(self, key):
+ try:
+ return self._mapping[key]
+ except (KeyError, TypeError):
+ return None
+
+ def __str__(self):
+ if self._mapping is None:
+ return ""
+ else:
+ return pprint.pformat(self._mapping)
diff --git a/lnst/RecipeCommon/PerfRepo/PerfRepoUtils.py b/lnst/RecipeCommon/PerfRepo/PerfRepoUtils.py
new file mode 100644
index 0000000..4c545e4
--- /dev/null
+++ b/lnst/RecipeCommon/PerfRepo/PerfRepoUtils.py
@@ -0,0 +1,72 @@
+"""
+This module contains helper functions useful when writing recipes
+that use PerfRepo.
+
+Copyright 2015 Red Hat, Inc.
+Licensed under the GNU General Public License, version 2 as
+published by the Free Software Foundation; see COPYING for details.
+"""
+
+__author__ = """
+olichtne(a)redhat.com (Ondrej Lichtner)
+"""
+
+import logging
+from lnst.Common.Utils import Noop
+
+def netperf_baseline_template(module, baseline):
+ module.unset_option('threshold')
+ module.unset_option('threshold_deviation')
+
+ if baseline.get_texec() is None:
+ return module
+
+ try:
+ throughput = baseline.get_value('throughput')
+ deviation = baseline.get_value('throughput_deviation')
+ except:
+ logging.error("Invalid baseline TestExecution passed.")
+ return module
+
+ logging.debug("Setting Netperf threshold.")
+ if throughput is not None and deviation is not None:
+ module.update_options({'threshold': '%s bits/sec' % throughput,
+ 'threshold_deviation': '%s bits/sec' % deviation})
+ return module
+
+def perfrepo_baseline_to_dict(baseline):
+ if baseline.get_texec() is None:
+ return {}
+
+ try:
+ throughput = baseline.get_value('throughput')
+ deviation = baseline.get_value('throughput_deviation')
+ except:
+ logging.error("Invalid baseline TestExecution passed.")
+ return {}
+
+ if throughput is not None and deviation is not None:
+ return {'threshold': '%s bits/sec' % throughput,
+ 'threshold_deviation': '%s bits/sec' % deviation}
+ return {}
+
+def netperf_result_template(perfrepo_result, netperf_result):
+ if isinstance(perfrepo_result, Noop):
+ return perfrepo_result
+
+ try:
+ result = netperf_result.get_result()
+ res_data = result['res_data']
+ rate = res_data['rate']
+ deviation = res_data['rate_deviation']
+ except:
+ logging.error("Netperf didn't return usable result data.")
+ return perfrepo_result
+
+ logging.debug("Adding Netperf results to PerfRepo object.")
+ perfrepo_result.add_value('throughput', rate)
+ perfrepo_result.add_value('throughput_min', rate - deviation)
+ perfrepo_result.add_value('throughput_max', rate + deviation)
+ perfrepo_result.add_value('throughput_deviation', deviation)
+
+ return perfrepo_result
diff --git a/lnst/RecipeCommon/PerfRepo/__init__.py b/lnst/RecipeCommon/PerfRepo/__init__.py
new file mode 100644
index 0000000..e69de29
--
2.5.5
6 years, 1 month
[PATCH] switchdev: recipes: Add non-equal-cost multi-path recipe
by Ido Schimmel
Inject 10M IPv4 UDP packets with random source and destination ports and
make sure the flows are distributed between both multi-path links
according to the configured weights.
The default weights can be overridden using:
$ lnst-ctl -a weight0=100 -a weight1=2 run l3-011-nexthop4-weights.xml
Signed-off-by: Ido Schimmel <idosch(a)mellanox.com>
Reviewed-by: Petr Machata <petrm(a)mellanox.com>
---
recipes/switchdev/l3-011-nexthop4-weights.py | 135 ++++++++++++++++++++++++++
recipes/switchdev/l3-011-nexthop4-weights.xml | 60 ++++++++++++
2 files changed, 195 insertions(+)
create mode 100644 recipes/switchdev/l3-011-nexthop4-weights.py
create mode 100644 recipes/switchdev/l3-011-nexthop4-weights.xml
diff --git a/recipes/switchdev/l3-011-nexthop4-weights.py b/recipes/switchdev/l3-011-nexthop4-weights.py
new file mode 100644
index 0000000..4cb90e0
--- /dev/null
+++ b/recipes/switchdev/l3-011-nexthop4-weights.py
@@ -0,0 +1,135 @@
+"""
+Copyright 2017 Mellanox Technologies. All rights reserved.
+Licensed under the GNU General Public License, version 2 as
+published by the Free Software Foundation; see COPYING for details.
+"""
+
+__author__ = """
+idosch(a)mellanox.com (Ido Schimmel)
+"""
+
+from lnst.Controller.Task import ctl
+from TestLib import TestLib
+from time import sleep
+import logging
+
+def ping(ctl, if1, if2):
+ m1 = if1.get_host()
+ m1.sync_resources(modules=["IcmpPing"])
+
+ ping_mod = ctl.get_module("IcmpPing",
+ options={
+ "addr": if2.get_ip(1),
+ "count": 100,
+ "interval": 0.2,
+ "iface" : if1.get_devname(),
+ "limit_rate": 90})
+ m1.run(ping_mod)
+
+def pktgen(ctl, if1, if2, neigh_mac):
+ m1 = if1.get_host()
+ m1.sync_resources(modules=["PktgenTx"])
+
+ pktgen_option = ["pkt_size {}".format(if1.get_mtu()),
+ "clone_skb 0",
+ "count {}".format(10 * 10 ** 6),
+ "dst_mac {}".format(neigh_mac),
+ "dst {}".format(if2.get_ip(1)),
+ "udp_src_min 1024", "udp_src_max 4096",
+ "udp_dst_min 1024", "udp_dst_max 4096",
+ "flag UDPSRC_RND", "flag UDPDST_RND"]
+ pktgen_mod = ctl.get_module("PktgenTx",
+ options={
+ "netdev_name": if1.get_devname(),
+ "pktgen_option": pktgen_option})
+ m1.run(pktgen_mod, timeout=600)
+
+def check_res(tl, m, if2_weight, if3_weight, if2_pkts, if3_pkts):
+ weight_ratio = float(if2_weight) / float(if3_weight)
+ msg = "Weights ratio: if2 ({}) / if3 ({}) = {:f}"
+ logging.info(msg.format(if2_weight, if3_weight, weight_ratio))
+
+ pkts_ratio = float(if2_pkts) / float(if3_pkts)
+ msg = "Tx-ed packets ratio: if2 ({}) / if3 ({}) = {:f}"
+ logging.info(msg.format(if2_pkts, if3_pkts, pkts_ratio))
+
+ if (abs(weight_ratio - pkts_ratio) / weight_ratio) <= 0.1:
+ err_msg=""
+ else:
+ err_msg = "Too large discrepancy (> 10%) in ratio"
+ tl.custom(m, "Ratio comparison", err_msg)
+
+def do_task(ctl, hosts, ifaces, aliases):
+ m1, m2, sw = hosts
+ m1_if1, m2_if1, m2_if2, sw_if1, sw_if2, sw_if3 = ifaces
+
+ subnet0 = aliases["subnet0"] + ".0/24"
+ subnet1 = aliases["subnet1"] + ".0/24"
+ subnet2 = aliases["subnet2"] + ".0/24"
+ subnet3 = aliases["subnet3"] + ".0/24"
+ weight0 = int(aliases["weight0"])
+ weight1 = int(aliases["weight1"])
+
+ tl = TestLib(ctl, aliases)
+
+ # +----------------------------------+
+ # | |
+ # | |
+ # | sw_if1 sw_if2 sw_if3 |
+ # +---+-----------------+-------+----+
+ # | | |
+ # | | |
+ # | | |
+ # | | |
+ # + + +
+ # m1_if1 m2_if1 m2_if2
+
+ cmd = "ip -4 route add {subnet} nexthop via {gw_ip} dev {nh_dev}"
+ m1.run(cmd.format(subnet=subnet3, gw_ip=sw_if1.get_ip(0),
+ nh_dev=m1_if1.get_devname()))
+
+ cmd = ("ip -4 route add {subnet} nexthop via {gw_ip0} dev {nh_dev0} "
+ "nexthop via {gw_ip1} dev {nh_dev1}")
+ m2.run(cmd.format(subnet=subnet0, gw_ip0=sw_if2.get_ip(0),
+ nh_dev0=m2_if1.get_devname(), gw_ip1=sw_if3.get_ip(0),
+ nh_dev1=m2_if2.get_devname()))
+
+ cmd = ("ip -4 route add {subnet} nexthop via {gw_ip0} dev {nh_dev0} "
+ "weight {w0} nexthop via {gw_ip1} dev {nh_dev1} weight {w1}")
+ sw.run(cmd.format(subnet=subnet3, gw_ip0=m2_if1.get_ip(0),
+ nh_dev0=sw_if2.get_devname(), w0=weight0,
+ gw_ip1=m2_if2.get_ip(0), nh_dev1=sw_if3.get_devname(),
+ w1=weight1))
+
+ # Make sure the kernel uses L4 fields for multipath hash, since we
+ # are going to use random UDP source and destination ports.
+ sw.run("sysctl -w net.ipv4.fib_multipath_hash_policy=1")
+
+ sleep(30)
+
+ # Basic sanity check to make sure test is not failing due to
+ # setup issues.
+ ping(ctl, m1_if1, m2_if2)
+
+ if2_pre = sw_if2.link_stats()["tx_packets"]
+ if3_pre = sw_if3.link_stats()["tx_packets"]
+
+ # Send different flows from m1 to m2, so that traffic is hashed
+ # according to provided weights.
+ pktgen(ctl, m1_if1, m2_if2, sw_if1.get_hwaddr())
+
+ if2_post = sw_if2.link_stats()["tx_packets"]
+ if3_post = sw_if3.link_stats()["tx_packets"]
+
+ check_res(tl, m1, weight0, weight1, if2_post - if2_pre, if3_post - if3_pre)
+
+do_task(ctl, [ctl.get_host("machine1"),
+ ctl.get_host("machine2"),
+ ctl.get_host("switch")],
+ [ctl.get_host("machine1").get_interface("if1"),
+ ctl.get_host("machine2").get_interface("if1"),
+ ctl.get_host("machine2").get_interface("if2"),
+ ctl.get_host("switch").get_interface("if1"),
+ ctl.get_host("switch").get_interface("if2"),
+ ctl.get_host("switch").get_interface("if3")],
+ ctl.get_aliases())
diff --git a/recipes/switchdev/l3-011-nexthop4-weights.xml b/recipes/switchdev/l3-011-nexthop4-weights.xml
new file mode 100644
index 0000000..b48db7e
--- /dev/null
+++ b/recipes/switchdev/l3-011-nexthop4-weights.xml
@@ -0,0 +1,60 @@
+<lnstrecipe xmlns:xi="http://www.w3.org/2003/XInclude">
+ <xi:include href="default_aliases.xml" />
+ <define>
+ <alias name="ipv" value="ipv4" />
+ <alias name="subnet0" value="192.168.100" />
+ <alias name="subnet1" value="192.168.101" />
+ <alias name="subnet2" value="192.168.102" />
+ <alias name="subnet3" value="192.168.103" />
+ <alias name="weight0" value="45" />
+ <alias name="weight1" value="11" />
+ </define>
+ <network>
+ <host id="machine1">
+ <params/>
+ <interfaces>
+ <eth id="if1" label="A">
+ <addresses>
+ <address>{$subnet0}.2/24</address>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ <host id="machine2">
+ <params/>
+ <interfaces>
+ <eth id="if1" label="B">
+ <addresses>
+ <address>{$subnet1}.2/24</address>
+ </addresses>
+ </eth>
+ <eth id="if2" label="C">
+ <addresses>
+ <address>{$subnet2}.2/24</address>
+ <address>{$subnet3}.1/24</address>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ <host id="switch">
+ <interfaces>
+ <eth id="if1" label="A">
+ <addresses>
+ <address>{$subnet0}.1/24</address>
+ </addresses>
+ </eth>
+ <eth id="if2" label="B">
+ <addresses>
+ <address>{$subnet1}.1/24</address>
+ </addresses>
+ </eth>
+ <eth id="if3" label="C">
+ <addresses>
+ <address>{$subnet2}.1/24</address>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ </network>
+ <task python="l3-011-nexthop4-weights.py" />
+</lnstrecipe>
--
2.13.6
6 years, 1 month
[PATCH lnst 01/11] IcmpPing, Icmp6Ping: Support ttl
by Petr Machata
Allow passing of the -t (TTL) option to ping and ping6, respectively.
Signed-off-by: Petr Machata <petrm(a)mellanox.com>
---
recipes/switchdev/ipip_common.py | 19 +++++++++++--------
test_modules/Icmp6Ping.py | 4 ++++
test_modules/IcmpPing.py | 3 +++
3 files changed, 18 insertions(+), 8 deletions(-)
diff --git a/recipes/switchdev/ipip_common.py b/recipes/switchdev/ipip_common.py
index 8d114ad..c1b07c1 100644
--- a/recipes/switchdev/ipip_common.py
+++ b/recipes/switchdev/ipip_common.py
@@ -15,18 +15,21 @@ from TestLib import route
def ping_test(tl, m1, sw, addr, m1_if1, gre,
require_fastpath=True, fail_expected=False, count=100,
- ipv6=False):
+ ipv6=False, ttl=None):
limit = int(0.9 * count)
if gre is not None:
before_stats = gre.link_stats()["rx_packets"]
+ options = {
+ "addr": addr,
+ "count": count,
+ "interval": 0.2,
+ "iface" : m1_if1.get_devname(),
+ "limit_rate": limit,
+ }
+ if ttl is not None:
+ options["ttl"] = ttl
ping_mod = ctl.get_module("IcmpPing" if not ipv6 else "Icmp6Ping",
- options={
- "addr": addr,
- "count": count,
- "interval": 0.2,
- "iface" : m1_if1.get_devname(),
- "limit_rate": limit,
- })
+ options)
m1.run(ping_mod, fail_expected=fail_expected)
if not fail_expected and gre is not None:
diff --git a/test_modules/Icmp6Ping.py b/test_modules/Icmp6Ping.py
index 405c756..7c3a88c 100644
--- a/test_modules/Icmp6Ping.py
+++ b/test_modules/Icmp6Ping.py
@@ -32,6 +32,10 @@ class Icmp6Ping(TestGeneric):
if size:
cmd += " -s %s" % size
+ ttl = self.get_opt("ttl")
+ if ttl:
+ cmd += " -t %s" % ttl
+
return cmd
def run(self):
diff --git a/test_modules/IcmpPing.py b/test_modules/IcmpPing.py
index 5b9d888..fb4d094 100644
--- a/test_modules/IcmpPing.py
+++ b/test_modules/IcmpPing.py
@@ -31,6 +31,9 @@ class IcmpPing(TestGeneric):
size = self.get_opt("size")
if size:
cmd += " -s %s" % size
+ ttl = self.get_opt("ttl")
+ if ttl:
+ cmd += " -t %s" % ttl
return cmd
def run(self):
--
2.4.11
6 years, 1 month
[PATCH lnst 11/11] recipes: switchdev: ipip: Bound device tests
by Petr Machata
- Test reseating a tunnel to a new bound device
- Test bound device up/down events. This test is split between two test
files: ipip-010-gre-hier-change, where the end-to-end flow is confirmed
to be down when the bound device is down, and ipip-006-gre-decap, where
the decap-only flow is confirmed to still work despite the bound device
being down.
- Test that when a tunnel is created over a down bound device, it is
created in the same state as if it had been created over an up bound
device that was later downed.
Signed-off-by: Petr Machata <petrm(a)mellanox.com>
---
recipes/switchdev/ipip-006-gre-decap.py | 52 ++++++++++++----
recipes/switchdev/ipip-010-gre-hier-change.py | 87 +++++++++++++++++++++++++++
2 files changed, 127 insertions(+), 12 deletions(-)
diff --git a/recipes/switchdev/ipip-006-gre-decap.py b/recipes/switchdev/ipip-006-gre-decap.py
index 4beec67..217709a 100644
--- a/recipes/switchdev/ipip-006-gre-decap.py
+++ b/recipes/switchdev/ipip-006-gre-decap.py
@@ -28,7 +28,8 @@ def do_task(ctl, hosts, ifaces, aliases):
vrf_None = None
tl = TestLib(ctl, aliases)
- logging.info("=== Decap-only flow in default VRF")
+ logging.info("=== Decap-only flow tests")
+ logging.info("--- default VRF")
with encap_route(m2, vrf_None, 1, "gre1",
ip=ipv4, src=ipv4(test_ip(2, 33, []))), \
encap_route(m2, vrf_None, 1, "gre1", ip=ipv6), \
@@ -44,24 +45,51 @@ def do_task(ctl, hosts, ifaces, aliases):
ping_test(tl, m2, sw, ipv6(test_ip(1, 33, [])), m2_if1_10, g, ipv6=True)
ping_test(tl, m2, sw, ipv4(test_ip(1, 33, [])), m2_if1_10, g)
- logging.info("=== Decap-only flow in hierarchical configuration")
- with encap_route(m2, vrf_None, 1, "gre1",
- ip=ipv4, src=ipv4(test_ip(2, 33, []))), \
- encap_route(m2, vrf_None, 1, "gre1", ip=ipv6), \
- vrf(sw) as vrf_u, \
+ with vrf(sw) as vrf_u, \
vrf(sw) as vrf_o, \
dummy(sw, vrf_u, ip=["1.2.3.4/32"]) as d, \
- gre(sw, d, vrf_o,
- tos="inherit",
- local_ip="1.2.3.4",
- remote_ip="1.2.3.5") as g:
+ encap_route(m2, vrf_None, 1, "gre1",
+ ip=ipv4, src=ipv4(test_ip(2, 33, []))), \
+ encap_route(m2, vrf_None, 1, "gre1", ip=ipv6):
connect_host_ifaces(sw, sw_if1_10, vrf_o, sw_if2_10, vrf_u)
refresh_addrs(sw, sw_if1_10)
add_forward_route(sw, vrf_u, "1.2.3.5")
- ping_test(tl, m2, sw, ipv6(test_ip(1, 33, [])), m2_if1_10, g, ipv6=True)
- ping_test(tl, m2, sw, ipv4(test_ip(1, 33, [])), m2_if1_10, g)
+ with gre(sw, d, vrf_o,
+ tos="inherit",
+ local_ip="1.2.3.4",
+ remote_ip="1.2.3.5") as g:
+
+ logging.info("--- hierarchical configuration")
+ sleep(15)
+
+ ping_test(tl, m2, sw, ipv6(test_ip(1, 33, [])), m2_if1_10, g, ipv6=True)
+ ping_test(tl, m2, sw, ipv4(test_ip(1, 33, [])), m2_if1_10, g)
+
+ # Make sure that downing an underlay device doesn't make the decap flow
+ # stop working. There is a complementary test in ipip-010 to test that
+ # encap stops working.
+ logging.info("--- set an underlay down")
+ d.set_link_down()
+ sleep(5)
+
+ ping_test(tl, m2, sw, ipv6(test_ip(1, 33, [])), m2_if1_10, g, ipv6=True)
+ ping_test(tl, m2, sw, ipv4(test_ip(1, 33, [])), m2_if1_10, g)
+
+ # Make sure that when a newly-created tunnel has a down underlay, decap
+ # still works. There's a complementary test in ipip-010 to test that
+ # encap doesn't work in that scenario.
+ logging.info("--- create tunnel with a down underlay")
+ d.set_link_down() # Should be down already, but make this robust against
+ # later coding changes.
+ with gre(sw, d, vrf_o,
+ tos="inherit",
+ local_ip="1.2.3.4",
+ remote_ip="1.2.3.5") as g:
+
+ ping_test(tl, m2, sw, ipv6(test_ip(1, 33, [])), m2_if1_10, g, ipv6=True)
+ ping_test(tl, m2, sw, ipv4(test_ip(1, 33, [])), m2_if1_10, g)
do_task(ctl, [ctl.get_host("machine1"),
ctl.get_host("machine2"),
diff --git a/recipes/switchdev/ipip-010-gre-hier-change.py b/recipes/switchdev/ipip-010-gre-hier-change.py
index 1552daf..d709360 100644
--- a/recipes/switchdev/ipip-010-gre-hier-change.py
+++ b/recipes/switchdev/ipip-010-gre-hier-change.py
@@ -32,12 +32,99 @@ def do_task(ctl, hosts, ifaces, aliases):
logging.info("=== Hierarchical configuration, 'ip t change'")
with vrf(sw) as vrf_u, \
vrf(sw) as vrf_o, \
+ vrf(sw) as vrf3, \
dummy(sw, vrf_u, ip=["1.2.3.4/32"]) as d:
connect_host_ifaces(sw, sw_if1, vrf_o, sw_if2, vrf_u)
sw_if1.reset()
sw_if2.reset()
add_forward_route(sw, vrf_u, "1.2.3.5")
+ logging.info("--- create tunnel with a down underlay")
+ d.set_link_down()
+ with encap_route(m2, vrf_None, 1, "gre1", ip=ipv4), \
+ encap_route(m2, vrf_None, 1, "gre1", ip=ipv6), \
+ gre(sw, d, vrf_o,
+ tos="inherit",
+ local_ip="1.2.3.4",
+ remote_ip="1.2.3.5") as g, \
+ encap_route(sw, vrf_o, 2, g, ip=ipv4), \
+ encap_route(sw, vrf_o, 2, g, ip=ipv6):
+
+ sleep(15)
+ ping_test(tl, m1, sw, ipv6(test_ip(2, 33, [])), m1_if1, g,
+ count=100, fail_expected=True, ipv6=True)
+ ping_test(tl, m1, sw, ipv4(test_ip(2, 33, [])), m1_if1, g,
+ count=100, fail_expected=True)
+
+ d.set_link_up()
+ with encap_route(m2, vrf_None, 1, "gre1", ip=ipv4), \
+ encap_route(m2, vrf_None, 1, "gre1", ip=ipv6), \
+ dummy(sw, vrf3, ip=["1.2.3.4/32"]) as d2, \
+ gre(sw, d2, vrf_o,
+ tos="inherit",
+ local_ip="1.2.3.4",
+ remote_ip="1.2.3.5") as g, \
+ encap_route(sw, vrf_o, 2, g, ip=ipv4), \
+ encap_route(sw, vrf_o, 2, g, ip=ipv6):
+
+ sleep(15)
+ ping_test(tl, m1, sw, ipv6(test_ip(2, 33, [])), m1_if1, g,
+ count=25, fail_expected=True, ipv6=True)
+ ping_test(tl, m1, sw, ipv4(test_ip(2, 33, [])), m1_if1, g,
+ count=25, fail_expected=True)
+
+ logging.info("--- change of bound device")
+ sw.run("ip t change name %s dev %s"
+ % (g.get_devname(), d.get_devname()))
+
+ sleep(5)
+ ping_test(tl, m1, sw, ipv6(test_ip(2, 33, [])), m1_if1, g,
+ ipv6=True)
+ ping_test(tl, m1, sw, ipv4(test_ip(2, 33, [])), m1_if1, g)
+
+ logging.info("--- bound device up/down")
+ # Now change back to `d2', set `d' down and change to it again.
+ # Traffic shouldn't flow.
+ # There's a complementary test in ipip-006 to make sure that
+ # decap-only flow still works even if bound device is down.
+ sw.run("ip t change name %s dev %s"
+ % (g.get_devname(), d2.get_devname()))
+ d.set_link_down()
+ sleep(5)
+
+ sw.run("ip t change name %s dev %s"
+ % (g.get_devname(), d.get_devname()))
+
+ sleep(5)
+ ping_test(tl, m1, sw, ipv6(test_ip(2, 33, [])), m1_if1, g,
+ count=25, fail_expected=True, ipv6=True)
+ ping_test(tl, m1, sw, ipv4(test_ip(2, 33, [])), m1_if1, g,
+ count=25, fail_expected=True)
+
+ # Set `d' up while it's the bound device. Traffic should flow again.
+ d.set_link_up()
+ sleep(5)
+
+ ping_test(tl, m1, sw, ipv6(test_ip(2, 33, [])), m1_if1, g,
+ ipv6=True)
+ ping_test(tl, m1, sw, ipv4(test_ip(2, 33, [])), m1_if1, g)
+
+ # Do some more of flip-flopping to make sure it's stable.
+ d.set_link_down()
+ sleep(5)
+
+ ping_test(tl, m1, sw, ipv6(test_ip(2, 33, [])), m1_if1, g,
+ count=25, fail_expected=True, ipv6=True)
+ ping_test(tl, m1, sw, ipv4(test_ip(2, 33, [])), m1_if1, g,
+ count=25, fail_expected=True)
+
+ d.set_link_up()
+ sleep(5)
+
+ ping_test(tl, m1, sw, ipv6(test_ip(2, 33, [])), m1_if1, g,
+ ipv6=True)
+ ping_test(tl, m1, sw, ipv4(test_ip(2, 33, [])), m1_if1, g)
+
logging.info("--- remote change")
with encap_route(m2, vrf_None, 1, "gre1", ip=ipv4), \
encap_route(m2, vrf_None, 1, "gre1", ip=ipv6), \
--
2.4.11
6 years, 1 month
[PATCH lnst 10/11] recipes: switchdev: ipip: Test change of tunnel
remote address
by Petr Machata
Signed-off-by: Petr Machata <petrm(a)mellanox.com>
---
recipes/switchdev/ipip-010-gre-hier-change.py | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
diff --git a/recipes/switchdev/ipip-010-gre-hier-change.py b/recipes/switchdev/ipip-010-gre-hier-change.py
index 751fc61..1552daf 100644
--- a/recipes/switchdev/ipip-010-gre-hier-change.py
+++ b/recipes/switchdev/ipip-010-gre-hier-change.py
@@ -38,6 +38,29 @@ def do_task(ctl, hosts, ifaces, aliases):
sw_if2.reset()
add_forward_route(sw, vrf_u, "1.2.3.5")
+ logging.info("--- remote change")
+ with encap_route(m2, vrf_None, 1, "gre1", ip=ipv4), \
+ encap_route(m2, vrf_None, 1, "gre1", ip=ipv6), \
+ gre(sw, d, vrf_o,
+ tos="inherit",
+ local_ip="1.2.3.4",
+ remote_ip="1.2.3.7") as g, \
+ encap_route(sw, vrf_o, 2, g, ip=ipv4), \
+ encap_route(sw, vrf_o, 2, g, ip=ipv6):
+
+ sleep(15)
+ ping_test(tl, m1, sw, ipv6(test_ip(2, 33, [])), m1_if1, g,
+ count=25, fail_expected=True, ipv6=True)
+ ping_test(tl, m1, sw, ipv4(test_ip(2, 33, [])), m1_if1, g,
+ count=25, fail_expected=True)
+
+ sw.run("ip t change name %s remote 1.2.3.5" % g.get_devname())
+
+ sleep(5)
+ ping_test(tl, m1, sw, ipv6(test_ip(2, 33, [])), m1_if1, g,
+ ipv6=True)
+ ping_test(tl, m1, sw, ipv4(test_ip(2, 33, [])), m1_if1, g)
+
logging.info("--- local change")
with encap_route(m2, vrf_None, 1, "gre1", ip=ipv4), \
encap_route(m2, vrf_None, 1, "gre1", ip=ipv6), \
--
2.4.11
6 years, 1 month
[PATCH lnst 09/11] recipes: switchdev: ipip: Test change of tunnel
local address
by Petr Machata
Add two tests: one to test a change in the local address as such, and
another that tests resolution of a conflict resulting from such a change.
Signed-off-by: Petr Machata <petrm(a)mellanox.com>
---
recipes/switchdev/ipip-010-gre-hier-change.py | 58 +++++++++++++++++++++++++++
1 file changed, 58 insertions(+)
diff --git a/recipes/switchdev/ipip-010-gre-hier-change.py b/recipes/switchdev/ipip-010-gre-hier-change.py
index 63bb9a4..751fc61 100644
--- a/recipes/switchdev/ipip-010-gre-hier-change.py
+++ b/recipes/switchdev/ipip-010-gre-hier-change.py
@@ -38,6 +38,64 @@ def do_task(ctl, hosts, ifaces, aliases):
sw_if2.reset()
add_forward_route(sw, vrf_u, "1.2.3.5")
+ logging.info("--- local change")
+ with encap_route(m2, vrf_None, 1, "gre1", ip=ipv4), \
+ encap_route(m2, vrf_None, 1, "gre1", ip=ipv6), \
+ gre(sw, d, vrf_o,
+ tos="inherit",
+ local_ip="1.2.3.6",
+ remote_ip="1.2.3.5") as g, \
+ encap_route(sw, vrf_o, 2, g, ip=ipv4), \
+ encap_route(sw, vrf_o, 2, g, ip=ipv6):
+
+ sleep(15)
+ ping_test(tl, m1, sw, ipv6(test_ip(2, 33, [])), m1_if1, g,
+ count=25, fail_expected=True, ipv6=True)
+ ping_test(tl, m1, sw, ipv4(test_ip(2, 33, [])), m1_if1, g,
+ count=25, fail_expected=True)
+
+ sw.run("ip t change name %s local 1.2.3.4" % g.get_devname())
+
+ sleep(5)
+ ping_test(tl, m1, sw, ipv6(test_ip(2, 33, [])), m1_if1, g,
+ ipv6=True)
+ ping_test(tl, m1, sw, ipv4(test_ip(2, 33, [])), m1_if1, g)
+
+ # IPv4 should go through g1, IPv6 through g2, but g2 starts out
+ # misconfigured. Thus both g1 and g2 are offloaded. When the
+ # configuration of g2 is fixed, both tunnels are forced to slow path,
+ # but now they both work.
+ logging.info("--- local change conflict")
+ with encap_route(m2, vrf_None, 1, "gre1", ip=ipv4), \
+ dummy(sw, vrf_u, ip=["1.2.3.6/32"]) as d4, \
+ gre(sw, d4, vrf_o,
+ tos="inherit",
+ local_ip="1.2.3.6",
+ remote_ip="1.2.3.5") as g4, \
+ encap_route(sw, vrf_o, 2, g4, ip=ipv4), \
+ \
+ encap_route(m2, vrf_None, 1, "gre2", ip=ipv6), \
+ gre(sw, d, vrf_o,
+ tos="inherit",
+ local_ip="1.2.3.4",
+ remote_ip="1.2.3.5",
+ ikey=2222, okey=1111) as g6, \
+ encap_route(sw, vrf_o, 2, g6, ip=ipv6):
+
+ sleep(15)
+ ping_test(tl, m1, sw, ipv6(test_ip(2, 33, [])), m1_if1, g6,
+ count=25, ipv6=True)
+ ping_test(tl, m1, sw, ipv4(test_ip(2, 33, [])), m1_if1, g4,
+ count=25, fail_expected=True)
+
+ sw.run("ip t change name %s local 1.2.3.4" % g4.get_devname())
+
+ sleep(5)
+ ping_test(tl, m1, sw, ipv6(test_ip(2, 33, [])), m1_if1, g6,
+ ipv6=True, require_fastpath=False)
+ ping_test(tl, m1, sw, ipv4(test_ip(2, 33, [])), m1_if1, g4,
+ require_fastpath=False)
+
logging.info("--- ikey change")
with encap_route(m2, vrf_None, 1, "gre2", ip=ipv4), \
encap_route(m2, vrf_None, 1, "gre2", ip=ipv6), \
--
2.4.11
6 years, 1 month
[PATCH lnst 08/11] recipes: switchdev: ipip: Test TTL of a IP-in-IP
tunnel
by Petr Machata
This test file keeps tests related to the TTL setting of IP-in-IP tunnels.
Currently it tests whether a TTL of "inherit" on a GRE tunnel actually
copies the TTL of a tunneled packet from the overlay, and whether changing
the TTL of a tunnel then correctly adjusts the offload.
Signed-off-by: Petr Machata <petrm(a)mellanox.com>
---
recipes/switchdev/ipip-011-gre-hier-change-ttl.py | 124 +++++++++++++++++++++
recipes/switchdev/ipip-011-gre-hier-change-ttl.xml | 79 +++++++++++++
2 files changed, 203 insertions(+)
create mode 100644 recipes/switchdev/ipip-011-gre-hier-change-ttl.py
create mode 100644 recipes/switchdev/ipip-011-gre-hier-change-ttl.xml
diff --git a/recipes/switchdev/ipip-011-gre-hier-change-ttl.py b/recipes/switchdev/ipip-011-gre-hier-change-ttl.py
new file mode 100644
index 0000000..e114c98
--- /dev/null
+++ b/recipes/switchdev/ipip-011-gre-hier-change-ttl.py
@@ -0,0 +1,124 @@
+"""
+Copyright 2017 Mellanox Technologies. All rights reserved.
+Licensed under the GNU General Public License, version 2 as
+published by the Free Software Foundation; see COPYING for details.
+"""
+
+__author__ = """
+petrm(a)mellanox.com (Petr Machata)
+"""
+
+from lnst.Controller.Task import ctl
+from TestLib import TestLib, vrf, dummy, gre
+from ipip_common import ping_test, encap_route, \
+ add_forward_route, connect_host_ifaces, \
+ test_ip, ipv4, ipv6
+from time import sleep
+import logging
+
+def do_task(ctl, hosts, ifaces, aliases):
+ m1, m2, sw = hosts
+ m1_if1, m2_if1, m2_mg, m2_v3, sw_if1, sw_if2 = ifaces
+
+ m2.config("/proc/sys/net/ipv4/ip_forward", "1", netns="ns1")
+ m2.config("/proc/sys/net/ipv4/ip_forward", "1", netns="ns2")
+
+ m1_if1.add_nhs_route(ipv4(test_ip(2, 0)), [ipv4(test_ip(1, 1, []))])
+ m1_if1.add_nhs_route(ipv6(test_ip(2, 0)), [ipv6(test_ip(1, 1, []))])
+ m2_if1.add_nhs_route("1.2.3.4/32", [ipv4(test_ip(99, 1, []))])
+ m2_if1.add_nhs_route("1.2.3.5/32", [ipv4(test_ip(88, 2, []))])
+ m2_v3.add_nhs_route("1.2.3.4/32", [ipv4(test_ip(88, 1, []))])
+ m2_v3.add_nhs_route("1.2.3.5/32", [ipv4(test_ip(77, 2, []))])
+ m2_mg.add_nhs_route("1.2.3.4/32", [ipv4(test_ip(77, 1, []))])
+
+ vrf_None = None
+ tl = TestLib(ctl, aliases)
+ sw_if1.reset(ip=test_ip(1, 1))
+ sw_if2.reset(ip=test_ip(99,1))
+
+ logging.info("=== TTL tests")
+ with vrf(sw) as vrf_u, \
+ vrf(sw) as vrf_o, \
+ dummy(sw, vrf_u, ip=["1.2.3.4/32"]) as d, \
+ encap_route(m2, vrf_None, 1, "mg", ip=ipv4), \
+ encap_route(m2, vrf_None, 1, "mg", ip=ipv6):
+ connect_host_ifaces(sw, sw_if1, vrf_o, sw_if2, vrf_u)
+ sw_if1.reset()
+ sw_if2.reset()
+ add_forward_route(sw, vrf_u, "1.2.3.5", via=ipv4(test_ip(99, 2, [])))
+
+ # - Test that tunnel configured with TTL inherit actually copies the TTL
+ # from the overlay packet. The topology is as follows:
+ #
+ # +-- M1 ----------------+ +-- M2 ----------------+
+ # | 1.33/24 +--|----. | |
+ # | | | | 2.33/32 md + |
+ # +----------------------+ | | 1.2.3.5/31 mg + |
+ # +-- SW -------------------------+ | + 77.2 |
+ # | | | | | |
+ # | +-- ol vrf -----------------+ | | +-- ns2 -----------+ |
+ # | | 1.2.3.4/31 g + | | | | | | 88.2 + | |
+ # | | | 1.1/24 + | | | | + 77.1 | | |
+ # | +---------------------------+ | | +------------------+ |
+ # | | | | | |
+ # | +-- ul vrf -----------------+ | | +-- ns1 -----------+ |
+ # | | | +--|-|----|-|--+ | | |
+ # | | 1.2.3.4/32 d + 99.1/24 | | | | 99.2/24 88.1 + | |
+ # | +---------------------------+ | | +------------------+ |
+ # +-------------------------------+ +----------------------+
+ #
+ # The point of the test is that there are several next hops between
+ # 1.2.3.4 and 1.2.3.5. If the tunnel is set to "ttl inherit", ping
+ # with TTL of 2 (ping -t 2) never reaches the other endpoint, but ping
+ # with TTL of 3 does.
+ with dummy(sw, vrf_u) as d, \
+ gre(sw, d, vrf_o,
+ tos="inherit",
+ local_ip="1.2.3.4",
+ remote_ip="1.2.3.5") as g, \
+ encap_route(sw, vrf_o, 2, g, ip=ipv4), \
+ encap_route(sw, vrf_o, 2, g, ip=ipv6):
+
+ logging.info("--- TTL inherit")
+
+ sleep(15)
+ ping_test(tl, m1, sw, ipv4(test_ip(2, 33, [])), m1_if1, g,
+ count=25, ttl=2, fail_expected=True)
+
+ sleep(5)
+ ping_test(tl, m1, sw, ipv4(test_ip(2, 33, [])), m1_if1, g, ttl=4)
+
+ # - On the same topology, after offloading a tunnel with "ttl
+ # inherit", set the tunnel to e.g. "ttl 64". Now the other
+ # endpoint should become reachable again even with ping -t 2. Thus
+ # we know that the tunnel was moved to slow path correcly (or the
+ # TTL was reflected in the hardware, we don't care).
+ #
+ # - A similar test could be done for TOS:
+ # - On m2, install tc to drop packets with TOS of 0x20.
+ # - Configure offloaded tunnel with TOS inherit. ping -Q 0x40,
+ # that should work.
+ # - ping -Q 0x20, that shouldn't work.
+ # - Change TOS on tunnel to 0x40, ping -Q 0x20, which should now
+ # work.
+ #
+ # However, tc doesn't match on ip_tos until kernel 4.14, which is too
+ # recent as of this writing.
+
+ logging.info("--- ip t change ttl")
+ sw.run("ip t change name %s ttl 64" % g.get_devname())
+
+ sleep(5)
+ ping_test(tl, m1, sw, ipv4(test_ip(2, 33, [])), m1_if1, g,
+ ttl=2, require_fastpath=False)
+
+do_task(ctl, [ctl.get_host("machine1"),
+ ctl.get_host("machine2"),
+ ctl.get_host("switch")],
+ [ctl.get_host("machine1").get_interface("if1"),
+ ctl.get_host("machine2").get_interface("if1"),
+ ctl.get_host("machine2").get_interface("mg"),
+ ctl.get_host("machine2").get_interface("v3"),
+ ctl.get_host("switch").get_interface("if1"),
+ ctl.get_host("switch").get_interface("if2")],
+ ctl.get_aliases())
diff --git a/recipes/switchdev/ipip-011-gre-hier-change-ttl.xml b/recipes/switchdev/ipip-011-gre-hier-change-ttl.xml
new file mode 100644
index 0000000..3d7f653
--- /dev/null
+++ b/recipes/switchdev/ipip-011-gre-hier-change-ttl.xml
@@ -0,0 +1,79 @@
+<lnstrecipe xmlns:xi="http://www.w3.org/2003/XInclude">
+ <xi:include href="default_aliases.xml" />
+ <define>
+ <alias name="onet1" value="192.168.1"/>
+ <alias name="onet2" value="192.168.2"/>
+ <alias name="o6net1" value="2002:1"/>
+ <alias name="o6net2" value="2002:2"/>
+ <alias name="unet1" value="192.168.99"/>
+ <alias name="unet2" value="192.168.88"/>
+ <alias name="unet3" value="192.168.77"/>
+ </define>
+ <network>
+ <host id="machine1">
+ <params/>
+ <interfaces>
+ <eth id="if1" label="A">
+ <addresses>
+ <address value="{$onet1}.33/24" />
+ <address value="{$o6net1}::33/64" />
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ <host id="machine2">
+ <params/>
+ <interfaces>
+ <dummy id="md">
+ <addresses>
+ <address value="1.2.3.5/32" />
+ <address value="{$onet2}.33/32" />
+ <address value="{$o6net2}::33/128" />
+ </addresses>
+ </dummy>
+ <gre id="mg">
+ <options>
+ <option name="local_ip" value="1.2.3.5"/>
+ <option name="remote_ip" value="1.2.3.4"/>
+ </options>
+ </gre>
+ <veth_pair>
+ <veth id="v4">
+ <addresses>
+ <address value="{$unet3}.2/24" />
+ </addresses>
+ </veth>
+ <veth id="v3" netns="ns2">
+ <addresses>
+ <address value="{$unet3}.1/24" />
+ </addresses>
+ </veth>
+ </veth_pair>
+ <veth_pair>
+ <veth id="v2" netns="ns2">
+ <addresses>
+ <address value="{$unet2}.2/24" />
+ </addresses>
+ </veth>
+ <veth id="v1" netns="ns1">
+ <addresses>
+ <address value="{$unet2}.1/24" />
+ </addresses>
+ </veth>
+ </veth_pair>
+ <eth id="if1" label="B" netns="ns1">
+ <addresses>
+ <address value="{$unet1}.2/24" />
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ <host id="switch">
+ <interfaces>
+ <eth id="if1" label="A" />
+ <eth id="if2" label="B" />
+ </interfaces>
+ </host>
+ </network>
+ <task python="ipip-011-gre-hier-change-ttl.py" />
+</lnstrecipe>
--
2.4.11
6 years, 1 month
[PATCH lnst 07/11] recipes: switchdev: ipip: Test change of GRE ikey,
okey
by Petr Machata
Signed-off-by: Petr Machata <petrm(a)mellanox.com>
---
recipes/switchdev/ipip-010-gre-hier-change.py | 96 ++++++++++++++++++++++++++
recipes/switchdev/ipip-010-gre-hier-change.xml | 12 ++++
2 files changed, 108 insertions(+)
create mode 100644 recipes/switchdev/ipip-010-gre-hier-change.py
create mode 100644 recipes/switchdev/ipip-010-gre-hier-change.xml
diff --git a/recipes/switchdev/ipip-010-gre-hier-change.py b/recipes/switchdev/ipip-010-gre-hier-change.py
new file mode 100644
index 0000000..63bb9a4
--- /dev/null
+++ b/recipes/switchdev/ipip-010-gre-hier-change.py
@@ -0,0 +1,96 @@
+"""
+Copyright 2017 Mellanox Technologies. All rights reserved.
+Licensed under the GNU General Public License, version 2 as
+published by the Free Software Foundation; see COPYING for details.
+"""
+
+__author__ = """
+petrm(a)mellanox.com (Petr Machata)
+"""
+
+from lnst.Controller.Task import ctl
+from TestLib import TestLib, vrf, dummy, gre
+from ipip_common import ping_test, encap_route, \
+ add_forward_route, connect_host_ifaces, \
+ test_ip, ipv4, ipv6
+from time import sleep
+import logging
+
+def do_task(ctl, hosts, ifaces, aliases):
+    # Verify that a GRE tunnel's ikey/okey can be changed at runtime with
+    # "ip tunnel change" in a hierarchical (overlay/underlay VRF)
+    # configuration: pings fail while the switch-side key mismatches the
+    # peer, and recover once the key is changed to the expected value.
+    m1, m2, sw = hosts
+    m1_if1, m2_if1, sw_if1, sw_if2 = ifaces
+
+    # Static routes: m1 reaches overlay net 2 via the switch; m2 reaches
+    # the tunnel endpoint 1.2.3.4 via the underlay (net 99) next hop.
+    m1_if1.add_nhs_route(ipv4(test_ip(2, 0)), [ipv4(test_ip(1, 1, []))])
+    m1_if1.add_nhs_route(ipv6(test_ip(2, 0)), [ipv6(test_ip(1, 1, []))])
+    m2_if1.add_nhs_route("1.2.3.4/32", [ipv4(test_ip(99, 1, []))])
+
+    vrf_None = None  # m2-side encap routes are installed outside any VRF
+    tl = TestLib(ctl, aliases)
+    sw_if1.reset(ip=test_ip(1, 1))
+    sw_if2.reset(ip=test_ip(99,1))
+
+    logging.info("=== Hierarchical configuration, 'ip t change'")
+    # Overlay VRF (vrf_o) will hold the GRE device; underlay VRF (vrf_u)
+    # holds the dummy carrying the local tunnel address 1.2.3.4.
+    with vrf(sw) as vrf_u, \
+        vrf(sw) as vrf_o, \
+        dummy(sw, vrf_u, ip=["1.2.3.4/32"]) as d:
+        connect_host_ifaces(sw, sw_if1, vrf_o, sw_if2, vrf_u)
+        # Moving the ports into VRFs flushed their config; re-apply it.
+        sw_if1.reset()
+        sw_if2.reset()
+        add_forward_route(sw, vrf_u, "1.2.3.5")
+
+        logging.info("--- ikey change")
+        # Start with ikey=2, which presumably does not match what the
+        # m2-side "gre2" device sends (TODO confirm against the topology),
+        # so the initial pings are expected to fail.
+        with encap_route(m2, vrf_None, 1, "gre2", ip=ipv4), \
+             encap_route(m2, vrf_None, 1, "gre2", ip=ipv6), \
+             gre(sw, d, vrf_o,
+                 tos="inherit",
+                 local_ip="1.2.3.4",
+                 remote_ip="1.2.3.5",
+                 ikey=2, okey=1111) as g, \
+             encap_route(sw, vrf_o, 2, g, ip=ipv4), \
+             encap_route(sw, vrf_o, 2, g, ip=ipv6):
+
+            # Give the configuration time to settle before probing.
+            sleep(15)
+            ping_test(tl, m1, sw, ipv6(test_ip(2, 33, [])), m1_if1, g,
+                      count=25, fail_expected=True, ipv6=True)
+            ping_test(tl, m1, sw, ipv4(test_ip(2, 33, [])), m1_if1, g,
+                      count=25, fail_expected=True)
+
+            # Fix the ikey in place; traffic should now pass both ways.
+            sw.run("ip t change name %s ikey 2222" % g.get_devname())
+
+            sleep(5)
+            ping_test(tl, m1, sw, ipv6(test_ip(2, 33, [])), m1_if1, g,
+                      ipv6=True)
+            ping_test(tl, m1, sw, ipv4(test_ip(2, 33, [])), m1_if1, g)
+
+        logging.info("--- okey change")
+        # Same exercise for the output key: start with okey=1 (mismatched,
+        # pings fail), then change it to 1111 at runtime.
+        with encap_route(m2, vrf_None, 1, "gre2", ip=ipv4), \
+             encap_route(m2, vrf_None, 1, "gre2", ip=ipv6), \
+             gre(sw, d, vrf_o,
+                 tos="inherit",
+                 local_ip="1.2.3.4",
+                 remote_ip="1.2.3.5",
+                 ikey=2222, okey=1) as g, \
+             encap_route(sw, vrf_o, 2, g, ip=ipv4), \
+             encap_route(sw, vrf_o, 2, g, ip=ipv6):
+
+            sleep(15)
+            ping_test(tl, m1, sw, ipv6(test_ip(2, 33, [])), m1_if1, g,
+                      count=25, fail_expected=True, ipv6=True)
+            ping_test(tl, m1, sw, ipv4(test_ip(2, 33, [])), m1_if1, g,
+                      count=25, fail_expected=True)
+
+            sw.run("ip t change name %s okey 1111" % g.get_devname())
+
+            sleep(5)
+            ping_test(tl, m1, sw, ipv6(test_ip(2, 33, [])), m1_if1, g,
+                      ipv6=True)
+            ping_test(tl, m1, sw, ipv4(test_ip(2, 33, [])), m1_if1, g)
+
+do_task(ctl, [ctl.get_host("machine1"),
+ ctl.get_host("machine2"),
+ ctl.get_host("switch")],
+ [ctl.get_host("machine1").get_interface("if1"),
+ ctl.get_host("machine2").get_interface("if1"),
+ ctl.get_host("switch").get_interface("if1"),
+ ctl.get_host("switch").get_interface("if2")],
+ ctl.get_aliases())
diff --git a/recipes/switchdev/ipip-010-gre-hier-change.xml b/recipes/switchdev/ipip-010-gre-hier-change.xml
new file mode 100644
index 0000000..c0af6dc
--- /dev/null
+++ b/recipes/switchdev/ipip-010-gre-hier-change.xml
@@ -0,0 +1,12 @@
+<lnstrecipe xmlns:xi="http://www.w3.org/2003/XInclude">
+  <xi:include href="default_aliases.xml" />
+  <!-- Overlay IPv4/IPv6 network prefixes (onet*/o6net*) and the underlay
+       prefix (unet) consumed by the shared ipip topology include below. -->
+  <define>
+    <alias name="onet1" value="192.168.1"/>
+    <alias name="onet2" value="192.168.2"/>
+    <alias name="o6net1" value="2002:1"/>
+    <alias name="o6net2" value="2002:2"/>
+    <alias name="unet" value="192.168.99"/>
+  </define>
+  <xi:include href="ipip_common_topology.xml" />
+  <task python="ipip-010-gre-hier-change.py" />
+</lnstrecipe>
--
2.4.11
6 years, 1 month