When we changed how the Netperf test module reports results, we silently broke SCTP_STREAM mode. The reason is that SCTP_STREAM does not have omni output selection available (checked with both netperf 2.6.0 and 2.7.0), so SCTP_STREAM data must be parsed separately.
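For reference, a rough sketch of what the non-omni parsing has to handle (the sample line and values below are made up, not taken from a real run; the column layout assumed here matches the classic netperf STREAM banner): the output ends in a data line with the socket sizes, message size, elapsed time and throughput, followed by local/remote CPU utilization when CPU measurement is enabled:

    import re

    # illustrative non-omni data line: recv socket, send socket, message size,
    # elapsed time, throughput (10^3 bits/sec because of -f k), local/remote CPU util
    line = " 87380  16384  16384    10.00    9387.38   12.34   15.67"

    pattern = "\d+\s+\d+\s+\d+\s+\d+\.\d+\s+(\d+(?:\.\d+){0,1})"
    pattern += "\s+(\d+(?:\.\d+){0,1})\s+(\d+(?:\.\d+){0,1})"  # cpu_util columns

    m = re.search(pattern, line.lower())
    print(m.groups())  # ('9387.38', '12.34', '15.67')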
Fixes:
Signed-off-by: Jan Tluka <jtluka@redhat.com>
---
 test_modules/Netperf.py | 67 +++++++++++++++++++++++++++++++++++--------------
 1 file changed, 48 insertions(+), 19 deletions(-)
diff --git a/test_modules/Netperf.py b/test_modules/Netperf.py
index 2033c57..89664fb 100644
--- a/test_modules/Netperf.py
+++ b/test_modules/Netperf.py
@@ -18,6 +18,8 @@ class Netperf(TestGeneric):
     supported_tests = ["TCP_STREAM", "TCP_RR", "UDP_STREAM", "UDP_RR",
                        "SCTP_STREAM", "SCTP_STREAM_MANY", "SCTP_RR"]
 
+    omni_tests = ["TCP_STREAM", "TCP_RR", "UDP_STREAM", "UDP_RR"]
+
     def __init__(self, command):
         super(Netperf, self).__init__(command)
 
@@ -52,13 +54,18 @@ class Netperf(TestGeneric):
         else:
             self._threshold_interval = None
 
+    def _is_omni(self):
+        return self._testname in self.omni_tests
+
     def _compose_cmd(self):
         """
         composes commands for netperf and netserver based on xml recipe
         """
         if self._role == "client":
-            # -P 0 disables banner header of output
-            cmd = "netperf -H %s -f k -P 0" % self._netperf_server
+            cmd = "netperf -H %s -f k" % self._netperf_server
+            if self._is_omni():
+                # -P 0 disables banner header of output
+                cmd += " -P 0"
             if self._port is not None:
                 """
                 client connects on this port
@@ -112,7 +119,8 @@ class Netperf(TestGeneric):
                 cmd += " -s 1"
 
             # Print only relevant output
-            cmd += ' -- -k "THROUGHPUT, LOCAL_CPU_UTIL, REMOTE_CPU_UTIL, CONFIDENCE_LEVEL, THROUGHPUT_CONFID"'
+            if self._is_omni():
+                cmd += ' -- -k "THROUGHPUT, LOCAL_CPU_UTIL, REMOTE_CPU_UTIL, CONFIDENCE_LEVEL, THROUGHPUT_CONFID"'
 
         elif self._role == "server":
             cmd = "netserver -D"
@@ -136,27 +144,48 @@ class Netperf(TestGeneric):
     def _parse_output(self, output):
         res_val = {}
 
-        pattern_throughput = "THROUGHPUT=(\d+\.\d+)"
-        throughput = re.search(pattern_throughput, output)
+        if not self._is_omni():
+            # pattern for SCTP streams and other tests
+            # decimal decimal decimal float (float)
+            pattern = "\d+\s+\d+\s+\d+\s+\d+\.\d+\s+(\d+(?:\.\d+){0,1})"
+            if self._cpu_util:
+                # cpu utilization data in format: float float
+                pattern += "\s+(\d+(?:\.\d+){0,1})\s+(\d+(?:\.\d+){0,1})"
 
-        if throughput is None:
-            rate_in_kb = 0.0
+            r2 = re.search(pattern, output.lower())
+
+            if r2 is None:
+                rate_in_kb = 0.0
+            else:
+                rate_in_kb = float(r2.group(1))
+                if self._cpu_util:
+                    res_val["LOCAL_CPU_UTIL"] = float(r2.group(2))
+                    res_val["REMOTE_CPU_UTIL"] = float(r2.group(3))
+
+            res_val["rate"] = rate_in_kb*1000
+            res_val["unit"] = "bps"
         else:
-            rate_in_kb = float(throughput.group(1))
+            pattern_throughput = "THROUGHPUT=(\d+\.\d+)"
+            throughput = re.search(pattern_throughput, output)
 
-        res_val["rate"] = rate_in_kb*1000
-        res_val["unit"] = "bps"
+            if throughput is None:
+                rate_in_kb = 0.0
+            else:
+                rate_in_kb = float(throughput.group(1))
 
-        if self._cpu_util is not None:
-            if self._cpu_util == "local" or self._cpu_util == "both":
-                pattern_loc_cpu_util = "LOCAL_CPU_UTIL=([-]?\d+\.\d+)"
-                loc_cpu_util = re.search(pattern_loc_cpu_util, output)
-                res_val["LOCAL_CPU_UTIL"] = float(loc_cpu_util.group(1))
+            res_val["rate"] = rate_in_kb*1000
+            res_val["unit"] = "bps"
 
-            if self._cpu_util == "remote" or self._cpu_util == "both":
-                pattern_rem_cpu_util = "REMOTE_CPU_UTIL=([-]?\d+\.\d+)"
-                rem_cpu_util = re.search(pattern_rem_cpu_util, output)
-                res_val["REMOTE_CPU_UTIL"] = float(rem_cpu_util.group(1))
+            if self._cpu_util is not None:
+                if self._cpu_util == "local" or self._cpu_util == "both":
+                    pattern_loc_cpu_util = "LOCAL_CPU_UTIL=([-]?\d+\.\d+)"
+                    loc_cpu_util = re.search(pattern_loc_cpu_util, output)
+                    res_val["LOCAL_CPU_UTIL"] = float(loc_cpu_util.group(1))
+
+                if self._cpu_util == "remote" or self._cpu_util == "both":
+                    pattern_rem_cpu_util = "REMOTE_CPU_UTIL=([-]?\d+\.\d+)"
+                    rem_cpu_util = re.search(pattern_rem_cpu_util, output)
+                    res_val["REMOTE_CPU_UTIL"] = float(rem_cpu_util.group(1))
 
         if self._confidence is not None:
             confidence = self._parse_confidence(output)
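To illustrate the effect on the composed client commands (host, duration and exact option ordering are made up here for illustration), an omni test keeps the suppressed banner and the keyval output selection, while a non-omni test falls back to the classic output that the new regex parses:

    # omni test, e.g. TCP_STREAM
    netperf -H 192.168.100.2 -f k -P 0 -t TCP_STREAM -l 60 \
        -- -k "THROUGHPUT, LOCAL_CPU_UTIL, REMOTE_CPU_UTIL, CONFIDENCE_LEVEL, THROUGHPUT_CONFID"

    # non-omni test, e.g. SCTP_STREAM: no -P 0 and no -k selector
    netperf -H 192.168.100.2 -f k -t SCTP_STREAM -l 60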
Each netperf test takes optional arguments specified after a double dash "--". For example, the UDP_STREAM test takes the -m option to specify the size of the messages sent.
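On the netperf command line such test-specific options go after the double dash, e.g. (host and message size made up):

    netperf -H 192.168.100.2 -t UDP_STREAM -l 60 -- -m 1400

With this patch the value of the new testoptions option is appended to the composed command; a " -- " separator is inserted first if the command does not already contain one.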
Signed-off-by: Jan Tluka <jtluka@redhat.com>
---
 test_modules/Netperf.py | 7 +++++++
 1 file changed, 7 insertions(+)
diff --git a/test_modules/Netperf.py b/test_modules/Netperf.py
index 89664fb..a691fa5 100644
--- a/test_modules/Netperf.py
+++ b/test_modules/Netperf.py
@@ -33,6 +33,7 @@ class Netperf(TestGeneric):
         self._duration = self.get_opt("duration")
         self._port = self.get_opt("port")
         self._testname = self.get_opt("testname", default="TCP_STREAM")
+        self._testoptions = self.get_opt("testoptions")
         self._confidence = self.get_opt("confidence")
         self._bind = self.get_opt("bind", opt_type="addr")
         self._cpu_util = self.get_opt("cpu_util")
@@ -122,6 +123,12 @@ class Netperf(TestGeneric):
             if self._is_omni():
                 cmd += ' -- -k "THROUGHPUT, LOCAL_CPU_UTIL, REMOTE_CPU_UTIL, CONFIDENCE_LEVEL, THROUGHPUT_CONFID"'
 
+            if self._testoptions:
+                if cmd.find(" -- ") == -1:
+                    cmd += " -- %s" % self._testoptions
+                else:
+                    cmd += self._testoptions
+
         elif self._role == "server":
             cmd = "netserver -D"
             if self._bind is not None:
Parsing of the mbits/sec unit is missing in _parse_threshold(), and the multipliers for the larger units are one prefix too small. This patch fixes that.
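Assuming a threshold option written as e.g. "900 mbits/sec" (rate, size prefix, unit type), the fixed multipliers work out like this: lowercase prefixes are decimal, uppercase prefixes are 1024-based, and byte units are additionally multiplied by 8:

    900 mbits/sec  -> 900 * 1000 * 1000         =  900000000.0 bps
    900 Mbits/sec  -> 900 * 1024 * 1024         =  943718400.0 bps
      1 gbits/sec  ->   1 * 1000 * 1000 * 1000  = 1000000000.0 bps
      1 Gbytes/sec ->   1 * 1024**3 * 8         = 8589934592.0 bps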
Signed-off-by: Jan Tluka <jtluka@redhat.com>
---
 test_modules/Netperf.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/test_modules/Netperf.py b/test_modules/Netperf.py
index a691fa5..18bf825 100644
--- a/test_modules/Netperf.py
+++ b/test_modules/Netperf.py
@@ -237,14 +237,18 @@ class Netperf(TestGeneric):
             threshold_rate *= 1000
         elif threshold_unit_size == 'K':
             threshold_rate *= 1024
-        elif threshold_unit_size == 'g':
+        elif threshold_unit_size == 'm':
             threshold_rate *= 1000*1000
-        elif threshold_unit_size == 'G':
+        elif threshold_unit_size == 'M':
             threshold_rate *= 1024*1024
+        elif threshold_unit_size == 'g':
+            threshold_rate *= 1000*1000*1000
+        elif threshold_unit_size == 'G':
+            threshold_rate *= 1024*1024*1024
         elif threshold_unit_size == 't':
-            threshold_rate *= 1000 * 1000 * 1000
+            threshold_rate *= 1000 * 1000 * 1000 * 1000
         elif threshold_unit_size == 'T':
-            threshold_rate *= 1024 * 1024 * 1024
+            threshold_rate *= 1024 * 1024 * 1024 * 1024
         if threshold_unit_type == "bytes":
             threshold_rate *= 8
             threshold_unit_type = "bps"
Currently the Netperf module transforms each rate into bps internally. Reporting the rates/thresholds like this is not user friendly.

I changed the reporting so that the rate is reported like this:
Measured rate 858.66 +-0.00 Mbits/sec is higher than threshold 700.00 +-0.00 Mbits/sec
instead of this:
Measured rate 914154070.00 +-0.00 bps is higher than threshold 600.00 +-100.00
The data in res_data is left intact; the change affects only the logging.
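A minimal sketch of the conversion the new _pretty_rate() helper performs (the explicit unit parameter and the Tbits/sec range are left out; prefixes are 1024-based to match the helper):

    def pretty_rate(rate):
        # pick the largest 1024-based prefix for which the scaled value is >= 1
        for divisor, unit in [(1024**3, "Gbits/sec"), (1024**2, "Mbits/sec"),
                              (1024, "Kbits/sec"), (1, "bits/sec")]:
            if rate >= divisor or divisor == 1:
                return {"rate": rate / divisor, "unit": unit}

    # e.g. the 914154070.00 bps from the old-style message above comes out
    # as roughly 871.81 Mbits/sec
    print(pretty_rate(914154070.0))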
Signed-off-by: Jan Tluka <jtluka@redhat.com>
---
 test_modules/Netperf.py | 69 ++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 59 insertions(+), 10 deletions(-)
diff --git a/test_modules/Netperf.py b/test_modules/Netperf.py
index 18bf825..31bea31 100644
--- a/test_modules/Netperf.py
+++ b/test_modules/Netperf.py
@@ -294,6 +294,43 @@ class Netperf(TestGeneric):
             if e.errno == errno.EINTR:
                 server.kill()
 
+    def _pretty_rate(self, rate, unit=None):
+        pretty_rate = {}
+        if unit is None:
+            if rate < 1024:
+                pretty_rate["unit"] = "bits/sec"
+                pretty_rate["rate"] = rate
+            elif rate < 1024 * 1024:
+                pretty_rate["unit"] = "Kbits/sec"
+                pretty_rate["rate"] = rate / 1024
+            elif rate < 1024 * 1024 * 1024:
+                pretty_rate["unit"] = "Mbits/sec"
+                pretty_rate["rate"] = rate / (1024 * 1024)
+            elif rate < 1024 * 1024 * 1024 * 1024:
+                pretty_rate["unit"] = "Gbits/sec"
+                pretty_rate["rate"] = rate / (1024 * 1024 * 1024)
+            elif rate < 1024 * 1024 * 1024 * 1024 * 1024:
+                pretty_rate["unit"] = "tbits/sec"
+                pretty_rate["rate"] = rate / (1024 * 1024 * 1024 * 1024)
+        else:
+            if unit == "bits/sec":
+                pretty_rate["unit"] = "bits/sec"
+                pretty_rate["rate"] = rate
+            elif unit == "Kbits/sec":
+                pretty_rate["unit"] = "Kbits/sec"
+                pretty_rate["rate"] = rate / 1024
+            elif unit == "Mbits/sec":
+                pretty_rate["unit"] = "Mbits/sec"
+                pretty_rate["rate"] = rate / (1024 * 1024)
+            elif unit == "Gbits/sec":
+                pretty_rate["unit"] = "Gbits/sec"
+                pretty_rate["rate"] = rate / (1024 * 1024 * 1024)
+            elif unit == "Tbits/sec":
+                pretty_rate["unit"] = "Tbits/sec"
+                pretty_rate["rate"] = rate / (1024 * 1024 * 1024)
+
+        return pretty_rate
+
     def _run_client(self, cmd):
         logging.debug("running as client...")
 
@@ -354,6 +391,12 @@ class Netperf(TestGeneric):
         res_data["rate"] = rate
         res_data["rate_deviation"] = rate_deviation
 
+        rate_pretty = self._pretty_rate(rate)
+        rate_dev_pretty = self._pretty_rate(rate_deviation, unit=rate_pretty["unit"])
+        threshold_pretty = self._pretty_rate(self._threshold["rate"])
+        threshold_dev_pretty = self._pretty_rate(self._threshold_deviation["rate"],
+                                                 unit = threshold_pretty["unit"])
+
         res_val = False
         if self._threshold_interval is not None:
             result_interval = (rate - rate_deviation,
@@ -361,18 +404,24 @@ class Netperf(TestGeneric):
             if self._threshold_interval[0] > result_interval[1]:
                 res_val = False
-                res_data["msg"] = "Measured rate %.2f +-%.2f bps is lower "\
-                                  "than threshold %.2f +-%.2f" %\
-                                  (rate, rate_deviation,
-                                   self._threshold["rate"],
-                                   self._threshold_deviation["rate"])
+                res_data["msg"] = "Measured rate %.2f +-%.2f %s is lower "\
+                                  "than threshold %.2f +-%.2f %s" %\
+                                  (rate_pretty["rate"],
+                                   rate_deviation,
+                                   rate_pretty["unit"],
+                                   threshold_pretty["rate"],
+                                   threshold_dev_pretty["rate"],
+                                   threshold_pretty["unit"])
             else:
                 res_val = True
-                res_data["msg"] = "Measured rate %.2f +-%.2f bps is higher "\
-                                  "than threshold %.2f +-%.2f" %\
-                                  (rate, rate_deviation,
-                                   self._threshold["rate"],
-                                   self._threshold_deviation["rate"])
+                res_data["msg"] = "Measured rate %.2f +-%.2f %s is higher "\
+                                  "than threshold %.2f +-%.2f %s" %\
+                                  (rate_pretty["rate"],
+                                   rate_deviation,
+                                   rate_pretty["unit"],
+                                   threshold_pretty["rate"],
+                                   threshold_dev_pretty["rate"],
+                                   threshold_pretty["unit"])
         else:
             if rate > 0.0:
                 res_val = True
Please scratch this. There's a bug in the second patch.
Tue, Apr 05, 2016 at 12:25:52PM CEST, jtluka@redhat.com wrote: