Change in vdsm[master]: profiling: memory: port from dowser to tracemalloc
by fromani@redhat.com
Francesco Romani has uploaded a new change for review.
Change subject: profiling: memory: port from dowser to tracemalloc
......................................................................
profiling: memory: port from dowser to tracemalloc
** WORK IN PROGRESS **
This patch implements integration with tracemalloc
(http://pytracemalloc.readthedocs.org/), replacing dowser module.
tracemalloc is
- included in Python 3.4, thus it has received a good amount of peer review
- more comprehensive, as it plugs inside cpython
- augmented with nice UI to analyze memory profile snapshot
It, however, requires a python VM compliant with PEP 445, and
an additional extension module installed in the system
Change-Id: If00a3af13ea48bb75d40ed11e56de04d90452ff7
Signed-off-by: Francesco Romani <fromani(a)redhat.com>
---
M lib/vdsm/profiling/memory.py
1 file changed, 32 insertions(+), 30 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/24/47624/1
diff --git a/lib/vdsm/profiling/memory.py b/lib/vdsm/profiling/memory.py
index 31f833c..80a07ac 100755
--- a/lib/vdsm/profiling/memory.py
+++ b/lib/vdsm/profiling/memory.py
@@ -24,19 +24,20 @@
"""
import logging
+import os
import threading
+import time
+from vdsm import constants
from vdsm.config import config
-from vdsm.utils import traceback
from .errors import UsageError
-# Import modules lazily when profile is started
-dowser = None
-cherrypy = None
+# Import tracemalloc lazily when profile is started
+tracemalloc = None
+
_lock = threading.Lock()
-_thread = None
def start():
@@ -48,7 +49,7 @@
def stop():
""" Stops application memory profiling """
if is_enabled():
- _stop_profiling()
+ _stop_profiling(_make_snapshot_name())
def is_enabled():
@@ -56,48 +57,49 @@
def is_running():
- return _thread is not None
+ return tracemalloc and tracemalloc.is_tracing()
-@traceback()
-def _memory_viewer():
- cherrypy.tree.mount(dowser.Root())
+def snapshot(filename=None):
+ with _lock:
+ if not is_running():
+ raise UsageError('Memory profiler must be running '
+ 'to take snapshots')
+ snapshot_name = filename or _make_snapshot_name()
+ with open(snapshot_name, 'wb') as snap:
+ snap.write(tracemalloc.take_snapshot())
+ return snapshot_name
- cherrypy.config.update({
- 'server.socket_host': '0.0.0.0',
- 'server.socket_port': config.getint('devel', 'memory_profile_port')})
- cherrypy.engine.start()
+def _make_snapshot_name():
+ return os.path.join(constants.P_VDSM_RUN,
+ 'vdsm_memory_%s.pickle' % _make_timestamp())
+
+
+def _make_timestamp():
+ return time.strftime('%Y%m%d_%H%M%S')
def _start_profiling():
- global cherrypy
- global dowser
- global _thread
+ global tracemalloc
logging.debug("Starting memory profiling")
- import cherrypy
- import dowser
+ import tracemalloc
# this nonsense makes pyflakes happy
- cherrypy
- dowser
+ tracemalloc
with _lock:
if is_running():
raise UsageError('Memory profiler is already running')
- _thread = threading.Thread(name='memprofile',
- target=_memory_viewer)
- _thread.daemon = True
- _thread.start()
+ tracemalloc.start()
-def _stop_profiling():
+def _stop_profiling(filename):
global _thread
logging.debug("Stopping memory profiling")
with _lock:
if is_running():
- cherrypy.engine.exit()
- cherrypy.engine.block()
- _thread.join()
- _thread = None
+ snapshot(filename)
+ tracemalloc.clear_traces()
+ tracemalloc.stop()
--
To view, visit https://gerrit.ovirt.org/47624
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: If00a3af13ea48bb75d40ed11e56de04d90452ff7
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Francesco Romani <fromani(a)redhat.com>
7 years, 7 months
Change in vdsm[master]: profiling: memory: profile snapshots in vdsClient
by fromani@redhat.com
Francesco Romani has uploaded a new change for review.
Change subject: profiling: memory: profile snapshots in vdsClient
......................................................................
profiling: memory: profile snapshots in vdsClient
** WORK IN PROGRESS **
Add the ability to take memory snapshots in vdsClient.
Change-Id: I8c7c5a074953bd54a8c2704d8aa2b074e277fdde
Signed-off-by: Francesco Romani <fromani(a)redhat.com>
---
M client/vdsClient.py
M vdsm/API.py
M vdsm/rpc/bindingxmlrpc.py
3 files changed, 23 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/25/47625/1
diff --git a/client/vdsClient.py b/client/vdsClient.py
index fe263f2..739374b 100755
--- a/client/vdsClient.py
+++ b/client/vdsClient.py
@@ -1596,6 +1596,13 @@
return stats['status']['code'], stats['status']['message']
return 0, ''
+ def do_memoryProfileSnapshot(self):
+ stats = self.s.memoryProfileSnapshot()
+ if stats['status']['code']:
+ return stats['status']['code'], stats['status']['message']
+ print 'snapshot saved on host on %s' % stats['name']
+ return 0, ''
+
def do_setMOMPolicy(self, policyFile):
stats = self.s.setMOMPolicy(policyFile)
if stats['status']['code']:
@@ -2635,6 +2642,11 @@
('<level> [logName][,logName]...', 'set log verbosity'
' level (10=DEBUG, 50=CRITICAL'
)),
+ 'memoryProfileSnapshot': (serv.do_memoryProfileSnapshot,
+ ('',
+ 'save memory profile snapshot for later '
+ 'inspection')),
+
'setMOMPolicy': (serv.do_setMOMPolicy,
('<policyfile>', 'set MOM policy')),
'setMOMPolicyParameters': (serv.do_setMOMPolicyParameters,
diff --git a/vdsm/API.py b/vdsm/API.py
index 0b5c8f7..1345dd8 100644
--- a/vdsm/API.py
+++ b/vdsm/API.py
@@ -51,6 +51,7 @@
from virt.vmdevices import hwclass
from vdsm.compat import pickle
from vdsm.define import doneCode, errCode, Kbytes, Mbytes
+from vdsm.profiling import memory
import caps
from vdsm.config import config
import hooks
@@ -1409,6 +1410,11 @@
return dict(status=doneCode)
+ def memoryProfileSnapshot(self):
+ name = memory.snapshot()
+ logging.debug('saved memory profiling snapshot on %s', name)
+ return {'status': doneCode, 'snapshot': name}
+
# VM-related functions
def getVMList(self, fullStatus=False, vmList=(), onlyUUID=False):
""" return a list of known VMs with full (or partial) config each """
diff --git a/vdsm/rpc/bindingxmlrpc.py b/vdsm/rpc/bindingxmlrpc.py
index c521cd8..c0212df 100644
--- a/vdsm/rpc/bindingxmlrpc.py
+++ b/vdsm/rpc/bindingxmlrpc.py
@@ -649,6 +649,10 @@
api = API.Global()
return api.setLogLevel(level)
+ def memoryProfileSnapshot(self):
+ api = API.Global()
+ return api.memoryProfileSnapshot()
+
def setMOMPolicy(self, policy):
api = API.Global()
return api.setMOMPolicy(policy)
@@ -1088,6 +1092,7 @@
(self.addNetwork, 'addNetwork'),
(self.delNetwork, 'delNetwork'),
(self.editNetwork, 'editNetwork'),
+ (self.memoryProfileSnapshot, 'memoryProfileSnapshot'),
(self.setupNetworks, 'setupNetworks'),
(self.ping, 'ping'),
(self.setSafeNetworkConfig, 'setSafeNetworkConfig'),
--
To view, visit https://gerrit.ovirt.org/47625
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I8c7c5a074953bd54a8c2704d8aa2b074e277fdde
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Francesco Romani <fromani(a)redhat.com>
7 years, 7 months
Change in vdsm[master]: WIP: add simple tracemalloc inspection tool
by fromani@redhat.com
Francesco Romani has uploaded a new change for review.
Change subject: WIP: add simple tracemalloc inspection tool
......................................................................
WIP: add simple tracemalloc inspection tool
Change-Id: Ideb2652f345edb6f4a4d66c5299b601e53e85d33
Signed-off-by: Francesco Romani <fromani(a)redhat.com>
---
A contrib/memory-stats
A contrib/tracemalloc.py
2 files changed, 592 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/15/48115/1
diff --git a/contrib/memory-stats b/contrib/memory-stats
new file mode 100755
index 0000000..5625ad1
--- /dev/null
+++ b/contrib/memory-stats
@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+#
+# Copyright 2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Refer to the README and COPYING files for full details of the license
+#
+
+import argparse
+import os
+import sys
+import time
+
+import tracemalloc
+
+
+def eprint(s):
+ sys.stderr.write('%s\n' % s)
+
+
+def _load(filename):
+ if filename:
+ eprint('Loading snapshot: "%s"' % filename)
+ before = time.time()
+ snap = tracemalloc.Snapshot.load(filename)
+ eprint('Loaded snapshot: "%s" in %.3f seconds' % (
+ filename, time.time() - before))
+ return snap
+ return None
+
+
+def display_top(snapshot1, snapshot2=None, group_by='lineno', limit=10):
+ if snapshot2 is None:
+ top_stats = snapshot1.statistics(group_by)
+ else:
+ top_stats = snapshot2.compare_to(snapshot1, group_by)
+ print_stats(top_stats, limit)
+
+
+def _print_sep(width=52):
+ print('-' * width)
+
+
+def print_stats(top_stats, limit=10):
+ total = float(sum(stat.size for stat in top_stats))
+ print("* Total allocated size: %.2f KiB" % (total / 1024.))
+ print("* Top %s lines:" % limit)
+ _print_sep()
+ for index, stat in enumerate(top_stats[:limit], 1):
+ frame = stat.traceback[0]
+ # replace "/path/to/module/file.py" with "module/file.py"
+ filename = os.sep.join(frame.filename.split(os.sep)[-2:])
+ where = "%s:%s" % (filename, frame.lineno)
+ print("#%3s: %-32s: %.2f KiB (%.2f%%)"
+ % (index, where,
+ stat.size / 1024.,
+ (100. * stat.size) / total))
+
+ other = top_stats[limit:]
+ if other:
+ size = sum(stat.size for stat in other)
+ _print_sep()
+ where = "%i entries" % len(other)
+ print("rest: %-32s: %.2f KiB (%.2f%%)"
+ % (where, size / 1024., (100. * size) / total))
+
+
+ap = argparse.ArgumentParser(description='analyze tracemalloc snapshot')
+ap.add_argument('snapshot', nargs='?', help='memory snapshot to analyze')
+ap.add_argument('-c', '--compare-to', metavar='OS', dest='to_compare',
+ help='memory snapshot to compare')
+ap.add_argument('-g', '--group-by', metavar='ATTR', dest='groupby',
+ default='lineno',
+ help='group samples by either: lineno, filename, traceback')
+ap.add_argument('-l', '--limit', metavar='L', dest='limit', default=10,
+ type=int,
+ help='limit output to L top entries')
+
+
+args = ap.parse_args()
+
+if not args.snapshot:
+ ap.print_help()
+ sys.exit(1)
+
+display_top(_load(args.snapshot),
+ _load(args.to_compare),
+ args.groupby,
+ args.limit)
diff --git a/contrib/tracemalloc.py b/contrib/tracemalloc.py
new file mode 100644
index 0000000..47b633e
--- /dev/null
+++ b/contrib/tracemalloc.py
@@ -0,0 +1,490 @@
+# Copy of tracemalloc.py from pytracemalloc without _tracemalloc,
+# to be able to read snapshots without having to install the _tracemalloc
+# module.
+from collections import Sequence, Iterable
+import fnmatch
+import linecache
+import os.path
+import pickle
+
+
+try:
+ from functools import total_ordering
+except ImportError:
+ # Python 2.6
+ def total_ordering(cls):
+ # Function backported from Python 2.7
+ convert = {
+ '__lt__': [('__gt__', lambda self, other: _not_op_and_not_eq(self.__lt__, self, other)),
+ ('__le__', lambda self, other: _op_or_eq(self.__lt__, self, other)),
+ ('__ge__', lambda self, other: _not_op(self.__lt__, other))],
+ '__le__': [('__ge__', lambda self, other: _not_op_or_eq(self.__le__, self, other)),
+ ('__lt__', lambda self, other: _op_and_not_eq(self.__le__, self, other)),
+ ('__gt__', lambda self, other: _not_op(self.__le__, other))],
+ '__gt__': [('__lt__', lambda self, other: _not_op_and_not_eq(self.__gt__, self, other)),
+ ('__ge__', lambda self, other: _op_or_eq(self.__gt__, self, other)),
+ ('__le__', lambda self, other: _not_op(self.__gt__, other))],
+ '__ge__': [('__le__', lambda self, other: _not_op_or_eq(self.__ge__, self, other)),
+ ('__gt__', lambda self, other: _op_and_not_eq(self.__ge__, self, other)),
+ ('__lt__', lambda self, other: _not_op(self.__ge__, other))]
+ }
+ roots = [op for op in convert if getattr(cls, op, None) is not getattr(object, op, None)]
+ if not roots:
+ raise ValueError('must define at least one ordering operation: < > <= >=')
+ root = max(roots)
+ for opname, opfunc in convert[root]:
+ if opname not in roots:
+ opfunc.__name__ = opname
+ opfunc.__doc__ = getattr(int, opname).__doc__
+ setattr(cls, opname, opfunc)
+ return cls
+
+
+def _format_size(size, sign):
+ for unit in ('B', 'KiB', 'MiB', 'GiB', 'TiB'):
+ if abs(size) < 100 and unit != 'B':
+ # 3 digits (xx.x UNIT)
+ if sign:
+ return "%+.1f %s" % (size, unit)
+ else:
+ return "%.1f %s" % (size, unit)
+ if abs(size) < 10 * 1024 or unit == 'TiB':
+ # 4 or 5 digits (xxxx UNIT)
+ if sign:
+ return "%+.0f %s" % (size, unit)
+ else:
+ return "%.0f %s" % (size, unit)
+ size /= 1024
+
+
+class Statistic(object):
+ """
+ Statistic difference on memory allocations between two Snapshot instance.
+ """
+
+ __slots__ = ('traceback', 'size', 'count')
+
+ def __init__(self, traceback, size, count):
+ self.traceback = traceback
+ self.size = size
+ self.count = count
+
+ def __hash__(self):
+ return hash((self.traceback, self.size, self.count))
+
+ def __eq__(self, other):
+ return (self.traceback == other.traceback
+ and self.size == other.size
+ and self.count == other.count)
+
+ def __str__(self):
+ text = ("%s: size=%s, count=%i"
+ % (self.traceback,
+ _format_size(self.size, False),
+ self.count))
+ if self.count:
+ average = self.size / self.count
+ text += ", average=%s" % _format_size(average, False)
+ return text
+
+ def __repr__(self):
+ return ('<Statistic traceback=%r size=%i count=%i>'
+ % (self.traceback, self.size, self.count))
+
+ def _sort_key(self):
+ return (self.size, self.count, self.traceback)
+
+
+class StatisticDiff(object):
+ """
+ Statistic difference on memory allocations between an old and a new
+ Snapshot instance.
+ """
+ __slots__ = ('traceback', 'size', 'size_diff', 'count', 'count_diff')
+
+ def __init__(self, traceback, size, size_diff, count, count_diff):
+ self.traceback = traceback
+ self.size = size
+ self.size_diff = size_diff
+ self.count = count
+ self.count_diff = count_diff
+
+ def __hash__(self):
+ return hash((self.traceback, self.size, self.size_diff,
+ self.count, self.count_diff))
+
+ def __eq__(self, other):
+ return (self.traceback == other.traceback
+ and self.size == other.size
+ and self.size_diff == other.size_diff
+ and self.count == other.count
+ and self.count_diff == other.count_diff)
+
+ def __str__(self):
+ text = ("%s: size=%s (%s), count=%i (%+i)"
+ % (self.traceback,
+ _format_size(self.size, False),
+ _format_size(self.size_diff, True),
+ self.count,
+ self.count_diff))
+ if self.count:
+ average = self.size / self.count
+ text += ", average=%s" % _format_size(average, False)
+ return text
+
+ def __repr__(self):
+ return ('<StatisticDiff traceback=%r size=%i (%+i) count=%i (%+i)>'
+ % (self.traceback, self.size, self.size_diff,
+ self.count, self.count_diff))
+
+ def _sort_key(self):
+ return (abs(self.size_diff), self.size,
+ abs(self.count_diff), self.count,
+ self.traceback)
+
+
+def _compare_grouped_stats(old_group, new_group):
+ statistics = []
+ for traceback, stat in new_group.items():
+ previous = old_group.pop(traceback, None)
+ if previous is not None:
+ stat = StatisticDiff(traceback,
+ stat.size, stat.size - previous.size,
+ stat.count, stat.count - previous.count)
+ else:
+ stat = StatisticDiff(traceback,
+ stat.size, stat.size,
+ stat.count, stat.count)
+ statistics.append(stat)
+
+ for traceback, stat in old_group.items():
+ stat = StatisticDiff(traceback, 0, -stat.size, 0, -stat.count)
+ statistics.append(stat)
+ return statistics
+
+
+@total_ordering
+class Frame(object):
+ """
+ Frame of a traceback.
+ """
+ __slots__ = ("_frame",)
+
+ def __init__(self, frame):
+ # frame is a tuple: (filename: str, lineno: int)
+ self._frame = frame
+
+ @property
+ def filename(self):
+ return self._frame[0]
+
+ @property
+ def lineno(self):
+ return self._frame[1]
+
+ def __eq__(self, other):
+ return (self._frame == other._frame)
+
+ def __lt__(self, other):
+ return (self._frame < other._frame)
+
+ def __hash__(self):
+ return hash(self._frame)
+
+ def __str__(self):
+ return "%s:%s" % (self.filename, self.lineno)
+
+ def __repr__(self):
+ return "<Frame filename=%r lineno=%r>" % (self.filename, self.lineno)
+
+
+@total_ordering
+class Traceback(Sequence):
+ """
+ Sequence of Frame instances sorted from the most recent frame
+ to the oldest frame.
+ """
+ __slots__ = ("_frames",)
+
+ def __init__(self, frames):
+ Sequence.__init__(self)
+ # frames is a tuple of frame tuples: see Frame constructor for the
+ # format of a frame tuple
+ self._frames = frames
+
+ def __len__(self):
+ return len(self._frames)
+
+ def __getitem__(self, index):
+ if isinstance(index, slice):
+ return tuple(Frame(trace) for trace in self._frames[index])
+ else:
+ return Frame(self._frames[index])
+
+ def __contains__(self, frame):
+ return frame._frame in self._frames
+
+ def __hash__(self):
+ return hash(self._frames)
+
+ def __eq__(self, other):
+ return (self._frames == other._frames)
+
+ def __lt__(self, other):
+ return (self._frames < other._frames)
+
+ def __str__(self):
+ return str(self[0])
+
+ def __repr__(self):
+ return "<Traceback %r>" % (tuple(self),)
+
+ def format(self, limit=None):
+ lines = []
+ if limit is not None and limit < 0:
+ return lines
+ for frame in self[:limit]:
+ lines.append(' File "%s", line %s'
+ % (frame.filename, frame.lineno))
+ line = linecache.getline(frame.filename, frame.lineno).strip()
+ if line:
+ lines.append(' %s' % line)
+ return lines
+
+
+class Trace(object):
+ """
+ Trace of a memory block.
+ """
+ __slots__ = ("_trace",)
+
+ def __init__(self, trace):
+ # trace is a tuple: (size, traceback), see Traceback constructor
+ # for the format of the traceback tuple
+ self._trace = trace
+
+ @property
+ def size(self):
+ return self._trace[0]
+
+ @property
+ def traceback(self):
+ return Traceback(self._trace[1])
+
+ def __eq__(self, other):
+ return (self._trace == other._trace)
+
+ def __hash__(self):
+ return hash(self._trace)
+
+ def __str__(self):
+ return "%s: %s" % (self.traceback, _format_size(self.size, False))
+
+ def __repr__(self):
+ return ("<Trace size=%s, traceback=%r>"
+ % (_format_size(self.size, False), self.traceback))
+
+
+class _Traces(Sequence):
+ def __init__(self, traces):
+ Sequence.__init__(self)
+ # traces is a tuple of trace tuples: see Trace constructor
+ self._traces = traces
+
+ def __len__(self):
+ return len(self._traces)
+
+ def __getitem__(self, index):
+ if isinstance(index, slice):
+ return tuple(Trace(trace) for trace in self._traces[index])
+ else:
+ return Trace(self._traces[index])
+
+ def __contains__(self, trace):
+ return trace._trace in self._traces
+
+ def __eq__(self, other):
+ return (self._traces == other._traces)
+
+ def __repr__(self):
+ return "<Traces len=%s>" % len(self)
+
+
+def _normalize_filename(filename):
+ filename = os.path.normcase(filename)
+ if filename.endswith(('.pyc', '.pyo')):
+ filename = filename[:-1]
+ return filename
+
+
+class Filter(object):
+ def __init__(self, inclusive, filename_pattern,
+ lineno=None, all_frames=False):
+ self.inclusive = inclusive
+ self._filename_pattern = _normalize_filename(filename_pattern)
+ self.lineno = lineno
+ self.all_frames = all_frames
+
+ @property
+ def filename_pattern(self):
+ return self._filename_pattern
+
+ def __match_frame(self, filename, lineno):
+ filename = _normalize_filename(filename)
+ if not fnmatch.fnmatch(filename, self._filename_pattern):
+ return False
+ if self.lineno is None:
+ return True
+ else:
+ return (lineno == self.lineno)
+
+ def _match_frame(self, filename, lineno):
+ return self.__match_frame(filename, lineno) ^ (not self.inclusive)
+
+ def _match_traceback(self, traceback):
+ if self.all_frames:
+ if any(self.__match_frame(filename, lineno)
+ for filename, lineno in traceback):
+ return self.inclusive
+ else:
+ return (not self.inclusive)
+ else:
+ filename, lineno = traceback[0]
+ return self._match_frame(filename, lineno)
+
+
+class Snapshot(object):
+ """
+ Snapshot of traces of memory blocks allocated by Python.
+ """
+
+ def __init__(self, traces, traceback_limit):
+ # traces is a tuple of trace tuples: see _Traces constructor for
+ # the exact format
+ self.traces = _Traces(traces)
+ self.traceback_limit = traceback_limit
+
+ def dump(self, filename):
+ """
+ Write the snapshot into a file.
+ """
+ with open(filename, "wb") as fp:
+ pickle.dump(self, fp, pickle.HIGHEST_PROTOCOL)
+
+ @staticmethod
+ def load(filename):
+ """
+ Load a snapshot from a file.
+ """
+ with open(filename, "rb") as fp:
+ return pickle.load(fp)
+
+ def _filter_trace(self, include_filters, exclude_filters, trace):
+ traceback = trace[1]
+ if include_filters:
+ if not any(trace_filter._match_traceback(traceback)
+ for trace_filter in include_filters):
+ return False
+ if exclude_filters:
+ if any(not trace_filter._match_traceback(traceback)
+ for trace_filter in exclude_filters):
+ return False
+ return True
+
+ def filter_traces(self, filters):
+ """
+ Create a new Snapshot instance with a filtered traces sequence, filters
+ is a list of Filter instances. If filters is an empty list, return a
+ new Snapshot instance with a copy of the traces.
+ """
+ if not isinstance(filters, Iterable):
+ raise TypeError("filters must be a list of filters, not %s"
+ % type(filters).__name__)
+ if filters:
+ include_filters = []
+ exclude_filters = []
+ for trace_filter in filters:
+ if trace_filter.inclusive:
+ include_filters.append(trace_filter)
+ else:
+ exclude_filters.append(trace_filter)
+ new_traces = [trace for trace in self.traces._traces
+ if self._filter_trace(include_filters,
+ exclude_filters,
+ trace)]
+ else:
+ new_traces = self.traces._traces[:]
+ return Snapshot(new_traces, self.traceback_limit)
+
+ def _group_by(self, key_type, cumulative):
+ if key_type not in ('traceback', 'filename', 'lineno'):
+ raise ValueError("unknown key_type: %r" % (key_type,))
+ if cumulative and key_type not in ('lineno', 'filename'):
+ raise ValueError("cumulative mode cannot by used "
+ "with key type %r" % key_type)
+
+ stats = {}
+ tracebacks = {}
+ if not cumulative:
+ for trace in self.traces._traces:
+ size, trace_traceback = trace
+ try:
+ traceback = tracebacks[trace_traceback]
+ except KeyError:
+ if key_type == 'traceback':
+ frames = trace_traceback
+ elif key_type == 'lineno':
+ frames = trace_traceback[:1]
+ else: # key_type == 'filename':
+ frames = ((trace_traceback[0][0], 0),)
+ traceback = Traceback(frames)
+ tracebacks[trace_traceback] = traceback
+ try:
+ stat = stats[traceback]
+ stat.size += size
+ stat.count += 1
+ except KeyError:
+ stats[traceback] = Statistic(traceback, size, 1)
+ else:
+ # cumulative statistics
+ for trace in self.traces._traces:
+ size, trace_traceback = trace
+ for frame in trace_traceback:
+ try:
+ traceback = tracebacks[frame]
+ except KeyError:
+ if key_type == 'lineno':
+ frames = (frame,)
+ else: # key_type == 'filename':
+ frames = ((frame[0], 0),)
+ traceback = Traceback(frames)
+ tracebacks[frame] = traceback
+ try:
+ stat = stats[traceback]
+ stat.size += size
+ stat.count += 1
+ except KeyError:
+ stats[traceback] = Statistic(traceback, size, 1)
+ return stats
+
+ def statistics(self, key_type, cumulative=False):
+ """
+ Group statistics by key_type. Return a sorted list of Statistic
+ instances.
+ """
+ grouped = self._group_by(key_type, cumulative)
+ statistics = list(grouped.values())
+ statistics.sort(reverse=True, key=Statistic._sort_key)
+ return statistics
+
+ def compare_to(self, old_snapshot, key_type, cumulative=False):
+ """
+ Compute the differences with an old snapshot old_snapshot. Get
+ statistics as a sorted list of StatisticDiff instances, grouped by
+ group_by.
+ """
+ new_group = self._group_by(key_type, cumulative)
+ old_group = old_snapshot._group_by(key_type, cumulative)
+ statistics = _compare_grouped_stats(old_group, new_group)
+ statistics.sort(reverse=True, key=StatisticDiff._sort_key)
+ return statistics
--
To view, visit https://gerrit.ovirt.org/48115
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ideb2652f345edb6f4a4d66c5299b601e53e85d33
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Francesco Romani <fromani(a)redhat.com>
7 years, 7 months
Change in vdsm[master]: v2v: Log detailed output of virt-v2v
by Tomas Golembiovsky
Tomas Golembiovsky has uploaded a new change for review.
Change subject: v2v: Log detailed output of virt-v2v
......................................................................
v2v: Log detailed output of virt-v2v
The detailed virt-v2v log output is often necessary to debug conversion
failures. We provide '-v -x' arguments to virt-v2v to get the detailed
output and store the logs in the VDSM run directory.
Change-Id: I6a8d9284316a551edeaffdd66dfcd299fa02478e
Bug-Url: https://bugzilla.redhat.com/1350465
Signed-off-by: Tomáš Golembiovský <tgolembi(a)redhat.com>
---
M lib/vdsm/v2v.py
M tests/fake-virt-v2v
2 files changed, 63 insertions(+), 27 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/34/59834/1
diff --git a/lib/vdsm/v2v.py b/lib/vdsm/v2v.py
index defbe14..0948741 100644
--- a/lib/vdsm/v2v.py
+++ b/lib/vdsm/v2v.py
@@ -384,6 +384,7 @@
self._irs = irs
self._prepared_volumes = []
self._passwd_file = os.path.join(_V2V_DIR, "%s.tmp" % vmid)
+ self._base_command = [_VIRT_V2V.cmd, '-v', '-x']
def execute(self):
raise NotImplementedError("Subclass must implement this")
@@ -394,7 +395,10 @@
deathSignal=signal.SIGTERM,
nice=NICENESS.HIGH,
ioclass=IOCLASS.IDLE,
- env=self._environment())
+ env=self._environment(),
+ outLog=os.path.join(_V2V_DIR,
+ "%s.log" % self._vmid),
+ errToOutLog=True)
def _get_disk_format(self):
fmt = self._vminfo.get('format', 'raw').lower()
@@ -490,11 +494,11 @@
self._password = password
def _command(self):
- cmd = [_VIRT_V2V.cmd,
- '-ic', self._uri,
- '-o', 'vdsm',
- '-of', self._get_disk_format(),
- '-oa', self._vminfo.get('allocation', 'sparse').lower()]
+ cmd = self._base_command
+ cmd.extend(['-ic', self._uri,
+ '-o', 'vdsm',
+ '-of', self._get_disk_format(),
+ '-oa', self._vminfo.get('allocation', 'sparse').lower()])
cmd.extend(self._disk_parameters())
cmd.extend(['--password-file',
self._passwd_file,
@@ -521,19 +525,19 @@
self._ova_path = ova_path
def _command(self):
- cmd = [_VIRT_V2V.cmd,
- '-i', 'ova', self._ova_path,
- '-o', 'vdsm',
- '-of', self._get_disk_format(),
- '-oa', self._vminfo.get('allocation', 'sparse').lower(),
- '--vdsm-vm-uuid',
- self._vmid,
- '--vdsm-ovf-output',
- _V2V_DIR,
- '--machine-readable',
- '-os',
- self._get_storage_domain_path(
- self._prepared_volumes[0]['path'])]
+ cmd = self._base_command
+ cmd.extend(['-i', 'ova', self._ova_path,
+ '-o', 'vdsm',
+ '-of', self._get_disk_format(),
+ '-oa', self._vminfo.get('allocation', 'sparse').lower(),
+ '--vdsm-vm-uuid',
+ self._vmid,
+ '--vdsm-ovf-output',
+ _V2V_DIR,
+ '--machine-readable',
+ '-os',
+ self._get_storage_domain_path(
+ self._prepared_volumes[0]['path'])])
cmd.extend(self._disk_parameters())
return cmd
@@ -559,11 +563,11 @@
self._ssh_agent = SSHAgent()
def _command(self):
- cmd = [_VIRT_V2V.cmd,
- '-ic', self._uri,
- '-o', 'vdsm',
- '-of', self._get_disk_format(),
- '-oa', self._vminfo.get('allocation', 'sparse').lower()]
+ cmd = self._base_command
+ cmd.extend(['-ic', self._uri,
+ '-o', 'vdsm',
+ '-of', self._get_disk_format(),
+ '-oa', self._vminfo.get('allocation', 'sparse').lower()])
cmd.extend(self._disk_parameters())
cmd.extend(['--vdsm-vm-uuid',
self._vmid,
@@ -782,7 +786,8 @@
description)
for chunk in self._iter_progress(stream):
progress = self._parse_progress(chunk)
- yield DiskProgress(progress)
+ if progress is not None:
+ yield DiskProgress(progress)
if progress == 100:
break
@@ -807,8 +812,7 @@
def _parse_progress(self, chunk):
m = self.DISK_PROGRESS_RE.match(chunk)
if m is None:
- raise OutputParserError('error parsing progress, chunk: %r'
- % chunk)
+ return None
try:
return int(m.group(1))
except ValueError:
diff --git a/tests/fake-virt-v2v b/tests/fake-virt-v2v
index 67aeab8..df9e5a2 100755
--- a/tests/fake-virt-v2v
+++ b/tests/fake-virt-v2v
@@ -47,6 +47,12 @@
parser.add_argument('--machine-readable', dest='machineReadable',
action='store_true',
help='Set the terminal output to be readable')
+parser.add_argument('-v', dest='verbose',
+ action='store_true',
+ help='Enable verbose messages for debugging.')
+parser.add_argument('-x', dest='libguestfsTrace',
+ action='store_true',
+ help='Enable tracing of libguestfs API calls.')
parser.add_argument('vmname')
options = parser.parse_args(sys.argv)
@@ -56,6 +62,11 @@
def write_output(msg):
sys.stdout.write(msg)
sys.stdout.flush()
+
+def write_trace(msg):
+ sys.stderr.write(msg)
+ sys.stderr.flush()
+
def write_progress():
@@ -69,10 +80,31 @@
write_output('[ %d.0] Creating an overlay to protect\n' % elapsed_time)
elapsed_time = elapsed_time + 1
+
+# Imitate some libguestfs trace messages
+if options.libguestfsTrace:
+ write_trace("libguestfs: trace: internal_autosync = 0")
+ write_trace("libguestfs: sending SIGTERM to process 7053")
+ write_trace("libguestfs: trace: shutdown = 0")
+ write_trace("libguestfs: trace: close")
+ write_trace("libguestfs: closing guestfs handle 0x1e265f0 (state 0)")
+ write_trace("libguestfs: command: run: rm")
+ write_trace("libguestfs: command: run: \ -rf /tmp/libguestfs1lFBAz")
+
for i, o in enumerate(options.vdsmImageId):
write_output('[ %d.0] Copying disk %d/2 to %s/%s/images/%s\n' %
(elapsed_time, i+1, options.outputStorage,
options.vdsmVmId, o))
+
+    # Imitate some verbose messages
+ # NOTE: Most verbose messages go to stderr, but some go to stdout. This can
+    # potentially mess with our parsing routine.
+ if options.verbose:
+ write_output("target_file = %s" % options.vdsmVolId)
+ write_output("target_format = raw")
+ write_output("target_estimated_size = 3827919137")
+ write_output("target_overlay = /var/tmp/v2vovl344e53.qcow2")
+
write_progress()
write_output('[ %d.0] Creating output metadata\n' % elapsed_time)
write_output('[ %d.0] Finishing off\n' % elapsed_time)
--
To view, visit https://gerrit.ovirt.org/59834
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I6a8d9284316a551edeaffdd66dfcd299fa02478e
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Tomas Golembiovsky <tgolembi(a)redhat.com>
7 years, 8 months
Change in vdsm[master]: utils: Add changehash function for change detection
by Nir Soffer
Nir Soffer has uploaded a new change for review.
Change subject: utils: Add changehash function for change detection
......................................................................
utils: Add changehash function for change detection
We use Python built-in hash to detect changes in vm state without sending
the state in each response. This function is not suitable for this
usage. Now we use generic utils.changehash(), implemented using md5
hexdigest.
Change-Id: I2242a594383e2d2fe64e3a581f18b8ac662648b0
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
M lib/vdsm/utils.py
M vdsm/virt/vm.py
2 files changed, 13 insertions(+), 2 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/45/33045/1
diff --git a/lib/vdsm/utils.py b/lib/vdsm/utils.py
index 23c63e8..1b4a9d5 100644
--- a/lib/vdsm/utils.py
+++ b/lib/vdsm/utils.py
@@ -37,6 +37,7 @@
import glob
import io
import itertools
+import hashlib
import logging
import re
import sys
@@ -1133,3 +1134,13 @@
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
flags |= os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
+
+
+def changehash(s):
+ """
+ Returns a hash of string s, suitable for change detection.
+
+ Typically changehash(s) is sent to the client frequently. When a client detects
+ that changehash(s) changed, it asks for s itself, which may be much bigger.
+ """
+ return hashlib.md5(s).hexdigest()
diff --git a/vdsm/virt/vm.py b/vdsm/virt/vm.py
index 941f283..b1567f9 100644
--- a/vdsm/virt/vm.py
+++ b/vdsm/virt/vm.py
@@ -1500,7 +1500,7 @@
self.guestAgent = guestagent.GuestAgent(
self._guestSocketFile, self.cif.channelListener, self.log)
self._lastXMLDesc = '<domain><uuid>%s</uuid></domain>' % self.id
- self._devXmlHash = '0'
+ self._devXmlHash = utils.changehash('')
self._released = False
self._releaseLock = threading.Lock()
self.saveState()
@@ -4495,7 +4495,7 @@
self._lastXMLDesc = self._dom.XMLDesc(0)
devxml = _domParseStr(self._lastXMLDesc).childNodes[0]. \
getElementsByTagName('devices')[0]
- self._devXmlHash = str(hash(devxml.toxml()))
+ self._devXmlHash = utils.changehash(devxml.toxml())
return self._lastXMLDesc
--
To view, visit http://gerrit.ovirt.org/33045
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I2242a594383e2d2fe64e3a581f18b8ac662648b0
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Nir Soffer <nsoffer(a)redhat.com>
7 years, 8 months
Change in vdsm[master]: Adding toolTests.py to python3 check modules
by ybronhei@redhat.com
Yaniv Bronhaim has uploaded a new change for review.
Change subject: Adding toolTests.py to python3 check modules
......................................................................
Adding toolTests.py to python3 check modules
Change-Id: I52f11a1a10cae46773d05d0c09da80ad8eb3b772
Signed-off-by: Yaniv Bronhaim <ybronhei(a)redhat.com>
---
M lib/vdsm/tool/configfile.py
M tests/Makefile.am
2 files changed, 3 insertions(+), 2 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/52/48052/1
diff --git a/lib/vdsm/tool/configfile.py b/lib/vdsm/tool/configfile.py
index 26ec114..0c58205 100644
--- a/lib/vdsm/tool/configfile.py
+++ b/lib/vdsm/tool/configfile.py
@@ -18,12 +18,12 @@
#
from __future__ import absolute_import
-import ConfigParser
import functools
import os
import tempfile
import re
import selinux
+from six.moves import configparser as ConfigParser
import io
from .. import utils
@@ -263,5 +263,5 @@
def read(self, path):
with open(path, 'r') as f:
return self.wrapped.readfp(
- io.StringIO(u'[root]\n' + f.read().decode())
+ io.StringIO(u'[root]\n' + f.read())
)
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 205289e..3b1f8bf 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -30,6 +30,7 @@
apiData.py \
cmdutilsTests.py \
concurrentTests.py \
+ toolTests.py \
$(NULL)
device_modules = \
--
To view, visit https://gerrit.ovirt.org/48052
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I52f11a1a10cae46773d05d0c09da80ad8eb3b772
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Yaniv Bronhaim <ybronhei(a)redhat.com>
7 years, 8 months
Change in vdsm[master]: clusterLock: Acquire, release, and inquire volume leases
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: clusterLock: Acquire, release, and inquire volume leases
......................................................................
clusterLock: Acquire, release, and inquire volume leases
Although we have been creating volume leases on storage for a while now,
there have been no APIs to manage them. Add this support to
the clusterLock implementations.
Change-Id: Icca901ccd27358767c023cd55b7a3823531d2a5a
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M vdsm/storage/clusterlock.py
M vdsm/storage/sd.py
2 files changed, 80 insertions(+), 23 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/22/38622/1
diff --git a/vdsm/storage/clusterlock.py b/vdsm/storage/clusterlock.py
index 51d2906..4077068 100644
--- a/vdsm/storage/clusterlock.py
+++ b/vdsm/storage/clusterlock.py
@@ -73,6 +73,10 @@
"""Raised when the clusterlock class is not supporting inquire"""
+class ResourceLeasesNotSupportedError(Exception):
+ """Raised when the clusterlock class does not support resource leases"""
+
+
class SafeLease(object):
log = logging.getLogger("Storage.SafeLease")
@@ -145,7 +149,13 @@
raise se.AcquireLockFailure(self._sdUUID, rc, out, err)
self.log.debug("Clustered lock acquired successfully")
+ def acquireResource(self, resource, lockDisk, shared=False):
+ raise ResourceLeasesNotSupportedError()
+
def inquire(self):
+ raise InquireNotSupportedError()
+
+ def inquireResource(self, resource, lockDisk):
raise InquireNotSupportedError()
def getLockUtilFullPath(self):
@@ -165,6 +175,8 @@
self.log.debug("Cluster lock released successfully")
+ def releaseResource(self, resource, lockDisk):
+ raise ResourceLeasesNotSupportedError()
initSANLockLog = logging.getLogger("Storage.initSANLock")
@@ -286,13 +298,9 @@
# HOST_FAIL.
return HOST_STATUS_FREE
- # The hostId parameter is maintained here only for compatibility with
- # ClusterLock. We could consider to remove it in the future but keeping it
- # for logging purpose is desirable.
- def acquire(self, hostId):
+ def _acquire(self, resource, lockDisk, shared=False):
with nested(self._lock, SANLock._sanlock_lock):
- self.log.info("Acquiring cluster lock for domain %s (id: %s)",
- self._sdUUID, hostId)
+ self.log.info("Acquiring resource %s, shared=%s", resource, shared)
while True:
if SANLock._sanlock_fd is None:
@@ -304,27 +312,42 @@
"Cannot register to sanlock", str(e))
try:
- sanlock.acquire(self._sdUUID, SDM_LEASE_NAME,
- self.getLockDisk(),
- slkfd=SANLock._sanlock_fd)
+ sanlock.acquire(self._sdUUID, resource, lockDisk,
+ slkfd=SANLock._sanlock_fd, shared=shared)
except sanlock.SanlockException as e:
if e.errno != os.errno.EPIPE:
raise se.AcquireLockFailure(
self._sdUUID, e.errno,
- "Cannot acquire cluster lock", str(e))
+ "Cannot acquire sanlock resource", str(e))
SANLock._sanlock_fd = None
continue
break
- self.log.debug("Cluster lock for domain %s successfully acquired "
- "(id: %s)", self._sdUUID, hostId)
+ self.log.debug("Resource %s successfully acquired", resource)
+
+ # The hostId parameter is maintained here only for compatibility with
+ # ClusterLock. We could consider to remove it in the future but keeping it
+ # for logging purpose is desirable.
+ def acquire(self, hostId):
+ self.log.info("Acquiring cluster lock for domain %s (id: %s)",
+ self._sdUUID, hostId)
+ self._acquire(SDM_LEASE_NAME, self.getLockDisk())
+ self.log.debug("Cluster lock for domain %s successfully acquired "
+ "(id: %s)", self._sdUUID, hostId)
+
+ def acquireResource(self, resource, lockDisk, shared=False):
+ self._acquire(resource, lockDisk, shared)
+ res, owners = self._inquire(resource, lockDisk)
+ self.log.debug("ALITKE: acquire: res:%s owners:%s", res, owners)
+
+ def _inquire(self, resource, lockDisk):
+ res = sanlock.read_resource(*lockDisk[0])
+ owners = sanlock.read_resource_owners(self._sdUUID, resource, lockDisk)
+ return res, owners
def inquire(self):
- resource = sanlock.read_resource(self._leasesPath, SDM_LEASE_OFFSET)
- owners = sanlock.read_resource_owners(self._sdUUID, SDM_LEASE_NAME,
- self.getLockDisk())
-
+ resource, owners = self._inquire(SDM_LEASE_NAME, self.getLockDisk())
if len(owners) == 1:
return resource.get("version"), owners[0].get("host_id")
elif len(owners) > 1:
@@ -334,19 +357,32 @@
return None, None
- def release(self):
+ def inquireResource(self, resource, lockDisk):
+ resource, owners = self._inquire(SDM_LEASE_NAME, self.getLockDisk())
+ return (resource.get("version"),
+ [owner.get("host_id") for owner in owners])
+
+ def _release(self, resource, lockDisk):
with self._lock:
- self.log.info("Releasing cluster lock for domain %s", self._sdUUID)
+ self.log.info("Releasing resource %s", resource)
try:
- sanlock.release(self._sdUUID, SDM_LEASE_NAME,
- self.getLockDisk(), slkfd=SANLock._sanlock_fd)
+ sanlock.release(self._sdUUID, resource, lockDisk,
+ slkfd=SANLock._sanlock_fd)
except sanlock.SanlockException as e:
- raise se.ReleaseLockFailure(self._sdUUID, e)
+ raise se.ReleaseLockFailure(resource, e)
self._sanlockfd = None
- self.log.debug("Cluster lock for domain %s successfully released",
- self._sdUUID)
+ self.log.debug("Resource %s successfully released", resource)
+
+ def release(self):
+ self.log.info("Releasing cluster lock for domain %s", self._sdUUID)
+ self._release(SDM_LEASE_NAME, self.getLockDisk())
+ self.log.debug("Cluster lock for domain %s successfully released",
+ self._sdUUID)
+
+ def releaseResource(self, resource, lockDisk):
+ self._release(resource, lockDisk)
class LocalLock(object):
@@ -462,10 +498,16 @@
self.log.debug("Local lock for domain %s successfully acquired "
"(id: %s)", self._sdUUID, hostId)
+ def acquireResource(self, resource, lockDisk, shared=False):
+ raise ResourceLeasesNotSupportedError()
+
def inquire(self):
with self._globalLockMapSync:
hostId, lockFile = self._getLease()
return self.LVER, hostId if lockFile else None, None
+
+ def inquireResource(self, resource, lockDisk):
+ raise InquireNotSupportedError()
def release(self):
with self._globalLockMapSync:
@@ -483,3 +525,6 @@
self.log.debug("Local lock for domain %s successfully released",
self._sdUUID)
+
+ def releaseResource(self, resource, lockDisk):
+ raise ResourceLeasesNotSupportedError()
\ No newline at end of file
diff --git a/vdsm/storage/sd.py b/vdsm/storage/sd.py
index a771abf..9e6f281 100644
--- a/vdsm/storage/sd.py
+++ b/vdsm/storage/sd.py
@@ -514,6 +514,18 @@
def inquireClusterLock(self):
return self._clusterLock.inquire()
+ def acquireVolumeLease(self, imgUUID, volUUID, shared=False):
+ lockDisk = self.getVolumeLease(imgUUID, volUUID)
+ self._clusterLock.acquireResource(volUUID, [lockDisk], shared)
+
+ def releaseVolumeLease(self, imgUUID, volUUID):
+ lockDisk = self.getVolumeLease(imgUUID, volUUID)
+ self._clusterLock.releaseResource(volUUID, [lockDisk])
+
+ def inquireVolumeLease(self, imgUUID, volUUID):
+ lockDisk = self.getVolumeLease(imgUUID, volUUID)
+ return self._clusterLock.inquireResource(volUUID, [lockDisk])
+
def attach(self, spUUID):
self.invalidateMetadata()
pools = self.getPools()
--
To view, visit https://gerrit.ovirt.org/38622
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Icca901ccd27358767c023cd55b7a3823531d2a5a
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
7 years, 8 months
Change in vdsm[master]: storage: Factor out getAllVolumes
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: storage: Factor out getAllVolumes
......................................................................
storage: Factor out getAllVolumes
The SDM garbage collector needs a mapping of volumes by imageID.
Factor out this logic from getAllVolumes so it can be reused later.
Change-Id: If8c55edf6416dddc4a4f30b91422b3e968df2b99
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M vdsm/storage/fileSD.py
1 file changed, 17 insertions(+), 12 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/77/40377/1
diff --git a/vdsm/storage/fileSD.py b/vdsm/storage/fileSD.py
index 1220939..042f836 100644
--- a/vdsm/storage/fileSD.py
+++ b/vdsm/storage/fileSD.py
@@ -18,7 +18,6 @@
# Refer to the README and COPYING files for full details of the license
#
-import collections
import os
import errno
import logging
@@ -405,6 +404,21 @@
"""
pass
+ def getImageToVolumesMap(self):
+ volMetaPattern = os.path.join(self.mountpoint, self.sdUUID,
+ sd.DOMAIN_IMAGES, "*", "*.meta")
+ volMetaPaths = self.oop.glob.glob(volMetaPattern)
+ images = {}
+ for metaPath in volMetaPaths:
+ head, tail = os.path.split(metaPath)
+ volUUID, volExt = os.path.splitext(tail)
+ imgUUID = os.path.basename(head)
+ try:
+ images[imgUUID].append(volUUID)
+ except KeyError:
+ images[imgUUID] = [volUUID]
+ return images
+
def getAllVolumes(self):
"""
Return dict {volUUID: ((imgUUIDs,), parentUUID)} of the domain.
@@ -422,17 +436,8 @@
Template volumes have no parent, and thus we report BLANK_UUID as their
parentUUID.
"""
- volMetaPattern = os.path.join(self.mountpoint, self.sdUUID,
- sd.DOMAIN_IMAGES, "*", "*.meta")
- volMetaPaths = self.oop.glob.glob(volMetaPattern)
-
- # First create mapping from images to volumes
- images = collections.defaultdict(list)
- for metaPath in volMetaPaths:
- head, tail = os.path.split(metaPath)
- volUUID, volExt = os.path.splitext(tail)
- imgUUID = os.path.basename(head)
- images[imgUUID].append(volUUID)
+ # First get the mapping from images to volumes
+ images = self.getImageToVolumesMap()
# Using images to volumes mapping, we can create volumes to images
# mapping, detecting template volumes and template images, based on
--
To view, visit https://gerrit.ovirt.org/40377
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: If8c55edf6416dddc4a4f30b91422b3e968df2b99
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
7 years, 8 months
Change in vdsm[master]: clusterlock: Add reference counting
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: clusterlock: Add reference counting
......................................................................
clusterlock: Add reference counting
The clusterLock can now be acquired by multiple threads of execution
since it is used by SDM verbs now. We need reference counting to ensure
that one thread does not release the clusterLock while another thread
still needs it.
Change-Id: I846116ae16e88a51bdce20f97ddf22859dea3086
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M vdsm/storage/clusterlock.py
1 file changed, 55 insertions(+), 45 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/78/40378/1
diff --git a/vdsm/storage/clusterlock.py b/vdsm/storage/clusterlock.py
index 86d47ad..96bba2b 100644
--- a/vdsm/storage/clusterlock.py
+++ b/vdsm/storage/clusterlock.py
@@ -211,6 +211,7 @@
def __init__(self, sdUUID, idsPath, leasesPath, *args):
self._lock = threading.Lock()
+ self._clusterLockUsers = 0
self._sdUUID = sdUUID
self._idsPath = idsPath
self._leasesPath = leasesPath
@@ -302,16 +303,22 @@
# ClusterLock. We could consider to remove it in the future but keeping it
# for logging purpose is desirable.
def acquireClusterLock(self, hostId):
- self.log.info("Acquiring cluster lock for domain %s (id: %s)",
- self._sdUUID, hostId)
- self._acquire(SDM_LEASE_NAME, self.getLockDisk())
- self.log.debug("Cluster lock for domain %s successfully acquired "
- "(id: %s)", self._sdUUID, hostId)
+ with nested(self._lock, SANLock._sanlock_lock):
+ self.log.info("Acquiring cluster lock for domain %s (id: %s)",
+ self._sdUUID, hostId)
+ if self._clusterLockUsers == 0:
+ self._acquire(SDM_LEASE_NAME, self.getLockDisk())
+ self._clusterLockUsers = self._clusterLockUsers + 1
+ self.log.debug("Cluster lock for domain %s successfully acquired "
+ "(id: %s, users: %i)", self._sdUUID, hostId,
+ self._clusterLockUsers)
def acquireResource(self, resource, lockDisk, shared=False):
- self.log.info("Acquiring resource lock for %s", resource)
- self._acquire(resource, lockDisk, shared)
- self.log.debug("Resource lock for %s successfully acquired", resource)
+ with nested(self._lock, SANLock._sanlock_lock):
+ self.log.info("Acquiring resource lock for %s", resource)
+ self._acquire(resource, lockDisk, shared)
+ self.log.debug("Resource lock for %s successfully acquired",
+ resource)
def inquireClusterLock(self):
resource, owners = self._inquire(SDM_LEASE_NAME, self.getLockDisk())
@@ -330,43 +337,47 @@
[owner.get("host_id") for owner in owners])
def releaseClusterLock(self):
- self.log.info("Releasing cluster lock for domain %s", self._sdUUID)
- self._release(SDM_LEASE_NAME, self.getLockDisk())
- self.log.debug("Cluster lock for domain %s successfully released",
- self._sdUUID)
+ with self._lock:
+ self.log.info("Releasing cluster lock for domain %s", self._sdUUID)
+ if self._clusterLockUsers == 1:
+ self._release(SDM_LEASE_NAME, self.getLockDisk())
+ self._clusterLockUsers = self._clusterLockUsers - 1
+ self.log.debug("Cluster lock for domain %s successfully released "
+ "(users: %i)", self._sdUUID, self._clusterLockUsers)
def releaseResource(self, resource, lockDisk):
- self.log.info("Releasing resource lock for %s", resource)
- self._release(resource, lockDisk)
- self.log.debug("Resource lock for %s successfully released", resource)
+ with self._lock:
+ self.log.info("Releasing resource lock for %s", resource)
+ self._release(resource, lockDisk)
+ self.log.debug("Resource lock for %s successfully released",
+ resource)
def _acquire(self, resource, lockDisk, shared=False):
- with nested(self._lock, SANLock._sanlock_lock):
- self.log.info("Acquiring resource %s, shared=%s", resource, shared)
+ self.log.info("Acquiring resource %s, shared=%s", resource, shared)
- while True:
- if SANLock._sanlock_fd is None:
- try:
- SANLock._sanlock_fd = sanlock.register()
- except sanlock.SanlockException as e:
- raise se.AcquireLockFailure(
- self._sdUUID, e.errno,
- "Cannot register to sanlock", str(e))
-
+ while True:
+ if SANLock._sanlock_fd is None:
try:
- sanlock.acquire(self._sdUUID, resource, lockDisk,
- slkfd=SANLock._sanlock_fd, shared=shared)
+ SANLock._sanlock_fd = sanlock.register()
except sanlock.SanlockException as e:
- if e.errno != os.errno.EPIPE:
- raise se.AcquireLockFailure(
- self._sdUUID, e.errno,
- "Cannot acquire sanlock resource", str(e))
- SANLock._sanlock_fd = None
- continue
+ raise se.AcquireLockFailure(
+ self._sdUUID, e.errno, "Cannot register to sanlock",
+ str(e))
- break
+ try:
+ sanlock.acquire(self._sdUUID, resource, lockDisk,
+ slkfd=SANLock._sanlock_fd, shared=shared)
+ except sanlock.SanlockException as e:
+ if e.errno != os.errno.EPIPE:
+ raise se.AcquireLockFailure(
+ self._sdUUID, e.errno,
+ "Cannot acquire sanlock resource", str(e))
+ SANLock._sanlock_fd = None
+ continue
- self.log.debug("Resource %s successfully acquired", resource)
+ break
+
+ self.log.debug("Resource %s successfully acquired", resource)
def _inquire(self, resource, lockDisk):
res = sanlock.read_resource(*lockDisk[0])
@@ -374,17 +385,16 @@
return res, owners
def _release(self, resource, lockDisk):
- with self._lock:
- self.log.info("Releasing resource %s", resource)
+ self.log.info("Releasing resource %s", resource)
- try:
- sanlock.release(self._sdUUID, resource, lockDisk,
- slkfd=SANLock._sanlock_fd)
- except sanlock.SanlockException as e:
- raise se.ReleaseLockFailure(resource, e)
+ try:
+ sanlock.release(self._sdUUID, resource, lockDisk,
+ slkfd=SANLock._sanlock_fd)
+ except sanlock.SanlockException as e:
+ raise se.ReleaseLockFailure(resource, e)
- self._sanlockfd = None
- self.log.debug("Resource %s successfully released", resource)
+ self._sanlockfd = None
+ self.log.debug("Resource %s successfully released", resource)
class LocalLock(object):
--
To view, visit https://gerrit.ovirt.org/40378
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I846116ae16e88a51bdce20f97ddf22859dea3086
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
7 years, 8 months
Change in vdsm[master]: XXX: Insert sleep when creating block SD
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: XXX: Insert sleep when creating block SD
......................................................................
XXX: Insert sleep when creating block SD
Change-Id: I10ed63d747f1353da824af8ec56ac8a6f66d666b
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M vdsm/storage/blockSD.py
1 file changed, 4 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/53/43553/1
diff --git a/vdsm/storage/blockSD.py b/vdsm/storage/blockSD.py
index 0b8bdc7..0d5e403 100644
--- a/vdsm/storage/blockSD.py
+++ b/vdsm/storage/blockSD.py
@@ -896,6 +896,10 @@
# Create VMS file system
_createVMSfs(os.path.join("/dev", vgName, MASTERLV))
+ # XXX: When running an iSCSI server and initiator on the same host we
+ # need to wait a bit for IO to settle before deactivating LVs
+ import time
+ time.sleep(10)
lvm.deactivateLVs(vgName, MASTERLV)
path = lvm.lvPath(vgName, sd.METADATA)
--
To view, visit https://gerrit.ovirt.org/43553
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I10ed63d747f1353da824af8ec56ac8a6f66d666b
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
7 years, 8 months