master - lvmdbusd: Fix hang when lvm compiled with 'enable-notify-dbus'
by tasleson
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=dd5d865020acd5...
Commit: dd5d865020acd545712d4bcc0f3236143de4d76d
Parent: 5274c2f11b9e262588c1dec86609e829618d2e76
Author: Tony Asleson <tasleson(a)redhat.com>
AuthorDate: Tue Jun 28 12:07:21 2016 -0500
Committer: Tony Asleson <tasleson(a)redhat.com>
CommitterDate: Tue Jun 28 12:09:28 2016 -0500
lvmdbusd: Fix hang when lvm compiled with 'enable-notify-dbus'
The following operations would hang if lvm was compiled with
'enable-notify-dbus' and the client specified -1 for the timeout:
* LV snapshot merge
* VG move
* LV move
This was caused by the implementation of these three dbus methods being
different from the rest. Most dbus method calls are executed by gathering the
information needed to fulfill the request, placing that information on a
thread-safe queue and returning; the results are later returned to the client
via callbacks. With this approach we can process an arbitrary number of
commands without any of them blocking other dbus commands. However, the three
dbus methods listed above did not use this mechanism: because these operations
can be very slow to complete, they were implemented with a separate thread
that handles the fork & exec of lvm. As a consequence, the lvm command we were
waiting on tried to call back into the dbus service to notify it that
something had changed, but because our code was blocking the thread that
handles incoming dbus activity, the lvm command itself blocked. We were stuck
until the client timed out the connection, which caused the service to unblock
and continue. If the client did not have a timeout, we would have hung
indefinitely.
The fix is to always use the worker queue for all dbus methods. We need to
ensure that lvm is tested with 'enable-notify-dbus' both enabled and disabled.
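To illustrate, here is a condensed Python sketch of the queued/async pattern
the fix switches these methods to, based on the lv.py hunk below (abbreviated
and not standalone; RequestEntry, JobState, cfg.worker_q and background.move
are the lvmdbusd names used in the patch):

    @dbus.service.method(
        dbus_interface=LV_INTERFACE,
        in_signature='o(tt)a(ott)ia{sv}',
        out_signature='o',
        async_callbacks=('cb', 'cbe'))
    def Move(self, pv_src_obj, pv_source_range, pv_dests_and_ranges,
             tmo, move_options, cb, cbe):
        # Build the request and hand it to the worker queue instead of
        # blocking here.  A worker thread runs background.move() and the
        # result reaches the client through the cb/cbe callbacks, so the
        # dbus main loop stays free to service lvm's notify-dbus call.
        job_state = JobState()
        r = RequestEntry(
            tmo, background.move,
            (LV_INTERFACE, self.lvm_id, pv_src_obj, pv_source_range,
             pv_dests_and_ranges, move_options, job_state),
            cb, cbe, False, job_state)
        cfg.worker_q.put(r)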
---
daemons/lvmdbusd/background.py | 77 ++++++++++-----------------------------
daemons/lvmdbusd/job.py | 2 +-
daemons/lvmdbusd/lv.py | 34 ++++++++++++-----
daemons/lvmdbusd/request.py | 5 ++-
daemons/lvmdbusd/vg.py | 19 +++++++---
5 files changed, 62 insertions(+), 75 deletions(-)
diff --git a/daemons/lvmdbusd/background.py b/daemons/lvmdbusd/background.py
index 4265154..ca3d60e 100644
--- a/daemons/lvmdbusd/background.py
+++ b/daemons/lvmdbusd/background.py
@@ -43,40 +43,22 @@ def lv_merge_cmd(merge_options, lv_full_name):
return cmd
-def _create_background_dbus_job(job_state):
- job_obj = Job(None, job_state)
- cfg.om.register_object(job_obj)
- return job_obj.dbus_object_path()
-
-
-def _move_merge(interface_name, cmd, time_out):
- # Create job object to be used while running the command
- rc = '/'
- job_state = JobState(None)
+def _move_merge(interface_name, cmd, job_state):
add(cmd, job_state)
- if time_out == -1:
- # Waiting forever
- done = job_state.Wait(time_out)
- if not done:
- ec, err_msg = job_state.GetError
- raise dbus.exceptions.DBusException(
- interface_name,
- 'Exit code %s, stderr = %s' % (str(ec), err_msg))
- elif time_out == 0:
- # Immediately create and return a job
- rc = _create_background_dbus_job(job_state)
- else:
- # Willing to wait for a bit
- done = job_state.Wait(time_out)
- if not done:
- rc = _create_background_dbus_job(job_state)
+ done = job_state.Wait(-1)
+ if not done:
+ ec, err_msg = job_state.GetError
+ raise dbus.exceptions.DBusException(
+ interface_name,
+ 'Exit code %s, stderr = %s' % (str(ec), err_msg))
- return rc
+ cfg.load()
+ return '/'
def move(interface_name, lv_name, pv_src_obj, pv_source_range,
- pv_dests_and_ranges, move_options, time_out):
+ pv_dests_and_ranges, move_options, job_state):
"""
Common code for the pvmove handling.
:param interface_name: What dbus interface we are providing for
@@ -85,8 +67,8 @@ def move(interface_name, lv_name, pv_src_obj, pv_source_range,
:param pv_source_range: (0,0 to ignore, else start, end segments)
:param pv_dests_and_ranges: Array of PV object paths and start/end segs
:param move_options: Hash with optional arguments
- :param time_out:
- :return: Object path to job object
+ :param job_state: Used to convey information about jobs between processes
+ :return: '/' When complete, the empty object path
"""
pv_dests = []
pv_src = cfg.om.get_object_by_path(pv_src_obj)
@@ -112,18 +94,18 @@ def move(interface_name, lv_name, pv_src_obj, pv_source_range,
pv_source_range,
pv_dests)
- return _move_merge(interface_name, cmd, time_out)
+ return _move_merge(interface_name, cmd, job_state)
else:
raise dbus.exceptions.DBusException(
interface_name, 'pv_src_obj (%s) not found' % pv_src_obj)
-def merge(interface_name, lv_uuid, lv_name, merge_options, time_out):
+def merge(interface_name, lv_uuid, lv_name, merge_options, job_state):
# Make sure we have a dbus object representing it
dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
if dbo:
cmd = lv_merge_cmd(merge_options, dbo.lvm_id)
- return _move_merge(interface_name, cmd, time_out)
+ return _move_merge(interface_name, cmd, job_state)
else:
raise dbus.exceptions.DBusException(
interface_name,
@@ -143,17 +125,6 @@ def background_reaper():
time.sleep(3)
-def process_background_result(job_object, exit_code, error_msg):
- cfg.load()
- job_object.set_result(exit_code, error_msg)
- return None
-
-
-# noinspection PyUnusedLocal
-def empty_cb(disregard):
- pass
-
-
def background_execute(command, background_job, skip_first_line=False):
# Wrap this whole operation in an exception handler, otherwise if we
@@ -181,23 +152,15 @@ def background_execute(command, background_job, skip_first_line=False):
if process.returncode == 0:
background_job.Percent = 100
- # Queue up the result so that it gets executed in same thread as others.
- r = RequestEntry(
- -1, process_background_result,
- (background_job, process.returncode, out[1]),
- empty_cb, empty_cb, False)
- cfg.worker_q.put(r)
+ background_job.set_result(process.returncode, out[1])
+
except Exception:
- # In the unlikely event that we blew up, lets notify fill out the
- # job object so that the client doesn't hang potentially forever!
+ # In the unlikely event that we blow up, we need to unblock caller which
+ # is waiting on an answer.
st = traceback.format_exc()
error = "Exception in background thread: \n%s" % st
log_error(error)
- r = RequestEntry(
- -1, process_background_result,
- (background_job, 1, error),
- empty_cb, empty_cb, False)
- cfg.worker_q.put(r)
+ background_job.set_result(1, error)
def add(command, reporting_job):
diff --git a/daemons/lvmdbusd/job.py b/daemons/lvmdbusd/job.py
index b16f8e6..d7f8187 100644
--- a/daemons/lvmdbusd/job.py
+++ b/daemons/lvmdbusd/job.py
@@ -17,7 +17,7 @@ import threading
# noinspection PyPep8Naming
class JobState(object):
- def __init__(self, request):
+ def __init__(self, request=None):
self.rlock = threading.RLock()
self._percent = 0
diff --git a/daemons/lvmdbusd/lv.py b/daemons/lvmdbusd/lv.py
index 5c7b3b5..8f063dc 100644
--- a/daemons/lvmdbusd/lv.py
+++ b/daemons/lvmdbusd/lv.py
@@ -22,6 +22,7 @@ from .loader import common
from .state import State
from . import background
from .utils import round_size
+from .job import JobState
# Try and build a key for a LV, so that we sort the LVs with least dependencies
@@ -444,14 +445,21 @@ class Lv(LvCommon):
@dbus.service.method(
dbus_interface=LV_INTERFACE,
in_signature='o(tt)a(ott)ia{sv}',
- out_signature='o')
+ out_signature='o',
+ async_callbacks=('cb', 'cbe'))
def Move(self, pv_src_obj, pv_source_range,
pv_dests_and_ranges,
- tmo, move_options):
- return background.move(
- LV_INTERFACE, self.lvm_id, pv_src_obj,
- pv_source_range, pv_dests_and_ranges,
- move_options, tmo)
+ tmo, move_options, cb, cbe):
+
+ job_state = JobState()
+
+ r = RequestEntry(
+ tmo, background.move,
+ (LV_INTERFACE, self.lvm_id, pv_src_obj, pv_source_range,
+ pv_dests_and_ranges, move_options, job_state), cb, cbe, False,
+ job_state)
+
+ cfg.worker_q.put(r)
@staticmethod
def _snap_shot(lv_uuid, lv_name, name, optional_size,
@@ -875,7 +883,13 @@ class LvSnapShot(Lv):
@dbus.service.method(
dbus_interface=SNAPSHOT_INTERFACE,
in_signature='ia{sv}',
- out_signature='o')
- def Merge(self, tmo, merge_options):
- return background.merge(SNAPSHOT_INTERFACE, self.Uuid, self.lvm_id,
- merge_options, tmo)
+ out_signature='o',
+ async_callbacks=('cb', 'cbe'))
+ def Merge(self, tmo, merge_options, cb, cbe):
+ job_state = JobState()
+
+ r = RequestEntry(tmo, background.merge,
+ (SNAPSHOT_INTERFACE, self.Uuid, self.lvm_id,
+ merge_options, job_state), cb, cbe, False,
+ job_state)
+ cfg.worker_q.put(r)
diff --git a/daemons/lvmdbusd/request.py b/daemons/lvmdbusd/request.py
index 3e29b82..ce6a6ef 100644
--- a/daemons/lvmdbusd/request.py
+++ b/daemons/lvmdbusd/request.py
@@ -18,7 +18,7 @@ from .utils import log_error
class RequestEntry(object):
def __init__(self, tmo, method, arguments, cb, cb_error,
- return_tuple=True):
+ return_tuple=True, job_state=None):
self.tmo = tmo
self.method = method
self.arguments = arguments
@@ -33,6 +33,7 @@ class RequestEntry(object):
self._rc = 0
self._rc_error = None
self._return_tuple = return_tuple
+ self._job_state = job_state
if self.tmo < 0:
# Client is willing to block forever
@@ -53,7 +54,7 @@ class RequestEntry(object):
r.timer_expired()
def _return_job(self):
- self._job = Job(self)
+ self._job = Job(self, self._job_state)
cfg.om.register_object(self._job, True)
if self._return_tuple:
self.cb(('/', self._job.dbus_object_path()))
diff --git a/daemons/lvmdbusd/vg.py b/daemons/lvmdbusd/vg.py
index c700667..4cc938e 100644
--- a/daemons/lvmdbusd/vg.py
+++ b/daemons/lvmdbusd/vg.py
@@ -20,6 +20,7 @@ from .loader import common
from .state import State
from . import background
from .utils import round_size
+from .job import JobState
# noinspection PyUnusedLocal
@@ -352,12 +353,20 @@ class Vg(AutomatedProperties):
@dbus.service.method(
dbus_interface=VG_INTERFACE,
in_signature='o(tt)a(ott)ia{sv}',
- out_signature='o')
+ out_signature='o',
+ async_callbacks=('cb', 'cbe'))
def Move(self, pv_src_obj, pv_source_range, pv_dests_and_ranges,
- tmo, move_options):
- return background.move(
- VG_INTERFACE, None, pv_src_obj, pv_source_range,
- pv_dests_and_ranges, move_options, tmo)
+ tmo, move_options, cb, cbe):
+
+ job_state = JobState()
+
+ r = RequestEntry(
+ tmo, background.move,
+ (VG_INTERFACE, None, pv_src_obj, pv_source_range,
+ pv_dests_and_ranges, move_options, job_state), cb, cbe, False,
+ job_state)
+
+ cfg.worker_q.put(r)
@staticmethod
def _lv_create(uuid, vg_name, name, size_bytes, pv_dests_and_ranges,
master - lvmdbusd: request.py, initialize _job to None
by tasleson
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=5274c2f11b9e26...
Commit: 5274c2f11b9e262588c1dec86609e829618d2e76
Parent: 4f26eae40f07399f11a2abe675aef850b0dd5239
Author: Tony Asleson <tasleson(a)redhat.com>
AuthorDate: Tue Jun 28 11:45:05 2016 -0500
Committer: Tony Asleson <tasleson(a)redhat.com>
CommitterDate: Tue Jun 28 12:09:28 2016 -0500
lvmdbusd: request.py, initialize _job to None
_job is a job object, not a boolean, so initialize it to None instead.
---
daemons/lvmdbusd/request.py | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/daemons/lvmdbusd/request.py b/daemons/lvmdbusd/request.py
index 02f8c88..3e29b82 100644
--- a/daemons/lvmdbusd/request.py
+++ b/daemons/lvmdbusd/request.py
@@ -29,7 +29,7 @@ class RequestEntry(object):
self.lock = threading.RLock()
self.done = False
self._result = None
- self._job = False
+ self._job = None
self._rc = 0
self._rc_error = None
self._return_tuple = return_tuple
master - lvmdbusd: Correct pv_seg_start -> pvseg_start
by tasleson
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=4f26eae40f0739...
Commit: 4f26eae40f07399f11a2abe675aef850b0dd5239
Parent: 0aadd6b0fb4eaa9e59d6f750f2efe2fdfdacdd8f
Author: Tony Asleson <tasleson(a)redhat.com>
AuthorDate: Mon Jun 27 17:07:20 2016 -0500
Committer: Tony Asleson <tasleson(a)redhat.com>
CommitterDate: Tue Jun 28 12:09:28 2016 -0500
lvmdbusd: Correct pv_seg_start -> pvseg_start
The code was using incorrect column names for pvseg_start, which only
became apparent when using the JSON output.
---
daemons/lvmdbusd/cmdhandler.py | 4 ++--
daemons/lvmdbusd/lvmdb.py | 11 +++++------
2 files changed, 7 insertions(+), 8 deletions(-)
diff --git a/daemons/lvmdbusd/cmdhandler.py b/daemons/lvmdbusd/cmdhandler.py
index 2ca23e7..138142f 100644
--- a/daemons/lvmdbusd/cmdhandler.py
+++ b/daemons/lvmdbusd/cmdhandler.py
@@ -447,7 +447,7 @@ def lvm_full_report_json():
'pv_pe_alloc_count', 'pv_attr', 'pv_tags', 'vg_name',
'vg_uuid']
- pv_seg_columns = ['pv_seg_start', 'pv_seg_size', 'segtype',
+ pv_seg_columns = ['pvseg_start', 'pvseg_size', 'segtype',
'pv_uuid', 'lv_uuid', 'pv_name']
vg_columns = ['vg_name', 'vg_uuid', 'vg_fmt', 'vg_size', 'vg_free',
@@ -491,7 +491,7 @@ def pv_retrieve_with_segs(device=None):
'pv_used', 'dev_size', 'pv_mda_size', 'pv_mda_free',
'pv_ba_start', 'pv_ba_size', 'pe_start', 'pv_pe_count',
'pv_pe_alloc_count', 'pv_attr', 'pv_tags', 'vg_name',
- 'vg_uuid', 'pv_seg_start', 'pvseg_size', 'segtype']
+ 'vg_uuid', 'pvseg_start', 'pvseg_size', 'segtype']
# Lvm has some issues where it returns failure when querying pvs when other
# operations are in process, see:
diff --git a/daemons/lvmdbusd/lvmdb.py b/daemons/lvmdbusd/lvmdb.py
index f070bb6..51794d5 100755
--- a/daemons/lvmdbusd/lvmdb.py
+++ b/daemons/lvmdbusd/lvmdb.py
@@ -83,7 +83,7 @@ class DataStore(object):
for p in pvs:
DataStore._insert_record(
c_pvs, p['pv_uuid'], p,
- ['pv_seg_start', 'pvseg_size', 'segtype'])
+ ['pvseg_start', 'pvseg_size', 'segtype'])
for p in c_pvs.values():
# Capture which PVs are associated with which VG
@@ -124,16 +124,15 @@ class DataStore(object):
if 'pvseg' in r:
for s in r['pvseg']:
- # TODO Why is json pvseg_start, not pv_seg_start?
r = c_pvs[s['pv_uuid']]
- r.setdefault('pv_seg_start', []).append(s['pvseg_start'])
+ r.setdefault('pvseg_start', []).append(s['pvseg_start'])
r.setdefault('pvseg_size', []).append(s['pvseg_size'])
r.setdefault('segtype', []).append(s['segtype'])
# TODO: Remove this bug work around when we have orphan segs.
for i in c_pvs.values():
- if 'pv_seg_start' not in i:
- i['pv_seg_start'] = '0'
+ if 'pvseg_start' not in i:
+ i['pvseg_start'] = '0'
i['pvseg_size'] = i['pv_pe_count']
i['segtype'] = 'free'
@@ -469,7 +468,7 @@ class DataStore(object):
def pv_pe_segments(self, pv_uuid):
pv = self.pvs[pv_uuid]
- return list(zip(pv['pv_seg_start'], pv['pvseg_size']))
+ return list(zip(pv['pvseg_start'], pv['pvseg_size']))
def pv_contained_lv(self, pv_device):
rc = []
master - lvmdbusd: Correct command line args for JSON
by tasleson
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=0aadd6b0fb4eaa...
Commit: 0aadd6b0fb4eaa9e59d6f750f2efe2fdfdacdd8f
Parent: 4b337b20d47953e130edc8a456e9431eba58a778
Author: Tony Asleson <tasleson(a)redhat.com>
AuthorDate: Mon Jun 27 17:04:44 2016 -0500
Committer: Tony Asleson <tasleson(a)redhat.com>
CommitterDate: Tue Jun 28 12:09:28 2016 -0500
lvmdbusd: Correct command line args for JSON
Use the updated syntax.
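For reference, the old form passed one -o per subreport with a '/<report>/'
prefix on the field list, while the new form selects each subreport explicitly
with --configreport; condensed from the diff below (column lists abbreviated):

    # Old form:
    #   '-o', '/pv/' + ','.join(pv_columns),
    # New form:
    cmd = _dc('fullreport', [
        '-a',  # need hidden LVs too
        '--configreport', 'pv', '-o', ','.join(pv_columns),
        '--configreport', 'vg', '-o', ','.join(vg_columns),
        '--configreport', 'lv', '-o', ','.join(lv_columns),
        '--configreport', 'seg', '-o', ','.join(lv_seg_columns),
        '--configreport', 'pvseg', '-o', ','.join(pv_seg_columns),
        '--reportformat', 'json'
    ])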
---
daemons/lvmdbusd/cmdhandler.py | 10 +++++-----
1 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/daemons/lvmdbusd/cmdhandler.py b/daemons/lvmdbusd/cmdhandler.py
index 0440dbd..2ca23e7 100644
--- a/daemons/lvmdbusd/cmdhandler.py
+++ b/daemons/lvmdbusd/cmdhandler.py
@@ -467,11 +467,11 @@ def lvm_full_report_json():
cmd = _dc('fullreport', [
'-a', # Need hidden too
- '-o', '/pv/' + ','.join(pv_columns),
- '-o', '/vg/' + ','.join(vg_columns),
- '-o', '/lv/' + ','.join(lv_columns),
- '-o', '/seg/' + ','.join(lv_seg_columns),
- '-o', '/pvseg/' + ','.join(pv_seg_columns),
+ '--configreport', 'pv', '-o', ','.join(pv_columns),
+ '--configreport', 'vg', '-o', ','.join(vg_columns),
+ '--configreport', 'lv', '-o', ','.join(lv_columns),
+ '--configreport', 'seg', '-o', ','.join(lv_seg_columns),
+ '--configreport', 'pvseg', '-o', ','.join(pv_seg_columns),
'--reportformat', 'json'
])
master - lvmdbusd: Remove sorting in db layer
by tasleson
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=4b337b20d47953...
Commit: 4b337b20d47953e130edc8a456e9431eba58a778
Parent: e514284c65a7cffa24549a34612392731f6e06af
Author: Tony Asleson <tasleson(a)redhat.com>
AuthorDate: Mon Jun 6 15:08:20 2016 -0500
Committer: Tony Asleson <tasleson(a)redhat.com>
CommitterDate: Tue Jun 28 12:09:28 2016 -0500
lvmdbusd: Remove sorting in db layer
When using the JSON output this does not yield a totally sorted list, as we
do not have the complete set of LVs, so remove this sort.
---
daemons/lvmdbusd/lvmdb.py | 8 +-------
1 files changed, 1 insertions(+), 7 deletions(-)
diff --git a/daemons/lvmdbusd/lvmdb.py b/daemons/lvmdbusd/lvmdb.py
index 5ac90e3..f070bb6 100755
--- a/daemons/lvmdbusd/lvmdb.py
+++ b/daemons/lvmdbusd/lvmdb.py
@@ -245,16 +245,10 @@ class DataStore(object):
# Each item item in the report is a collection of information pertaining
# to the vg
for r in _all['report']:
- tmp_lv = []
# Get the lv data for this VG.
if 'lv' in r:
- tmp_lv.extend(r['lv'])
-
- # Sort them
- sorted_tmp_lv = sorted(tmp_lv, key=lambda vk: vk['lv_name'])
-
# Add them to result set
- for i in sorted_tmp_lv:
+ for i in r['lv']:
full_name = "%s/%s" % (i['vg_name'], i['lv_name'])
c_lv_full_lookup[full_name] = i['lv_uuid']
c_lvs[i['lv_uuid']] = i
master - lvmdbusd: Add hidden in for json too
by tasleson
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=e514284c65a7cf...
Commit: e514284c65a7cffa24549a34612392731f6e06af
Parent: ec45be9976a3c0633d89e0516f7947c305c61e47
Author: Tony Asleson <tasleson(a)redhat.com>
AuthorDate: Fri Jun 3 16:52:17 2016 -0500
Committer: Tony Asleson <tasleson(a)redhat.com>
CommitterDate: Tue Jun 28 12:09:28 2016 -0500
lvmdbusd: Add hidden in for json too
---
daemons/lvmdbusd/cmdhandler.py | 1 +
1 files changed, 1 insertions(+), 0 deletions(-)
diff --git a/daemons/lvmdbusd/cmdhandler.py b/daemons/lvmdbusd/cmdhandler.py
index 46fb68c..0440dbd 100644
--- a/daemons/lvmdbusd/cmdhandler.py
+++ b/daemons/lvmdbusd/cmdhandler.py
@@ -466,6 +466,7 @@ def lvm_full_report_json():
lv_seg_columns = ['seg_pe_ranges', 'segtype', 'lv_uuid']
cmd = _dc('fullreport', [
+ '-a', # Need hidden too
'-o', '/pv/' + ','.join(pv_columns),
'-o', '/vg/' + ','.join(vg_columns),
'-o', '/lv/' + ','.join(lv_columns),
master - lvmdbusd: Initial support for json output
by tasleson
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=ec45be9976a3c0...
Commit: ec45be9976a3c0633d89e0516f7947c305c61e47
Parent: 79446ffad7358dd8c130ff498514150d1e0ea08b
Author: Tony Asleson <tasleson(a)redhat.com>
AuthorDate: Fri Jun 3 13:18:21 2016 -0500
Committer: Tony Asleson <tasleson(a)redhat.com>
CommitterDate: Tue Jun 28 12:09:28 2016 -0500
lvmdbusd: Initial support for json output
---
daemons/lvmdbusd/cmdhandler.py | 54 ++++++++++++
daemons/lvmdbusd/lvmdb.py | 175 +++++++++++++++++++++++++++++++++++-----
2 files changed, 209 insertions(+), 20 deletions(-)
diff --git a/daemons/lvmdbusd/cmdhandler.py b/daemons/lvmdbusd/cmdhandler.py
index 1522ebf..46fb68c 100644
--- a/daemons/lvmdbusd/cmdhandler.py
+++ b/daemons/lvmdbusd/cmdhandler.py
@@ -22,6 +22,11 @@ except SystemError:
from utils import pv_dest_ranges, log_debug, log_error
from lvm_shell_proxy import LVMShellProxy
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
SEP = '{|}'
total_time = 0.0
@@ -426,6 +431,55 @@ def lv_detach_cache(lv_full_name, detach_options, destroy_cache):
return call(cmd)
+def supports_json():
+ cmd = ['help']
+ rc, out, err = call(cmd)
+ if rc == 0:
+ if 'fullreport' in err:
+ return True
+ return False
+
+
+def lvm_full_report_json():
+ pv_columns = ['pv_name', 'pv_uuid', 'pv_fmt', 'pv_size', 'pv_free',
+ 'pv_used', 'dev_size', 'pv_mda_size', 'pv_mda_free',
+ 'pv_ba_start', 'pv_ba_size', 'pe_start', 'pv_pe_count',
+ 'pv_pe_alloc_count', 'pv_attr', 'pv_tags', 'vg_name',
+ 'vg_uuid']
+
+ pv_seg_columns = ['pv_seg_start', 'pv_seg_size', 'segtype',
+ 'pv_uuid', 'lv_uuid', 'pv_name']
+
+ vg_columns = ['vg_name', 'vg_uuid', 'vg_fmt', 'vg_size', 'vg_free',
+ 'vg_sysid', 'vg_extent_size', 'vg_extent_count',
+ 'vg_free_count', 'vg_profile', 'max_lv', 'max_pv',
+ 'pv_count', 'lv_count', 'snap_count', 'vg_seqno',
+ 'vg_mda_count', 'vg_mda_free', 'vg_mda_size',
+ 'vg_mda_used_count', 'vg_attr', 'vg_tags']
+
+ lv_columns = ['lv_uuid', 'lv_name', 'lv_path', 'lv_size',
+ 'vg_name', 'pool_lv_uuid', 'pool_lv', 'origin_uuid',
+ 'origin', 'data_percent',
+ 'lv_attr', 'lv_tags', 'vg_uuid', 'lv_active', 'data_lv',
+ 'metadata_lv', 'lv_parent', 'lv_role', 'lv_layout']
+
+ lv_seg_columns = ['seg_pe_ranges', 'segtype', 'lv_uuid']
+
+ cmd = _dc('fullreport', [
+ '-o', '/pv/' + ','.join(pv_columns),
+ '-o', '/vg/' + ','.join(vg_columns),
+ '-o', '/lv/' + ','.join(lv_columns),
+ '-o', '/seg/' + ','.join(lv_seg_columns),
+ '-o', '/pvseg/' + ','.join(pv_seg_columns),
+ '--reportformat', 'json'
+ ])
+
+ rc, out, err = call(cmd)
+ if rc == 0:
+ return json.loads(out)
+ return None
+
+
def pv_retrieve_with_segs(device=None):
d = []
err = ""
diff --git a/daemons/lvmdbusd/lvmdb.py b/daemons/lvmdbusd/lvmdb.py
index d14ae3b..5ac90e3 100755
--- a/daemons/lvmdbusd/lvmdb.py
+++ b/daemons/lvmdbusd/lvmdb.py
@@ -13,6 +13,7 @@ from collections import OrderedDict
import pprint as prettyprint
import os
+import sys
try:
from . import cmdhandler
@@ -23,7 +24,7 @@ except SystemError:
class DataStore(object):
- def __init__(self):
+ def __init__(self, usejson=None):
self.pvs = {}
self.vgs = {}
self.lvs = {}
@@ -41,6 +42,11 @@ class DataStore(object):
# self.refresh()
self.num_refreshes = 0
+ if usejson is None:
+ self.json = cmdhandler.supports_json()
+ else:
+ self.json = usejson
+
@staticmethod
def _insert_record(table, key, record, allowed_multiple):
if key in table:
@@ -94,6 +100,58 @@ class DataStore(object):
return c_pvs, c_lookup, c_pvs_in_vgs
@staticmethod
+ def _parse_pvs_json(_all):
+
+ c_pvs = OrderedDict()
+ c_lookup = {}
+ c_pvs_in_vgs = {}
+
+ # Each item item in the report is a collection of information pertaining
+ # to the vg
+ for r in _all['report']:
+ tmp_pv = []
+
+ # Get the pv data for this VG.
+ if 'pv' in r:
+ tmp_pv.extend(r['pv'])
+
+ # Sort them
+ sorted_tmp_pv = sorted(tmp_pv, key=lambda pk: pk['pv_name'])
+
+ # Add them to result set
+ for p in sorted_tmp_pv:
+ c_pvs[p['pv_uuid']] = p
+
+ if 'pvseg' in r:
+ for s in r['pvseg']:
+ # TODO Why is json pvseg_start, not pv_seg_start?
+ r = c_pvs[s['pv_uuid']]
+ r.setdefault('pv_seg_start', []).append(s['pvseg_start'])
+ r.setdefault('pvseg_size', []).append(s['pvseg_size'])
+ r.setdefault('segtype', []).append(s['segtype'])
+
+ # TODO: Remove this bug work around when we have orphan segs.
+ for i in c_pvs.values():
+ if 'pv_seg_start' not in i:
+ i['pv_seg_start'] = '0'
+ i['pvseg_size'] = i['pv_pe_count']
+ i['segtype'] = 'free'
+
+ for p in c_pvs.values():
+ # Capture which PVs are associated with which VG
+ if p['vg_uuid'] not in c_pvs_in_vgs:
+ c_pvs_in_vgs[p['vg_uuid']] = []
+
+ if p['vg_name']:
+ c_pvs_in_vgs[p['vg_uuid']].append(
+ (p['pv_name'], p['pv_uuid']))
+
+ # Lookup for translating between /dev/<name> and pv uuid
+ c_lookup[p['pv_name']] = p['pv_uuid']
+
+ return c_pvs, c_lookup, c_pvs_in_vgs
+
+ @staticmethod
def _parse_vgs(_vgs):
vgs = sorted(_vgs, key=lambda vk: vk['vg_name'])
@@ -107,20 +165,31 @@ class DataStore(object):
return c_vgs, c_lookup
@staticmethod
- def _parse_lvs(_lvs):
- lvs = sorted(_lvs, key=lambda vk: vk['lv_name'])
+ def _parse_vgs_json(_all):
- c_lvs = OrderedDict()
- c_lvs_in_vgs = {}
- c_lvs_hidden = {}
- c_lv_full_lookup = {}
+ tmp_vg = []
+ for r in _all['report']:
+ # Get the pv data for this VG.
+ if 'vg' in r:
+ tmp_vg.extend(r['vg'])
- for i in lvs:
- full_name = "%s/%s" % (i['vg_name'], i['lv_name'])
- c_lv_full_lookup[full_name] = i['lv_uuid']
- DataStore._insert_record(
- c_lvs, i['lv_uuid'], i,
- ['seg_pe_ranges', 'segtype'])
+ # Sort for consistent output, however this is optional
+ vgs = sorted(tmp_vg, key=lambda vk: vk['vg_name'])
+
+ c_vgs = OrderedDict()
+ c_lookup = {}
+
+ for i in vgs:
+ c_lookup[i['vg_name']] = i['vg_uuid']
+ c_vgs[i['vg_uuid']] = i
+
+ return c_vgs, c_lookup
+
+ @staticmethod
+ def _parse_lvs_common(c_lvs, c_lv_full_lookup):
+
+ c_lvs_in_vgs = OrderedDict()
+ c_lvs_hidden = OrderedDict()
for i in c_lvs.values():
if i['vg_uuid'] not in c_lvs_in_vgs:
@@ -151,6 +220,55 @@ class DataStore(object):
return c_lvs, c_lvs_in_vgs, c_lvs_hidden, c_lv_full_lookup
@staticmethod
+ def _parse_lvs(_lvs):
+ lvs = sorted(_lvs, key=lambda vk: vk['lv_name'])
+
+ c_lvs = OrderedDict()
+ c_lv_full_lookup = OrderedDict()
+
+ for i in lvs:
+ full_name = "%s/%s" % (i['vg_name'], i['lv_name'])
+ c_lv_full_lookup[full_name] = i['lv_uuid']
+ DataStore._insert_record(
+ c_lvs, i['lv_uuid'], i,
+ ['seg_pe_ranges', 'segtype'])
+
+ return DataStore._parse_lvs_common(c_lvs, c_lv_full_lookup)
+
+
+ @staticmethod
+ def _parse_lvs_json(_all):
+
+ c_lvs = OrderedDict()
+ c_lv_full_lookup = {}
+
+ # Each item item in the report is a collection of information pertaining
+ # to the vg
+ for r in _all['report']:
+ tmp_lv = []
+ # Get the lv data for this VG.
+ if 'lv' in r:
+ tmp_lv.extend(r['lv'])
+
+ # Sort them
+ sorted_tmp_lv = sorted(tmp_lv, key=lambda vk: vk['lv_name'])
+
+ # Add them to result set
+ for i in sorted_tmp_lv:
+ full_name = "%s/%s" % (i['vg_name'], i['lv_name'])
+ c_lv_full_lookup[full_name] = i['lv_uuid']
+ c_lvs[i['lv_uuid']] = i
+
+ # Add in the segment data
+ if 'seg' in r:
+ for s in r['seg']:
+ r = c_lvs[s['lv_uuid']]
+ r.setdefault('seg_pe_ranges', []).append(s['seg_pe_ranges'])
+ r.setdefault('segtype', []).append(s['segtype'])
+
+ return DataStore._parse_lvs_common(c_lvs, c_lv_full_lookup)
+
+ @staticmethod
def _make_list(l):
if not isinstance(l, list):
l = [l]
@@ -278,13 +396,22 @@ class DataStore(object):
log_debug("lvmdb - refresh entry")
# Grab everything first then parse it
- _raw_pvs = cmdhandler.pv_retrieve_with_segs()
- _raw_vgs = cmdhandler.vg_retrieve(None)
- _raw_lvs = cmdhandler.lv_retrieve_with_segments()
+ if self.json:
+ # Do a single lvm retrieve for everything in json
+ a = cmdhandler.lvm_full_report_json()
- _pvs, _pvs_lookup, _pvs_in_vgs = self._parse_pvs(_raw_pvs)
- _vgs, _vgs_lookup = self._parse_vgs(_raw_vgs)
- _lvs, _lvs_in_vgs, _lvs_hidden, _lvs_lookup = self._parse_lvs(_raw_lvs)
+ _pvs, _pvs_lookup, _pvs_in_vgs = self._parse_pvs_json(a)
+ _vgs, _vgs_lookup = self._parse_vgs_json(a)
+ _lvs, _lvs_in_vgs, _lvs_hidden, _lvs_lookup = self._parse_lvs_json(a)
+
+ else:
+ _raw_pvs = cmdhandler.pv_retrieve_with_segs()
+ _raw_vgs = cmdhandler.vg_retrieve(None)
+ _raw_lvs = cmdhandler.lv_retrieve_with_segments()
+
+ _pvs, _pvs_lookup, _pvs_in_vgs = self._parse_pvs(_raw_pvs)
+ _vgs, _vgs_lookup = self._parse_vgs(_raw_vgs)
+ _lvs, _lvs_in_vgs, _lvs_hidden, _lvs_lookup = self._parse_lvs(_raw_lvs)
# Set all
self.pvs = _pvs
@@ -389,12 +516,20 @@ class DataStore(object):
if __name__ == "__main__":
pp = prettyprint.PrettyPrinter(indent=4)
- ds = DataStore()
+ use_json = False
+
+ if len(sys.argv) != 1:
+ print(len(sys.argv))
+ use_json = True
+
+ ds = DataStore(use_json)
ds.refresh()
+ print("PVS")
for v in ds.pvs.values():
pp.pprint(v)
+ print("VGS")
for v in ds.vgs.values():
pp.pprint(v)
master - raid: Infrastructure for raid takeover.
by Alasdair Kergon
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=79446ffad7358d...
Commit: 79446ffad7358dd8c130ff498514150d1e0ea08b
Parent: ff3c4ed1c0e2e554e9fb5f9b7ff20fdf5a835852
Author: Alasdair G Kergon <agk(a)redhat.com>
AuthorDate: Tue Jun 28 02:42:30 2016 +0100
Committer: Alasdair G Kergon <agk(a)redhat.com>
CommitterDate: Tue Jun 28 02:42:30 2016 +0100
raid: Infrastructure for raid takeover.
---
WHATS_NEW | 1 +
lib/config/defaults.h | 3 +-
lib/metadata/metadata-exported.h | 1 -
lib/metadata/raid_manip.c | 490 ++++++++++++++++++++++++++++++++++++--
lib/metadata/takeover_matrix.h | 120 ++++++++++
tools/lvconvert.c | 7 +-
6 files changed, 596 insertions(+), 26 deletions(-)
diff --git a/WHATS_NEW b/WHATS_NEW
index cfa707f..3184412 100644
--- a/WHATS_NEW
+++ b/WHATS_NEW
@@ -1,5 +1,6 @@
Version 2.02.159 -
=================================
+ Add infrastructure for raid takeover lvconvert options.
Version 2.02.158 - 25th June 2016
=================================
diff --git a/lib/config/defaults.h b/lib/config/defaults.h
index 3d449ff..addb14f 100644
--- a/lib/config/defaults.h
+++ b/lib/config/defaults.h
@@ -66,7 +66,8 @@
#define DEFAULT_MIRROR_LOG_FAULT_POLICY "allocate"
#define DEFAULT_MIRROR_IMAGE_FAULT_POLICY "remove"
#define DEFAULT_MIRROR_MAX_IMAGES 8 /* limited by kernel DM_KCOPYD_MAX_REGIONS */
-#define DEFAULT_RAID_MAX_IMAGES 8
+// FIXME Increase this to 64
+#define DEFAULT_RAID_MAX_IMAGES 8 /* limited by kernel failed devices bitfield in superblock (raid4/5/6 max 253) */
#define DEFAULT_RAID_FAULT_POLICY "warn"
diff --git a/lib/metadata/metadata-exported.h b/lib/metadata/metadata-exported.h
index f47fa1d..7765d23 100644
--- a/lib/metadata/metadata-exported.h
+++ b/lib/metadata/metadata-exported.h
@@ -1196,7 +1196,6 @@ int lv_raid_merge(struct logical_volume *lv);
int lv_raid_convert(struct logical_volume *lv,
const struct segment_type *new_segtype,
int yes, int force,
- const unsigned image_count,
const unsigned stripes,
const unsigned new_stripe_size,
struct dm_list *allocate_pvs);
diff --git a/lib/metadata/raid_manip.c b/lib/metadata/raid_manip.c
index 132eef3..2cef777 100644
--- a/lib/metadata/raid_manip.c
+++ b/lib/metadata/raid_manip.c
@@ -96,7 +96,7 @@ static int _raid_in_sync(struct logical_volume *lv)
if (sync_percent == DM_PERCENT_0) {
/*
* FIXME We repeat the status read here to workaround an
- * unresolved kernel bug when we see 0 even though the
+ * unresolved kernel bug when we see 0 even though the
* the array is 100% in sync.
* https://bugzilla.redhat.com/1210637
*/
@@ -174,9 +174,9 @@ static int _raid_remove_top_layer(struct logical_volume *lv,
* @lv
*
* If LV is active:
- * clear first block of device
+ * clear first block of device
* otherwise:
- * activate, clear, deactivate
+ * activate, clear, deactivate
*
* Returns: 1 on success, 0 on failure
*/
@@ -839,10 +839,10 @@ static int _extract_image_components(struct lv_segment *seg, uint32_t idx,
* @new_count: The absolute count of images (e.g. '2' for a 2-way mirror)
* @target_pvs: The list of PVs that are candidates for removal
* @shift: If set, use _shift_and_rename_image_components().
- * Otherwise, leave the [meta_]areas as AREA_UNASSIGNED and
- * seg->area_count unchanged.
+ * Otherwise, leave the [meta_]areas as AREA_UNASSIGNED and
+ * seg->area_count unchanged.
* @extracted_[meta|data]_lvs: The LVs removed from the array. If 'shift'
- * is set, then there will likely be name conflicts.
+ * is set, then there will likely be name conflicts.
*
* This function extracts _both_ portions of the indexed image. It
* does /not/ commit the results. (IOW, erroring-out requires no unwinding
@@ -851,9 +851,9 @@ static int _extract_image_components(struct lv_segment *seg, uint32_t idx,
* Returns: 1 on success, 0 on failure
*/
static int _raid_extract_images(struct logical_volume *lv, uint32_t new_count,
- struct dm_list *target_pvs, int shift,
- struct dm_list *extracted_meta_lvs,
- struct dm_list *extracted_data_lvs)
+ struct dm_list *target_pvs, int shift,
+ struct dm_list *extracted_meta_lvs,
+ struct dm_list *extracted_data_lvs)
{
int ss, s, extract, lvl_idx = 0;
struct lv_list *lvl_array;
@@ -1461,9 +1461,428 @@ static int _convert_mirror_to_raid1(struct logical_volume *lv,
}
/*
- * lv_raid_reshape
- * @lv
- * @new_segtype
+ * Individual takeover functions.
+ */
+#define TAKEOVER_FN_ARGS \
+ struct logical_volume *lv, \
+ const struct segment_type *new_segtype, \
+ int yes, \
+ int force, \
+ unsigned new_image_count, \
+ const unsigned new_stripes, \
+ unsigned new_stripe_size, \
+ struct dm_list *allocate_pvs
+
+typedef int (*takeover_fn_t)(TAKEOVER_FN_ARGS);
+
+/*
+ * Common takeover functions.
+ */
+static int _takeover_noop(TAKEOVER_FN_ARGS)
+{
+ log_error("Logical volume %s is already of requested type %s",
+ display_lvname(lv), lvseg_name(first_seg(lv)));
+
+ return 0;
+}
+
+static int _takeover_unsupported(TAKEOVER_FN_ARGS)
+{
+ log_error("Converting the segment type for %s from %s to %s is not supported.",
+ display_lvname(lv), lvseg_name(first_seg(lv)), new_segtype->name);
+
+ return 0;
+}
+
+/*
+ * Will this particular takeover combination be possible?
+ */
+static int _takeover_not_possible(takeover_fn_t takeover_fn)
+{
+ if (takeover_fn == _takeover_noop || takeover_fn == _takeover_unsupported)
+ return 0;
+
+ return 1;
+}
+
+static int _takeover_unsupported_yet(const struct logical_volume *lv, const struct segment_type *new_segtype)
+{
+ log_error("Converting the segment type for %s from %s to %s is not supported yet.",
+ display_lvname(lv), lvseg_name(first_seg(lv)), new_segtype->name);
+
+ return 0;
+}
+
+/*
+ * Customised takeover functions
+ */
+static int _takeover_from_linear_to_raid0(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_linear_to_raid1(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_linear_to_raid10(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_linear_to_raid45(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_mirrored_to_raid0(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_mirrored_to_raid0_meta(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_mirrored_to_raid1(TAKEOVER_FN_ARGS)
+{
+ return _convert_mirror_to_raid1(lv, new_segtype);
+}
+
+static int _takeover_from_mirrored_to_raid10(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_mirrored_to_raid45(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid0_to_linear(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid0_to_mirrored(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid0_to_raid0_meta(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid0_to_raid1(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid0_to_raid10(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid0_to_raid45(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid0_to_raid6(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid0_to_striped(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+/*
+static int _takeover_from_raid0_meta_to_linear(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid0_meta_to_mirrored(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid0_meta_to_raid0(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid0_meta_to_raid1(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid0_meta_to_raid10(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid0_meta_to_raid45(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid0_meta_to_raid6(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid0_meta_to_striped(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+*/
+
+static int _takeover_from_raid1_to_linear(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid1_to_mirrored(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid1_to_raid0(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid1_to_raid0_meta(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid1_to_raid1(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid1_to_raid10(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid1_to_raid45(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid1_to_striped(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid45_to_linear(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid45_to_mirrored(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid45_to_raid0(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid45_to_raid0_meta(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid45_to_raid1(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid45_to_raid54(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid45_to_raid6(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid45_to_striped(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid6_to_raid0(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid6_to_raid0_meta(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid6_to_raid45(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid6_to_striped(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_striped_to_raid0(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_striped_to_raid01(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_striped_to_raid0_meta(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_striped_to_raid10(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_striped_to_raid45(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_striped_to_raid6(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+/*
+static int _takeover_from_raid01_to_raid01(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid01_to_raid10(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid01_to_striped(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid10_to_linear(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid10_to_mirrored(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid10_to_raid0(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid10_to_raid01(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid10_to_raid0_meta(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid10_to_raid1(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid10_to_raid10(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+
+static int _takeover_from_raid10_to_striped(TAKEOVER_FN_ARGS)
+{
+ return _takeover_unsupported_yet(lv, new_segtype);
+}
+*/
+
+/*
+ * Import takeover matrix.
+ */
+#include "takeover_matrix.h"
+
+static unsigned _segtype_ix(const struct segment_type *segtype, uint32_t area_count)
+{
+ int i = 2, j;
+
+ /* Linear special case */
+ if (segtype_is_striped(segtype)) {
+ if (area_count == 1)
+ return 0; /* linear */
+ if (!segtype_is_raid0(segtype))
+ return 1; /* striped */
+ }
+
+ while ((j = _segtype_index[i++]))
+ if (segtype->flags & j)
+ break;
+
+ return (i - 1);
+}
+
+/* Call appropriate takeover function */
+static takeover_fn_t _get_takeover_fn(const struct lv_segment *seg, const struct segment_type *new_segtype, unsigned new_image_count)
+{
+ return _takeover_fns[_segtype_ix(seg->segtype, seg->area_count)][_segtype_ix(new_segtype, new_image_count)];
+}
+
+/*
+ * Check for maximum number of raid devices.
+ * Constrained by kernel MD maximum device limits _and_ dm-raid superblock
+ * bitfield constraints.
+ */
+static int _check_max_raid_devices(uint32_t image_count)
+{
+ if (image_count > DEFAULT_RAID_MAX_IMAGES) {
+ log_error("Unable to handle arrays with more than %u devices",
+ DEFAULT_RAID_MAX_IMAGES);
+ return 0;
+ }
+ return 1;
+}
+
+/* Number of data (not parity) rimages */
+static uint32_t _data_rimages_count(const struct lv_segment *seg, const uint32_t total_rimages)
+{
+ return total_rimages - seg->segtype->parity_devs;
+}
+
+/*
+ * lv_raid_convert
*
* Convert an LV from one RAID type (or 'mirror' segtype) to another.
*
@@ -1472,33 +1891,60 @@ static int _convert_mirror_to_raid1(struct logical_volume *lv,
int lv_raid_convert(struct logical_volume *lv,
const struct segment_type *new_segtype,
int yes, int force,
- unsigned new_image_count,
const unsigned new_stripes,
const unsigned new_stripe_size,
struct dm_list *allocate_pvs)
{
struct lv_segment *seg = first_seg(lv);
+ uint32_t stripes, stripe_size;
+ uint32_t new_image_count = seg->area_count;
+ takeover_fn_t takeover_fn;
if (!new_segtype) {
log_error(INTERNAL_ERROR "New segtype not specified");
return 0;
}
+ if (!_check_max_raid_devices(new_image_count))
+ return_0;
+
+ stripes = new_stripes ?: _data_rimages_count(seg, seg->area_count);
+
+ /* FIXME Ensure caller does *not* set wrong default value! */
+ /* Define new stripe size if not passed in */
+ stripe_size = new_stripe_size ?: seg->stripe_size;
+
+ takeover_fn = _get_takeover_fn(first_seg(lv), new_segtype, new_image_count);
+
+ /* Exit without doing activation checks if the combination isn't possible */
+ if (_takeover_not_possible(takeover_fn))
+ return takeover_fn(lv, new_segtype, yes, force, new_image_count, new_stripes, new_stripe_size, allocate_pvs);
+
+ /* FIXME If not active, prompt and activate */
+ /* LV must be active to perform raid conversion operations */
+ if (!lv_is_active(lv)) {
+ log_error("%s must be active to perform this operation.",
+ display_lvname(lv));
+ return 0;
+ }
+
+ /* In clustered VGs, the LV must be active on this node exclusively. */
if (vg_is_clustered(lv->vg) && !lv_is_active_exclusive_locally(lv)) {
- log_error("%s/%s must be active exclusive locally to"
- " perform this operation.", lv->vg->name, lv->name);
+ log_error("%s must be active exclusive locally to "
+ "perform this operation.", display_lvname(lv));
return 0;
}
- if (seg_is_mirror(seg) && segtype_is_raid1(new_segtype))
- return _convert_mirror_to_raid1(lv, new_segtype);
+ /* LV must be in sync. */
+ if (!_raid_in_sync(lv)) {
+ log_error("Unable to convert %s while it is not in-sync",
+ display_lvname(lv));
+ return 0;
+ }
- log_error("Converting the segment type for %s/%s from %s to %s is not yet supported.",
- lv->vg->name, lv->name, lvseg_name(seg), new_segtype->name);
- return 0;
+ return takeover_fn(lv, new_segtype, yes, force, new_image_count, new_stripes, new_stripe_size, allocate_pvs);
}
-
static int _remove_partial_multi_segment_image(struct logical_volume *lv,
struct dm_list *remove_pvs)
{
@@ -1876,7 +2322,7 @@ int lv_raid_remove_missing(struct logical_volume *lv)
*/
for (s = 0; s < seg->area_count; s++) {
- if (!lv_is_partial(seg_lv(seg, s)) &&
+ if (!lv_is_partial(seg_lv(seg, s)) &&
(!seg->meta_areas || !seg_metalv(seg, s) || !lv_is_partial(seg_metalv(seg, s))))
continue;
diff --git a/lib/metadata/takeover_matrix.h b/lib/metadata/takeover_matrix.h
new file mode 100644
index 0000000..25a8197
--- /dev/null
+++ b/lib/metadata/takeover_matrix.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
+ *
+ * This file is part of LVM2.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU Lesser General Public License v.2.1.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define N _takeover_noop
+#define X _takeover_unsupported
+
+#define lin_r0 _takeover_from_linear_to_raid0
+#define lin_r0 _takeover_from_linear_to_raid0
+#define lin_r1 _takeover_from_linear_to_raid1
+#define lin_r10 _takeover_from_linear_to_raid10
+#define lin_r45 _takeover_from_linear_to_raid45
+#define mir_r0 _takeover_from_mirrored_to_raid0
+#define mir_r0m _takeover_from_mirrored_to_raid0_meta
+#define mir_r1 _takeover_from_mirrored_to_raid1
+#define mir_r10 _takeover_from_mirrored_to_raid10
+#define mir_r45 _takeover_from_mirrored_to_raid45
+#define r01_r01 _takeover_from_raid01_to_raid01
+#define r01_r10 _takeover_from_raid01_to_raid10
+#define r01_str _takeover_from_raid01_to_striped
+#define r0__lin _takeover_from_raid0_to_linear
+#define r0__mir _takeover_from_raid0_to_mirrored
+#define r0m_lin _takeover_from_raid0_meta_to_linear
+#define r0m_mir _takeover_from_raid0_meta_to_mirrored
+#define r0m_r0 _takeover_from_raid0_meta_to_raid0
+#define r0m_r1 _takeover_from_raid0_meta_to_raid1
+#define r0m_r10 _takeover_from_raid0_meta_to_raid10
+#define r0m_r45 _takeover_from_raid0_meta_to_raid45
+#define r0m_r6 _takeover_from_raid0_meta_to_raid6
+#define r0m_str _takeover_from_raid0_meta_to_striped
+#define r0__r0m _takeover_from_raid0_to_raid0_meta
+#define r0__r1 _takeover_from_raid0_to_raid1
+#define r0__r10 _takeover_from_raid0_to_raid10
+#define r0__r45 _takeover_from_raid0_to_raid45
+#define r0__r6 _takeover_from_raid0_to_raid6
+#define r0__str _takeover_from_raid0_to_striped
+#define r10_lin _takeover_from_raid10_to_linear
+#define r10_mir _takeover_from_raid10_to_mirrored
+#define r10_r0 _takeover_from_raid10_to_raid0
+#define r10_r01 _takeover_from_raid10_to_raid01
+#define r10_r0m _takeover_from_raid10_to_raid0_meta
+#define r10_r1 _takeover_from_raid10_to_raid1
+#define r10_r10 _takeover_from_raid10_to_raid10
+#define r10_str _takeover_from_raid10_to_striped
+#define r1__lin _takeover_from_raid1_to_linear
+#define r1__mir _takeover_from_raid1_to_mirrored
+#define r1__r0 _takeover_from_raid1_to_raid0
+#define r1__r0m _takeover_from_raid1_to_raid0_meta
+#define r1__r1 _takeover_from_raid1_to_raid1
+#define r1__r10 _takeover_from_raid1_to_raid10
+#define r1__r45 _takeover_from_raid1_to_raid45
+#define r1__str _takeover_from_raid1_to_striped
+#define r45_lin _takeover_from_raid45_to_linear
+#define r45_mir _takeover_from_raid45_to_mirrored
+#define r45_r0 _takeover_from_raid45_to_raid0
+#define r45_r0m _takeover_from_raid45_to_raid0_meta
+#define r45_r1 _takeover_from_raid45_to_raid1
+#define r45_r54 _takeover_from_raid45_to_raid54
+#define r45_r6 _takeover_from_raid45_to_raid6
+#define r45_str _takeover_from_raid45_to_striped
+#define r6__r0 _takeover_from_raid6_to_raid0
+#define r6__r0m _takeover_from_raid6_to_raid0_meta
+#define r6__r45 _takeover_from_raid6_to_raid45
+#define r6__str _takeover_from_raid6_to_striped
+#define str_r0 _takeover_from_striped_to_raid0
+#define str_r01 _takeover_from_striped_to_raid01
+#define str_r0m _takeover_from_striped_to_raid0_meta
+#define str_r10 _takeover_from_striped_to_raid10
+#define str_r45 _takeover_from_striped_to_raid45
+#define str_r6 _takeover_from_striped_to_raid6
+
+static uint64_t _segtype_index[] = {
+ 1, /* linear */
+ 1, /* striped */
+ SEG_MIRROR,
+ SEG_RAID0,
+ // SEG_RAID0_META,
+ SEG_RAID1,
+ SEG_RAID4 | SEG_RAID5_LS | SEG_RAID5_LA | SEG_RAID5_LS | SEG_RAID5_RS | SEG_RAID5_RA | SEG_RAID5_N,
+ SEG_RAID6_LS_6 | SEG_RAID6_LA_6 | SEG_RAID6_RS_6 | SEG_RAID6_RA_6 | SEG_RAID6_NC | SEG_RAID6_NR | SEG_RAID6_ZR | SEG_RAID6_N_6,
+ 0, // SEG_RAID10_NEAR | SEG_RAID10_FAR | SEG_RAID10_OFFSET,
+ 0, // SEG_RAID01,
+ 0
+};
+
+/*
+ * Matrix of takeover functions.
+ * Row corresponds to original segment type.
+ * Column corresponds to new segment type.
+ * N represents a combination that has no effect (no-op).
+ * X represents a combination that is unsupported.
+ */
+static takeover_fn_t _takeover_fns[][11] = {
+ /* from, to -> linear striped mirror raid0 raid0_meta raid1 raid4/5 raid6 raid10 raid01 other*/
+ /* | */
+ /* v */
+ /* linear */ { N , X , X , lin_r0, lin_r0 , lin_r1, lin_r45, X , lin_r10, X , X },
+ /* striped */ { X , N , X , str_r0, str_r0m, lin_r1, str_r45, str_r6, str_r10, str_r01, X },
+ /* mirror */ { X , X , N , mir_r0, mir_r0m, mir_r1, mir_r45, X , mir_r10, X , X },
+ /* raid0 */ { r0__lin, r0__str, r0__mir, N , r0__r0m, r0__r1, r0__r45, r0__r6, r0__r10, X , X },
+ /* raid0_meta */ // { r0m_lin, r0m_str, r0m_mir, r0m_r0, N , r0m_r1, r0m_r45, r0m_r6, r0m_r10, X , X },
+ /* raid1 */ { r1__lin, r1__str, r1__mir, r1__r0, r1__r0m, r1__r1, r1__r45, X , r1__r10, X , X },
+ /* raid4/5 */ { r45_lin, r45_str, r45_mir, r45_r0, r45_r0m, r45_r1, r45_r54, r45_r6, X , X , X },
+ /* raid6 */ { X , r6__str, X , r6__r0, r6__r0m, X , r6__r45, X , X , X , X },
+ /* raid10 */ // { r10_lin, r10_str, r10_mir, r10_r0, r10_r0m, r10_r1, X , X , r10_r10, r10_r01, X },
+ /* raid01 */ // { X , r01_str, X , X , X , X , X , X , r01_r10, r01_r01, X },
+ /* other */ { X , X , X , X , X , X , X , X , X , X , X },
+};
+#undef X
+#undef N
diff --git a/tools/lvconvert.c b/tools/lvconvert.c
index f99291a..65143a1 100644
--- a/tools/lvconvert.c
+++ b/tools/lvconvert.c
@@ -1845,13 +1845,16 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l
return lv_raid_change_image_count(lv, image_count, lp->pvh);
if ((seg_is_linear(seg) || seg_is_striped(seg) || seg_is_mirrored(seg) || lv_is_raid(lv)) &&
- ((lp->type_str && lp->type_str[0]) || image_count)) {
+ (lp->type_str && lp->type_str[0])) {
if (segtype_is_any_raid0(lp->segtype) &&
!(lp->target_attr & RAID_FEATURE_RAID0)) {
log_error("RAID module does not support RAID0.");
return 0;
}
- return lv_raid_convert(lv, lp->segtype, lp->yes, lp->force, image_count, lp->stripes, lp->stripe_size, lp->pvh);
+ if (!lv_raid_convert(lv, lp->segtype, lp->yes, lp->force, lp->stripes, lp->stripe_size, lp->pvh))
+ return_0;
+ log_print_unless_silent("Logical volume %s successfully converted", display_lvname(lv));
+ return 1;
}
if (lp->replace)
v2_02_158 annotated tag has been created
by Alasdair Kergon
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=6d0c49df5b466e...
Commit: 6d0c49df5b466e3d5f842272d76452fd2b9e5dec
Parent: 0000000000000000000000000000000000000000
Author: Alasdair G Kergon <agk(a)redhat.com>
AuthorDate: 2016-06-25 19:35 +0000
Committer: Alasdair G Kergon <agk(a)redhat.com>
CommitterDate: 2016-06-25 19:35 +0000
annotated tag: v2_02_158 has been created
at 6d0c49df5b466e3d5f842272d76452fd2b9e5dec (tag)
tagging 887f071b2545bb2b60e13de854898ab71a6b0ff5 (commit)
replaces v2_02_157
Release 2.02.158.
An interim development release incorporating major extensions to the reporting
code (including a json facility). The interface changes are not yet frozen.
The vgimportclone script is also replaced with a new built-in implementation.
133 files changed, 4796 insertions(+), 1818 deletions(-)
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.14 (GNU/Linux)
iEYEABECAAYFAldu3m8ACgkQIoGRwVZ+LBcy0gCg2xMhF+rYQSYsuztV/w3u6W3E
pP8An1UtPzdIRLbj3hmFBFFS97lV2aAl
=zD0Y
-----END PGP SIGNATURE-----
Alasdair G Kergon (7):
post-release
tools: Use arg_is_set instead of arg_count.
lvconvert: Refactor argument handling code.
lvconvert: Fix --stripes handling.
Revert "locking: trace errors from dir creation"
doc: Resync kernel docs.
pre-release
David Teigland (9):
lvmlockd: fix dropping PVs in rescanning VG
vgreduce: fix location of lvmlockd global lock
vgscan: fix process_each args
vgcfgrestore: use lvmetad disabled state
lvmetad: process new connections after shutdown signal
lvmlockd: always try to connect to lvmetad
vgcfgrestore: check for missing device
vgcfgrestore: rescan to reenable lvmetad on error path
vgimportclone: add native command
Peter Rajnoha (55):
libdm: report: introduce dm_report_group
libdm: report: implement DM_REPORT_GROUP_SINGLE for a group containing single report only
libdm: report: implement DM_REPORT_GROUP_BASIC for extended report output
libdm: report: implement DM_REPORT_GROUP_JSON for JSON report output
report: add CMDLOG report type
conf: add log/command_log_{sort,cols} config settings
toollib: add report_format_init fn to create report group and to create/add log report handle
conf: add report/output_format config setting
conf: add log/report_command_log config setting
report: separate common report and per-report args
toollib: add report_group and log_rh to processing_handle and initialize cmd processing log report
toollib: add 'parent' field to processing_handle; init report format only if there's no parent
report: add current report to existing group
commands: recognize --reportformat option for pvs,vgs,lvs and devtypes command
commands: recognize --reportformat option for other commands
commands: report: add lvm fullreport command
conf: add report/{vgs,pvs,lvs,pvsegs,segs}_{cols,sort}_full config settings
args: add priorities for grouping args
args: add --configreport arg
report: add _get_report_idx_from_name and _should_process_report_idx helper fns
report: recognize list of fields to report (-o|--options) for each subreport
report: recognize list of keys to sort report by (-O|--sort) for each subreport; make -O|--sort groupable
report: recognize selection (-S|--select) for each subreport; make -S|--select groupable
report: check report type and options used for full report's subreports
toollib: make it possible to also process internal VGs, add 'include_internal' switch to process_each_vg fn
log: log warnings and errors via report if set; add log_set_report* fns
report: add report_current_object_cmdlog fn as a shortcut for reporting current object's log item
log: also pass log_print through report and add log_print_bypass_report for use in libdm-report for direct print without report
libdm: report: remember special field to display selection status in struct row's field_sel_status variable
refactor: move field width calculation and sort preparation from _do_report_object to dm_report_output
libdm: report: add DM_REPORT_OUTPUT_MULTIPLE_TIMES report flag to keep report data even after output is done
libdm: report: add dm_report_set_selection
report: add 'multiple_output' arg to report_init
log: annotate processing code with log_set_report_{context,object_type,id_and_name}
log: annotate processing code with report_current_object_cmdlog
refactor: move 'interactive' field from struct command_line to struct cmd_context as 'is_interactive' switch
tools: add 'lvm lastlog' command for interactive query and display of last command's log
log: use separate 'orphan' report object type for orphan VGs
conf: add log/command_log_selection config setting
libdm: select: recognize special selection string 'all' as an alias for blank selection string
report: use report type description for report's name if using DM_REPORT_GROUP_BASIC output format
report: add --logonly arg to report only log for a command
conf: regenerate profile templates
commands: add --configreport arg for all relevant commands
report: compare --configreport value with basic report type
log: add 'label' log report object type; annotate process_each_label with log_set_report_object_{type, id_and_name} and report_log_ret_code
cleanup: log: use hex numbers instead of decimal for _LOG_*
conf: add more comments for new settings related to output format and log report
filters: add comments about internal filter position in filter chain
log: move original print_log code to _vprint_log and make print_log a wrapper over _vprint_log
libdm: log: remove log_print_bypass_report calls and register new print_log_libdm for libdm during lvm initialization instead
commands: fix typo in arg assignment
commands: help: add missing --reportformat references
man: document --configreport, --logonly, --reportformat
man: lvs: move doc about --all so it's ordered alphabetically in the man page
Zdenek Kabelac (26):
activation: switch to warning
activation: _lv_active returns 0 on error
monitoring: preserve monitoring status during update
locking: trace errors from dir creation
lvresize: move and extend resizefs ignoring check
lvresize: inform about ignored args with --use-polices
lvresize: drop extra sigint handling
cleanup: code lines reodered
cleanup: add dots to some messages
cleanup: use display_lvname
lvresize: update lvresize_params struct
lvresize: check pvh list is vg->pvs
lvresize: pass only needed params to _fsadm_cmd
lvresize: do not pass struct cmd
lvresize: support more resized LVs
lvresize: return 0/1
cleanup: remove unused sizearg variable
cleanup: remove unused code
cleanup: drop setting lv_name
tests: aux prepare_thin_metadata
lvresize: fixes for recent commit
lvresize: let pass even protected names like _tmeta
cleanup: type cleanup
tests: smaller number of devs created to fill metadata
tests: update test for modified output
cleanup: clean warns from older gcc
master - lvmetad: two phase vg_remove
by Alasdair Kergon
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=ff3c4ed1c0e2e5...
Commit: ff3c4ed1c0e2e554e9fb5f9b7ff20fdf5a835852
Parent: a7c45ddc59449fa8b1823bcab31e3fbb64fed97c
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Wed Jun 8 16:02:45 2016 -0500
Committer: Alasdair G Kergon <agk(a)redhat.com>
CommitterDate: Tue Jun 28 02:30:36 2016 +0100
lvmetad: two phase vg_remove
Apply the same idea as vg_update.
Before doing the VG remove on disk, invalidate
the VG in lvmetad. After the VG is removed,
remove the VG in lvmetad. If the command fails
after removing the VG on disk, but before removing
the VG metadata from lvmetad, then a subsequent
command will see the INVALID flag and not use the
stale metadata from lvmetad.
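Condensed from the metadata.c hunk below, the resulting ordering in
vg_remove_direct() is roughly the following C sketch (the on-disk removal and
most error handling are elided):

    int vg_remove_direct(struct volume_group *vg)
    {
            /* Phase 1: mark the cached VG INVALID in lvmetad before
             * touching anything on disk. */
            if (!lvmetad_vg_remove_pending(vg)) {
                    log_error("Failed to update lvmetad for pending remove.");
                    return 0;
            }

            /* ... vg_remove_mdas() and the rest of the on-disk removal ... */

            /* Phase 2: drop the VG from lvmetad once the on-disk remove is
             * done.  If the command dies in between, later commands see the
             * INVALID flag and do not use the stale cached metadata. */
            if (!lvmetad_vg_remove_finish(vg))
                    stack;

            lockd_vg_update(vg);
            return 1;
    }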
---
lib/cache/lvmetad.c | 36 ++++++++++++++++++++++++++++++++----
lib/cache/lvmetad.h | 6 ++++--
lib/metadata/metadata.c | 8 ++++++--
3 files changed, 42 insertions(+), 8 deletions(-)
diff --git a/lib/cache/lvmetad.c b/lib/cache/lvmetad.c
index 784a529..94059dd 100644
--- a/lib/cache/lvmetad.c
+++ b/lib/cache/lvmetad.c
@@ -648,10 +648,9 @@ static int _lvmetad_handle_reply(daemon_reply reply, const char *id, const char
action = "set VG info";
else if (!strcmp(id, "vg_update"))
action = "update VG";
- else if (!strcmp(id, "vg_remove")) {
+ else if (!strcmp(id, "vg_remove"))
action = "remove VG";
- action_modifies = 1;
- } else if (!strcmp(id, "pv_found")) {
+ else if (!strcmp(id, "pv_found")) {
action = "update PV";
action_modifies = 1;
} else if (!strcmp(id, "pv_gone")) {
@@ -1272,7 +1271,36 @@ int lvmetad_vg_update_finish(struct volume_group *vg)
return 1;
}
-int lvmetad_vg_remove(struct volume_group *vg)
+int lvmetad_vg_remove_pending(struct volume_group *vg)
+{
+ char uuid[64] __attribute__((aligned(8)));
+ daemon_reply reply;
+
+ if (!lvmetad_used() || test_mode())
+ return 1; /* fake it */
+
+ if (!id_write_format(&vg->id, uuid, sizeof(uuid)))
+ return_0;
+
+ /* Sending version/seqno 0 in set_vg_info will set the INVALID flag. */
+
+ log_debug_lvmetad("Sending lvmetad pending remove VG %s", vg->name);
+ reply = _lvmetad_send(vg->cmd, "set_vg_info",
+ "name = %s", vg->name,
+ "uuid = %s", uuid,
+ "version = %d", 0,
+ NULL);
+
+ if (!_lvmetad_handle_reply(reply, "set_vg_info", vg->name, NULL)) {
+ daemon_reply_destroy(reply);
+ return_0;
+ }
+
+ daemon_reply_destroy(reply);
+ return 1;
+}
+
+int lvmetad_vg_remove_finish(struct volume_group *vg)
{
char uuid[64];
daemon_reply reply;
diff --git a/lib/cache/lvmetad.h b/lib/cache/lvmetad.h
index 9ee4e2d..d47ce41 100644
--- a/lib/cache/lvmetad.h
+++ b/lib/cache/lvmetad.h
@@ -86,7 +86,8 @@ int lvmetad_vg_update_finish(struct volume_group *vg);
* only needed during vgremove, which does not wipe PV labels and therefore
* cannot mark the PVs as gone.
*/
-int lvmetad_vg_remove(struct volume_group *vg);
+int lvmetad_vg_remove_pending(struct volume_group *vg);
+int lvmetad_vg_remove_finish(struct volume_group *vg);
/*
* Notify lvmetad that a PV has been found. It is not an error if the PV is
@@ -174,7 +175,8 @@ void lvmetad_clear_disabled(struct cmd_context *cmd);
# define lvmetad_vg_update(vg) (1)
# define lvmetad_vg_update_pending(vg) (1)
# define lvmetad_vg_update_finish(vg) (1)
-# define lvmetad_vg_remove(vg) (1)
+# define lvmetad_vg_remove_pending(vg) (1)
+# define lvmetad_vg_remove_finish(vg) (1)
# define lvmetad_pv_found(cmd, pvid, dev, fmt, label_sector, vg, found_vgnames, changed_vgnames) (1)
# define lvmetad_pv_gone(devno, pv_name) (1)
# define lvmetad_pv_gone_by_dev(dev) (1)
diff --git a/lib/metadata/metadata.c b/lib/metadata/metadata.c
index e100bc6..d8bb726 100644
--- a/lib/metadata/metadata.c
+++ b/lib/metadata/metadata.c
@@ -568,6 +568,11 @@ int vg_remove_direct(struct volume_group *vg)
struct pv_list *pvl;
int ret = 1;
+ if (!lvmetad_vg_remove_pending(vg)) {
+ log_error("Failed to update lvmetad for pending remove.");
+ return 0;
+ }
+
if (!vg_remove_mdas(vg)) {
log_error("vg_remove_mdas %s failed", vg->name);
return 0;
@@ -599,8 +604,7 @@ int vg_remove_direct(struct volume_group *vg)
}
}
- /* FIXME Handle partial failures from above. */
- if (!lvmetad_vg_remove(vg))
+ if (!lvmetad_vg_remove_finish(vg))
stack;
lockd_vg_update(vg);