Dan Kenigsberg has uploaded a new change for review.
Change subject: Deprecate volume mtime
......................................................................
Deprecate volume mtime
In pre-historic 2.y days, Engine used volume's "mtime" attribute to
deduce the ancestry of a volume. See for example BZ#513396.
In case of an error reading the metadata, "0" or "" was returned.
This was a bad heuristic that was replaced by proper pointers to
parents and children.
Engine 3.y continues to use the mtime field only when importing ancient
images that lack proper creation time in their OVF.
Maintaining this attribute requires costly readings of volume metadata.
This patch drops the mtime-related code. For backward compatibility with
old Vdsms and Engines that may expect the existence of this attribute,
we keep writing "0" to the metadata and reporting "0" to Engine.
Change-Id: I38b0a636222fa74125d25f0c3c9ea5b3e5701565
Signed-off-by: Dan Kenigsberg <danken(a)redhat.com>
---
M vdsm/storage/blockVolume.py
M vdsm/storage/fileVolume.py
M vdsm/storage/volume.py
M vdsm_api/vdsmapi-schema.json
4 files changed, 3 insertions(+), 29 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/04/20504/1
diff --git a/vdsm/storage/blockVolume.py b/vdsm/storage/blockVolume.py
index 923f93a..20c6f75 100644
--- a/vdsm/storage/blockVolume.py
+++ b/vdsm/storage/blockVolume.py
@@ -623,17 +623,6 @@
getVolumeTrueSize = getVolumeSize
- def getVolumeMtime(self):
- """
- Return the volume mtime in msec epoch
- """
- try:
- mtime = self.getMetaParam(volume.MTIME)
- except se.MetaDataKeyNotFoundError:
- mtime = 0
-
- return mtime
-
def _extendSizeRaw(self, newSize):
# Since this method relies on lvm.extendLV (lvextend) when the
# requested size is equal or smaller than the current size, the
diff --git a/vdsm/storage/fileVolume.py b/vdsm/storage/fileVolume.py
index 1ae90de..c9ec5a1 100644
--- a/vdsm/storage/fileVolume.py
+++ b/vdsm/storage/fileVolume.py
@@ -568,16 +568,6 @@
volPath = self.getVolumePath()
return int(int(self.oop.os.stat(volPath).st_blocks) * BLOCK_SIZE / bs)
- def getVolumeMtime(self):
- """
- Return the volume mtime in msec epoch
- """
- volPath = self.getVolumePath()
- try:
- return self.getMetaParam(volume.MTIME)
- except se.MetaDataKeyNotFoundError:
- return self.oop.os.stat(volPath).st_mtime
-
def _extendSizeRaw(self, newSize):
volPath = self.getVolumePath()
curSizeBytes = self.oop.os.stat(volPath).st_size
diff --git a/vdsm/storage/volume.py b/vdsm/storage/volume.py
index fc7a3e5..341a54d 100644
--- a/vdsm/storage/volume.py
+++ b/vdsm/storage/volume.py
@@ -807,9 +807,6 @@
self.updateInvalidatedSize()
try:
- # Mtime is the time of the last prepare for RW
- if rw:
- self.setMetaParam(MTIME, int(time.time()))
if justme:
return True
pvol = self.getParentVolume()
@@ -845,7 +842,7 @@
"domain": meta.get(DOMAIN, ""),
"image": self.getImage(),
"ctime": meta.get(CTIME, ""),
- "mtime": meta.get(MTIME, ""),
+ "mtime": "0",
"legality": meta.get(LEGALITY, ""),
}
@@ -864,7 +861,7 @@
IMAGE: str(imgUUID),
DESCRIPTION: str(desc),
PUUID: str(puuid),
- MTIME: int(time.time()),
+ MTIME: 0,
LEGALITY: str(legality),
}
@@ -888,13 +885,11 @@
avsize = self.getVolumeTrueSize(bs=1)
info['apparentsize'] = str(vsize)
info['truesize'] = str(avsize)
- info['mtime'] = self.getVolumeMtime()
info['status'] = "OK"
except se.StorageException as e:
self.log.debug("exception: %s:%s" % (str(e.message), str(e.value)))
info['apparentsize'] = "0"
info['truesize'] = "0"
- info['mtime'] = "0"
info['status'] = "INVALID"
# Both engine and dumpStorageTable don't use this option so
diff --git a/vdsm_api/vdsmapi-schema.json b/vdsm_api/vdsmapi-schema.json
index 73889d1..19a9191 100644
--- a/vdsm_api/vdsmapi-schema.json
+++ b/vdsm_api/vdsmapi-schema.json
@@ -6465,7 +6465,7 @@
#
# @ctime: The Volume creation time in seconds since the epoch
#
-# @mtime: The Volume modification time in seconds since the epoch
+# @mtime: Deprecated
#
# @legality: Indicates whether the volume is legal to use
#
--
To view, visit http://gerrit.ovirt.org/20504
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I38b0a636222fa74125d25f0c3c9ea5b3e5701565
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Dan Kenigsberg <danken(a)redhat.com>
Antoni Segura Puimedon has uploaded a new change for review.
Change subject: ipwrapper: Introduce the linkPool
......................................................................
ipwrapper: Introduce the linkPool
The linkPool is a Link object pool that is kept updated with the
events from ip monitor link (with a daemon thread).
TODO: Find out why setting the ip monitor process as daemon has no
effect and leaves orphan processes when exiting the interpreter.
Change-Id: I63e0e7e6938709b57509fce2efcd3d36d8bd1eb8
Signed-off-by: Antoni S. Puimedon <asegurap(a)redhat.com>
---
M lib/vdsm/ipwrapper.py
1 file changed, 62 insertions(+), 8 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/16/22116/1
diff --git a/lib/vdsm/ipwrapper.py b/lib/vdsm/ipwrapper.py
index 394e9a4..9183a7c 100644
--- a/lib/vdsm/ipwrapper.py
+++ b/lib/vdsm/ipwrapper.py
@@ -20,7 +20,9 @@
from collections import namedtuple
from glob import iglob
import errno
+import logging
import os
+import threading
from netaddr.core import AddrFormatError
from netaddr import IPAddress
@@ -80,7 +82,7 @@
_hiddenNics = config.get('vars', 'hidden_nics').split(',')
_hiddenVlans = config.get('vars', 'hidden_vlans').split(',')
- def __init__(self, address, index, linkType, mtu, name, qdisc, state,
+ def __init__(self, address, index, linkType, mtu, name, state, qdisc=None,
vlanid=None, vlanprotocol=None, master=None, **kwargs):
self.address = address
self.index = index
@@ -144,6 +146,14 @@
name, device = attrs['name'].split('@')
attrs['name'] = name
attrs['device'] = device
+ return cls(**attrs)
+
+ @classmethod
+ def fromEvent(cls, event):
+ """Creates a Link object from an Event object."""
+ attrs = event.attrs
+ if 'linkType' not in event.attrs:
+ attrs['linkType'] = cls._detectType(event.device)
return cls(**attrs)
@staticmethod
@@ -550,7 +560,7 @@
MonitorEvent = namedtuple('MonitorEvent', ['index', 'device', 'flags',
- 'state'])
+ 'state', 'attrs'])
class Monitor(object):
@@ -574,7 +584,9 @@
raise StopIteration
elif self.proc.returncode is None:
for line in self.proc.stdout:
- yield self._parseLine(line)
+ event = self._parseLine(line)
+ if event:
+ yield event
else:
for event in self.events():
yield event
@@ -595,8 +607,14 @@
data = Link._parse(line)
# We can't get the type of a device no longer in the system
- if (state != cls.LINK_STATE_DELETED and
- not os.path.exists('/sys/class/net/' + data['name'])):
+ if state == cls.LINK_STATE_DELETED:
+ try:
+ link = linkPool[data['index']]
+ except KeyError:
+ # The link is not on the linkPool, no need for a delete event
+ return None
+ data['name'] = link.name
+ elif not os.path.exists('/sys/class/net/' + data['name']):
try:
data['name'] = _dev_get_by_index(data['index'])
except OSError as ose:
@@ -605,7 +623,8 @@
else:
raise
state = state if state or not 'state' in data else data['state']
- return MonitorEvent(data['index'], data['name'], data['flags'], state)
+ return MonitorEvent(data['index'], data['name'], data['flags'], state,
+ data)
@classmethod
def _parse(cls, text):
@@ -621,6 +640,41 @@
def _dev_get_by_index(ifindex):
for filepath in iglob('/sys/class/net/*/ifindex'):
with open(filepath) as ifFile:
- if ifFile.read().strip() == ifindex:
- return filepath.split('/')[-2]
+ try:
+ if ifFile.read().strip() == ifindex:
+ return filepath.split('/')[-2]
+ except IOError as ioe:
+ if ioe.errno in (errno.ENOENT, errno.EINVAL):
+ # The device is probably being removed
+ continue
+ raise
+
raise OSError(errno.ENOENT, 'No device found for index ' + ifindex)
+
+
+linkPool = {}
+
+
+def _linkPoolUpdater():
+ mon = Monitor()
+ mon.start()
+ for event in mon:
+ if event.state == Monitor.LINK_STATE_DELETED:
+ try:
+ del linkPool[event.index]
+ except KeyError:
+ logging.debug('Failed to remove link pool device %s which was '
+ 'not on the pool', event.device)
+ elif event.state is not None: # filter out typical wlan heartbeats
+ try:
+ linkPool[event.index] = Link.fromEvent(event)
+ except TypeError:
+ raise
+
+
+# netinfo link pool handling
+for link in getLinks():
+ linkPool[link.index] = link
+linkPoolThread = threading.Thread(target=_linkPoolUpdater, name='linkPool')
+linkPoolThread.daemon = True
+linkPoolThread.start()
--
To view, visit http://gerrit.ovirt.org/22116
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I63e0e7e6938709b57509fce2efcd3d36d8bd1eb8
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Antoni Segura Puimedon <asegurap(a)redhat.com>
Hello Timothy Asir, Saggi Mizrahi, Aravinda VK, Dan Kenigsberg,
I'd like you to do a code review. Please visit
http://gerrit.ovirt.org/19674
to review the following change.
Change subject: gluster: interpret and use correct xml elements
......................................................................
gluster: interpret and use correct xml elements
The xml output of the rebalance and remove-brick status commands returns
filesSkipped in the <skipped> tag; the value of the <statusStr> tag is
cleaned up so it can be used as a proper enum.
Change-Id: I9b90283ac2b9f4b54acfccffa02a30e38e5e9d85
Signed-off-by: Bala.FA <barumuga(a)redhat.com>
---
M vdsm/gluster/cli.py
1 file changed, 7 insertions(+), 4 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/74/19674/1
diff --git a/vdsm/gluster/cli.py b/vdsm/gluster/cli.py
index 8349ebe..38269d8 100644
--- a/vdsm/gluster/cli.py
+++ b/vdsm/gluster/cli.py
@@ -19,6 +19,7 @@
#
import xml.etree.cElementTree as etree
+import re
from vdsm import utils
from vdsm import netinfo
@@ -611,27 +612,29 @@
else:
return
+ st = re.sub('[ -]', '_', tree.find('aggregate/statusStr').text.upper())
status = {
'summary': {
'runtime': float(tree.find('aggregate/runtime').text),
'filesScanned': int(tree.find('aggregate/lookups').text),
'filesMoved': int(tree.find('aggregate/files').text),
'filesFailed': int(tree.find('aggregate/failures').text),
- 'filesSkipped': int(tree.find('aggregate/failures').text),
+ 'filesSkipped': int(tree.find('aggregate/skipped').text),
'totalSizeMoved': int(tree.find('aggregate/size').text),
- 'status': tree.find('aggregate/statusStr').text.upper()},
+ 'status': st},
'hosts': []}
for el in tree.findall('node'):
+ st = re.sub('[ -]', '_', el.find('statusStr').text.upper())
status['hosts'].append({'name': el.find('nodeName').text,
'id': el.find('id').text,
'runtime': float(el.find('runtime').text),
'filesScanned': int(el.find('lookups').text),
'filesMoved': int(el.find('files').text),
'filesFailed': int(el.find('failures').text),
- 'filesSkipped': int(el.find('failures').text),
+ 'filesSkipped': int(el.find('skipped').text),
'totalSizeMoved': int(el.find('size').text),
- 'status': el.find('statusStr').text.upper()})
+ 'status': st})
return status
--
To view, visit http://gerrit.ovirt.org/19674
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I9b90283ac2b9f4b54acfccffa02a30e38e5e9d85
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Bala.FA <barumuga(a)redhat.com>
Gerrit-Reviewer: Aravinda VK <avishwan(a)redhat.com>
Gerrit-Reviewer: Dan Kenigsberg <danken(a)redhat.com>
Gerrit-Reviewer: Saggi Mizrahi <smizrahi(a)redhat.com>
Gerrit-Reviewer: Timothy Asir <tjeyasin(a)redhat.com>
Hello Bala.FA,
I'd like you to do a code review. Please visit
http://gerrit.ovirt.org/22693
to review the following change.
Change subject: gluster: additional parsing of host UUID in verb glusterVolumesList
......................................................................
gluster: additional parsing of host UUID in verb glusterVolumesList
This patch adds parsing of host UUID for all the bricks in the
verb glusterVolumesList.
Change-Id: I9057f3aea0c0ed8fb4d4ec26eaf66fe80ec581e2
Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=1038988
Signed-off-by: ndarshan <dnarayan(a)redhat.com>
---
M vdsm/gluster/cli.py
1 file changed, 2 insertions(+), 2 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/93/22693/1
diff --git a/vdsm/gluster/cli.py b/vdsm/gluster/cli.py
index 167f01c..44d82dc 100644
--- a/vdsm/gluster/cli.py
+++ b/vdsm/gluster/cli.py
@@ -353,10 +353,10 @@
value['transportType'] = [TransportType.RDMA]
else:
value['transportType'] = [TransportType.TCP, TransportType.RDMA]
- value['bricks'] = []
+ value['bricks'] = {}
value['options'] = {}
for b in el.findall('bricks/brick'):
- value['bricks'].append(b.text)
+ value['bricks'][b.text] = b.get('uuid')
for o in el.findall('options/option'):
value['options'][o.find('name').text] = o.find('value').text
volumes[value['volumeName']] = value
--
To view, visit http://gerrit.ovirt.org/22693
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I9057f3aea0c0ed8fb4d4ec26eaf66fe80ec581e2
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Darshan N <dnarayan(a)redhat.com>
Gerrit-Reviewer: Bala.FA <barumuga(a)redhat.com>
Federico Simoncelli has uploaded a new change for review.
Change subject: sp: refactor out the metadata access from StoragePool
......................................................................
sp: refactor out the metadata access from StoragePool
Change-Id: I75493d1db60e51cccd5231b516f963c970d24c99
Signed-off-by: Federico Simoncelli <fsimonce(a)redhat.com>
---
M debian/vdsm.install
M vdsm.spec.in
M vdsm/storage/Makefile.am
M vdsm/storage/hsm.py
M vdsm/storage/sp.py
A vdsm/storage/spbackends.py
6 files changed, 375 insertions(+), 280 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/32/22132/1
diff --git a/debian/vdsm.install b/debian/vdsm.install
index ef46ed3..43ffd5a 100644
--- a/debian/vdsm.install
+++ b/debian/vdsm.install
@@ -113,6 +113,7 @@
./usr/share/vdsm/storage/sdc.py
./usr/share/vdsm/storage/securable.py
./usr/share/vdsm/storage/sp.py
+./usr/share/vdsm/storage/spbackends.py
./usr/share/vdsm/storage/storageConstants.py
./usr/share/vdsm/storage/storageServer.py
./usr/share/vdsm/storage/storage_exception.py
diff --git a/vdsm.spec.in b/vdsm.spec.in
index f248b74..2a70661 100644
--- a/vdsm.spec.in
+++ b/vdsm.spec.in
@@ -926,6 +926,7 @@
%{_datadir}/%{vdsm_name}/storage/sd.py*
%{_datadir}/%{vdsm_name}/storage/securable.py*
%{_datadir}/%{vdsm_name}/storage/sp.py*
+%{_datadir}/%{vdsm_name}/storage/spbackends.py*
%{_datadir}/%{vdsm_name}/storage/storageConstants.py*
%{_datadir}/%{vdsm_name}/storage/storage_exception.py*
%{_datadir}/%{vdsm_name}/storage/storage_mailbox.py*
diff --git a/vdsm/storage/Makefile.am b/vdsm/storage/Makefile.am
index 2d3e9a6..09b40ad 100644
--- a/vdsm/storage/Makefile.am
+++ b/vdsm/storage/Makefile.am
@@ -57,6 +57,7 @@
sd.py \
securable.py \
sp.py \
+ spbackends.py \
storageConstants.py \
storage_exception.py \
storage_mailbox.py \
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index 2ae508e..7ec4345 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -40,6 +40,7 @@
from vdsm.config import config
import sp
+import spbackends
import domainMonitor
import sd
import blockSD
@@ -614,7 +615,7 @@
def _getSpmStatusInfo(pool):
return dict(
zip(('spmStatus', 'spmLver', 'spmId'),
- (pool.spmRole,) + pool.getSpmStatus()))
+ (pool.spmRole,) + pool.backend.getSpmStatus()))
@public
def getSpmStatus(self, spUUID, options=None):
@@ -1035,6 +1036,7 @@
return True
pool = sp.StoragePool(spUUID, self.domainMonitor, self.taskMng)
+ pool.backend = spbackends.StoragePoolMetaBackend(pool)
# Must register domain state change callbacks *before* connecting
# the pool, which starts domain monitor threads. Otherwise we will
@@ -1820,8 +1822,8 @@
except:
domDict[d] = sd.validateSDDeprecatedStatus(status)
- return pool.reconstructMaster(hostId, poolName, masterDom, domDict,
- masterVersion, leaseParams)
+ return pool.backend.reconstructMaster(
+ hostId, poolName, masterDom, domDict, masterVersion, leaseParams)
def _logResp_getDeviceList(self, response):
logableDevs = deepcopy(response)
diff --git a/vdsm/storage/sp.py b/vdsm/storage/sp.py
index d550652..ce60b55 100644
--- a/vdsm/storage/sp.py
+++ b/vdsm/storage/sp.py
@@ -41,7 +41,6 @@
from vdsm.config import config
from sdc import sdCache
import storage_exception as se
-from persistentDict import DictValidator, unicodeEncoder, unicodeDecoder
from remoteFileHandler import Timeout
from securable import secured, unsecured
import image
@@ -52,14 +51,6 @@
import mount
POOL_MASTER_DOMAIN = 'mastersd'
-
-MAX_POOL_DESCRIPTION_SIZE = 50
-
-PMDK_DOMAINS = "POOL_DOMAINS"
-PMDK_POOL_DESCRIPTION = "POOL_DESCRIPTION"
-PMDK_LVER = "POOL_SPM_LVER"
-PMDK_SPM_ID = "POOL_SPM_ID"
-PMDK_MASTER_VER = "MASTER_VERSION"
rmanager = rm.ResourceManager.getInstance()
@@ -83,21 +74,6 @@
k, v = domDecl.split(':')
domList[k.strip("'")] = v.strip("'").capitalize()
return domList
-
-SP_MD_FIELDS = {
- # Key dec, enc
- PMDK_DOMAINS: (domainListDecoder, domainListEncoder),
- PMDK_POOL_DESCRIPTION: (unicodeDecoder, unicodeEncoder),
- PMDK_LVER: (int, str),
- PMDK_SPM_ID: (int, str),
- PMDK_MASTER_VER: (int, str)
-}
-
-# Calculate how many domains can be in the pool before overflowing the Metadata
-MAX_DOMAINS = blockSD.SD_METADATA_SIZE - blockSD.METADATA_BASE_SIZE
-MAX_DOMAINS -= MAX_POOL_DESCRIPTION_SIZE + sd.MAX_DOMAIN_DESCRIPTION_SIZE
-MAX_DOMAINS -= blockSD.PVS_METADATA_SIZE
-MAX_DOMAINS /= 48
@secured
@@ -130,65 +106,14 @@
self.domainMonitor = domainMonitor
self._upgradeCallback = partial(StoragePool._upgradePoolDomain,
proxy(self))
+ self.backend = None
def isSafe(self):
return self._safety.isSet()
- @unsecured
- def getSpmStatus(self):
- try:
- # XXX: in case the host id is not acquired yet we won't be
- # able to get the spm id (I should verify this) and the code
- # below would return SPM_ID_FREE. If that's the case and this
- # new behavior introduces a problem then we should prepend:
- # self.masterDomain.acquireHostId(self.id)
- # that could take a long time.
- lVer, spmId = self.masterDomain.inquireClusterLock()
- lVer, spmId = lVer or LVER_INVALID, spmId or SPM_ID_FREE
- except NotImplementedError:
- poolMeta = self._getPoolMD(self.masterDomain)
-
- # if we claim that we were the SPM (but we're currently not) we
- # have to make sure that we're not returning stale data
- if (poolMeta[PMDK_SPM_ID] == self.id
- and not self.spmRole == SPM_ACQUIRED):
- self.invalidateMetadata()
- poolMeta = self._getPoolMD(self.masterDomain)
-
- lVer, spmId = poolMeta[PMDK_LVER], poolMeta[PMDK_SPM_ID]
-
- return lVer, spmId
-
- def setSpmStatus(self, lVer=None, spmId=None):
- self.invalidateMetadata()
- metaParams = dict(filter(lambda (k, v): v is not None,
- ((PMDK_LVER, lVer), (PMDK_SPM_ID, spmId))))
- # this method must be secured (as it changes the pool metadata),
- # but since it is also used during the SPM status transition by
- # default we override the security for setMetaParams.
- # NOTE: this introduces a race when the method is used in the
- # secured mode, but generally you shouldn't need to call this at
- # any time.
- self.setMetaParams(metaParams, __securityOverride=True)
-
- @unsecured
- def getDomainsMap(self):
- self.invalidateMetadata()
- return self.getMetaParam(PMDK_DOMAINS)
-
- def setDomainsMap(self, domains):
- self.setMetaParam(PMDK_DOMAINS, domains)
-
def __del__(self):
if len(self.domainMonitor.poolMonitoredDomains) > 0:
threading.Thread(target=self.stopMonitoringDomains).start()
-
- @unsecured
- def forceFreeSpm(self):
- # DO NOT USE, STUPID, HERE ONLY FOR BC
- # TODO: SCSI Fence the 'lastOwner'
- self.setSpmStatus(LVER_INVALID, SPM_ID_FREE)
- self.spmRole = SPM_FREE
def _upgradePoolDomain(self, sdUUID, isValid):
# This method is called everytime the onDomainStateChange
@@ -260,7 +185,7 @@
continue
try:
- self.setDomainMasterRole(domain, sd.REGULAR_DOMAIN, 0)
+ self.backend.setDomainMasterRole(domain, sd.REGULAR_DOMAIN, 0)
except:
self.log.exception('Unable to set role for domain %s', sdUUID)
@@ -292,7 +217,7 @@
raise se.OperationInProgress("spm start %s" % self.spUUID)
self.updateMonitoringThreads()
- oldlver, oldid = self.getSpmStatus()
+ oldlver, oldid = self.backend.getSpmStatus()
masterDomVersion = self.getVersion()
# If no specific domain version was specified use current master
# domain version
@@ -324,7 +249,8 @@
try:
self.lver = int(oldlver) + 1
- self.setSpmStatus(self.lver, self.id, __securityOverride=True)
+ self.backend.setSpmStatus(self.lver, self.id,
+ __securityOverride=True)
self._maxHostID = maxHostID
# Upgrade the master domain now if needed
@@ -460,8 +386,8 @@
if not stopFailed:
try:
- self.setSpmStatus(spmId=SPM_ID_FREE,
- __securityOverride=True)
+ self.backend.setSpmStatus(spmId=SPM_ID_FREE,
+ __securityOverride=True)
except:
pass # The system can handle this inconsistency
@@ -532,22 +458,6 @@
# Cleanup links to domains under /rhev/datacenter/poolName
self.refresh(msdUUID, masterVersion)
- @unsecured
- def getMasterVersion(self, useMasterDomain=None):
- domain = (self.masterDomain
- if useMasterDomain is None else useMasterDomain)
- return self._getPoolMD(domain)[PMDK_MASTER_VER]
-
- def setDomainMasterRole(self, domain, role, masterVersion):
- poolMeta = self._getPoolMD(domain)
- # NOTE: the transaction here does not ensure the consistency between
- # the domain and pool metadata. For example if the role on the domain
- # has been changed and the pool metadata transaction fails then the
- # domain role is not reverted to the previous value.
- with poolMeta.transaction():
- poolMeta[PMDK_MASTER_VER] = masterVersion
- domain.changeRole(role)
-
# TODO: Remove or rename this function.
def validatePoolSD(self, sdUUID):
if sdUUID not in self.getDomains():
@@ -564,17 +474,6 @@
if self.spUUID not in dom.getPools():
raise se.StorageDomainNotInPool(self.spUUID, dom.sdUUID)
return True
-
- @unsecured
- def getMaximumSupportedDomains(self):
- msdInfo = self.masterDomain.getInfo()
- msdType = sd.name2type(msdInfo["type"])
- msdVersion = int(msdInfo["version"])
- if msdType in sd.BLOCK_DOMAIN_TYPES and \
- msdVersion in blockSD.VERS_METADATA_LV:
- return MAX_DOMAINS
- else:
- return config.getint("irs", "maximum_domains_in_pool")
@unsecured
def _acquireTemporaryClusterLock(self, msdUUID, leaseParams):
@@ -805,80 +704,13 @@
if not misc.isAscii(poolName) and not domain.supportsUnicode():
raise se.UnicodeArgumentException()
- futurePoolMD.update({
- PMDK_SPM_ID: SPM_ID_FREE,
- PMDK_LVER: LVER_INVALID,
- PMDK_MASTER_VER: masterVersion,
- PMDK_POOL_DESCRIPTION: poolName,
- PMDK_DOMAINS: {domain.sdUUID: sd.DOM_ACTIVE_STATUS}})
-
- @unsecured
- def reconstructMaster(self, hostId, poolName, msdUUID, domDict,
- masterVersion, leaseParams):
- self.log.info("spUUID=%s hostId=%s poolName=%s msdUUID=%s domDict=%s "
- "masterVersion=%s leaseparams=(%s)", self.spUUID, hostId,
- poolName, msdUUID, domDict, masterVersion, leaseParams)
-
- if msdUUID not in domDict:
- raise se.InvalidParameterException("masterDomain", msdUUID)
-
- futureMaster = sdCache.produce(msdUUID)
-
- # @deprecated, domain version < 3
- # For backward compatibility we must support a reconstructMaster
- # that doesn't specify an hostId.
- if not hostId:
- self._acquireTemporaryClusterLock(msdUUID, leaseParams)
- temporaryLock = True
- else:
- # Forcing to acquire the host id (if it's not acquired already).
- futureMaster.acquireHostId(hostId)
- futureMaster.acquireClusterLock(hostId)
-
- # The host id must be set for createMaster(...).
- self.id = hostId
- temporaryLock = False
-
- try:
- self.createMaster(poolName, futureMaster, masterVersion,
- leaseParams)
-
- for sdUUID in domDict:
- domDict[sdUUID] = domDict[sdUUID].capitalize()
-
- # Add domain to domain list in pool metadata.
- self.log.info("Set storage pool domains: %s", domDict)
- self._getPoolMD(futureMaster).update({PMDK_DOMAINS: domDict})
-
- self.refresh(msdUUID=msdUUID, masterVersion=masterVersion)
- finally:
- if temporaryLock:
- self._releaseTemporaryClusterLock(msdUUID)
- self.stopMonitoringDomains()
- else:
- futureMaster.releaseClusterLock()
-
- @unsecured
- def copyPoolMD(self, prevMd, newMD):
- prevPoolMD = self._getPoolMD(prevMd)
- domains = prevPoolMD[PMDK_DOMAINS]
- pool_descr = prevPoolMD[PMDK_POOL_DESCRIPTION]
- lver = prevPoolMD[PMDK_LVER]
- spmId = prevPoolMD[PMDK_SPM_ID]
- # This is actually domain metadata, But I can't change this because of
- # backward compatibility
- leaseParams = prevMd.getLeaseParams()
-
- # Now insert pool metadata into new mastersd metadata
-
- newPoolMD = self._getPoolMD(newMD)
- with newPoolMD.transaction():
- newPoolMD.update({
- PMDK_DOMAINS: domains,
- PMDK_POOL_DESCRIPTION: pool_descr,
- PMDK_LVER: lver,
- PMDK_SPM_ID: spmId})
- newMD.changeLeaseParams(leaseParams)
+# FIXME !!!!!!!!!!
+# futurePoolMD.update({
+# PMDK_SPM_ID: SPM_ID_FREE,
+# PMDK_LVER: LVER_INVALID,
+# PMDK_MASTER_VER: masterVersion,
+# PMDK_POOL_DESCRIPTION: poolName,
+# PMDK_DOMAINS: {domain.sdUUID: sd.DOM_ACTIVE_STATUS}})
@unsecured
def _copyLeaseParameters(self, srcDomain, dstDomain):
@@ -893,7 +725,7 @@
msdUUID)
# TODO: is this check still relevant?
- if not masterVersion > self.getMasterVersion():
+ if not masterVersion > self.backend.getMasterVersion():
raise se.StoragePoolWrongMaster(self.spUUID,
self.masterDomain.sdUUID)
@@ -938,7 +770,7 @@
self.log.error("Unexpected error", exc_info=True)
raise se.StorageDomainMasterCopyError(msdUUID)
- self.copyPoolMD(curmsd, newmsd)
+ self.backend.prepareNewMasterDomain(curmsd, newmsd)
path = newmsd.getMDPath()
if not path:
@@ -968,7 +800,8 @@
self.log.debug("masterMigrate - lease acquired successfully")
try:
- self.setDomainMasterRole(newmsd, sd.MASTER_DOMAIN, masterVersion)
+ self.backend.setDomainMasterRole(
+ newmsd, sd.MASTER_DOMAIN, masterVersion)
self.savePoolParams(self.id, newmsd.sdUUID, masterVersion)
except Exception:
self.log.error("Unexpected error", exc_info=True)
@@ -1014,7 +847,7 @@
if sdUUID in domains:
return True
- if len(domains) >= self.getMaximumSupportedDomains():
+ if len(domains) >= self.backend.getMaximumSupportedDomains():
raise se.TooManyDomainsInStoragePoolError()
try:
@@ -1040,7 +873,7 @@
dom.attach(self.spUUID)
domains[sdUUID] = sd.DOM_ATTACHED_STATUS
- self.setDomainsMap(domains)
+ self.backend.setDomainsMap(domains)
self._refreshDomainLinks(dom)
finally:
@@ -1060,7 +893,7 @@
del domains[sdUUID]
- self.setDomainsMap(domains)
+ self.backend.setDomainsMap(domains)
self._cleanupDomainLinks(sdUUID)
# If the domain that we are detaching is the master domain
@@ -1110,6 +943,23 @@
# Remove domain from pool metadata
self.forcedDetachSD(sdUUID)
+ def detachAllDomains(self):
+ """
+ Detach all domains from pool before destroying pool
+
+ Assumed cluster lock and that SPM is already stopped.
+ """
+ # Find regular (i.e. not master) domains from the pool metadata
+ regularDoms = tuple(sdUUID for sdUUID in self.getDomains()
+ if sdUUID != self.masterDomain.sdUUID)
+ # The Master domain should be detached last
+ for sdUUID in regularDoms:
+ self.detachSD(sdUUID)
+
+ # Forced detach master domain
+ self.forcedDetachSD(self.masterDomain.sdUUID)
+ self.masterDomain.detach(self.spUUID)
+
@unsecured
def _convertDomain(self, domain, targetFormat=None):
# Remember to get the sdUUID before upgrading because the object is
@@ -1149,7 +999,7 @@
# Domain conversion requires the links to be present
self._refreshDomainLinks(dom)
- self.setDomainMasterRole(dom, sd.REGULAR_DOMAIN, 0)
+ self.backend.setDomainMasterRole(dom, sd.REGULAR_DOMAIN, 0)
if dom.getDomainClass() == sd.DATA_DOMAIN:
self._convertDomain(dom)
@@ -1157,7 +1007,7 @@
dom.activate()
# set domains also do rebuild
domainStatuses[sdUUID] = sd.DOM_ACTIVE_STATUS
- self.setDomainsMap(domainStatuses)
+ self.backend.setDomainsMap(domainStatuses)
self.updateMonitoringThreads()
return True
@@ -1220,7 +1070,7 @@
"%s", masterDir, dom)
domList[sdUUID] = sd.DOM_ATTACHED_STATUS
- self.setDomainsMap(domList)
+ self.backend.setDomainsMap(domList)
self.updateMonitoringThreads()
@unsecured
@@ -1393,21 +1243,6 @@
if os.path.exists(os.path.join(vms, vmUUID)):
fileUtils.cleanupdir(os.path.join(vms, vmUUID))
- def setDescription(self, descr):
- """
- Set storage pool description.
- 'descr' - pool description
- """
- if len(descr) > MAX_POOL_DESCRIPTION_SIZE:
- raise se.StoragePoolDescriptionTooLongError()
-
- self.log.info("spUUID=%s descr=%s", self.spUUID, descr)
-
- if not misc.isAscii(descr) and not self.masterDomain.supportsUnicode():
- raise se.UnicodeArgumentException()
-
- self.setMetaParam(PMDK_POOL_DESCRIPTION, descr)
-
def extendVolume(self, sdUUID, volumeUUID, size, isShuttingDown=None):
# This method is not exposed through the remote API but it's called
# directly from the mailbox to implement the thin provisioning on
@@ -1423,27 +1258,6 @@
rm.LockType.exclusive):
return sdCache.produce(sdUUID) \
.produceVolume(imgUUID, volUUID).extendSize(int(newSize))
-
- @classmethod
- def _getPoolMD(cls, domain):
- # This might look disgusting but this makes it so that
- # This is the only intrusion needed to satisfy the
- # unholy union between pool and SD metadata
- return DictValidator(domain._metadata._dict, SP_MD_FIELDS)
-
- @property
- def _metadata(self):
- return self._getPoolMD(self.masterDomain)
-
- @unsecured
- def getDescription(self):
- try:
- return self.getMetaParam(PMDK_POOL_DESCRIPTION)
- # There was a bug that cause pool description to
- # disappear. Returning "" might be ugly but it keeps
- # everyone happy.
- except KeyError:
- return ""
@unsecured
def getVersion(self):
@@ -1461,25 +1275,22 @@
raise se.StoragePoolMasterNotFound(self.spUUID,
self.masterDomain.sdUUID)
- try:
- pmd = self._getPoolMD(self.masterDomain)
- except Exception:
- self.log.error("Pool metadata error", exc_info=True)
- raise se.StoragePoolActionError(self.spUUID)
+ lVer, spmId = self.backend.getSpmStatus()
poolInfo = {
'type': msdInfo['type'],
- 'name': pmd[PMDK_POOL_DESCRIPTION],
- 'domains': domainListEncoder(pmd[PMDK_DOMAINS]),
+ 'name': '',
+ 'domains': domainListEncoder(self.backend.getDomainsMap()),
'master_uuid': self.masterDomain.sdUUID,
- 'master_ver': pmd[PMDK_MASTER_VER],
- 'lver': pmd[PMDK_LVER],
- 'spm_id': pmd[PMDK_SPM_ID],
+ 'master_ver': self.backend.getMasterVersion(),
+ 'lver': lVer,
+ 'spm_id': spmId,
'pool_status': 'uninitialized',
'version': str(msdInfo['version']),
'isoprefix': '',
'pool_status': 'connected',
}
+
return poolInfo
@unsecured
@@ -1498,22 +1309,6 @@
if dom.isISO():
return dom
return None
-
- def setMetaParams(self, params):
- self._metadata.update(params)
-
- def setMetaParam(self, key, value):
- """
- Set key:value in pool metadata file
- """
- self._metadata[key] = value
-
- @unsecured
- def getMetaParam(self, key):
- """
- Get parameter from pool metadata file
- """
- return self._metadata[key]
@unsecured
def setMasterDomain(self, msdUUID, masterVersion):
@@ -1540,7 +1335,7 @@
" %s", msdUUID, self.spUUID)
raise se.StoragePoolWrongMaster(self.spUUID, msdUUID)
- version = self.getMasterVersion(useMasterDomain=domain)
+ version = self.backend.getMasterVersion(useMasterDomain=domain)
if version != int(masterVersion):
self.log.error("Requested master domain %s does not have expected "
"version %s it is version %s",
@@ -1551,11 +1346,6 @@
masterVersion)
self.masterDomain = domain
self.updateMonitoringThreads()
-
- @unsecured
- def invalidateMetadata(self):
- if not self.spmRole == SPM_ACQUIRED:
- self._metadata.invalidate()
@unsecured
@misc.samplingmethod
@@ -1589,7 +1379,7 @@
@unsecured
def getDomains(self, activeOnly=False):
return dict((sdUUID, status) for sdUUID, status
- in self.getDomainsMap().iteritems()
+ in self.backend.getDomainsMap().iteritems()
if not activeOnly or status == sd.DOM_ACTIVE_STATUS)
def checkBackupDomain(self):
@@ -1975,23 +1765,6 @@
self._maxHostID
self.spmMailer.setMaxHostID(maxID)
raise se.NotImplementedException
-
- def detachAllDomains(self):
- """
- Detach all domains from pool before destroying pool
-
- Assumed cluster lock and that SPM is already stopped.
- """
- # Find regular (i.e. not master) domains from the pool metadata
- regularDoms = tuple(sdUUID for sdUUID in self.getDomains()
- if sdUUID != self.masterDomain.sdUUID)
- # The Master domain should be detached last
- for sdUUID in regularDoms:
- self.detachSD(sdUUID)
-
- # Forced detach master domain
- self.forcedDetachSD(self.masterDomain.sdUUID)
- self.masterDomain.detach(self.spUUID)
def setVolumeDescription(self, sdUUID, imgUUID, volUUID, description):
self.validatePoolSD(sdUUID)
diff --git a/vdsm/storage/spbackends.py b/vdsm/storage/spbackends.py
new file mode 100644
index 0000000..e0f8013
--- /dev/null
+++ b/vdsm/storage/spbackends.py
@@ -0,0 +1,317 @@
+#
+# Copyright 2013 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Refer to the README and COPYING files for full details of the license
+#
+
+import logging
+import weakref
+
+import blockSD
+import misc
+import sd
+import storage_exception as se
+
+from persistentDict import DictValidator
+from persistentDict import unicodeDecoder
+from persistentDict import unicodeEncoder
+from sdc import sdCache
+from securable import secured
+from securable import unsecured
+from sp import LVER_INVALID
+from sp import SPM_ACQUIRED
+from sp import SPM_FREE
+from sp import SPM_ID_FREE
+from sp import domainListDecoder
+from sp import domainListEncoder
+from vdsm.config import config
+
+
+# Longest pool description that still fits in the on-disk pool metadata.
+MAX_POOL_DESCRIPTION_SIZE = 50
+
+# Keys of the legacy pool metadata stored on the master domain
+# (PMDK = Pool MetaData Key).
+PMDK_DOMAINS = "POOL_DOMAINS"
+PMDK_POOL_DESCRIPTION = "POOL_DESCRIPTION"
+PMDK_LVER = "POOL_SPM_LVER"
+PMDK_SPM_ID = "POOL_SPM_ID"
+PMDK_MASTER_VER = "MASTER_VERSION"
+
+
+# Calculate how many domains can be in the pool before overflowing the Metadata
+MAX_DOMAINS = blockSD.SD_METADATA_SIZE - blockSD.METADATA_BASE_SIZE
+MAX_DOMAINS -= MAX_POOL_DESCRIPTION_SIZE + sd.MAX_DOMAIN_DESCRIPTION_SIZE
+MAX_DOMAINS -= blockSD.PVS_METADATA_SIZE
+# NOTE(review): 48 is presumably the per-domain entry size in bytes in the
+# pool metadata -- TODO confirm against the block domain metadata layout.
+MAX_DOMAINS /= 48
+
+
+# Decoder/encoder pairs used by DictValidator to (de)serialize each pool
+# metadata key when reading from / writing to the master domain metadata.
+SP_MD_FIELDS = {
+    # Key dec, enc
+    PMDK_DOMAINS: (domainListDecoder, domainListEncoder),
+    PMDK_POOL_DESCRIPTION: (unicodeDecoder, unicodeEncoder),
+    PMDK_LVER: (int, str),
+    PMDK_SPM_ID: (int, str),
+    PMDK_MASTER_VER: (int, str)
+}
+
+
+@secured
+class StoragePoolMetaBackend(object):
+    """
+    Storage pool backend that keeps the pool state in the legacy pool
+    metadata stored on the master domain (the PMDK_* keys).  It holds a
+    weak proxy to the owning StoragePool and implements the lVer/spmId/
+    domains-map accessors on top of that on-disk metadata.
+    """
+
+    # Only the back-reference to the pool is kept; all other state lives
+    # in the pool metadata on the master domain.
+    __slots__ = ('pool',)
+
+    log = logging.getLogger('Storage.StoragePoolMetaBackend')
+
+    def __init__(self, pool):
+        # weakref.proxy avoids a pool <-> backend reference cycle.
+        self.pool = weakref.proxy(pool)
+
+    ### Read-Only StoragePool Object Accessors ###
+
+    def isSafe(self):
+        # Forwarded so the @secured machinery can query the pool's state.
+        return self.pool.isSafe()
+
+    @property
+    def id(self):
+        # Host id of the pool (read-only view onto the pool).
+        return self.pool.id
+
+    @property
+    def spmRole(self):
+        # Current SPM role of the pool (read-only view onto the pool).
+        return self.pool.spmRole
+
+    @property
+    def spUUID(self):
+        # Pool UUID (read-only view onto the pool).
+        return self.pool.spUUID
+
+    @property
+    def masterDomain(self):
+        # Master storage domain object (read-only view onto the pool).
+        return self.pool.masterDomain
+
+    ### StoragePool Abstract Methods Implementation ###
+
+    @unsecured
+    def getSpmStatus(self):
+        """
+        Return the (lVer, spmId) pair, preferring the cluster lock
+        inquiry and falling back to the on-disk pool metadata when the
+        master domain's lock implementation does not support inquire.
+        """
+        try:
+            # XXX: in case the host id is not acquired yet we won't be
+            # able to get the spm id (I should verify this) and the code
+            # below would return SPM_ID_FREE. If that's the case and this
+            # new behavior introduces a problem then we should prepend:
+            # self.masterDomain.acquireHostId(self.id)
+            # that could take a long time.
+            lVer, spmId = self.masterDomain.inquireClusterLock()
+            lVer, spmId = lVer or LVER_INVALID, spmId or SPM_ID_FREE
+        except NotImplementedError:
+            poolMeta = self._getPoolMD(self.masterDomain)
+
+            # if we claim that we were the SPM (but we're currently not) we
+            # have to make sure that we're not returning stale data
+            if (poolMeta[PMDK_SPM_ID] == self.id
+                    and not self.spmRole == SPM_ACQUIRED):
+                self.invalidateMetadata()
+                poolMeta = self._getPoolMD(self.masterDomain)
+
+            lVer, spmId = poolMeta[PMDK_LVER], poolMeta[PMDK_SPM_ID]
+
+        return lVer, spmId
+
+    def setSpmStatus(self, lVer=None, spmId=None):
+        """
+        Persist lVer and/or spmId in the pool metadata; a None argument
+        leaves the corresponding key untouched.
+        """
+        self.invalidateMetadata()
+        metaParams = dict(filter(lambda (k, v): v is not None,
+                          ((PMDK_LVER, lVer), (PMDK_SPM_ID, spmId))))
+        # this method must be secured (as it changes the pool metadata),
+        # but since it is also used during the SPM status transition by
+        # default we override the security for setMetaParams.
+        # NOTE: this introduces a race when the method is used in the
+        # secured mode, but generally you shouldn't need to call this at
+        # any time.
+        self.setMetaParams(metaParams, __securityOverride=True)
+
+    @unsecured
+    def getDomainsMap(self):
+        """
+        Return the {sdUUID: status} map from the pool metadata.
+
+        The map may be changed by the SPM on another host, so the cached
+        metadata is invalidated first (no-op while we hold the SPM role).
+        """
+        self.invalidateMetadata()
+        return self.getMetaParam(PMDK_DOMAINS)
+
+    def setDomainsMap(self, domains):
+        # Persist the {sdUUID: status} map in the pool metadata.
+        self.setMetaParam(PMDK_DOMAINS, domains)
+
+    @unsecured
+    def getMaximumSupportedDomains(self):
+        """
+        Return how many domains this pool can hold: bounded by the
+        metadata LV size (MAX_DOMAINS) for block master domains whose
+        version keeps metadata on an LV, otherwise by configuration.
+        """
+        msdInfo = self.masterDomain.getInfo()
+        msdType = sd.name2type(msdInfo["type"])
+        msdVersion = int(msdInfo["version"])
+        if msdType in sd.BLOCK_DOMAIN_TYPES and \
+                msdVersion in blockSD.VERS_METADATA_LV:
+            return MAX_DOMAINS
+        else:
+            return config.getint("irs", "maximum_domains_in_pool")
+
+    @unsecured
+    def getMasterVersion(self, useMasterDomain=None):
+        """
+        Return the master version recorded in the pool metadata of
+        useMasterDomain (defaults to the current master domain).
+        """
+        domain = (self.masterDomain
+                  if useMasterDomain is None else useMasterDomain)
+        return self._getPoolMD(domain)[PMDK_MASTER_VER]
+
+    def setDomainMasterRole(self, domain, role, masterVersion):
+        """
+        Record masterVersion in the pool metadata of domain and change
+        the domain's role accordingly.
+        """
+        poolMeta = self._getPoolMD(domain)
+        # NOTE: the transaction here does not ensure the consistency between
+        # the domain and pool metadata. For example if the role on the domain
+        # has been changed and the pool metadata transaction fails then the
+        # domain role is not reverted to the previous value.
+        with poolMeta.transaction():
+            poolMeta[PMDK_MASTER_VER] = masterVersion
+            domain.changeRole(role)
+
+    @unsecured
+    def prepareNewMasterDomain(self, prevMd, newMD):
+        """
+        Copy the pool metadata (domains map, description, lVer, spm id)
+        and the lease parameters from the old master domain prevMd to
+        the future master domain newMD.
+        """
+        prevPoolMD = self._getPoolMD(prevMd)
+        domains = prevPoolMD[PMDK_DOMAINS]
+        pool_descr = prevPoolMD[PMDK_POOL_DESCRIPTION]
+        lver = prevPoolMD[PMDK_LVER]
+        spmId = prevPoolMD[PMDK_SPM_ID]
+        # This is actually domain metadata, But I can't change this because of
+        # backward compatibility
+        leaseParams = prevMd.getLeaseParams()
+
+        # Now insert pool metadata into new mastersd metadata
+
+        newPoolMD = self._getPoolMD(newMD)
+        with newPoolMD.transaction():
+            newPoolMD.update({
+                PMDK_DOMAINS: domains,
+                PMDK_POOL_DESCRIPTION: pool_descr,
+                PMDK_LVER: lver,
+                PMDK_SPM_ID: spmId})
+            newMD.changeLeaseParams(leaseParams)
+
+    ### StoragePool Overridden Methods ###
+
+    @unsecured
+    def getInfo(self):
+        # NOTE(review): this class derives only from object, which has no
+        # getInfo(); unless a cooperating base class is mixed in at runtime
+        # this super() call raises AttributeError -- confirm how the class
+        # is composed before relying on this method.
+        poolInfo = super(StoragePoolMetaBackend, self).getInfo()
+
+        # XXX: in the previous implementation the pool name was read from
+        # the metadata inline; here it is layered on top of the base info
+        # (which reports an empty 'name') -- TODO confirm intent, the
+        # original comment was left unfinished.
+        poolInfo.update({'name': self.getDescription()})
+        return poolInfo
+
+    ### Backend Specific Methods ###
+
+    @unsecured
+    def forceFreeSpm(self):
+        # DO NOT USE, STUPID, HERE ONLY FOR BC
+        # TODO: SCSI Fence the 'lastOwner'
+        self.setSpmStatus(LVER_INVALID, SPM_ID_FREE)
+        # NOTE(review): spmRole is a read-only property here and __slots__
+        # is ('pool',), so this assignment looks like it would raise
+        # AttributeError -- probably meant self.pool.spmRole; confirm.
+        self.spmRole = SPM_FREE
+
+    @classmethod
+    def _getPoolMD(cls, domain):
+        # This might look disgusting but this makes it so that
+        # This is the only intrusion needed to satisfy the
+        # unholy union between pool and SD metadata
+        return DictValidator(domain._metadata._dict, SP_MD_FIELDS)
+
+    @property
+    def _metadata(self):
+        # Validated view over the current master domain's metadata dict.
+        return self._getPoolMD(self.masterDomain)
+
+    @unsecured
+    def getMetaParam(self, key):
+        """
+        Get parameter from pool metadata file
+        """
+        return self._metadata[key]
+
+    def setMetaParams(self, params):
+        # Bulk-update the pool metadata keys given in params.
+        self._metadata.update(params)
+
+    def setMetaParam(self, key, value):
+        """
+        Set key:value in pool metadata file
+        """
+        self._metadata[key] = value
+
+    @unsecured
+    def getDescription(self):
+        """
+        Return the pool description, or "" when the key is missing.
+        """
+        try:
+            return self.getMetaParam(PMDK_POOL_DESCRIPTION)
+        # There was a bug that cause pool description to
+        # disappear. Returning "" might be ugly but it keeps
+        # everyone happy.
+        except KeyError:
+            return ""
+
+    def setDescription(self, descr):
+        """
+        Set storage pool description.
+        'descr' - pool description
+        """
+        if len(descr) > MAX_POOL_DESCRIPTION_SIZE:
+            raise se.StoragePoolDescriptionTooLongError()
+
+        self.log.info("spUUID=%s descr=%s", self.spUUID, descr)
+
+        if not misc.isAscii(descr) and not self.masterDomain.supportsUnicode():
+            raise se.UnicodeArgumentException()
+
+        self.setMetaParam(PMDK_POOL_DESCRIPTION, descr)
+
+    @unsecured
+    def invalidateMetadata(self):
+        """
+        Drop the cached pool metadata so the next read hits the storage;
+        skipped while we hold the SPM role (only we can change it then).
+        """
+        if not self.spmRole == SPM_ACQUIRED:
+            self._metadata.invalidate()
+
+    @unsecured
+    def reconstructMaster(self, hostId, poolName, msdUUID, domDict,
+                          masterVersion, leaseParams):
+        """
+        Rebuild the pool metadata on msdUUID as the new master domain.
+
+        Acquires the cluster lock (a temporary one for pre-v3 callers
+        that pass no hostId), recreates the master metadata and the
+        domains map, then refreshes the pool.
+        """
+        self.log.info("spUUID=%s hostId=%s poolName=%s msdUUID=%s domDict=%s "
+                      "masterVersion=%s leaseparams=(%s)", self.spUUID, hostId,
+                      poolName, msdUUID, domDict, masterVersion, leaseParams)
+
+        if msdUUID not in domDict:
+            raise se.InvalidParameterException("masterDomain", msdUUID)
+
+        futureMaster = sdCache.produce(msdUUID)
+
+        # @deprecated, domain version < 3
+        # For backward compatibility we must support a reconstructMaster
+        # that doesn't specify an hostId.
+        if not hostId:
+            self.pool._acquireTemporaryClusterLock(msdUUID, leaseParams)
+            temporaryLock = True
+        else:
+            # Forcing to acquire the host id (if it's not acquired already).
+            futureMaster.acquireHostId(hostId)
+            futureMaster.acquireClusterLock(hostId)
+
+            # The host id must be set for createMaster(...).
+            # NOTE(review): 'id' is a read-only property over the pool and
+            # the class defines __slots__, so this assignment looks like it
+            # would raise AttributeError -- likely meant self.pool.id.
+            self.id = hostId
+            temporaryLock = False
+
+        try:
+            self.pool.createMaster(poolName, futureMaster, masterVersion,
+                                   leaseParams)
+
+            for sdUUID in domDict:
+                domDict[sdUUID] = domDict[sdUUID].capitalize()
+
+            # Add domain to domain list in pool metadata.
+            self.log.info("Set storage pool domains: %s", domDict)
+            self._getPoolMD(futureMaster).update({PMDK_DOMAINS: domDict})
+
+            self.pool.refresh(msdUUID=msdUUID, masterVersion=masterVersion)
+        finally:
+            if temporaryLock:
+                self.pool._releaseTemporaryClusterLock(msdUUID)
+                self.pool.stopMonitoringDomains()
+            else:
+                futureMaster.releaseClusterLock()
--
To view, visit http://gerrit.ovirt.org/22132
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I75493d1db60e51cccd5231b516f963c970d24c99
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Federico Simoncelli <fsimonce(a)redhat.com>