[NEW PATCH] Related to BZ#726400 - ISO domain on block is not supported. (via gerrit-bot)
by ewarszaw@redhat.com
New patch submitted by Eduardo Warszawski (ewarszaw(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/831
commit 0512921f276b788656b48987cf8a7480cefcb158
Author: Eduardo Warszawski <ewarszaw(a)redhat.com>
Date: Wed Aug 17 18:23:45 2011 +0300
Related to BZ#726400 - ISO domain on block is not supported.
Change-Id: I02639f9da4a978f018ee719025483e90447a140b
diff --git a/vdsm/storage/blockSD.py b/vdsm/storage/blockSD.py
index 78ec42b..36645c1 100644
--- a/vdsm/storage/blockSD.py
+++ b/vdsm/storage/blockSD.py
@@ -909,11 +909,6 @@ class BlockStorageDomain(sd.StorageDomain):
src = lvm.lvPath(self.sdUUID, lvName)
os.symlink(src, dst)
- # create special imageUUID for ISO/Floppy volumes
- isoPath = os.path.join(imagesPath, sd.ISO_IMAGE_UUID)
- if self.isISO():
- fileUtils.createdir(isoPath)
-
def extendVolume(self, volumeUUID, size, isShuttingDown=None):
self._extendlock.acquire()
try:
12 years, 1 month
[NEW PATCH] BZ#723579 - Disown child procs so they wouldn't hold VDSM shutdown (via gerrit-bot)
by smizrahi@redhat.com
New patch submitted by Saggi Mizrahi (smizrahi(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/863
commit b705b2249e4635a1e3f3c4feeede53081c8a6a52
Author: Saggi Mizrahi <smizrahi(a)redhat.com>
Date: Mon Aug 29 16:13:24 2011 +0300
BZ#723579 - Disown child procs so they wouldn't hold VDSM shutdown
This patch touches a protected variable in the python multiprocessing
framework. This is not ideal but necessary to keep VDSM from getting
stuck waiting for a child.
Change-Id: I7b96b6ac5b35e28cb2f8232f580651a3a36db295
diff --git a/vdsm/storage/processPool.py b/vdsm/storage/processPool.py
index fc843c4..6c6632d 100644
--- a/vdsm/storage/processPool.py
+++ b/vdsm/storage/processPool.py
@@ -18,7 +18,7 @@
# Refer to the README and COPYING files for full details of the license
#
-from multiprocessing import Pipe, Process
+from multiprocessing import Pipe, Process, current_process
from threading import Lock
import os
import signal
@@ -111,6 +111,11 @@ class ProcessPool(object):
# The locks remain locked on purpose so no one will
# be able to run further commands
+def disown(proc):
+ # I know touching _children is wrong but there is no public API for
+ # disowning a child
+ current_process()._children.discard(proc)
+
class Helper(object):
def __init__(self):
self.lifeline, childsLifeline = os.pipe()
@@ -118,6 +123,8 @@ class Helper(object):
self.proc = Process(target=_helperMainLoop, args=(hisPipe, childsLifeline, self.lifeline))
self.proc.daemon = True
self.proc.start()
+ disown(self.proc)
+
os.close(childsLifeline)
def kill(self):
12 years, 1 month
[NEW PATCH] BZ#732416- Introducing deleteMultipleVolumes (via gerrit-bot)
by ewarszaw@redhat.com
New patch submitted by Eduardo Warszawski (ewarszaw(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/881
commit ff6df4423c30fb6507b1f6e864247e5a3c4262da
Author: Eduardo Warszawski <ewarszaw(a)redhat.com>
Date: Mon Aug 29 18:59:01 2011 +0300
BZ#732416- Introducing deleteMultipleVolumes
Change-Id: I0d8bbff8d364ef85d62237dd13caede1e60f3449
diff --git a/vdsm/storage/blockVolume.py b/vdsm/storage/blockVolume.py
index 9d2e0d7..9b51d99 100644
--- a/vdsm/storage/blockVolume.py
+++ b/vdsm/storage/blockVolume.py
@@ -648,3 +648,33 @@ def _getVolumeTag(sdUUID, volUUID, tagPrefix):
return tag[len(tagPrefix):]
raise se.MissingTagOnLogicalVolume(volUUID, tagPrefix)
+
+def _postZero(sdUUID, volumes):
+ #Assumed here that the volume is active.
+ #To activate all the volumes of an image at once get its resource.
+ #See http://gerrit.usersys.redhat.com/771
+ #Assert volumes are writable. (Don't do this at home.)
+ lvNames = (vol.volUUID for vol in volumes)
+ try:
+ lvm._lvminfo._changelv(sdUUID, lvNames, "--permission", "rw")
+ except se.StorageException, e:
+ #Hope this only means that some volumes were already writable
+ pass
+ for lv in lvm.getLV(sdUUID):
+ if lv.name in lvNames:
+ # wipe out the whole volume
+ try:
+ misc.ddWatchCopy("/dev/zero", lvm.lvPath(sdUUID, lv.name), vars.task.aborting, int(lv.size),
+ recoveryCallback=volume.baseAsyncTasksRollback)
+ except se.ActionStopped, e:
+ raise e
+ except Exception, e:
+ raise se.VolumesZeroingError(lv.name)
+
+def deleteMultipleVolumes(sdUUID, volumes, postZero):
+ "Delete multiple volumes (LVs) in the same domain (VG)."""
+ if postZero:
+ _postZero(sdUUID, volumes)
+ lvNames = (vol.volUUID for vol in volumes)
+ lvm.removeLVs(sdUUID, lvNames)
+
diff --git a/vdsm/storage/fileVolume.py b/vdsm/storage/fileVolume.py
index f6aae79..c16d68c 100644
--- a/vdsm/storage/fileVolume.py
+++ b/vdsm/storage/fileVolume.py
@@ -23,6 +23,7 @@ import uuid
import storage_exception as se
from sdf import StorageDomainFactory as SDF
+import outOfProcess as oop
import volume
import image
import sd
@@ -31,12 +32,25 @@ import misc
import task
from threadLocal import vars
-
def getDomUuidFromVolumePath(volPath):
# Volume path has pattern:
# /rhev/data-center/spUUID/sdUUID/images/imgUUID/volUUID
return volPath.split('/')[4]
+
+def deleteMultipleVolumes(sdUUID, volumes, postZero):
+ #Posix asserts that the blocks will be zeroed before reuse
+ volPaths = []
+ for vol in volumes:
+ vol.setLegality(volume.ILLEGAL_VOL)
+ volPaths.append(vol.getVolumePath())
+ try:
+ oop.fileUtils.cleanupfiles(volPaths)
+ except OSError:
+ volume.log.error("cannot delete some volumes at paths: %s",
+ volPaths, exc_info=True)
+
+
class FileVolume(volume.Volume):
""" Actually represents a single volume (i.e. part of virtual disk).
"""
diff --git a/vdsm/storage/image.py b/vdsm/storage/image.py
index b97c69f..a68f168 100644
--- a/vdsm/storage/image.py
+++ b/vdsm/storage/image.py
@@ -207,12 +207,13 @@ class Image:
volumes = [volclass(self.repoPath, sdUUID, imgUUID, volUUID) for volUUID in uuidlist]
# If we got here then go ahead and remove all of them without mercy
- for vol in volumes:
+ if volumes:
try:
- vol.delete(postZero=postZero, force=True)
- except Exception, ex:
- # Volume deletion failed, but we don't really care at this point
- self.log.warn("Problems during image %s deletion (%s). Continue...", imgUUID, str(ex))
+ #No, this is not a classmethod! No validations here
+ volclass.__module__.deleteMultipleVolumes(sdUUID, volumes, postZero)
+ except (se.CannotRemoveLogicalVolume, se.VolumeAccessError):
+ #Any volume deletion failed, but we don't really care at this point
+ self.log.warn("Problems during image %s deletion (%s). Continue...", exc_info=True)
# Now clean the image directory
removedImage = imageDir = self.getImageDir(sdUUID, imgUUID)
diff --git a/vdsm/storage/spm.py b/vdsm/storage/spm.py
index 112b69f..dd20909 100644
--- a/vdsm/storage/spm.py
+++ b/vdsm/storage/spm.py
@@ -1519,6 +1519,12 @@ class SPM:
hsm.HSM.getPool(spUUID) #Validates that the pool is connected. WHY?
hsm.HSM.validateSdUUID(sdUUID)
+ #Need this resource to induce all the LVs in the image to be active
+ #at once if zeroed.
+ #See http://gerrit.usersys.redhat.com/771
+ if postZero:
+ vars.task.getSharedLock(STORAGE, imgUUID)
+
vars.task.getSharedLock(STORAGE, sdUUID)
# Do not validate if forced.
repoPath = os.path.join(self.storage_repository, spUUID)
12 years, 1 month
[NEW PATCH] BZ#732245, BZ#732269 - Handle EAGAIN and EINTR in AsyncProc.communicate (via gerrit-bot)
by smizrahi@redhat.com
New patch submitted by Saggi Mizrahi (smizrahi(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/865
commit 77cbfcc06bc83280d21478a16c4a531de8491e2d
Author: Saggi Mizrahi <smizrahi(a)redhat.com>
Date: Tue Aug 30 11:14:25 2011 +0300
BZ#732245, BZ#732269 - Handle EAGAIN and EINTR in AsyncProc.communicate
Change-Id: I2e0f8494826e9cb0debf8a9d3c9a591ac883b376
diff --git a/vdsm/storage/misc.py b/vdsm/storage/misc.py
index b16df9b..79fc3cc 100644
--- a/vdsm/storage/misc.py
+++ b/vdsm/storage/misc.py
@@ -855,7 +855,16 @@ class AsyncProc(object):
# turn on only if data is waiting to be pushed
self._poller.modify(self._fdin, select.EPOLLOUT)
- for fd, event in self._poller.poll(1):
+ pollres = None
+ while pollres is None:
+ try:
+ pollres = self._poller.poll(1)
+ except OSError as e:
+ if e.errno in (errno.EINTR, errno.EAGAIN):
+ continue
+ raise
+
+ for fd, event in pollres:
stream = self._fdMap[fd]
if event & select.EPOLLOUT and self._stdin.len > 0:
buff = self._stdin.read(BUFFSIZE)
12 years, 1 month
[NEW PATCH] Don't include vdsClient in tarball (via gerrit-bot)
by smizrahi@redhat.com
New patch submitted by Saggi Mizrahi (smizrahi(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/868
commit e5f64d372cc802fae3b7d633f90701974ccd553a
Author: Saggi Mizrahi <smizrahi(a)redhat.com>
Date: Tue Aug 30 16:14:04 2011 +0300
Don't include vdsClient in tarball
Change-Id: Ia4996be04a14fcb54a5874556f3c19331aaf03a5
diff --git a/vdsm_cli/Makefile.am b/vdsm_cli/Makefile.am
index b9cf6a8..a8e9f87 100644
--- a/vdsm_cli/Makefile.am
+++ b/vdsm_cli/Makefile.am
@@ -6,7 +6,7 @@
# LICENSE_GPL_v2 which accompany this distribution.
#
-dist_bin_SCRIPTS = \
+nodist_bin_SCRIPTS = \
vdsClient
dist_vdsm_DATA = \
12 years, 1 month
[NEW PATCH] BZ#732765 - Don't return empty strings for 'disktotal'/'diskfree' on unreachable domains during getStoragePoolInfo (via gerrit-bot)
by Igor Lvovsky
New patch submitted by Igor Lvovsky (ilvovsky(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/862
commit d9e65876c7e35379a535762505692001c2b7387a
Author: Igor Lvovsky <ilvovsky(a)redhat.com>
Date: Mon Aug 29 12:54:56 2011 +0300
BZ#732765 - Don't return empty strings for 'disktotal'/'diskfree' on unreachable domains during getStoragePoolInfo
Change-Id: I64ae55b4942cf5748d3d8ada445a3381498ea258
diff --git a/vdsm/storage/sp.py b/vdsm/storage/sp.py
index 4fbf95f..b381936 100644
--- a/vdsm/storage/sp.py
+++ b/vdsm/storage/sp.py
@@ -1072,11 +1072,13 @@ class StoragePool:
try:
stats.update(SDF.produce(item).getStats())
except:
- self.log.error("Could get information for domain `%s`", item, exc_info=True)
+ self.log.error("Could not get information for domain %s", item, exc_info=True)
# Domain is unavailable and we have nothing in the cache
- # Return defaults
- stats['disktotal'] = ""
- stats['diskfree'] = ""
+ try:
+ del(stats['disktotal'])
+ del(stats['diskfree'])
+ except KeyError:
+ pass
stats['alerts'] = alerts
stats['status'] = domDict[item]
12 years, 1 month
[NEW PATCH] Fix new killall (via gerrit-bot)
by smizrahi@redhat.com
New patch submitted by Saggi Mizrahi (smizrahi(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/871
commit 3797a10d6e037eb39e6587ee5f0a4c3d65579009
Author: Saggi Mizrahi <smizrahi(a)redhat.com>
Date: Wed Aug 31 14:00:03 2011 +0300
Fix new killall
Change-Id: I42b7f49f4ca14b22d343b99763613700c246f33b
diff --git a/vdsm/storage/misc.py b/vdsm/storage/misc.py
index b16df9b..247211d 100644
--- a/vdsm/storage/misc.py
+++ b/vdsm/storage/misc.py
@@ -1224,11 +1224,11 @@ class OperationMutex(object):
self._lock.release()
self._cond.notifyAll()
-def killall(signum, name, group=False):
+def killall(name, signum, group=False):
exception = None
knownPgs = set()
pidList = pgrep(name)
- if len(pidList):
+ if len(pidList) == 0:
raise OSError(errno.ESRCH, "Could not find processes named `%s`" % name)
for pid in pidList:
@@ -1240,7 +1240,7 @@ def killall(signum, name, group=False):
continue
knownPgs.add(pgid)
- os.killpg(pgid)
+ os.killpg(pgid, signum)
else:
os.kill(pid, signum)
except OSError, e:
12 years, 1 month
[NEW PATCH] BZ#705058 - Don't create a new pool object on storage refresh (via gerrit-bot)
by Yotam Oron
New patch submitted by Yotam Oron (yoron(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/796
commit 75f3a8102d4dad02727ea160f4afb71ae0c9c880
Author: Yotam Oron <yoron(a)redhat.com>
Date: Sun Aug 7 18:22:10 2011 +0300
BZ#705058 - Don't create a new pool object on storage refresh
Use the standard interface for pool connection and avoid double pool creation
Change-Id: I40d86257c239736c74d715775bcc27cab4613125
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index d9d8e21..f7ec4cc 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -170,10 +170,9 @@ class HSM:
poolPath = os.path.join(self.storage_repository, spUUID)
try:
if os.path.exists(poolPath):
- with rmanager.acquireResource(STORAGE, spUUID, rm.LockType.exclusive):
- self._restorePool(spUUID)
- #TODO Once we support simultaneous connection to multiple pools, remove following line (break)
- break
+ self.public_connectStoragePool(spUUID, None, None, None, None, setDefaulException=False)
+ #TODO Once we support simultaneous connection to multiple pools, remove following line (break)
+ break
except Exception:
self.log.error("Unexpected error", exc_info=True)
@@ -394,15 +393,6 @@ class HSM:
pool.hsmMailer.flushMessages()
- def _restorePool(self, spUUID):
- self.log.info("RESTOREPOOL: %s", spUUID)
- pool = sp.StoragePool(spUUID)
- if pool.reconnect():
- self.pools[spUUID] = pool
- return True
- self.log.info("RESTOREPOOL: %s reconnect failed", spUUID)
-
-
def public_createStoragePool(self, poolType, spUUID, poolName, masterDom, domList, masterVersion, lockPolicy=None, lockRenewalIntervalSec=None, leaseTimeSec=None, ioOpTimeoutSec=None, leaseRetries=None, options = None):
"""
Create new storage pool with single/multiple image data domain.
@@ -484,7 +474,8 @@ class HSM:
return sp.StoragePool(spUUID).create(poolName, masterDom, domList, masterVersion, safeLease)
- def public_connectStoragePool(self, spUUID, hostID, scsiKey, msdUUID, masterVersion, options = None):
+ def public_connectStoragePool(self, spUUID, hostID, scsiKey, msdUUID, masterVersion, options = None,
+ setDefaulException=True):
"""
Connect a Host to a specific storage pool.
@@ -504,13 +495,13 @@ class HSM:
:raises: :exc:`storage_exception.ConnotConnectMultiplePools` when storage pool is not connected to the system.
"""
- vars.task.setDefaultException(
- se.StoragePoolConnectionError("spUUID=%s, msdUUID=%s, masterVersion=%s, " \
- "hostID=%s, scsiKey=%s" % (str(spUUID), str(msdUUID),
- str(masterVersion), str(hostID), str(scsiKey))
+ if setDefaulException:
+ vars.task.setDefaultException(
+ se.StoragePoolConnectionError("spUUID=%s, msdUUID=%s, masterVersion=%s, " \
+ "hostID=%s, scsiKey=%s" % (str(spUUID), str(msdUUID),
+ str(masterVersion), str(hostID), str(scsiKey))
+ )
)
- )
- misc.validateN(hostID, 'hostID')
misc.validateUUID(spUUID, 'spUUID')
# TBD: To support multiple pool connection on single host,
@@ -523,25 +514,32 @@ class HSM:
except se.StoragePoolUnknown:
pass #pool not connected yet
else:
- vars.task.getSharedLock(STORAGE, spUUID)
- pool = self.getPool(spUUID)
- pool.verifyMasterDomain(msdUUID=msdUUID, masterVersion=masterVersion)
- return
-
- vars.task.getExclusiveLock(STORAGE, spUUID)
- try:
- pool = self.getPool(spUUID)
- except se.StoragePoolUnknown:
- pass #pool not connected yet
- else:
- pool.verifyMasterDomain(msdUUID=msdUUID, masterVersion=masterVersion)
- return
+ with rmanager.acquireResource(STORAGE, spUUID, rm.LockType.shared):
+ pool = self.getPool(spUUID)
+ if not msdUUID or not masterVersion:
+ hostID, scsiKey, msdUUID, masterVersion = pool.getPoolParams()
+ misc.validateN(hostID, 'hostID')
+ pool.verifyMasterDomain(msdUUID=msdUUID, masterVersion=masterVersion)
+ return
+
+ with rmanager.acquireResource(STORAGE, spUUID, rm.LockType.exclusive):
+ try:
+ pool = self.getPool(spUUID)
+ except se.StoragePoolUnknown:
+ pass #pool not connected yet
+ else:
+ if not msdUUID or not masterVersion:
+ hostID, scsiKey, msdUUID, masterVersion = pool.getPoolParams()
+ pool.verifyMasterDomain(msdUUID=msdUUID, masterVersion=masterVersion)
+ return
- pool = sp.StoragePool(spUUID)
- res = pool.connect(hostID, scsiKey, msdUUID, masterVersion)
- if res:
- self.pools[spUUID] = pool
- return res
+ pool = sp.StoragePool(spUUID)
+ if not hostID or not scsiKey or not msdUUID or not masterVersion:
+ hostID, scsiKey, msdUUID, masterVersion = pool.getPoolParams()
+ res = pool.connect(hostID, scsiKey, msdUUID, masterVersion)
+ if res:
+ self.pools[spUUID] = pool
+ return res
def public_disconnectStoragePool(self, spUUID, hostID, scsiKey, remove=False, options = None):
"""
diff --git a/vdsm/storage/sp.py b/vdsm/storage/sp.py
index 8d2eebb..9461de0 100644
--- a/vdsm/storage/sp.py
+++ b/vdsm/storage/sp.py
@@ -400,31 +400,22 @@ class StoragePool:
return True
- def reconnect(self):
- self.log.info("Trying to reconnect to pool: %s" % self.spUUID)
- try:
- file = open(self._poolFile, "r")
- for line in file:
- pair = line.strip().split("=")
- if len(pair) == 2:
- if pair[0] == "id":
- hostId = int(pair[1])
- elif pair[0] == "scsiKey":
- scsiKey = pair[1]
- elif pair[0] == "sdUUID":
- msdUUID = pair[1]
- elif pair[0] == "version":
- masterVersion = pair[1]
- file.close()
- if not (hostId and scsiKey and msdUUID and masterVersion):
- os.unlink(self._poolFile)
- return False
- if self.connect(hostId, scsiKey, msdUUID, masterVersion):
- return True
- except:
- self.log.error("RECONNECT: Failed: %s", self.spUUID, exc_info=True)
- os.unlink(self._poolFile)
- raise
+ def getPoolParams(self):
+ file = open(self._poolFile, "r")
+ for line in file:
+ pair = line.strip().split("=")
+ if len(pair) == 2:
+ if pair[0] == "id":
+ hostId = int(pair[1])
+ elif pair[0] == "scsiKey":
+ scsiKey = pair[1]
+ elif pair[0] == "sdUUID":
+ msdUUID = pair[1]
+ elif pair[0] == "version":
+ masterVersion = pair[1]
+ file.close()
+
+ return hostId, scsiKey, msdUUID, masterVersion
def createMaster(self, poolName, domain, masterVersion, leaseParams):
12 years, 1 month