Allon Mureinik has uploaded a new change for review.

Change subject: Revert "spbackends: Remove StoragePoolDiskBackend"
......................................................................
Revert "spbackends: Remove StoragePoolDiskBackend" This reverts commit 6049cecd583997c34f98c78cbe318f0476315ff3. That commit did not not properly handle creating new storage pools, and needs to be reverted. Change-Id: I1c3df9b3d8ddf4b243b43d520bafa34117daaae2 Signed-off-by: Allon Mureinik <amure...@redhat.com> --- M lib/api/vdsm-api.yml M lib/vdsm/rpc/bindingxmlrpc.py M vdsm/API.py M vdsm/storage/hsm.py M vdsm/storage/spbackends.py 5 files changed, 239 insertions(+), 13 deletions(-) git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/56/60256/1 diff --git a/lib/api/vdsm-api.yml b/lib/api/vdsm-api.yml index de658b8..eeade4e 100644 --- a/lib/api/vdsm-api.yml +++ b/lib/api/vdsm-api.yml @@ -8256,7 +8256,9 @@ name: masterVersion type: int - - description: The Storage Domain statuses + - defaultvalue: null + description: The Storage Domain statuses. If this argument is present + the Storage Pool will use the memory backend (no metadata). name: domainDict added: '3.4' type: *StorageDomainStatusMap diff --git a/lib/vdsm/rpc/bindingxmlrpc.py b/lib/vdsm/rpc/bindingxmlrpc.py index f3eb288..1d31033 100644 --- a/lib/vdsm/rpc/bindingxmlrpc.py +++ b/lib/vdsm/rpc/bindingxmlrpc.py @@ -800,7 +800,7 @@ return image.reconcileVolumeChain(leafUUID) def poolConnect(self, spUUID, hostID, scsiKey, msdUUID, masterVersion, - domainsMap, options=None): + domainsMap=None, options=None): pool = API.StoragePool(spUUID) return pool.connect(hostID, scsiKey, msdUUID, masterVersion, domainsMap) diff --git a/vdsm/API.py b/vdsm/API.py index 60fb7a2..fe9e429 100644 --- a/vdsm/API.py +++ b/vdsm/API.py @@ -1114,7 +1114,7 @@ # scsiKey not used def connect(self, hostID, scsiKey, masterSdUUID, masterVersion, - domainDict): + domainDict=None): return self._irs.connectStoragePool( self._UUID, hostID, masterSdUUID, masterVersion, domainDict) diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py index 55cb866..7d834c3 100644 --- a/vdsm/storage/hsm.py +++ b/vdsm/storage/hsm.py @@ -58,6 +58,7 @@ import sp from spbackends import MAX_POOL_DESCRIPTION_SIZE, MAX_DOMAINS +from spbackends import StoragePoolDiskBackend from spbackends import StoragePoolMemoryBackend import monitor import sd @@ -948,14 +949,14 @@ vars.task.getExclusiveLock(STORAGE, dom) pool = sp.StoragePool(spUUID, self.domainMonitor, self.taskMng) - pool.setBackend(StoragePoolMemoryBackend(pool)) + pool.setBackend(StoragePoolDiskBackend(pool)) return pool.create(poolName, masterDom, domList, masterVersion, leaseParams) @public def connectStoragePool(self, spUUID, hostID, msdUUID, masterVersion, - domainsMap, options=None): + domainsMap=None, options=None): """ Connect a Host to a specific storage pool. 
@@ -992,15 +993,21 @@
                           "hostId=%s, newHostId=%s" % (pool.id, hostId))

         if domainsMap is None:
-            raise se.InvalidParameterException("domainsMap", domainsMap)
-
-        pool.getBackend().updateVersionAndDomains(
-            masterVersion, domainsMap)
+            if not isinstance(pool.getBackend(), StoragePoolDiskBackend):
+                raise se.StoragePoolConnected('Cannot downgrade pool backend')
+        else:
+            if isinstance(pool.getBackend(), StoragePoolMemoryBackend):
+                pool.getBackend().updateVersionAndDomains(
+                    masterVersion, domainsMap)
+            else:
+                # Live pool backend upgrade
+                pool.setBackend(
+                    StoragePoolMemoryBackend(pool, masterVersion, domainsMap))

         pool.refresh(msdUUID, masterVersion)

     def _connectStoragePool(self, spUUID, hostID, msdUUID,
-                            masterVersion, domainsMap, options=None):
+                            masterVersion, domainsMap=None, options=None):
         misc.validateUUID(spUUID, 'spUUID')

         # TBD: To support multiple pool connection on single host,
@@ -1034,8 +1041,13 @@
             return True

         pool = sp.StoragePool(spUUID, self.domainMonitor, self.taskMng)
-        pool.setBackend(
-            StoragePoolMemoryBackend(pool, masterVersion, domainsMap))
+        pool.backend = StoragePoolDiskBackend(pool)
+
+        if domainsMap is None:
+            pool.setBackend(StoragePoolDiskBackend(pool))
+        else:
+            pool.setBackend(
+                StoragePoolMemoryBackend(pool, masterVersion, domainsMap))

         res = pool.connect(hostID, msdUUID, masterVersion)
         if res:
@@ -1890,7 +1902,7 @@
             pool = self.getPool(spUUID)
         except se.StoragePoolUnknown:
             pool = sp.StoragePool(spUUID, self.domainMonitor, self.taskMng)
-            pool.setBackend(StoragePoolMemoryBackend(pool))
+            pool.setBackend(StoragePoolDiskBackend(pool))
         else:
             raise se.StoragePoolConnected(spUUID)

@@ -3319,6 +3331,10 @@
                 se.SpmFenceError("spUUID=%s, lastOwner=%s, lastLver=%s" %
                                  (spUUID, lastOwner, lastLver)))
         pool = self.getPool(spUUID)
+        if isinstance(pool.getBackend(), StoragePoolDiskBackend):
+            pool.getBackend().invalidateMetadata()
+            vars.task.getExclusiveLock(STORAGE, spUUID)
+            pool.getBackend().forceFreeSpm()

         return dict(spm_st=self._getSpmStatusInfo(pool))

     @public
diff --git a/vdsm/storage/spbackends.py b/vdsm/storage/spbackends.py
index cf227f8..505a988 100644
--- a/vdsm/storage/spbackends.py
+++ b/vdsm/storage/spbackends.py
@@ -22,6 +22,8 @@
 import weakref

 from vdsm.storage import exception as se
+from vdsm.storage import misc
+from vdsm.storage.persistent import DictValidator
 from vdsm.storage.persistent import unicodeDecoder
 from vdsm.storage.persistent import unicodeEncoder
 from vdsm.storage.securable import secured
@@ -31,6 +33,8 @@

 import sd
 from sp import LVER_INVALID
+from sp import SPM_ACQUIRED
+from sp import SPM_FREE
 from sp import SPM_ID_FREE

 from vdsm.config import config
@@ -192,6 +196,209 @@


 @secured
+class StoragePoolDiskBackend(StoragePoolBackendInterface):
+
+    __slots__ = ('pool',)
+
+    log = logging.getLogger('Storage.StoragePoolDiskBackend')
+
+    def __init__(self, pool):
+        self.pool = weakref.proxy(pool)
+
+    # Read-Only StoragePool Object Accessors ###
+
+    def __is_secure__(self):
+        return self.pool.isSecure()
+
+    @property
+    def id(self):
+        return self.pool.id
+
+    @property
+    def spmRole(self):
+        return self.pool.spmRole
+
+    @property
+    def spUUID(self):
+        return self.pool.spUUID
+
+    @property
+    def masterDomain(self):
+        return self.pool.masterDomain
+
+    # StoragePool Backend Interface Implementation ###
+
+    @unsecured
+    def getSpmStatus(self):
+        poolMeta = self._getPoolMD(self.masterDomain)
+
+        # if we claim that we were the SPM (but we're currently not) we
+        # have to make sure that we're not returning stale data
+        if (poolMeta[PMDK_SPM_ID] == self.id
+                and not self.spmRole == SPM_ACQUIRED):
+            self.invalidateMetadata()
+            poolMeta = self._getPoolMD(self.masterDomain)
+
+        return poolMeta[PMDK_LVER], poolMeta[PMDK_SPM_ID]
+
+    def setSpmStatus(self, lVer=None, spmId=None):
+        self.invalidateMetadata()
+        metaParams = dict(filter(lambda kv: kv[1] is not None,
+                                 ((PMDK_LVER, lVer), (PMDK_SPM_ID, spmId))))
+        self._metadata.update(metaParams)
+
+    @unsecured
+    def getDomainsMap(self):
+        # The assumption is that whenever the storage pool metadata changes
+        # the HSM hosts will receive refreshStoragePool (and the metadata will
+        # be invalidated). So the invalidation in this method may be redundant
+        # or it was introduced to handle negative flows (missed refresh call).
+        # Anyway I think that we could get rid of this in the future, provided
+        # that the engine handles/resends failed refreshStoragePool calls.
+        self.invalidateMetadata()
+        return self.getMetaParam(PMDK_DOMAINS)
+
+    def setDomainsMap(self, domains):
+        self.setMetaParam(PMDK_DOMAINS, domains)
+
+    @unsecured
+    def getMaximumSupportedDomains(self):
+        msdInfo = self.masterDomain.getInfo()
+        msdType = sd.name2type(msdInfo["type"])
+        msdVersion = int(msdInfo["version"])
+        if msdType in sd.BLOCK_DOMAIN_TYPES and \
+                msdVersion in blockSD.VERS_METADATA_LV:
+            return MAX_DOMAINS
+        else:
+            return config.getint("irs", "maximum_domains_in_pool")
+
+    @unsecured
+    def getMasterVersion(self):
+        return self.getMetaParam(PMDK_MASTER_VER)
+
+    @unsecured
+    def validateMasterDomainVersion(self, masterDomain, masterVersion):
+        version = self._getPoolMD(masterDomain)[PMDK_MASTER_VER]
+        if version != int(masterVersion):
+            self.log.error("Requested master domain %s does not have expected "
+                           "version %s it is version %s",
+                           masterDomain.sdUUID, masterVersion, version)
+            raise se.StoragePoolWrongMaster(self.spUUID, masterDomain.sdUUID)
+
+    # TODO: evaluate if it is possible to remove this from the backends and
+    # just use domain.changeRole(...) in the StoragePool class
+    def setDomainRegularRole(self, domain):
+        poolMetadata = self._getPoolMD(domain)
+        # TODO: consider to remove the transaction (and this method as well)
+        # since setting the version to 0 may be useless.
+        with poolMetadata.transaction():
+            poolMetadata[PMDK_MASTER_VER] = 0
+            domain.changeRole(sd.REGULAR_DOMAIN)
+
+    @unsecured
+    def initParameters(self, poolName, domain, masterVersion):
+        self._getPoolMD(domain).update({
+            PMDK_SPM_ID: SPM_ID_FREE,
+            PMDK_LVER: LVER_INVALID,
+            PMDK_MASTER_VER: masterVersion,
+            PMDK_POOL_DESCRIPTION: poolName,
+            PMDK_DOMAINS: {domain.sdUUID: sd.DOM_ACTIVE_STATUS},
+        })
+
+    def switchMasterDomain(self, curMasterDomain, newMasterDomain,
+                           newMasterVersion):
+        curPoolMD = self._getPoolMD(curMasterDomain)
+        newPoolMD = self._getPoolMD(newMasterDomain)
+
+        newPoolMD.update({
+            PMDK_DOMAINS: curPoolMD[PMDK_DOMAINS],
+            PMDK_POOL_DESCRIPTION: curPoolMD[PMDK_POOL_DESCRIPTION],
+            PMDK_LVER: curPoolMD[PMDK_LVER],
+            PMDK_SPM_ID: curPoolMD[PMDK_SPM_ID],
+            PMDK_MASTER_VER: newMasterVersion,
+        })
+
+    @unsecured
+    def getInfo(self):
+        try:
+            pmd = self._getPoolMD(self.masterDomain)
+        except Exception:
+            self.log.error("Pool metadata error", exc_info=True)
+            raise se.StoragePoolActionError(self.spUUID)
+
+        return {
+            'name': pmd[PMDK_POOL_DESCRIPTION],
+            'domains': _domainListEncoder(pmd[PMDK_DOMAINS]),
+            'master_ver': pmd[PMDK_MASTER_VER],
+            'lver': pmd[PMDK_LVER],
+            'spm_id': pmd[PMDK_SPM_ID],
+        }
+
+    # Backend Specific Methods
+
+    @unsecured
+    def forceFreeSpm(self):
+        # DO NOT USE, STUPID, HERE ONLY FOR BC
+        # TODO: SCSI Fence the 'lastOwner'
+        self.setSpmStatus(LVER_INVALID, SPM_ID_FREE, __securityOverride=True)
+        self.pool.spmRole = SPM_FREE
+
+    @classmethod
+    def _getPoolMD(cls, domain):
+        # This might look disgusting but this makes it so that
+        # This is the only intrusion needed to satisfy the
+        # unholy union between pool and SD metadata
+        return DictValidator(domain._metadata._dict, SP_MD_FIELDS)
+
+    @property
+    def _metadata(self):
+        return self._getPoolMD(self.masterDomain)
+
+    @unsecured
+    def getMetaParam(self, key):
+        """
+        Get parameter from pool metadata file
+        """
+        return self._metadata[key]
+
+    def setMetaParam(self, key, value):
+        """
+        Set key:value in pool metadata file
+        """
+        self._metadata[key] = value
+
+    @unsecured
+    def getDescription(self):
+        try:
+            return self.getMetaParam(PMDK_POOL_DESCRIPTION)
+        # There was a bug that cause pool description to
+        # disappear. Returning "" might be ugly but it keeps
+        # everyone happy.
+        except KeyError:
+            return ""
+
+    def setDescription(self, descr):
+        """
+        Set storage pool description.
+        'descr' - pool description
+        """
+        if len(descr) > MAX_POOL_DESCRIPTION_SIZE:
+            raise se.StoragePoolDescriptionTooLongError()
+
+        self.log.info("spUUID=%s descr=%s", self.spUUID, descr)
+
+        if not misc.isAscii(descr) and not self.masterDomain.supportsUnicode():
+            raise se.UnicodeArgumentException()
+
+        self.setMetaParam(PMDK_POOL_DESCRIPTION, descr)
+
+    @unsecured
+    def invalidateMetadata(self):
+        if not self.spmRole == SPM_ACQUIRED:
+            self._metadata.invalidate()
+
+
+@secured
 class StoragePoolMemoryBackend(StoragePoolBackendInterface):

     __slots__ = ('pool', 'masterVersion', 'domainsMap')
@@ -219,6 +426,7 @@

     @unsecured
     def getSpmStatus(self):
+        # FIXME: unify with StoragePoolDiskBackend
         lVer, spmId = self.masterDomain.inquireClusterLock()
         return lVer or LVER_INVALID, spmId or SPM_ID_FREE

--
To view, visit https://gerrit.ovirt.org/60256
To unsubscribe, visit https://gerrit.ovirt.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: I1c3df9b3d8ddf4b243b43d520bafa34117daaae2
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Allon Mureinik <amure...@redhat.com>
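
For reviewers who want a quick feel for the behaviour this revert restores, the key point is the backend
selection in connectStoragePool: a null domainsMap selects the on-disk metadata backend
(StoragePoolDiskBackend), a populated map selects the in-memory backend, and a connected pool may be
upgraded live from the disk backend to the memory backend but never downgraded. The sketch below is a
simplified, self-contained illustration of that dispatch only; the class and exception names mirror the
patch, but the bodies are stand-ins rather than vdsm code.

    # Illustrative sketch only -- simplified stand-ins for the vdsm classes,
    # showing the domainsMap-based backend selection this revert restores.


    class StoragePoolConnected(Exception):
        """Stand-in for se.StoragePoolConnected."""


    class StoragePoolDiskBackend(object):
        """Stand-in: reads pool metadata from the master storage domain."""
        def __init__(self, pool):
            self.pool = pool


    class StoragePoolMemoryBackend(object):
        """Stand-in: keeps the masterVersion/domainsMap supplied by the engine."""
        def __init__(self, pool, masterVersion, domainsMap):
            self.pool = pool
            self.masterVersion = masterVersion
            self.domainsMap = domainsMap

        def updateVersionAndDomains(self, masterVersion, domainsMap):
            self.masterVersion = masterVersion
            self.domainsMap = domainsMap


    class Pool(object):
        """Minimal stand-in for sp.StoragePool."""
        def __init__(self):
            self.backend = None

        def setBackend(self, backend):
            self.backend = backend

        def getBackend(self):
            return self.backend


    def select_backend(pool, masterVersion, domainsMap):
        """First connection: None -> disk backend, a map -> memory backend."""
        if domainsMap is None:
            pool.setBackend(StoragePoolDiskBackend(pool))
        else:
            pool.setBackend(
                StoragePoolMemoryBackend(pool, masterVersion, domainsMap))


    def reconnect_backend(pool, masterVersion, domainsMap):
        """Already-connected pool: update, upgrade live, or refuse a downgrade."""
        if domainsMap is None:
            # A pool already on the memory backend may not be downgraded.
            if not isinstance(pool.getBackend(), StoragePoolDiskBackend):
                raise StoragePoolConnected('Cannot downgrade pool backend')
        elif isinstance(pool.getBackend(), StoragePoolMemoryBackend):
            pool.getBackend().updateVersionAndDomains(masterVersion, domainsMap)
        else:
            # Live upgrade from the disk backend to the memory backend.
            pool.setBackend(
                StoragePoolMemoryBackend(pool, masterVersion, domainsMap))


    if __name__ == '__main__':
        pool = Pool()
        select_backend(pool, 1, None)                      # caller omitted the map
        reconnect_backend(pool, 2, {'sd-uuid': 'Active'})  # upgraded live
        print(type(pool.getBackend()).__name__)            # StoragePoolMemoryBackend

This is also why the signatures in bindingxmlrpc.py, API.py and hsm.py regain a domainsMap=None default:
callers that omit the argument keep getting the disk-backed behaviour.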