Nir Soffer has uploaded a new change for review.

Change subject: resourcemanager: Use new module api
......................................................................

resourcemanager: Use new module api

Change client code to call the resourceManager module directly instead
of going through ResourceManager.getInstance().

After this patch, only the resourceManager and resourceFactories
modules still use the old getInstance API.
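
To illustrate the call-site change, here is a minimal sketch based on
the hunks below; "namespace" and "resource_id" are placeholder names,
while acquireResource and rm.EXCLUSIVE are the real module attributes:

    # Before: fetch the singleton and call methods on it
    rmanager = rm.ResourceManager.getInstance()
    with rmanager.acquireResource(namespace, resource_id, rm.EXCLUSIVE):
        pass

    # After: call the module-level function directly
    with rm.acquireResource(namespace, resource_id, rm.EXCLUSIVE):
        pass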

Tests that used to mock a module's "rmanager" variable now mock the
"rm" variable, and storagefakelib.FakeResourceManager now implements
the resourceManager module API instead of the
resourceManager.ResourceManager class. This seems to be the quickest
way to keep these tests working, but we may use a different approach
later.
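
As a hedged sketch of the resulting test pattern, assuming a
hypothetical "module_under_test" that imports resourceManager as rm
(MonkeyPatchScope and FakeResourceManager are the real test helpers
used in this patch):

    fake = FakeResourceManager()
    # Before this patch, tests patched the "rmanager" name instead:
    #     (module_under_test, 'rmanager', fake)
    with MonkeyPatchScope([(module_under_test, 'rm', fake)]):
        # Code under test now sees fake.acquireResource and the
        # SHARED/EXCLUSIVE constants the fake exposes.
        pass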

Change-Id: Ib33d9fad90551a0c2e9b1e8a89c5f9f920681c6b
Signed-off-by: Nir Soffer <nsof...@redhat.com>
---
M tests/storage_sdm_copy_data_test.py
M tests/storage_sdm_create_volume_test.py
M tests/storagefakelib.py
M vdsm/storage/blockSD.py
M vdsm/storage/blockVolume.py
M vdsm/storage/hsm.py
M vdsm/storage/image.py
M vdsm/storage/sd.py
M vdsm/storage/sdm/api/create_volume.py
M vdsm/storage/sp.py
M vdsm/storage/volume.py
11 files changed, 73 insertions(+), 91 deletions(-)


  git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/35/65035/1

diff --git a/tests/storage_sdm_copy_data_test.py b/tests/storage_sdm_copy_data_test.py
index db4d99e..d60936a 100644
--- a/tests/storage_sdm_copy_data_test.py
+++ b/tests/storage_sdm_copy_data_test.py
@@ -82,7 +82,7 @@
             with MonkeyPatchScope([
                 (guarded, 'context', fake_guarded_context()),
                 (storage.sdm.api.copy_data, 'sdCache', env.sdcache),
-                (blockVolume, 'rmanager', rm),
+                (blockVolume, 'rm', rm),
             ]):
                 src_vols = make_qemu_chain(env, size, src_fmt, chain_length)
                 dst_vols = make_qemu_chain(env, size, dst_fmt, chain_length)
diff --git a/tests/storage_sdm_create_volume_test.py b/tests/storage_sdm_create_volume_test.py
index 9c9295f..0fb7e4b 100644
--- a/tests/storage_sdm_create_volume_test.py
+++ b/tests/storage_sdm_create_volume_test.py
@@ -103,7 +103,7 @@
     @contextmanager
     def _fake_env(self):
         self.rm = FakeResourceManager()
-        with MonkeyPatchScope([(storage.sdm.api.create_volume, 'rmanager',
+        with MonkeyPatchScope([(storage.sdm.api.create_volume, 'rm',
                                 self.rm)]):
             yield
 
diff --git a/tests/storagefakelib.py b/tests/storagefakelib.py
index 7ea0cee..3fff119 100644
--- a/tests/storagefakelib.py
+++ b/tests/storagefakelib.py
@@ -30,6 +30,7 @@
 from vdsm import utils
 
 from storage import lvm as real_lvm
+from storage import resourceManager as rm
 
 
 class FakeLVM(object):
@@ -278,6 +279,9 @@
 
 class FakeResourceManager(object):
 
+    SHARED = rm.SHARED
+    EXCLUSIVE = rm.EXCLUSIVE
+
     @recorded
     @contextmanager
     def acquireResource(self, *args, **kwargs):
diff --git a/vdsm/storage/blockSD.py b/vdsm/storage/blockSD.py
index 0f094fd..f39d41a 100644
--- a/vdsm/storage/blockSD.py
+++ b/vdsm/storage/blockSD.py
@@ -862,15 +862,13 @@
         """
         sd.StorageDomain._registerResourceNamespaces(self)
 
-        rmanager = rm.ResourceManager.getInstance()
         # Register lvm activation resource namespace for the underlying VG
         lvmActivationFactory = resourceFactories.LvmActivationFactory(
             self.sdUUID)
         lvmActivationNamespace = sd.getNamespace(sc.LVM_ACTIVATION_NAMESPACE,
                                                  self.sdUUID)
         try:
-            rmanager.registerNamespace(lvmActivationNamespace,
-                                       lvmActivationFactory)
+            rm.registerNamespace(lvmActivationNamespace, lvmActivationFactory)
         except KeyError:
             self.log.info("Resource namespace %s already registered",
                           lvmActivationNamespace)
diff --git a/vdsm/storage/blockVolume.py b/vdsm/storage/blockVolume.py
index 6d68d26..c6d128a 100644
--- a/vdsm/storage/blockVolume.py
+++ b/vdsm/storage/blockVolume.py
@@ -60,7 +60,6 @@
 RESERVED_LEASES = 100
 
 log = logging.getLogger('storage.Volume')
-rmanager = rm.ResourceManager.getInstance()
 
 
 class BlockVolumeManifest(volume.VolumeManifest):
@@ -372,8 +371,8 @@
         if setrw:
             self.setrw(rw=rw)
         access = rm.EXCLUSIVE if rw else rm.SHARED
-        activation = rmanager.acquireResource(self.lvmActivationNamespace,
-                                              self.volUUID, access)
+        activation = rm.acquireResource(self.lvmActivationNamespace,
+                                        self.volUUID, access)
         activation.autoRelease = False
 
     @classmethod
@@ -387,7 +386,7 @@
                      % (sdUUID, volUUID, justme))
         lvmActivationNamespace = sd.getNamespace(sc.LVM_ACTIVATION_NAMESPACE,
                                                  sdUUID)
-        rmanager.releaseResource(lvmActivationNamespace, volUUID)
+        rm.releaseResource(lvmActivationNamespace, volUUID)
         if not justme:
             try:
                 pvolUUID = getVolumeTag(sdUUID, volUUID, sc.TAG_PREFIX_PARENT)
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index 106eb27..e1d6d1e 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -94,9 +94,6 @@
     logUtils.logcall, "dispatcher", "Run and protect: %s",
     resPattern="Run and protect: %(name)s, Return response: %(result)s")
 
-rmanager = rm.ResourceManager.getInstance()
-
-
 STORAGE_CONNECTION_DIR = os.path.join(constants.P_VDSM_LIB, "connections/")
 
 QEMU_READABLE_TIMEOUT = 30
@@ -337,8 +334,7 @@
         :type defExcFun: function
         """
         self._ready = False
-        rm.ResourceManager.getInstance().registerNamespace(
-            STORAGE, rm.SimpleResourceFactory())
+        rm.registerNamespace(STORAGE, rm.SimpleResourceFactory())
         self.storage_repository = config.get('irs', 'repository')
         self.taskMng = taskManager.TaskManager()
 
@@ -976,7 +972,7 @@
                 "spUUID=%s, msdUUID=%s, masterVersion=%s, hostID=%s, "
                 "domainsMap=%s" %
                 (spUUID, msdUUID, masterVersion, hostID, domainsMap)))
-        with rmanager.acquireResource(STORAGE, HSM_DOM_MON_LOCK, rm.EXCLUSIVE):
+        with rm.acquireResource(STORAGE, HSM_DOM_MON_LOCK, rm.EXCLUSIVE):
             return self._connectStoragePool(
                 spUUID, hostID, msdUUID, masterVersion, domainsMap)
 
@@ -1014,7 +1010,7 @@
         except se.StoragePoolUnknown:
             pass  # pool not connected yet
         else:
-            with rmanager.acquireResource(STORAGE, spUUID, rm.SHARED):
+            with rm.acquireResource(STORAGE, spUUID, rm.SHARED):
                 # FIXME: this breaks in case of a race as it assumes that the
                 # pool is still available. At the moment we maintain this
                 # behavior as it's inherited from the previous implementation
@@ -1024,7 +1020,7 @@
                                         masterVersion, domainsMap)
                 return True
 
-        with rmanager.acquireResource(STORAGE, spUUID, rm.EXCLUSIVE):
+        with rm.acquireResource(STORAGE, spUUID, rm.EXCLUSIVE):
             try:
                 pool = self.getPool(spUUID)
             except se.StoragePoolUnknown:
@@ -1090,7 +1086,7 @@
 
     def _disconnectPool(self, pool, hostID, remove):
         pool.validateNotSPM()
-        with rmanager.acquireResource(STORAGE, HSM_DOM_MON_LOCK, rm.EXCLUSIVE):
+        with rm.acquireResource(STORAGE, HSM_DOM_MON_LOCK, rm.EXCLUSIVE):
             res = pool.disconnect()
             del self.pools[pool.spUUID]
         return res
@@ -1814,8 +1810,7 @@
         repoPath = os.path.join(self.storage_repository, sdDom.getPools()[0])
 
         imageResourcesNamespace = sd.getNamespace(sc.IMAGE_NAMESPACE, sdUUID)
-        with rmanager.acquireResource(imageResourcesNamespace, imgUUID,
-                                      rm.SHARED):
+        with rm.acquireResource(imageResourcesNamespace, imgUUID, rm.SHARED):
             image.Image(repoPath).syncVolumeChain(sdUUID, imgUUID, volUUID,
                                                   newChain)
 
@@ -3235,7 +3230,7 @@
         for sdUUID in activeDoms:
             dom = sdCache.produce(sdUUID=sdUUID)
             if dom.isData():
-                with rmanager.acquireResource(STORAGE, sdUUID, rm.SHARED):
+                with rm.acquireResource(STORAGE, sdUUID, rm.SHARED):
                     try:
                         imgs = dom.getAllImages()
                     except se.StorageDomainDoesNotExist:
@@ -3464,7 +3459,7 @@
     @deprecated
     @public
     def startMonitoringDomain(self, sdUUID, hostID, options=None):
-        with rmanager.acquireResource(STORAGE, HSM_DOM_MON_LOCK, rm.EXCLUSIVE):
+        with rm.acquireResource(STORAGE, HSM_DOM_MON_LOCK, rm.EXCLUSIVE):
             # Note: We cannot raise here StorageDomainIsMemberOfPool, as it
             # will break old hosted engine agent.
             self.domainMonitor.startMonitoring(sdUUID, int(hostID), False)
@@ -3472,7 +3467,7 @@
     @deprecated
     @public
     def stopMonitoringDomain(self, sdUUID, options=None):
-        with rmanager.acquireResource(STORAGE, HSM_DOM_MON_LOCK, rm.EXCLUSIVE):
+        with rm.acquireResource(STORAGE, HSM_DOM_MON_LOCK, rm.EXCLUSIVE):
             if sdUUID in self.domainMonitor.poolDomains:
                 raise se.StorageDomainIsMemberOfPool(sdUUID)
             self.domainMonitor.stopMonitoring([sdUUID])
diff --git a/vdsm/storage/image.py b/vdsm/storage/image.py
index ccaf971..e98ca9b 100644
--- a/vdsm/storage/image.py
+++ b/vdsm/storage/image.py
@@ -43,7 +43,6 @@
 import resourceManager as rm
 
 log = logging.getLogger('storage.Image')
-rmanager = rm.ResourceManager.getInstance()
 
 # Disk type
 UNKNOWN_DISK_TYPE = 0
@@ -376,8 +375,7 @@
         dstImageResourcesNamespace = sd.getNamespace(sc.IMAGE_NAMESPACE,
                                                      destDom.sdUUID)
         # In destination domain we need to lock image's template if exists
-        with rmanager.acquireResource(dstImageResourcesNamespace, pimg,
-                                      rm.SHARED) \
+        with rm.acquireResource(dstImageResourcesNamespace, pimg, rm.SHARED) \
                 if pimg != sc.BLANK_UUID else justLogIt(imgUUID):
             if fakeTemplate:
                 self.createFakeTemplate(destDom.sdUUID, volParams)
diff --git a/vdsm/storage/sd.py b/vdsm/storage/sd.py
index 25bf1a4..7fa02b7 100644
--- a/vdsm/storage/sd.py
+++ b/vdsm/storage/sd.py
@@ -641,14 +641,12 @@
         Register resources namespaces and create
         factories for it.
         """
-        rmanager = rm.ResourceManager.getInstance()
         # Register image resource namespace
         imageResourceFactory = \
             resourceFactories.ImageResourceFactory(self.sdUUID)
         imageResourcesNamespace = getNamespace(sc.IMAGE_NAMESPACE, self.sdUUID)
         try:
-            rmanager.registerNamespace(imageResourcesNamespace,
-                                       imageResourceFactory)
+            rm.registerNamespace(imageResourcesNamespace, imageResourceFactory)
         except KeyError:
             self.log.info("Resource namespace %s already registered",
                           imageResourcesNamespace)
@@ -656,8 +654,8 @@
         volumeResourcesNamespace = getNamespace(sc.VOLUME_NAMESPACE,
                                                 self.sdUUID)
         try:
-            rmanager.registerNamespace(volumeResourcesNamespace,
-                                       rm.SimpleResourceFactory())
+            rm.registerNamespace(volumeResourcesNamespace,
+                                 rm.SimpleResourceFactory())
         except KeyError:
             self.log.info("Resource namespace %s already registered",
                           volumeResourcesNamespace)
diff --git a/vdsm/storage/sdm/api/create_volume.py b/vdsm/storage/sdm/api/create_volume.py
index c400dc0..ea8d7de 100644
--- a/vdsm/storage/sdm/api/create_volume.py
+++ b/vdsm/storage/sdm/api/create_volume.py
@@ -29,8 +29,6 @@
 
 from . import base
 
-rmanager = rm.ResourceManager.getInstance()
-
 
 class Job(base.Job):
     def __init__(self, job_id, host_id, sd_manifest, vol_info):
@@ -44,8 +42,8 @@
         with self.sd_manifest.domain_lock(self.host_id):
             image_res_ns = sd.getNamespace(sc.IMAGE_NAMESPACE,
                                            self.sd_manifest.sdUUID)
-            with rmanager.acquireResource(image_res_ns, self.vol_info.img_id,
-                                          rm.EXCLUSIVE):
+            with rm.acquireResource(image_res_ns, self.vol_info.img_id,
+                                    rm.EXCLUSIVE):
                 artifacts = self.sd_manifest.get_volume_artifacts(
                     self.vol_info.img_id, self.vol_info.vol_id)
                 artifacts.create(
diff --git a/vdsm/storage/sp.py b/vdsm/storage/sp.py
index 0eda145..b56792a 100644
--- a/vdsm/storage/sp.py
+++ b/vdsm/storage/sp.py
@@ -52,8 +52,6 @@
 
 POOL_MASTER_DOMAIN = 'mastersd'
 
-rmanager = rm.ResourceManager.getInstance()
-
 SPM_ACQUIRED = 'SPM'
 SPM_CONTEND = 'Contend'
 SPM_FREE = 'Free'
@@ -140,14 +138,14 @@
             return
 
         domain = sdCache.produce(sdUUID)
-        with rmanager.acquireResource(sc.STORAGE, self.spUUID, rm.SHARED):
+        with rm.acquireResource(sc.STORAGE, self.spUUID, rm.SHARED):
             if sdUUID not in self.getDomains(activeOnly=True):
                 self.log.debug("Domain %s is not an active pool domain, "
                                "skipping domain links refresh",
                                sdUUID)
                 return
-            with rmanager.acquireResource(sc.STORAGE, sdUUID + "_repo",
-                                          rm.EXCLUSIVE):
+            with rm.acquireResource(sc.STORAGE, sdUUID + "_repo",
+                                    rm.EXCLUSIVE):
                 self.log.debug("Refreshing domain links for %s", sdUUID)
                 self._refreshDomainLinks(domain)
 
@@ -173,9 +171,9 @@
                            exc_info=True)
             return
 
-        with rmanager.acquireResource(sc.STORAGE, "upgrade_" + self.spUUID,
-                                      rm.SHARED):
-            with rmanager.acquireResource(sc.STORAGE, sdUUID, rm.EXCLUSIVE):
+        with rm.acquireResource(sc.STORAGE, "upgrade_" + self.spUUID,
+                                rm.SHARED):
+            with rm.acquireResource(sc.STORAGE, sdUUID, rm.EXCLUSIVE):
                 if sdUUID not in self._domainsToUpgrade:
                     return
 
@@ -347,8 +345,8 @@
     @unsecured
     def _shutDownUpgrade(self):
         self.log.debug("Shutting down upgrade process")
-        with rmanager.acquireResource(sc.STORAGE, "upgrade_" + self.spUUID,
-                                      rm.EXCLUSIVE):
+        with rm.acquireResource(sc.STORAGE, "upgrade_" + self.spUUID,
+                                rm.EXCLUSIVE):
             try:
                 self.domainMonitor.onDomainStateChange.unregister(
                     self._upgradeCallback)
@@ -418,14 +416,13 @@
 
     def _upgradePool(self, targetDomVersion, lockTimeout=None):
         try:
-            with rmanager.acquireResource(sc.STORAGE, "upgrade_" + self.spUUID,
-                                          rm.EXCLUSIVE, timeout=lockTimeout):
+            with rm.acquireResource(sc.STORAGE, "upgrade_" + self.spUUID,
+                                    rm.EXCLUSIVE, timeout=lockTimeout):
                 sd.validateDomainVersion(targetDomVersion)
                 self.log.info("Trying to upgrade master domain `%s`",
                               self.masterDomain.sdUUID)
-                with rmanager.acquireResource(sc.STORAGE,
-                                              self.masterDomain.sdUUID,
-                                              rm.EXCLUSIVE):
+                with rm.acquireResource(sc.STORAGE, self.masterDomain.sdUUID,
+                                        rm.EXCLUSIVE):
                     self._convertDomain(self.masterDomain,
                                         str(targetDomVersion))
 
@@ -1346,8 +1343,8 @@
 
     def extendVolumeSize(self, sdUUID, imgUUID, volUUID, newSize):
         imageResourcesNamespace = sd.getNamespace(sc.IMAGE_NAMESPACE, sdUUID)
-        with rmanager.acquireResource(imageResourcesNamespace, imgUUID,
-                                      rm.EXCLUSIVE):
+        with rm.acquireResource(imageResourcesNamespace, imgUUID,
+                                rm.EXCLUSIVE):
             return sdCache.produce(sdUUID) \
                 .produceVolume(imgUUID, volUUID).extendSize(int(newSize))
 
@@ -1535,10 +1532,10 @@
         else:
             dstImageResourcesNamespace = srcImageResourcesNamespace
 
-        with nested(rmanager.acquireResource(srcImageResourcesNamespace,
-                                             srcImgUUID, rm.SHARED),
-                    rmanager.acquireResource(dstImageResourcesNamespace,
-                                             dstImgUUID, rm.EXCLUSIVE)
+        with nested(rm.acquireResource(srcImageResourcesNamespace,
+                                       srcImgUUID, rm.SHARED),
+                    rm.acquireResource(dstImageResourcesNamespace,
+                                       dstImgUUID, rm.EXCLUSIVE)
                     ):
             dstUUID = image.Image(self.poolPath).copyCollapsed(
                 sdUUID, vmUUID, srcImgUUID, srcVolUUID, dstImgUUID,
@@ -1583,10 +1580,10 @@
         else:
             raise se.MoveImageError(imgUUID)
 
-        with nested(rmanager.acquireResource(srcImageResourcesNamespace,
-                                             imgUUID, srcLock),
-                    rmanager.acquireResource(dstImageResourcesNamespace,
-                                             imgUUID, rm.EXCLUSIVE)):
+        with nested(rm.acquireResource(srcImageResourcesNamespace,
+                                       imgUUID, srcLock),
+                    rm.acquireResource(dstImageResourcesNamespace,
+                                       imgUUID, rm.EXCLUSIVE)):
             image.Image(self.poolPath).move(srcDomUUID, dstDomUUID, imgUUID,
                                             vmUUID, op, postZero, force)
 
@@ -1624,10 +1621,8 @@
         # Since source volume is only a parent of temporary volume, we don't
         # need to acquire any lock for it.
         with nested(
-                rmanager.acquireResource(srcNamespace, tmpImgUUID,
-                                         rm.EXCLUSIVE),
-                rmanager.acquireResource(dstNamespace, dstImgUUID,
-                                         rm.EXCLUSIVE)):
+                rm.acquireResource(srcNamespace, tmpImgUUID, rm.EXCLUSIVE),
+                rm.acquireResource(dstNamespace, dstImgUUID, rm.EXCLUSIVE)):
             image.Image(self.poolPath).sparsify(
                 tmpSdUUID, tmpImgUUID, tmpVolUUID, dstSdUUID, dstImgUUID,
                 dstVolUUID)
@@ -1650,7 +1645,7 @@
         dstImgResNs = sd.getNamespace(sc.IMAGE_NAMESPACE, dstSdUUID)
 
         # Preparing the ordered resource list to be acquired
-        resList = (rmanager.acquireResource(*x) for x in sorted((
+        resList = (rm.acquireResource(*x) for x in sorted((
             (srcImgResNs, imgUUID, rm.SHARED),
             (dstImgResNs, imgUUID, rm.EXCLUSIVE),
         )))
@@ -1678,7 +1673,7 @@
         dstImgResNs = sd.getNamespace(sc.IMAGE_NAMESPACE, dstSdUUID)
 
         # Preparing the ordered resource list to be acquired
-        resList = (rmanager.acquireResource(*x) for x in sorted((
+        resList = (rm.acquireResource(*x) for x in sorted((
             (srcImgResNs, imgUUID, rm.SHARED),
             (dstImgResNs, imgUUID, rm.EXCLUSIVE),
         )))
@@ -1692,7 +1687,7 @@
         Upload an image to a remote endpoint using the specified method and
         methodArgs.
         """
-        imgResourceLock = rmanager.acquireResource(
+        imgResourceLock = rm.acquireResource(
             sd.getNamespace(sc.IMAGE_NAMESPACE, sdUUID), imgUUID, rm.SHARED)
 
         with imgResourceLock:
@@ -1704,7 +1699,7 @@
         Download an image from a remote endpoint using the specified method
         and methodArgs.
         """
-        imgResourceLock = rmanager.acquireResource(
+        imgResourceLock = rm.acquireResource(
             sd.getNamespace(sc.IMAGE_NAMESPACE, sdUUID), imgUUID, rm.EXCLUSIVE)
 
         with imgResourceLock:
@@ -1720,7 +1715,7 @@
         while not startEvent.is_set():
             startEvent.wait()
 
-        imgResourceLock = rmanager.acquireResource(
+        imgResourceLock = rm.acquireResource(
             sd.getNamespace(sc.IMAGE_NAMESPACE, sdUUID), imgUUID, rm.SHARED)
 
         with imgResourceLock:
@@ -1735,7 +1730,7 @@
         """
         Download an image from a stream.
         """
-        imgResourceLock = rmanager.acquireResource(
+        imgResourceLock = rm.acquireResource(
             sd.getNamespace(sc.IMAGE_NAMESPACE, sdUUID), imgUUID, rm.EXCLUSIVE)
 
         with imgResourceLock:
@@ -1774,9 +1769,9 @@
 
         resourceList = []
         for imgUUID in imgList:
-            resourceList.append(rmanager.acquireResource(
+            resourceList.append(rm.acquireResource(
                 srcImageResourcesNamespace, imgUUID, rm.EXCLUSIVE))
-            resourceList.append(rmanager.acquireResource(
+            resourceList.append(rm.acquireResource(
                 dstImageResourcesNamespace, imgUUID, rm.EXCLUSIVE))
 
         with nested(*resourceList):
@@ -1799,8 +1794,8 @@
         :rtype: dict
         """
         imageResourcesNamespace = sd.getNamespace(sc.IMAGE_NAMESPACE, sdUUID)
-        with rmanager.acquireResource(imageResourcesNamespace, imgUUID,
-                                      rm.EXCLUSIVE):
+        with rm.acquireResource(imageResourcesNamespace, imgUUID,
+                                rm.EXCLUSIVE):
             img = image.Image(self.poolPath)
             chain = img.reconcileVolumeChain(sdUUID, imgUUID, leafVolUUID)
         return dict(volumes=chain)
@@ -1826,8 +1821,8 @@
         """
         imageResourcesNamespace = sd.getNamespace(sc.IMAGE_NAMESPACE, sdUUID)
 
-        with rmanager.acquireResource(imageResourcesNamespace, imgUUID,
-                                      rm.EXCLUSIVE):
+        with rm.acquireResource(imageResourcesNamespace, imgUUID,
+                                rm.EXCLUSIVE):
             image.Image(self.poolPath).merge(
                 sdUUID, vmUUID, imgUUID, ancestor, successor, postZero)
 
@@ -1883,9 +1878,9 @@
 
             if not srcVol.isShared():
                 if srcVol.getParent() == sc.BLANK_UUID:
-                    with rmanager.acquireResource(imageResourcesNamespace,
-                                                  srcImgUUID,
-                                                  rm.EXCLUSIVE):
+                    with rm.acquireResource(imageResourcesNamespace,
+                                            srcImgUUID,
+                                            rm.EXCLUSIVE):
 
                         self.log.debug("volume %s is not shared. "
                                        "Setting it as shared", srcVolUUID)
@@ -1893,8 +1888,8 @@
                 else:
                     raise se.VolumeNonShareable(srcVol)
 
-        with rmanager.acquireResource(imageResourcesNamespace, imgUUID,
-                                      rm.EXCLUSIVE):
+        with rm.acquireResource(imageResourcesNamespace, imgUUID,
+                                rm.EXCLUSIVE):
             newVolUUID = sdCache.produce(sdUUID).createVolume(
                 imgUUID=imgUUID, size=size, volFormat=volFormat,
                 preallocate=preallocate, diskType=diskType, volUUID=volUUID,
@@ -1922,8 +1917,8 @@
         """
         imageResourcesNamespace = sd.getNamespace(sc.IMAGE_NAMESPACE, sdUUID)
 
-        with rmanager.acquireResource(imageResourcesNamespace, imgUUID,
-                                      rm.EXCLUSIVE):
+        with rm.acquireResource(imageResourcesNamespace, imgUUID,
+                                rm.EXCLUSIVE):
             dom = sdCache.produce(sdUUID)
             for volUUID in volumes:
                 dom.produceVolume(imgUUID, volUUID).delete(
@@ -1975,8 +1970,8 @@
     def setVolumeDescription(self, sdUUID, imgUUID, volUUID, description):
         self.validatePoolSD(sdUUID)
         imageResourcesNamespace = sd.getNamespace(sc.IMAGE_NAMESPACE, sdUUID)
-        with rmanager.acquireResource(imageResourcesNamespace, imgUUID,
-                                      rm.EXCLUSIVE):
+        with rm.acquireResource(imageResourcesNamespace, imgUUID,
+                                rm.EXCLUSIVE):
             sdCache.produce(sdUUID).produceVolume(
                 imgUUID=imgUUID,
                 volUUID=volUUID).setDescription(descr=description)
@@ -1984,8 +1979,8 @@
     def setVolumeLegality(self, sdUUID, imgUUID, volUUID, legality):
         self.validatePoolSD(sdUUID)
         imageResourcesNamespace = sd.getNamespace(sc.IMAGE_NAMESPACE, sdUUID)
-        with rmanager.acquireResource(imageResourcesNamespace, imgUUID,
-                                      rm.EXCLUSIVE):
+        with rm.acquireResource(imageResourcesNamespace, imgUUID,
+                                rm.EXCLUSIVE):
             sdCache.produce(sdUUID).produceVolume(
                 imgUUID=imgUUID,
                 volUUID=volUUID).setLegality(legality=legality)
diff --git a/vdsm/storage/volume.py b/vdsm/storage/volume.py
index d2c6754..dab2e55 100644
--- a/vdsm/storage/volume.py
+++ b/vdsm/storage/volume.py
@@ -38,8 +38,6 @@
 from sdc import sdCache
 import task
 import resourceManager as rm
-rmanager = rm.ResourceManager.getInstance()
-
 
 DOMAIN_MNT_POINT = 'mnt'
 
@@ -651,8 +649,7 @@
 
         imageResourcesNamespace = sd.getNamespace(sc.IMAGE_NAMESPACE, sdUUID)
 
-        with rmanager.acquireResource(imageResourcesNamespace, srcImg,
-                                      rm.EXCLUSIVE):
+        with rm.acquireResource(imageResourcesNamespace, srcImg, rm.EXCLUSIVE):
             vol = sdCache.produce(sdUUID).produceVolume(srcImg, srcVol)
             vol.prepare(rw=True, chainrw=True, setrw=True)
 


-- 
To view, visit https://gerrit.ovirt.org/65035

Gerrit-MessageType: newchange
Gerrit-Change-Id: Ib33d9fad90551a0c2e9b1e8a89c5f9f920681c6b
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Nir Soffer <nsof...@redhat.com>