Adam Litke has uploaded a new change for review.

Change subject: storage: Move IMAGE_NAMESPACE and VOLUME_NAMESPACE to constants.py
......................................................................

storage: Move IMAGE_NAMESPACE and VOLUME_NAMESPACE to constants.py

Change-Id: Icd5d8308b09b418440a29f5ff585ea30b193643f
Signed-off-by: Adam Litke <[email protected]>
---
M lib/vdsm/storage/constants.py
M tests/storage_sdm_create_volume_test.py
M vdsm/storage/blockSD.py
M vdsm/storage/blockVolume.py
M vdsm/storage/hsm.py
M vdsm/storage/image.py
M vdsm/storage/resourceFactories.py
M vdsm/storage/sd.py
M vdsm/storage/sdm/api/create_volume.py
M vdsm/storage/sp.py
M vdsm/storage/volume.py
11 files changed, 45 insertions(+), 50 deletions(-)


  git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/34/61434/1

diff --git a/lib/vdsm/storage/constants.py b/lib/vdsm/storage/constants.py
index c122c05..e49c88e 100644
--- a/lib/vdsm/storage/constants.py
+++ b/lib/vdsm/storage/constants.py
@@ -22,7 +22,12 @@
 
 from vdsm import qemuimg
 
+# ResourceManager Lock Namespaces
 STORAGE = "Storage"
+LVM_ACTIVATION_NAMESPACE = 'lvmActivationNS'
+IMAGE_NAMESPACE = 'imageNS'
+VOLUME_NAMESPACE = 'volumeNS'
+
 SECTOR_SIZE = 512
 VG_EXTENT_SIZE_MB = 128
 
diff --git a/tests/storage_sdm_create_volume_test.py b/tests/storage_sdm_create_volume_test.py
index b367dbc..92f5e9f 100644
--- a/tests/storage_sdm_create_volume_test.py
+++ b/tests/storage_sdm_create_volume_test.py
@@ -29,11 +29,11 @@
 
 from vdsm import exception
 from vdsm import jobs
+from vdsm.storage import constants as sc
 from vdsm.storage import exception as se
 
 from storage import fileVolume, sd
 from storage import resourceManager as rm
-from storage.resourceFactories import IMAGE_NAMESPACE
 
 import storage.sdm.api.create_volume
 
@@ -124,7 +124,7 @@
                          args['sd_manifest'].__calls__)
 
         # Verify that the image resource was locked and released
-        image_ns = sd.getNamespace(job.sd_manifest.sdUUID, IMAGE_NAMESPACE)
+        image_ns = sd.getNamespace(job.sd_manifest.sdUUID, sc.IMAGE_NAMESPACE)
         rm_args = (image_ns, job.vol_info.img_id, rm.LockType.exclusive)
         self.assertEqual([('acquireResource', rm_args, {}),
                           ('releaseResource', rm_args, {})],
diff --git a/vdsm/storage/blockSD.py b/vdsm/storage/blockSD.py
index 07f2ac7..dee4100 100644
--- a/vdsm/storage/blockSD.py
+++ b/vdsm/storage/blockSD.py
@@ -55,7 +55,6 @@
 import blockVolume
 import multipath
 import resourceFactories
-from resourceFactories import LVM_ACTIVATION_NAMESPACE
 import iscsi
 from storage_mailbox import MAILBOX_SIZE
 import resourceManager as rm
@@ -838,7 +837,7 @@
         lvmActivationFactory = resourceFactories.LvmActivationFactory(
             self.sdUUID)
         lvmActivationNamespace = sd.getNamespace(self.sdUUID,
-                                                 LVM_ACTIVATION_NAMESPACE)
+                                                 sc.LVM_ACTIVATION_NAMESPACE)
         try:
             rmanager.registerNamespace(lvmActivationNamespace,
                                        lvmActivationFactory)
diff --git a/vdsm/storage/blockVolume.py b/vdsm/storage/blockVolume.py
index 9651314..d019050 100644
--- a/vdsm/storage/blockVolume.py
+++ b/vdsm/storage/blockVolume.py
@@ -46,7 +46,6 @@
 import lvm
 import resourceManager as rm
 from sdc import sdCache
-from resourceFactories import LVM_ACTIVATION_NAMESPACE
 
 
 BLOCK_SIZE = sc.BLOCK_SIZE
@@ -71,8 +70,8 @@
         volume.VolumeManifest.__init__(self, repoPath, sdUUID, imgUUID,
                                        volUUID)
         self.metaoff = None
-        self.lvmActivationNamespace = sd.getNamespace(self.sdUUID,
-                                                      LVM_ACTIVATION_NAMESPACE)
+        self.lvmActivationNamespace = sd.getNamespace(
+            self.sdUUID, sc.LVM_ACTIVATION_NAMESPACE)
 
     def getMetadataId(self):
         """
@@ -389,7 +388,7 @@
         cls.log.info("Tearing down volume %s/%s justme %s"
                      % (sdUUID, volUUID, justme))
         lvmActivationNamespace = sd.getNamespace(sdUUID,
-                                                 LVM_ACTIVATION_NAMESPACE)
+                                                 sc.LVM_ACTIVATION_NAMESPACE)
         rmanager.releaseResource(lvmActivationNamespace, volUUID)
         if not justme:
             try:
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index 47d7e76..c61446d 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -75,7 +75,6 @@
 import iscsi
 import taskManager
 import resourceManager as rm
-from resourceFactories import IMAGE_NAMESPACE
 import dispatcher
 import storageServer
 
@@ -1821,7 +1820,7 @@
         sdDom = sdCache.produce(sdUUID=sdUUID)
         repoPath = os.path.join(self.storage_repository, sdDom.getPools()[0])
 
-        imageResourcesNamespace = sd.getNamespace(sdUUID, IMAGE_NAMESPACE)
+        imageResourcesNamespace = sd.getNamespace(sdUUID, sc.IMAGE_NAMESPACE)
         with rmanager.acquireResource(imageResourcesNamespace, imgUUID,
                                       rm.LockType.shared):
             image.Image(repoPath).syncVolumeChain(sdUUID, imgUUID, volUUID,
diff --git a/vdsm/storage/image.py b/vdsm/storage/image.py
index 6f83c1e..76a6400 100644
--- a/vdsm/storage/image.py
+++ b/vdsm/storage/image.py
@@ -376,8 +376,8 @@
                            img)
             yield
 
-        dstImageResourcesNamespace = sd.getNamespace(
-            destDom.sdUUID, resourceFactories.IMAGE_NAMESPACE)
+        dstImageResourcesNamespace = sd.getNamespace(destDom.sdUUID,
+                                                     sc.IMAGE_NAMESPACE)
         # In destination domain we need to lock image's template if exists
         with rmanager.acquireResource(dstImageResourcesNamespace, pimg,
                                       rm.LockType.shared) \
diff --git a/vdsm/storage/resourceFactories.py b/vdsm/storage/resourceFactories.py
index 90c4512..6e86716 100644
--- a/vdsm/storage/resourceFactories.py
+++ b/vdsm/storage/resourceFactories.py
@@ -21,6 +21,7 @@
 import os
 
 from vdsm.config import config
+from vdsm.storage import constants as sc
 from vdsm.storage import exception as se
 
 import logging
@@ -29,10 +30,6 @@
 from sdc import sdCache
 import sd
 import image
-
-LVM_ACTIVATION_NAMESPACE = 'lvmActivationNS'
-IMAGE_NAMESPACE = 'imageNS'
-VOLUME_NAMESPACE = 'volumeNS'
 
 rmanager = rm.ResourceManager.getInstance()
 
@@ -108,7 +105,7 @@
         rm.SimpleResourceFactory.__init__(self)
         self.sdUUID = sdUUID
         self.volumeResourcesNamespace = sd.getNamespace(self.sdUUID,
-                                                        VOLUME_NAMESPACE)
+                                                        sc.VOLUME_NAMESPACE)
 
     def __getResourceCandidatesList(self, resourceName, lockType):
         """
diff --git a/vdsm/storage/sd.py b/vdsm/storage/sd.py
index b84abbd..6bd4707 100644
--- a/vdsm/storage/sd.py
+++ b/vdsm/storage/sd.py
@@ -34,7 +34,6 @@
 
 import image
 import resourceFactories
-from resourceFactories import IMAGE_NAMESPACE, VOLUME_NAMESPACE
 import resourceManager as rm
 from vdsm import concurrent
 from vdsm import constants
@@ -595,7 +594,7 @@
         # Register image resource namespace
         imageResourceFactory = \
             resourceFactories.ImageResourceFactory(self.sdUUID)
-        imageResourcesNamespace = getNamespace(self.sdUUID, IMAGE_NAMESPACE)
+        imageResourcesNamespace = getNamespace(self.sdUUID, sc.IMAGE_NAMESPACE)
         try:
             rmanager.registerNamespace(imageResourcesNamespace,
                                        imageResourceFactory)
@@ -603,7 +602,8 @@
             self.log.info("Resource namespace %s already registered",
                           imageResourcesNamespace)
 
-        volumeResourcesNamespace = getNamespace(self.sdUUID, VOLUME_NAMESPACE)
+        volumeResourcesNamespace = getNamespace(self.sdUUID,
+                                                sc.VOLUME_NAMESPACE)
         try:
             rmanager.registerNamespace(volumeResourcesNamespace,
                                        rm.SimpleResourceFactory())
diff --git a/vdsm/storage/sdm/api/create_volume.py b/vdsm/storage/sdm/api/create_volume.py
index ac38f95..671d888 100644
--- a/vdsm/storage/sdm/api/create_volume.py
+++ b/vdsm/storage/sdm/api/create_volume.py
@@ -26,7 +26,6 @@
 
 from storage import resourceManager as rm
 from storage import image, sd
-from storage.resourceFactories import IMAGE_NAMESPACE
 
 from . import base
 
@@ -44,7 +43,7 @@
 
         with self.sd_manifest.domain_lock(self.host_id):
             image_res_ns = sd.getNamespace(self.sd_manifest.sdUUID,
-                                           IMAGE_NAMESPACE)
+                                           sc.IMAGE_NAMESPACE)
             with rmanager.acquireResource(image_res_ns, self.vol_info.img_id,
                                           rm.LockType.exclusive):
                 artifacts = self.sd_manifest.get_volume_artifacts(
diff --git a/vdsm/storage/sp.py b/vdsm/storage/sp.py
index c9b8ddd..f76fbd3 100644
--- a/vdsm/storage/sp.py
+++ b/vdsm/storage/sp.py
@@ -48,7 +48,6 @@
 from vdsm.config import config
 from sdc import sdCache
 import image
-from resourceFactories import IMAGE_NAMESPACE
 import resourceManager as rm
 
 POOL_MASTER_DOMAIN = 'mastersd'
@@ -1347,7 +1346,7 @@
         sdCache.produce(sdUUID).extendVolume(volumeUUID, size, isShuttingDown)
 
     def extendVolumeSize(self, sdUUID, imgUUID, volUUID, newSize):
-        imageResourcesNamespace = sd.getNamespace(sdUUID, IMAGE_NAMESPACE)
+        imageResourcesNamespace = sd.getNamespace(sdUUID, sc.IMAGE_NAMESPACE)
         with rmanager.acquireResource(imageResourcesNamespace, imgUUID,
                                       rm.LockType.exclusive):
             return sdCache.produce(sdUUID) \
@@ -1529,10 +1528,11 @@
         :returns: a dict containing the UUID of the newly created image.
         :rtype: dict
         """
-        srcImageResourcesNamespace = sd.getNamespace(sdUUID, IMAGE_NAMESPACE)
+        srcImageResourcesNamespace = sd.getNamespace(sdUUID,
+                                                     sc.IMAGE_NAMESPACE)
         if dstSdUUID not in (sdUUID, sd.BLANK_UUID):
             dstImageResourcesNamespace = sd.getNamespace(dstSdUUID,
-                                                         IMAGE_NAMESPACE)
+                                                         sc.IMAGE_NAMESPACE)
         else:
             dstImageResourcesNamespace = srcImageResourcesNamespace
 
@@ -1572,9 +1572,9 @@
         :type force: bool
         """
         srcImageResourcesNamespace = sd.getNamespace(srcDomUUID,
-                                                     IMAGE_NAMESPACE)
+                                                     sc.IMAGE_NAMESPACE)
         dstImageResourcesNamespace = sd.getNamespace(dstDomUUID,
-                                                     IMAGE_NAMESPACE)
+                                                     sc.IMAGE_NAMESPACE)
         # For MOVE_OP acquire exclusive lock
         # For COPY_OP shared lock is enough
         if op == image.MOVE_OP:
@@ -1615,8 +1615,8 @@
                             sparsified volume.
         :type dstVolUUID: UUID
         """
-        srcNamespace = sd.getNamespace(tmpSdUUID, IMAGE_NAMESPACE)
-        dstNamespace = sd.getNamespace(dstSdUUID, IMAGE_NAMESPACE)
+        srcNamespace = sd.getNamespace(tmpSdUUID, sc.IMAGE_NAMESPACE)
+        dstNamespace = sd.getNamespace(dstSdUUID, sc.IMAGE_NAMESPACE)
 
         # virt-sparsify writes to temporary volume when using --tmp:prebuilt,
         # so we acquire exclusive lock for the temporary image.
@@ -1647,8 +1647,8 @@
         :param dstSdUUID: The UUID of the storage domain you want to copy to.
         :type dstSdUUID: UUID
         """
-        srcImgResNs = sd.getNamespace(sdUUID, IMAGE_NAMESPACE)
-        dstImgResNs = sd.getNamespace(dstSdUUID, IMAGE_NAMESPACE)
+        srcImgResNs = sd.getNamespace(sdUUID, sc.IMAGE_NAMESPACE)
+        dstImgResNs = sd.getNamespace(dstSdUUID, sc.IMAGE_NAMESPACE)
 
         # Preparing the ordered resource list to be acquired
         resList = (rmanager.acquireResource(*x) for x in sorted((
@@ -1675,8 +1675,8 @@
         :param syncType: The type of sync to perform (all volumes, etc.).
         :type syncType: syncType enum
         """
-        srcImgResNs = sd.getNamespace(sdUUID, IMAGE_NAMESPACE)
-        dstImgResNs = sd.getNamespace(dstSdUUID, IMAGE_NAMESPACE)
+        srcImgResNs = sd.getNamespace(sdUUID, sc.IMAGE_NAMESPACE)
+        dstImgResNs = sd.getNamespace(dstSdUUID, sc.IMAGE_NAMESPACE)
 
         # Preparing the ordered resource list to be acquired
         resList = (rmanager.acquireResource(*x) for x in sorted((
@@ -1694,7 +1694,7 @@
         methodArgs.
         """
         imgResourceLock = rmanager.acquireResource(
-            sd.getNamespace(sdUUID, IMAGE_NAMESPACE), imgUUID,
+            sd.getNamespace(sdUUID, sc.IMAGE_NAMESPACE), imgUUID,
             rm.LockType.shared)
 
         with imgResourceLock:
@@ -1707,7 +1707,7 @@
         and methodArgs.
         """
         imgResourceLock = rmanager.acquireResource(
-            sd.getNamespace(sdUUID, IMAGE_NAMESPACE), imgUUID,
+            sd.getNamespace(sdUUID, sc.IMAGE_NAMESPACE), imgUUID,
             rm.LockType.exclusive)
 
         with imgResourceLock:
@@ -1724,7 +1724,7 @@
             startEvent.wait()
 
         imgResourceLock = rmanager.acquireResource(
-            sd.getNamespace(sdUUID, IMAGE_NAMESPACE), imgUUID,
+            sd.getNamespace(sdUUID, sc.IMAGE_NAMESPACE), imgUUID,
             rm.LockType.shared)
 
         with imgResourceLock:
@@ -1740,7 +1740,7 @@
         Download an image from a stream.
         """
         imgResourceLock = rmanager.acquireResource(
-            sd.getNamespace(sdUUID, IMAGE_NAMESPACE), imgUUID,
+            sd.getNamespace(sdUUID, sc.IMAGE_NAMESPACE), imgUUID,
             rm.LockType.exclusive)
 
         with imgResourceLock:
@@ -1770,9 +1770,9 @@
         :type force: bool
         """
         srcImageResourcesNamespace = sd.getNamespace(srcDomUUID,
-                                                     IMAGE_NAMESPACE)
+                                                     sc.IMAGE_NAMESPACE)
         dstImageResourcesNamespace = sd.getNamespace(dstDomUUID,
-                                                     IMAGE_NAMESPACE)
+                                                     sc.IMAGE_NAMESPACE)
 
         imgList = imgDict.keys()
         imgList.sort()
@@ -1803,7 +1803,7 @@
         :returns: A dict with a list of volume UUIDs in the corrected chain
         :rtype: dict
         """
-        imageResourcesNamespace = sd.getNamespace(sdUUID, IMAGE_NAMESPACE)
+        imageResourcesNamespace = sd.getNamespace(sdUUID, sc.IMAGE_NAMESPACE)
         with rmanager.acquireResource(imageResourcesNamespace, imgUUID,
                                       rm.LockType.exclusive):
             img = image.Image(self.poolPath)
@@ -1829,7 +1829,7 @@
         :param postZero: ?
         :type postZero: bool?
         """
-        imageResourcesNamespace = sd.getNamespace(sdUUID, IMAGE_NAMESPACE)
+        imageResourcesNamespace = sd.getNamespace(sdUUID, sc.IMAGE_NAMESPACE)
 
         with rmanager.acquireResource(imageResourcesNamespace, imgUUID,
                                       rm.LockType.exclusive):
@@ -1879,7 +1879,7 @@
         :returns: a dict with the UUID of the new volume.
         :rtype: dict
         """
-        imageResourcesNamespace = sd.getNamespace(sdUUID, IMAGE_NAMESPACE)
+        imageResourcesNamespace = sd.getNamespace(sdUUID, sc.IMAGE_NAMESPACE)
 
         if imgUUID != srcImgUUID and srcImgUUID != sc.BLANK_UUID:
             srcDom = sdCache.produce(sdUUID)
@@ -1925,7 +1925,7 @@
                         have.
         :type imgUUID: UUID
         """
-        imageResourcesNamespace = sd.getNamespace(sdUUID, IMAGE_NAMESPACE)
+        imageResourcesNamespace = sd.getNamespace(sdUUID, sc.IMAGE_NAMESPACE)
 
         with rmanager.acquireResource(imageResourcesNamespace, imgUUID,
                                       rm.LockType.exclusive):
@@ -1979,7 +1979,7 @@
 
     def setVolumeDescription(self, sdUUID, imgUUID, volUUID, description):
         self.validatePoolSD(sdUUID)
-        imageResourcesNamespace = sd.getNamespace(sdUUID, IMAGE_NAMESPACE)
+        imageResourcesNamespace = sd.getNamespace(sdUUID, sc.IMAGE_NAMESPACE)
         with rmanager.acquireResource(imageResourcesNamespace, imgUUID,
                                       rm.LockType.exclusive):
             sdCache.produce(sdUUID).produceVolume(
@@ -1988,7 +1988,7 @@
 
     def setVolumeLegality(self, sdUUID, imgUUID, volUUID, legality):
         self.validatePoolSD(sdUUID)
-        imageResourcesNamespace = sd.getNamespace(sdUUID, IMAGE_NAMESPACE)
+        imageResourcesNamespace = sd.getNamespace(sdUUID, sc.IMAGE_NAMESPACE)
         with rmanager.acquireResource(imageResourcesNamespace, imgUUID,
                                       rm.LockType.exclusive):
             sdCache.produce(sdUUID).produceVolume(
diff --git a/vdsm/storage/volume.py b/vdsm/storage/volume.py
index b1cf15a..b7a5608 100644
--- a/vdsm/storage/volume.py
+++ b/vdsm/storage/volume.py
@@ -36,7 +36,6 @@
 import sd
 from sdc import sdCache
 import task
-import resourceFactories
 import resourceManager as rm
 rmanager = rm.ResourceManager.getInstance()
 
@@ -604,9 +603,7 @@
                      'dstFormat=%s srcParent=%s)', sdUUID, srcImg, srcVol,
                      dstFormat, srcParent)
 
-        imageResourcesNamespace = sd.getNamespace(
-            sdUUID,
-            resourceFactories.IMAGE_NAMESPACE)
+        imageResourcesNamespace = sd.getNamespace(sdUUID, sc.IMAGE_NAMESPACE)
 
         with rmanager.acquireResource(imageResourcesNamespace,
                                       srcImg, rm.LockType.exclusive):


-- 
To view, visit https://gerrit.ovirt.org/61434
To unsubscribe, visit https://gerrit.ovirt.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: Icd5d8308b09b418440a29f5ff585ea30b193643f
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <[email protected]>
_______________________________________________
vdsm-patches mailing list
[email protected]
https://lists.fedorahosted.org/admin/lists/[email protected]

Reply via email to