Hello Bala.FA,

I'd like you to do a code review.  Please visit

    http://gerrit.ovirt.org/18358

to review the following change.

Change subject: gluster: Using XML output for rebalance/remove-brick/replace-brick status verbs
......................................................................

gluster: Using XML output for rebalance/remove-brick/replace-brick status verbs

Modified rebalance/remove-brick/replace-brick status output to consume the XML
output provided by the gluster CLI command.

Modified verbs:
glusterVolumeRebalanceStatus
glusterVolumeReplaceBrickStatus
glusterVolumeRemoveBrickStatus

Change-Id: I7d277f267cf64d8af419d07367c33dc065f86a9b
Signed-off-by: Bala.FA <[email protected]>
Signed-off-by: Aravinda VK <[email protected]>
---
M client/vdsClientGluster.py
M vdsm/gluster/api.py
M vdsm/gluster/cli.py
M vdsm/gluster/exception.py
M vdsm/gluster/vdsmapi-gluster-schema.json
5 files changed, 408 insertions(+), 55 deletions(-)


  git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/58/18358/1

diff --git a/client/vdsClientGluster.py b/client/vdsClientGluster.py
index 90af83e..38674bb 100644
--- a/client/vdsClientGluster.py
+++ b/client/vdsClientGluster.py
@@ -134,7 +134,10 @@
         return status['status']['code'], status['status']['message']
 
     def do_glusterVolumeRebalanceStatus(self, args):
-        status = self.s.glusterVolumeRebalanceStatus(args[0])
+        params = self._eqSplit(args)
+        volumeName = params.get('volumeName', '')
+
+        status = self.s.glusterVolumeRebalanceStatus(volumeName)
         pp.pprint(status)
         return status['status']['code'], status['status']['message']
 
@@ -188,8 +191,15 @@
         return status['status']['code'], status['status']['message']
 
     def do_glusterVolumeReplaceBrickStatus(self, args):
-        status = self.s.glusterVolumeReplaceBrickStatus(args[0], args[1],
-                                                        args[2])
+        params = self._eqSplit(args)
+        volumeName = params.get('volumeName', '')
+        existingBrick = params.get('existingBrick', '')
+        newBrick = params.get('newBrick', '')
+
+        status = self.s.glusterVolumeReplaceBrickStatus(volumeName,
+                                                        existingBrick,
+                                                        newBrick)
+        pp.pprint(status)
         return status['status']['code'], status['status']['message']
 
     def do_glusterVolumeReplaceBrickCommit(self, args):
@@ -235,13 +245,16 @@
         return status['status']['code'], status['status']['message']
 
     def do_glusterVolumeRemoveBrickStatus(self, args):
-        params = self._eqSplit(args[1:])
+        params = self._eqSplit(args)
+        volumeName = params.get('volumeName', '')
         try:
             brickList = params['bricks'].split(',')
         except:
             raise ValueError
         replicaCount = params.get('replica', '')
-        status = self.s.glusterVolumeRemoveBrickStatus(args[0], brickList,
+
+        status = self.s.glusterVolumeRemoveBrickStatus(volumeName,
+                                                       brickList,
                                                        replicaCount)
         pp.pprint(status)
         return status['status']['code'], status['status']['message']
@@ -421,8 +434,6 @@
             raise ValueError
 
         status = self.s.glusterServicesGet(serviceNames)
-        pp.pprint(status)
-        return status['status']['code'], status['status']['message']
 
 
 def getGlusterCmdDict(serv):
@@ -503,7 +514,8 @@
               )),
          'glusterVolumeRebalanceStatus': (
              serv.do_glusterVolumeRebalanceStatus,
-             ('<volume_name>\n\t<volume_name> is existing volume name',
+             ('volumeName=<volume_name>\n\t'
+              '<volume_name> is existing volume name',
               'get volume rebalance status'
               )),
          'glusterVolumeDelete': (
@@ -548,8 +560,10 @@
               )),
          'glusterVolumeReplaceBrickStatus': (
              serv.do_glusterVolumeReplaceBrickStatus,
-             ('<volume_name> <existing_brick> <new_brick> \n\t<volume_name> '
-              'is existing volume name\n\t<brick> is existing brick\n\t'
+             ('volumeName=<volume_name> existingBrick=<existing_brick> '
+              'newBrick=<new_brick>\n\t'
+              '<volume_name> is existing volume name\n\t'
+              '<existing_brick> is existing brick\n\t'
               '<new_brick> is new brick',
               'get volume replace brick status'
               )),
@@ -580,9 +594,10 @@
               )),
          'glusterVolumeRemoveBrickStatus': (
              serv.do_glusterVolumeRemoveBrickStatus,
-             ('<volume_name> [replica=<count>] bricks=brick[,brick] ... \n\t'
-              '<volume_name> is existing volume name\n\t<brick> is '
-              'existing brick',
+             ('volumeName=<volume_name> bricks=<brick[,brick, ...]> '
+              '[replica=<count>]\n\t'
+              '<volume_name> is existing volume name\n\t'
+              '<brick[,brick, ...]> is existing brick(s)',
               'get volume remove bricks status'
               )),
          'glusterVolumeRemoveBrickCommit': (
diff --git a/vdsm/gluster/api.py b/vdsm/gluster/api.py
index 4bd8308..7c1547f 100644
--- a/vdsm/gluster/api.py
+++ b/vdsm/gluster/api.py
@@ -126,8 +126,8 @@
 
     @exportAsVerb
     def volumeRebalanceStatus(self, volumeName, options=None):
-        st, msg = self.svdsmProxy.glusterVolumeRebalanceStatus(volumeName)
-        return {'rebalance': st, 'message': msg}
+        status = self.svdsmProxy.glusterVolumeRebalanceStatus(volumeName)
+        return {'volumeStatus': status}
 
     @exportAsVerb
     def volumeReplaceBrickStart(self, volumeName, existingBrick, newBrick,
@@ -151,12 +151,12 @@
                                                        newBrick)
 
     @exportAsVerb
-    def volumeReplaceBrickStatus(self, volumeName, oldBrick, newBrick,
+    def volumeReplaceBrickStatus(self, volumeName, existingBrick, newBrick,
                                  options=None):
-        st, msg = self.svdsmProxy.glusterVolumeReplaceBrickStatus(volumeName,
-                                                                  oldBrick,
-                                                                  newBrick)
-        return {'replaceBrick': st, 'message': msg}
+        status = self.svdsmProxy.glusterVolumeReplaceBrickStatus(volumeName,
+                                                                 existingBrick,
+                                                                 newBrick)
+        return {'volumeStatus': status}
 
     @exportAsVerb
     def volumeReplaceBrickCommit(self, volumeName, existingBrick, newBrick,
diff --git a/vdsm/gluster/cli.py b/vdsm/gluster/cli.py
index bac6d1c..8b79130 100644
--- a/vdsm/gluster/cli.py
+++ b/vdsm/gluster/cli.py
@@ -73,6 +73,25 @@
     RDMA = 'RDMA'
 
 
+class TaskType:
+    REBALANCE = 'REBALANCE'
+    REPLACE_BRICK = 'REPLACE_BRICK'
+    REMOVE_BRICK = 'REMOVE_BRICK'
+
+
+class TaskStatus:
+    RUNNING = 'RUNNING'
+    FAILED = 'FAILED'
+    COMPLETED = 'COMPLETED'
+
+
+class TaskAction:
+    STOP = 'STOP'
+    ABORT = 'ABORT'
+    PAUSE = 'PAUSE'
+    COMMIT = 'COMMIT'
+
+
 def _execGluster(cmd):
     return utils.execCmd(cmd)
 
@@ -307,6 +326,50 @@
             return _parseVolumeStatusMem(xmltree)
         else:
             return _parseVolumeStatus(xmltree)
+    except _etreeExceptions:
+        raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
+
+
+def _parseVolumeStatusAll(tree):
+    """
+    returns {TaskId: {'volumeName': VolumeName,
+                      'volumeId': VolumeId,
+                      'taskType': TaskType,
+                      'bricks': BrickList}, ...}
+    """
+    tasks = {}
+    for el in tree.findall('volStatus/volumes/volume'):
+        volumeName = el.find('volName').text
+        volumeId = el.find('id').text
+        for c in el.findall('tasks/task'):
+            taskType = c.find('type').text
+            taskType = taskType.upper().replace('-', '_')
+            taskId = c.find('id').text
+            bricks = []
+            if taskType == TaskType.REPLACE_BRICK:
+                bricks.append(c.find('sourceBrick').text)
+                bricks.append(c.find('destBrick').text)
+            elif taskType == TaskType.REMOVE_BRICK:
+                for b in c.findall('brick'):
+                    bricks.append(b.text)
+            elif taskType == TaskType.REBALANCE:
+                pass
+            tasks[taskId] = {'volumeName': volumeName,
+                             'volumeId': volumeId,
+                             'taskType': taskType,
+                             'bricks': bricks}
+    return tasks
+
+
+@makePublic
+def volumeStatusAll():
+    command = _getGlusterVolCmd() + ["status", "all"]
+    try:
+        xmltree = _execGlusterXml(command)
+    except ge.GlusterCmdFailedException, e:
+        raise ge.GlusterVolumeStatusAllFailedException(rc=e.rc, err=e.err)
+    try:
+        return _parseVolumeStatusAll(xmltree)
     except _etreeExceptions:
         raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
 
@@ -585,17 +648,58 @@
 
 
 @makePublic
-def volumeRebalanceStatus(volumeName):
-    rc, out, err = _execGluster(_getGlusterVolCmd() + ["rebalance", volumeName,
-                                                       "status"])
-    if rc:
-        raise ge.GlusterVolumeRebalanceStatusFailedException(rc, out, err)
-    if 'in progress' in out[0]:
-        return BrickStatus.RUNNING, "\n".join(out)
-    elif 'complete' in out[0]:
-        return BrickStatus.COMPLETED, "\n".join(out)
+def _parseVolumeRebalanceRemoveBrickStatus(xmltree, mode):
+    """
+    returns {'taskId': UUID,
+             'host': [{'name': NAME,
+                       'filesScanned': INT,
+                       'filesMoved': INT,
+                       'filesFailed': INT,
+                       'totalSizeMoved': INT,
+                       'status': TaskStatus},...]
+             'summary': {'filesScanned': INT,
+                         'filesMoved': INT,
+                         'filesFailed': INT,
+                         'totalSizeMoved': INT,
+                         'status': TaskStatus}}
+    """
+    if mode == 'rebalance':
+        tree = xmltree.find('volRebalance')
+    elif mode == 'remove-brick':
+        tree = xmltree.find('volRemoveBrick')
     else:
-        return BrickStatus.UNKNOWN, "\n".join(out)
+        return
+    status = \
+        {'taskId': tree.find('task-id').text,
+         'summary': {
+             'filesScanned': int(tree.find('aggregate/lookups').text),
+             'filesMoved': int(tree.find('aggregate/files').text),
+             'filesFailed': int(tree.find('aggregate/failures').text),
+             'totalSizeMoved': int(tree.find('aggregate/size').text),
+             'status': tree.find('aggregate/status').text},
+         'host': []}
+    for el in tree.findall('node'):
+        status['host'].append({'name': el.find('nodeName').text,
+                               'filesScanned': int(el.find('lookups').text),
+                               'filesMoved': int(el.find('files').text),
+                               'filesFailed': int(el.find('failures').text),
+                               'totalSizeMoved': int(el.find('size').text),
+                               'status': el.find('status').text})
+    return status
+
+
+@makePublic
+def volumeRebalanceStatus(volumeName):
+    command = _getGlusterVolCmd() + ["rebalance", volumeName, "status"]
+    try:
+        xmltree = _execGlusterXml(command)
+    except ge.GlusterCmdFailedException, e:
+        raise ge.GlusterVolumeRebalanceStatusFailedException(rc=e.rc,
+                                                             err=e.err)
+    try:
+        return _parseVolumeRebalanceRemoveBrickStatus(xmltree, 'rebalance')
+    except _etreeExceptions:
+        raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
 
 
 @makePublic
@@ -638,26 +742,32 @@
 
 
 @makePublic
+def _parseVolumeReplaceBrickStatus(tree):
+    """
+    returns {'taskId': UUID,
+             'filesMoved': INT,
+             'movingFile': STRING,
+             'status': TaskStatus}
+    """
+    return {'taskId': tree.find('volReplaceBrick/id').text,
+            'filesMoved': int(tree.find('volReplaceBrick/filesMoved').text),
+            'movingFile': tree.find('volReplaceBrick/movingFile').text,
+            'status': tree.find('volReplaceBrick/status').text}
+
+
+@makePublic
 def volumeReplaceBrickStatus(volumeName, existingBrick, newBrick):
-    rc, out, err = _execGluster(_getGlusterVolCmd() + ["replace-brick",
-                                                       volumeName,
-                                                       existingBrick, newBrick,
-                                                       "status"])
-    if rc:
-        raise ge.GlusterVolumeReplaceBrickStatusFailedException(rc, out,
-                                                                err)
-    message = "\n".join(out)
-    statLine = out[0].strip().upper()
-    if BrickStatus.PAUSED in statLine:
-        return BrickStatus.PAUSED, message
-    elif statLine.endswith('MIGRATION COMPLETE'):
-        return BrickStatus.COMPLETED, message
-    elif statLine.startswith('NUMBER OF FILES MIGRATED'):
-        return BrickStatus.RUNNING, message
-    elif statLine.endswith("UNKNOWN"):
-        return BrickStatus.UNKNOWN, message
-    else:
-        return BrickStatus.NA, message
+    command = _getGlusterVolCmd() + ["replace-brick", volumeName,
+                                     existingBrick, newBrick, "status"]
+    try:
+        xmltree = _execGlusterXml(command)
+    except ge.GlusterCmdFailedException, e:
+        raise ge.GlusterVolumeReplaceBrickStatusFailedException(rc=e.rc,
+                                                                err=e.err)
+    try:
+        return _parseVolumeReplaceBrickStatus(xmltree)
+    except _etreeExceptions:
+        raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
 
 
 @makePublic
@@ -712,12 +822,15 @@
     if replicaCount:
         command += ["replica", "%s" % replicaCount]
     command += brickList + ["status"]
-    rc, out, err = _execGluster(command)
-
-    if rc:
-        raise ge.GlusterVolumeRemoveBrickStatusFailedException(rc, out, err)
-    else:
-        return "\n".join(out)
+    try:
+        xmltree = _execGlusterXml(command)
+    except ge.GlusterCmdFailedException, e:
+        raise ge.GlusterVolumeRemoveBrickStatusFailedException(rc=e.rc,
+                                                               err=e.err)
+    try:
+        return _parseVolumeRebalanceRemoveBrickStatus(xmltree, 'remove-brick')
+    except _etreeExceptions:
+        raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
 
 
 @makePublic
diff --git a/vdsm/gluster/exception.py b/vdsm/gluster/exception.py
index c569a9e..b880339 100644
--- a/vdsm/gluster/exception.py
+++ b/vdsm/gluster/exception.py
@@ -351,6 +351,11 @@
     message = "Volume profile info failed"
 
 
+class GlusterVolumeStatusAllFailedException(GlusterVolumeException):
+    code = 4161
+    message = "Volume status all failed"
+
+
 # Host
 class GlusterHostException(GlusterException):
     code = 4400
diff --git a/vdsm/gluster/vdsmapi-gluster-schema.json b/vdsm/gluster/vdsmapi-gluster-schema.json
index 7a4c034..c91bc08 100644
--- a/vdsm/gluster/vdsmapi-gluster-schema.json
+++ b/vdsm/gluster/vdsmapi-gluster-schema.json
@@ -372,3 +372,223 @@
 {'command': {'class': 'GlusterService', 'name': 'action'},
  'data': {'serviceName': 'str', 'action': 'GlusterServiceAction'},
  'returns': 'GlusterServicesStatusInfo'}
+##
+# @UUID:
+#
+# A universally unique identifier in RFC 4122 format.
+#     eg. "407cb255-34be-432c-be7c-eb43b8de82be"
+#
+# Since: 4.10.3
+# XXX: Extension: 'str' data type
+##
+{'alias': 'UUID', 'data': 'str'}
+
+## Category: @GlusterVolume ######################################################
+##
+# @GlusterVolume:
+#
+# Gluster Volume API object.
+#
+# Since: 4.10.3
+##
+{'class': 'GlusterVolume'}
+
+##
+# @GlusterVolume.rebalanceStart:
+#
+# Start rebalance of given volume.
+#
+# @volumeName:  Gluster volume name
+#
+# Returns:
+# TaskID
+#
+# Since: 4.10.3
+##
+{'command': {'class': 'GlusterVolume', 'name': 'rebalanceStart'},
+ 'data': {'volumeName': 'str'},
+ 'returns': 'UUID'}
+
+##
+# @TaskStatus:
+#
+# Possible value of task status.
+#
+# @RUNNING:    Task is running
+#
+# @FAILED:   Task is failed
+#
+# @COMPLETED:   Task is completed
+#
+# Since: 4.10.3
+##
+{'enum': 'TaskStatus',
+ 'data': ['RUNNING', 'FAILED', 'COMPLETED']}
+
+##
+# @RebalanceRemoveBrickStatusHost:
+#
+# Volume rebalance or remove brick status information on a host.
+#
+# @name:             Hostname or IP address of Gluster peer
+#
+# @filesScanned:     Count of files scanned
+#
+# @filesMoved:       Count of files moved
+#
+# @filesFailed:      Count of files failed
+#
+# @totalSizeMoved:   Size in total files moved
+#
+# @status:           Status of the operation
+#
+# Since: 4.10.3
+##
+{'type': 'RebalanceRemoveBrickStatusHost',
+  'data': {'name': 'str', 'filesScanned': 'uint', 'filesMoved': 'uint', 'filesFailed': 'uint', 'totalSizeMoved': 'uint', 'status': 'TaskStatus'}}
+
+
+##
+# @RebalanceRemoveBrickStatusSummary:
+#
+# Volume rebalance or remove brick overall status information.
+#
+# @filesScanned:     Count of files scanned
+#
+# @filesMoved:       Count of files moved
+#
+# @filesFailed:      Count of files failed
+#
+# @totalSizeMoved:   Size in total files moved
+#
+# @status:           Status of the operation
+#
+# Since: 4.10.3
+##
+{'type': 'RebalanceRemoveBrickStatusSummary',
+  'data': {'filesScanned': 'uint', 'filesMoved': 'uint', 'filesFailed': 'uint', 'totalSizeMoved': 'uint', 'status': 'TaskStatus'}}
+
+
+##
+# @RebalanceRemoveBrickStatus:
+#
+# Volume rebalance or remove brick status information.
+#
+# @taskId:    The @UUID of the task
+#
+# @host:      List of rebalance status on hosts
+#
+# @summary:   Summary of overall rebalance status
+#
+# Since: 4.10.3
+##
+{'type': 'RebalanceRemoveBrickStatus',
+  'data': {'taskId': 'UUID', 'host': ['RebalanceRemoveBrickStatusHost'], 'summary': 'RebalanceRemoveBrickStatusSummary'}}
+
+##
+# @GlusterVolume.rebalanceStatus:
+#
+# Get rebalance status of given volume.
+#
+# @volumeName:  Gluster volume name
+#
+# Returns:
+# Map of rebalance status
+#
+# Since: 4.10.3
+##
+{'command': {'class': 'GlusterVolume', 'name': 'rebalanceStatus'},
+ 'data': {'volumeName': 'str'},
+ 'returns': 'RebalanceRemoveBrickStatus'}
+
+##
+# @GlusterVolume.replaceBrickStart:
+#
+# Start replace brick of given volume.
+#
+# @volumeName:     Gluster volume name
+# @existingBrick:  Existing brick in the volume
+# @newBrick:       New brick replacing existing brick of the volume
+#
+# Returns:
+# TaskID
+#
+# Since: 4.10.3
+##
+{'command': {'class': 'GlusterVolume', 'name': 'replaceBrickStart'},
+ 'data': {'volumeName': 'str', 'existingBrick': 'str', 'newBrick': 'str'},
+ 'returns': 'UUID'}
+
+##
+# @ReplaceBrickStatus:
+#
+# Volume replace brick status information.
+#
+# @taskId:        The @UUID of the task
+#
+# @filesMoved:    Count of files moved
+#
+# @movingFile:    Name of the file currently in moving process
+#
+# @status:        Status of the operation
+#
+# Since: 4.10.3
+##
+{'type': 'ReplaceBrickStatus',
+ 'data': {'taskId': 'UUID', 'filesMoved': 'uint', 'movingFile': 'str', 'status': 'TaskStatus'}}
+
+##
+# @GlusterVolume.replaceBrickStatus:
+#
+# Get replace brick status of given volume.
+#
+# @volumeName:     Gluster volume name
+# @existingBrick:  Existing brick in the volume
+# @newBrick:       New brick replacing existing brick of the volume
+#
+# Returns:
+# Map of remove brick status information
+#
+# Since: 4.10.3
+##
+{'command': {'class': 'GlusterVolume', 'name': 'replaceBrickStatus'},
+ 'data': {'volumeName': 'str', 'existingBrick': 'str', 'newBrick': 'str'},
+ 'returns': 'ReplaceBrickStatus'}
+
+##
+# @GlusterVolume.removeBrickStart:
+#
+# Start remove brick(s) of given volume.
+#
+# @volumeName:    Gluster volume name
+# @brickList:     Existing brick(s) in the volume for removal
+# @replicaCount:  #optional parameter to reduce replication count on brick removal
+#
+# Returns:
+# TaskID
+#
+# Since: 4.10.3
+##
+{'command': {'class': 'GlusterVolume', 'name': 'removeBrickStart'},
+ 'data': {'volumeName': 'str', 'brickList': ['str'], '*replicaCount': 'uint'},
+ 'returns': 'UUID'}
+
+##
+# @GlusterVolume.removeBrickStatus:
+#
+# Get status of brick(s) removal of given volume.
+#
+# @volumeName:    Gluster volume name
+# @brickList:     Existing brick(s) in the volume for removal
+#
+# Returns:
+# Map of remove brick status information
+#
+# Since: 4.10.3
+##
+{'command': {'class': 'GlusterVolume', 'name': 'removeBrickStatus'},
+ 'data': {'volumeName': 'str', 'brickList': ['str']},
+ 'returns': 'RebalanceRemoveBrickStatus'}


-- 
To view, visit http://gerrit.ovirt.org/18358
To unsubscribe, visit http://gerrit.ovirt.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: I7d277f267cf64d8af419d07367c33dc065f86a9b
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Aravinda VK <[email protected]>
Gerrit-Reviewer: Bala.FA <[email protected]>
_______________________________________________
vdsm-patches mailing list
[email protected]
https://lists.fedorahosted.org/mailman/listinfo/vdsm-patches

Reply via email to