Volans has uploaded a new change for review. ( https://gerrit.wikimedia.org/r/352892 )

Change subject: ClusterShell: allow to specify exit codes per Command
......................................................................

ClusterShell: allow to specify exit codes per Command

Bug: T164833
Change-Id: I33fd1e889a3c7c2c36a3870ec3bbe50c0df006cf
---
M cumin/tests/unit/transports/test_clustershell.py
M cumin/transports/clustershell.py
2 files changed, 17 insertions(+), 16 deletions(-)


  git pull ssh://gerrit.wikimedia.org:29418/operations/software/cumin refs/changes/92/352892/1

diff --git a/cumin/tests/unit/transports/test_clustershell.py b/cumin/tests/unit/transports/test_clustershell.py
index 7c32f57..2fbe744 100644
--- a/cumin/tests/unit/transports/test_clustershell.py
+++ b/cumin/tests/unit/transports/test_clustershell.py
@@ -174,7 +174,7 @@
     def setUp(self, *args):
         """Initialize default properties and instances."""
         self.nodes = ['node1', 'node2']
-        self.commands = [Command('command1'), Command('command2')]
+        self.commands = [Command('command1', ok_codes=[0, 100]), Command('command2')]
         self.worker = mock.MagicMock()
         self.worker.current_node = 'node1'
         self.worker.command = 'command1'
@@ -321,7 +321,7 @@
     @mock.patch('cumin.transports.clustershell.Task.Task.timer')
     def test_ev_hup_ok(self, timer):
         """Calling ev_hup with a worker that has exit status zero should update the success progress bar."""
-        self.worker.current_rc = 0
+        self.worker.current_rc = 100
         self.handler.ev_pickup(self.worker)
         self.handler.ev_hup(self.worker)
         self.assertTrue(self.handler.pbar_ok.update.called)
diff --git a/cumin/transports/clustershell.py b/cumin/transports/clustershell.py
index 3b3385d..ed8922b 100644
--- a/cumin/transports/clustershell.py
+++ b/cumin/transports/clustershell.py
@@ -498,17 +498,18 @@
 
         self.lock.acquire()  # Avoid modifications of the same data from other callbacks triggered by ClusterShell
         try:
-            if worker.current_rc != 0:
-                # Considering failed any execution with return code different than zero
-                self.pbar_ko.update()
-                self.counters['failed'] += 1
-                new_state = State.failed
-            else:
+            node = self.nodes[worker.current_node]
+
+            if worker.current_rc in node.commands[node.running_command_index].ok_codes:
                 self.pbar_ok.update()
                 self.counters['success'] += 1
                 new_state = State.success
+            else:
+                self.pbar_ko.update()
+                self.counters['failed'] += 1
+                new_state = State.failed
 
-            self.nodes[worker.current_node].state.update(new_state)
+            node.state.update(new_state)
         finally:
             self.lock.release()
 
@@ -642,13 +643,8 @@
         self.lock.acquire()  # Avoid modifications of the same data from other callbacks triggered by ClusterShell
         try:
             node = self.nodes[worker.current_node]
-            if worker.current_rc != 0:
-                # Considering failed any execution with return code different than zero
-                self.pbar_ko.update()
-                self.counters['failed'] += 1
-                node.state.update(State.failed)
-                schedule_timer = True  # Continue the execution on other nodes if criteria are met
-            else:
+
+            if worker.current_rc in node.commands[node.running_command_index].ok_codes:
                 if node.running_command_index == (len(node.commands) - 1):
                     self.pbar_ok.update()
                     self.counters['success'] += 1
@@ -656,6 +652,11 @@
                     schedule_timer = True  # Continue the execution on other nodes if criteria are met
                 else:
                     schedule_next = True  # Continue the execution in the current node with the next command
+            else:
+                self.pbar_ko.update()
+                self.counters['failed'] += 1
+                node.state.update(State.failed)
+                schedule_timer = True  # Continue the execution on other nodes if criteria are met
         finally:
             self.lock.release()
 

-- 
To view, visit https://gerrit.wikimedia.org/r/352892
To unsubscribe, visit https://gerrit.wikimedia.org/r/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: I33fd1e889a3c7c2c36a3870ec3bbe50c0df006cf
Gerrit-PatchSet: 1
Gerrit-Project: operations/software/cumin
Gerrit-Branch: master
Gerrit-Owner: Volans <[email protected]>

_______________________________________________
MediaWiki-commits mailing list
[email protected]
https://lists.wikimedia.org/mailman/listinfo/mediawiki-commits

Reply via email to