Hello community,

here is the log from the commit of package kubernetes-salt for openSUSE:Factory 
checked in at 2018-06-05 12:52:57
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/kubernetes-salt (Old)
 and      /work/SRC/openSUSE:Factory/.kubernetes-salt.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "kubernetes-salt"

Tue Jun  5 12:52:57 2018 rev:24 rq:614020 version:4.0.0+git_r823_5652fd8

Changes:
--------
--- /work/SRC/openSUSE:Factory/kubernetes-salt/kubernetes-salt.changes  2018-06-02 12:11:53.584551492 +0200
+++ /work/SRC/openSUSE:Factory/.kubernetes-salt.new/kubernetes-salt.changes     2018-06-05 12:52:57.857522171 +0200
@@ -1,0 +2,42 @@
+Fri Jun  1 14:37:21 UTC 2018 - [email protected]
+
+- Commit 33b39b3 by Alvaro Saurin [email protected]
+ Skip nodes that are being removed in the list of servers in haproxy.
+ 
+ bsc#1095330
+ 
+ Commit 8484c28 by Alvaro Saurin [email protected]
+ Fix the "targets" priorities for getting nodes for replacements. Minor: use
+ the same pattern for targeting nodes in removals.sls
+ as in kubernetes.sls. Do not use "unassigned" nodes when looking for
+ replacements. Minor improvements
+ 
+ bsc#1095336 bsc#1094078
+ 
+ Commit b80c8f1 by Alvaro Saurin [email protected]
+ Minor cleanups and "beautifications"
+ 
+ feature#cleanups
+
+
+-------------------------------------------------------------------
+Thu May 31 20:50:24 UTC 2018 - [email protected]
+
+- Commit 0979191 by Florian Bergmann [email protected]
+ Remove 'range' imports from six.
+ 
+ There were problems when running 'salt' using these imports and the
+ difference in semantics seems not significant.
+
+
+-------------------------------------------------------------------
+Thu May 31 11:37:06 UTC 2018 - [email protected]
+
+- Commit 138358b by David Cassany [email protected]
+ Spec update
+ 
+ * make use of %license macro
+ * update image prefix for sle15
+
+
+-------------------------------------------------------------------

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ kubernetes-salt.spec ++++++
--- /var/tmp/diff_new_pack.RPogJx/_old  2018-06-05 12:52:58.397502392 +0200
+++ /var/tmp/diff_new_pack.RPogJx/_new  2018-06-05 12:52:58.401502246 +0200
@@ -20,8 +20,8 @@
   %define _base_image sles12
 %endif
 
-%if 0%{?suse_version} == 1500 && !0%{?is_opensuse}
-  %define _base_image sles15
+%if 0%{?suse_version} >= 1500 && !0%{?is_opensuse}
+  %define _base_image caasp
 %endif
 
 %if 0%{?is_opensuse} && 0%{?suse_version} > 1500
@@ -32,7 +32,7 @@
 
 Name:           kubernetes-salt
 %define gitrepo salt
-Version:        4.0.0+git_r815_4d4d315
+Version:        4.0.0+git_r823_5652fd8
 Release:        0
 BuildArch:      noarch
 Summary:        Production-Grade Container Scheduling and Management
@@ -67,6 +67,11 @@
 mkdir -p %{buildroot}%{_datadir}/salt/kubernetes
 cp -R %{_builddir}/%{gitrepo}-master/*  
%{buildroot}%{_datadir}/salt/kubernetes/
 
+# license macro installs LICENSE file, if not removed it is duplicated
+%if 0%{?suse_version} >= 1500
+rm %{buildroot}%{_datadir}/salt/kubernetes/LICENSE
+%endif
+
 # fix image name
 dir_name=%{buildroot}/%{_datadir}/salt/kubernetes
 files=$(grep "image:[ ]*sles12" $dir_name -r | cut -d: -f1 | uniq)
@@ -84,6 +89,9 @@
 
 %files
 %defattr(-,root,root)
+%if 0%{?suse_version} >= 1500
+%license LICENSE
+%endif
 %dir %{_datadir}/salt
 %dir %{_datadir}/salt/kubernetes
 %{_datadir}/salt/kubernetes/*

++++++ master.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/packaging/suse/make_spec.sh 
new/salt-master/packaging/suse/make_spec.sh
--- old/salt-master/packaging/suse/make_spec.sh 2018-05-25 11:42:05.000000000 
+0200
+++ new/salt-master/packaging/suse/make_spec.sh 2018-06-01 16:37:28.000000000 
+0200
@@ -42,8 +42,8 @@
   %define _base_image sles12
 %endif
 
-%if 0%{?suse_version} == 1500 && !0%{?is_opensuse}
-  %define _base_image sles15
+%if 0%{?suse_version} >= 1500 && !0%{?is_opensuse}
+  %define _base_image caasp
 %endif
 
 %if 0%{?is_opensuse} && 0%{?suse_version} > 1500
@@ -89,6 +89,11 @@
 mkdir -p %{buildroot}%{_datadir}/salt/kubernetes
 cp -R %{_builddir}/%{gitrepo}-${SAFE_BRANCH}/*  
%{buildroot}%{_datadir}/salt/kubernetes/
 
+# license macro installs LICENSE file, if not removed it is duplicated
+%if 0%{?suse_version} >= 1500
+rm %{buildroot}%{_datadir}/salt/kubernetes/LICENSE
+%endif
+
 # fix image name
 dir_name=%{buildroot}/%{_datadir}/salt/kubernetes
 files=\$(grep "image:[ ]*sles12" \$dir_name -r | cut -d: -f1 | uniq)
@@ -106,6 +111,9 @@
 
 %files
 %defattr(-,root,root)
+%if 0%{?suse_version} >= 1500
+%license LICENSE
+%endif
 %dir %{_datadir}/salt
 %dir %{_datadir}/salt/kubernetes
 %{_datadir}/salt/kubernetes/*
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/_modules/caasp_etcd.py 
new/salt-master/salt/_modules/caasp_etcd.py
--- old/salt-master/salt/_modules/caasp_etcd.py 2018-05-25 11:42:05.000000000 
+0200
+++ new/salt-master/salt/_modules/caasp_etcd.py 2018-06-01 16:37:28.000000000 
+0200
@@ -137,6 +137,7 @@
     #
     new_etcd_members = __salt__['caasp_nodes.get_with_prio_for_role'](
         num_additional_etcd_members, 'etcd',
+        unassigned=False,
         excluded=current_etcd_members + excluded)
 
     if len(new_etcd_members) < num_additional_etcd_members:
@@ -214,6 +215,7 @@
     target_nodename = nodename or __salt__['caasp_net.get_nodename']()
 
     debug("getting etcd member ID with: %s", command)
+    members_output = ''
     try:
         target_url = 'https://{}:{}'.format(target_nodename, ETCD_CLIENT_PORT)
         members_output = subprocess.check_output(command)
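The second hunk above pre-binds members_output before the try block, so that if subprocess.check_output() fails the name is still defined (as an empty string) for any later code that inspects it, rather than raising a NameError. A minimal standalone sketch of that pattern, assuming a hypothetical etcdctl invocation and a simple log-and-continue error branch (the module's real command and except clause are not shown in this hunk):

import subprocess

def get_members_output(command=('etcdctl', 'member', 'list')):
    # bind up-front: a failed call can no longer leave the name undefined
    members_output = ''
    try:
        members_output = subprocess.check_output(command)
    except (OSError, subprocess.CalledProcessError) as err:
        # log-and-continue stand-in; callers still get a well-defined value
        print('could not get etcd members: {0}'.format(err))
    return members_output
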
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/_modules/caasp_grains.py 
new/salt-master/salt/_modules/caasp_grains.py
--- old/salt-master/salt/_modules/caasp_grains.py       2018-05-25 
11:42:05.000000000 +0200
+++ new/salt-master/salt/_modules/caasp_grains.py       2018-06-01 
16:37:28.000000000 +0200
@@ -5,11 +5,11 @@
     return "caasp_grains"
 
 
-# default grain name, used for getting node IDs
-_GRAIN_NAME = 'nodename'
+# an exported (to the mine) grain used for getting ids
+DEFAULT_GRAIN = 'nodename'
 
 
-def get(expr, grain=_GRAIN_NAME, type='compound'):
+def get(expr, grain=DEFAULT_GRAIN, type='compound'):
     if __opts__['__role'] == 'master':
         # 'mine.get' is not available in the master: it returns nothing
         # in that case, we should use "saltutil.runner"... uh?
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/_modules/caasp_net.py 
new/salt-master/salt/_modules/caasp_net.py
--- old/salt-master/salt/_modules/caasp_net.py  2018-05-25 11:42:05.000000000 
+0200
+++ new/salt-master/salt/_modules/caasp_net.py  2018-06-01 16:37:28.000000000 
+0200
@@ -12,6 +12,8 @@
 
 DEFAULT_INTERFACE = 'eth0'
 
+NODENAME_GRAIN = 'nodename'
+
 
 def __virtual__():
     return "caasp_net"
@@ -83,9 +85,9 @@
     try:
         if not host:
             assert __opts__['__role'] != 'master'
-            return __salt__['grains.get']('nodename')
+            return __salt__['grains.get'](NODENAME_GRAIN)
         else:
-            all_nodenames = __salt__['caasp_grains.get'](host, 'nodename', 
type='glob')
+            all_nodenames = __salt__['caasp_grains.get'](host, 
grain=NODENAME_GRAIN, type='glob')
             return all_nodenames[host]
     except Exception as e:
         error('could not get nodename: %s', e)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/_modules/caasp_nodes.py 
new/salt-master/salt/_modules/caasp_nodes.py
--- old/salt-master/salt/_modules/caasp_nodes.py        2018-05-25 
11:42:05.000000000 +0200
+++ new/salt-master/salt/_modules/caasp_nodes.py        2018-06-01 
16:37:28.000000000 +0200
@@ -8,6 +8,19 @@
 _MIN_MASTERS_AFTER_REMOVAL = 1
 _MIN_MINIONS_AFTER_REMOVAL = 1
 
+# an exported (to the mine) grain used for getting ids
+DEFAULT_GRAIN = 'nodename'
+
+# all the "*_in_progress" grains
+IN_PROGRESS_GRAINS = ['bootstrap_in_progress',
+                      'update_in_progress',
+                      'node_removal_in_progress',
+                      'node_addition_in_progress']
+
+
+# use unassigned nodes when looking for a replacement
+USE_UNASSIGNED = False
+
 
 def _get_prio_etcd(unassigned=False):
     '''
@@ -20,22 +33,23 @@
     '''
     res = []
 
-    # etcd nodes that not been bootstrapped yet
-    # (ie, the role has been assigned in Velum)
+    # only-etcd or etcd+master nodes that not been bootstrapped yet
+    # these are the machines where the role has been assigned in Velum
+    res.append('G@roles:etcd and not P@roles:(kube-master|kube-minion) and not 
G@bootstrap_complete:true')
     res.append('G@roles:etcd and not G@bootstrap_complete:true')
 
     if unassigned:
-        # has no role (prefering non-bootstrapped nodes)
+        # nodes with no role assigned (preferring non-bootstrapped ones)
         res.append('not P@roles:(kube-master|kube-minion|etcd) and not 
G@bootstrap_complete:true')
         res.append('not P@roles:(kube-master|kube-minion|etcd)')
 
-    # kubernetes masters (prefering non-bootstrapped nodes)
-    res.append('G@roles:kube-master and not G@bootstrap_complete:true')
-    res.append('G@roles:kube-master')
-
-    # kuberetes minions (prefering non-bootstrapped nodes)
-    res.append('G@roles:kube-minion and not G@bootstrap_complete:true')
-    res.append('G@roles:kube-minion')
+    # only-master nodes (preferring non-bootstrapped ones)
+    res.append('G@roles:kube-master and not G@roles:etcd and not 
G@bootstrap_complete:true')
+    res.append('G@roles:kube-master and not G@roles:etcd')
+
+    # kubernetes minions (preferring non-bootstrapped ones)
+    res.append('G@roles:kube-minion and not G@roles:etcd and not 
G@bootstrap_complete:true')
+    res.append('G@roles:kube-minion and not G@roles:etcd')
 
     return res
 
@@ -51,18 +65,19 @@
     '''
     res = []
 
-    # kubernetes masters that not been bootstrapped yet
-    # (ie, the role has been assigned in Velum)
+    # only-master or master+etcd nodes that not been bootstrapped yet
+    # these are the machines where the role has been assigned in Velum
+    res.append('G@roles:kube-master and not G@roles:etcd and not 
G@bootstrap_complete:true')
     res.append('G@roles:kube-master and not G@bootstrap_complete:true')
 
     if unassigned:
-        # nodes with no role (preferring non-bootstrapped nodes)
+        # nodes with no role assigned (preferring non-bootstrapped ones)
         res.append('not P@roles:(kube-master|kube-minion|etcd) and not 
G@bootstrap_complete:true')
         res.append('not P@roles:(kube-master|kube-minion|etcd)')
 
-    # etcd-only nodes (preferring non-bootstrapped nodes)
-    res.append('G@roles:etcd and not G@roles:kube-master and not 
G@bootstrap_complete:true')
-    res.append('G@roles:etcd and not G@roles:kube-master')
+    # only-etcd nodes (preferring non-bootstrapped ones)
+    res.append('G@roles:etcd and not P@roles:(kube-master|kube-minion) and not 
G@bootstrap_complete:true')
+    res.append('G@roles:etcd and not P@roles:(kube-master|kube-minion)')
     return res
 
 
@@ -77,18 +92,19 @@
     '''
     res = []
 
-    # kubernetes minions that not been bootstrapped yet
-    # (ie, the role has been assigned in Velum)
+    # only-minions or minion+etcd that not been bootstrapped yet
+    # these are the machines where the role has been assigned in Velum
+    res.append('G@roles:kube-minion and not G@roles:etcd and not 
G@bootstrap_complete:true')
     res.append('G@roles:kube-minion and not G@bootstrap_complete:true')
 
     if unassigned:
-        # nodes with no role (preferring non-bootstrapped nodes)
+        # nodes with no role assigned (preferring non-bootstrapped ones)
         res.append('not P@roles:(kube-master|kube-minion|etcd) and not 
G@bootstrap_complete:true')
         res.append('not P@roles:(kube-master|kube-minion|etcd)')
 
-    # etcd-only nodes (preferring non-bootstrapped nodes)
-    res.append('G@roles:etcd and not G@roles:kube-master and not 
G@bootstrap_complete:true')
-    res.append('G@roles:etcd and not G@roles:kube-master')
+    # only-etcd nodes (preferring non-bootstrapped ones)
+    res.append('G@roles:etcd and not P@roles:(kube-master|kube-minion) and not 
G@bootstrap_complete:true')
+    res.append('G@roles:etcd and not P@roles:(kube-master|kube-minion)')
 
     return res
 
@@ -119,30 +135,41 @@
       * `exclude_in_progress`: exclude any node with *_in_progress grains
       * `excluded`: list of nodes to exclude
       * `excluded_roles`: list of roles to exclude
+      * `excluded_grains`: list of grains to exclude
+      * `grain`: return a map of <id>:<grain> items instead of a list of <id>s
     '''
     expr_items = [expr]
 
+    grain = kwargs.get('grain', DEFAULT_GRAIN)
+
+    excluded = _sanitize_list(kwargs.get('excluded', []))
+    excluded_grains = _sanitize_list(kwargs.get('excluded_grains', []))
+    excluded_roles = _sanitize_list(kwargs.get('excluded_roles', []))
+
     if kwargs.get('booted', False):
         expr_items.append('G@bootstrap_complete:true')
 
     if kwargs.get('exclude_admin', False):
-        expr_items.append('not P@roles:(admin|ca)')
+        excluded_roles += ['admin', 'ca']
 
     if kwargs.get('exclude_in_progress', False):
-        expr_items.append('not G@bootstrap_in_progress:true')
-        expr_items.append('not G@update_in_progress:true')
-        expr_items.append('not G@node_removal_in_progress:true')
-        expr_items.append('not G@node_addition_in_progress:true')
+        excluded_grains += IN_PROGRESS_GRAINS
 
-    excluded = _sanitize_list(kwargs.get('excluded', []))
     if excluded:
         expr_items.append('not L@' + '|'.join(excluded))
 
-    excluded_roles = _sanitize_list(kwargs.get('excluded_roles', []))
+    excluded_roles = _sanitize_list(excluded_roles)
     if excluded_roles:
         expr_items.append('not P@roles:(' + '|'.join(excluded_roles) + ')')
 
-    return __salt__['caasp_grains.get'](' and '.join(expr_items)).keys()
+    excluded_grains = _sanitize_list(excluded_grains)
+    if excluded_grains:
+        expr_items += ['not G@{}:true'.format(g) for g in excluded_grains]
+
+    res = __salt__['caasp_grains.get'](' and '.join(expr_items), grain=grain)
+    res = res if ('grain' in kwargs) else res.keys()
+    debug('%s: %s', expr, res)
+    return res
 
 
 def get_from_args_or_with_expr(arg_name, args_dict, *args, **kwargs):
@@ -151,6 +178,7 @@
     or from an expression.
     '''
     if arg_name in args_dict:
+        debug('using argument "%s": %s', arg_name, args_dict[arg_name])
         return _sanitize_list(args_dict[arg_name])
     else:
         return get_with_expr(*args, **kwargs)
@@ -170,22 +198,24 @@
     new_nodes = []
     remaining = num
     for expr in prio_rules:
-        debug('trying to find candidates for %s with %s',
+        debug('trying to find candidates for "%s" with "%s"',
               description, expr)
         # get all the nodes matching the priority expression,
         # but filtering out all the nodes we already have
         candidates = get_with_expr(expr,
                                    exclude_admin=True, 
exclude_in_progress=True,
                                    **kwargs)
+        debug('... %d candidates', len(candidates))
         ids = [x for x in candidates if x not in new_nodes]
         if len(ids) > 0:
+            debug('... new candidates: %s (we need %d)', candidates, remaining)
             new_ids = ids[:remaining]
             new_nodes = new_nodes + new_ids
             remaining -= len(new_ids)
             debug('... %d new candidates (%s) for %s: %d remaining',
                   len(ids), str(ids), description, remaining, )
         else:
-            debug('... no candidates found with %s', expr)
+            debug('... no new candidates found with "%s"', expr)
 
         if remaining <= 0:
             break
@@ -195,14 +225,13 @@
     return new_nodes[:num]
 
 
-def get_with_prio_for_role(num, role, **kwargs):
-    unassigned = kwargs.get('unassigned', False)
+def get_with_prio_for_role(num, role, unassigned=USE_UNASSIGNED, **kwargs):
     prio_rules = _PRIO_FUN[role](unassigned)
     return get_with_prio(num, role, prio_rules, **kwargs)
 
 
 def _get_one_for_role(role, **kwargs):
-    res = get_with_prio_for_role(1, role, unassigned=True, **kwargs)
+    res = get_with_prio_for_role(1, role, unassigned=USE_UNASSIGNED, **kwargs)
     return res[0] if len(res) > 0 else ''
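To see what the reworked get_with_expr() actually hands to caasp_grains.get, here is a minimal standalone sketch of the expression assembly. It mirrors the hunk above but leaves out the __salt__ call, and _dedupe is only an assumed stand-in for _sanitize_list, which is not part of this diff:

IN_PROGRESS_GRAINS = ['bootstrap_in_progress',
                      'update_in_progress',
                      'node_removal_in_progress',
                      'node_addition_in_progress']


def _dedupe(lst):
    # assumed stand-in for _sanitize_list: drop empties and duplicates
    seen, out = set(), []
    for item in lst:
        if item and item not in seen:
            seen.add(item)
            out.append(item)
    return out


def build_expr(expr, **kwargs):
    '''Return the compound matcher string for the given keyword arguments.'''
    expr_items = [expr]
    excluded = _dedupe(kwargs.get('excluded', []))
    excluded_roles = _dedupe(kwargs.get('excluded_roles', []))
    excluded_grains = _dedupe(kwargs.get('excluded_grains', []))

    if kwargs.get('booted', False):
        expr_items.append('G@bootstrap_complete:true')
    if kwargs.get('exclude_admin', False):
        excluded_roles += ['admin', 'ca']
    if kwargs.get('exclude_in_progress', False):
        excluded_grains += IN_PROGRESS_GRAINS

    if excluded:
        expr_items.append('not L@' + '|'.join(excluded))
    if excluded_roles:
        expr_items.append('not P@roles:(' + '|'.join(_dedupe(excluded_roles)) + ')')
    expr_items += ['not G@{}:true'.format(g) for g in _dedupe(excluded_grains)]
    return ' and '.join(expr_items)


if __name__ == '__main__':
    # the expression the new haproxy template effectively requests:
    # masters that are not flagged with node_removal_in_progress
    print(build_expr('G@roles:kube-master',
                     excluded_grains=['node_removal_in_progress']))
    # -> G@roles:kube-master and not G@node_removal_in_progress:true
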
 
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/_modules/tests/test_caasp_nodes.py 
new/salt-master/salt/_modules/tests/test_caasp_nodes.py
--- old/salt-master/salt/_modules/tests/test_caasp_nodes.py     2018-05-25 
11:42:05.000000000 +0200
+++ new/salt-master/salt/_modules/tests/test_caasp_nodes.py     2018-06-01 
16:37:28.000000000 +0200
@@ -163,17 +163,18 @@
         Check the user-provided etcd & minion replacement is valid,
         at least for some roles
         '''
+        kwargs = self.get_replacement_for_kwargs
+
         # add one of the minions to the etcd cluster
         etcd_members = [self.master_1, self.master_2, self.minion_1]
-
-        self.get_replacement_for_kwargs.update({
+        kwargs.update({
             'etcd_members': etcd_members,
             'booted_etcd_members': etcd_members,
         })
 
         replacement, roles = get_replacement_for(self.minion_1,
                                                  replacement=self.other_node,
-                                                 
**self.get_replacement_for_kwargs)
+                                                 **kwargs)
 
         # when removing minion_1 (with roles minion and etcd), we can migrate
         # both roles to a free node
@@ -191,7 +192,7 @@
         with self.assertRaises(ExecutionAborted):
             replacement, roles = get_replacement_for(self.minion_1,
                                                      replacement=self.minion_3,
-                                                     
**self.get_replacement_for_kwargs)
+                                                     **kwargs)
 
     def test_user_provided_for_minion(self):
         '''
@@ -286,6 +287,15 @@
             self.assertNotIn('kube-master', roles,
                              'kube-master role not found in replacement')
 
+        with patch('caasp_nodes._get_one_for_role', 
MagicMock(return_value=self.master_3)):
+
+            replacement, roles = get_replacement_for(self.master_2,
+                                                     
**self.get_replacement_for_kwargs)
+
+            # we can not migrate master_2 to master_3: it is already a master 
and a etcd server
+            self.assertEqual(replacement, '',
+                             'unexpected replacement ' + replacement)
+
 
 class TestGetExprAffectedBy(unittest.TestCase):
     '''
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/_states/caasp_cmd.py 
new/salt-master/salt/_states/caasp_cmd.py
--- old/salt-master/salt/_states/caasp_cmd.py   2018-05-25 11:42:05.000000000 
+0200
+++ new/salt-master/salt/_states/caasp_cmd.py   2018-06-01 16:37:28.000000000 
+0200
@@ -1,7 +1,6 @@
 from __future__ import absolute_import
 
 import time
-from salt.ext.six.moves import range
 
 
 def run(name,
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/_states/caasp_cri.py 
new/salt-master/salt/_states/caasp_cri.py
--- old/salt-master/salt/_states/caasp_cri.py   2018-05-25 11:42:05.000000000 
+0200
+++ new/salt-master/salt/_states/caasp_cri.py   2018-06-01 16:37:28.000000000 
+0200
@@ -1,4 +1,5 @@
 from __future__ import absolute_import
+
 from caasp_log import debug
 
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/_states/caasp_http.py 
new/salt-master/salt/_states/caasp_http.py
--- old/salt-master/salt/_states/caasp_http.py  2018-05-25 11:42:05.000000000 
+0200
+++ new/salt-master/salt/_states/caasp_http.py  2018-06-01 16:37:28.000000000 
+0200
@@ -17,8 +17,8 @@
 
 # Import python libs
 from __future__ import absolute_import
-import re
 
+import re
 import time
 
 __monitor__ = ['query']
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/_states/caasp_retriable.py 
new/salt-master/salt/_states/caasp_retriable.py
--- old/salt-master/salt/_states/caasp_retriable.py     2018-05-25 
11:42:05.000000000 +0200
+++ new/salt-master/salt/_states/caasp_retriable.py     2018-06-01 
16:37:28.000000000 +0200
@@ -1,6 +1,6 @@
 from __future__ import absolute_import
+
 import time
-from salt.ext.six.moves import range
 
 
 def retry(name, target, retry={}, **kwargs):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/_states/caasp_service.py 
new/salt-master/salt/_states/caasp_service.py
--- old/salt-master/salt/_states/caasp_service.py       2018-05-25 
11:42:05.000000000 +0200
+++ new/salt-master/salt/_states/caasp_service.py       2018-06-01 
16:37:28.000000000 +0200
@@ -1,6 +1,6 @@
 from __future__ import absolute_import
+
 import time
-from salt.ext.six.moves import range
 
 
 def running_stable(name, enable=None, sig=None, init_delay=None, 
successful_retries_in_a_row=50,
@@ -66,17 +66,20 @@
             current_successful_retries_in_a_row = 0
 
         latest_pid = pid
-        max_current_successful_retries_in_a_row = 
max(max_current_successful_retries_in_a_row, 
current_successful_retries_in_a_row)
+        max_current_successful_retries_in_a_row = max(
+            max_current_successful_retries_in_a_row, 
current_successful_retries_in_a_row)
 
         if current_successful_retries_in_a_row == successful_retries_in_a_row:
             ret['result'] = True
-            ret['comment'] = 'Service {0} is up after {1} total retries. 
Including {2} retries in a row'.format(name, retry + 1, 
successful_retries_in_a_row)
+            ret['comment'] = 'Service {0} is up after {1} total retries. 
Including {2} retries in a row'.format(
+                name, retry + 1, successful_retries_in_a_row)
             break
 
         if delay_between_retries:
             time.sleep(delay_between_retries)
 
     if not ret['result']:
-        ret['comment'] = 'Service {0} is dead after {1} total retries. 
Expected {2} success in a row, got {3} as maximum'.format(name, max_retries, 
successful_retries_in_a_row, max_current_successful_retries_in_a_row)
+        ret['comment'] = 'Service {0} is dead after {1} total retries. 
Expected {2} success in a row, got {3} as maximum'.format(
+            name, max_retries, successful_retries_in_a_row, 
max_current_successful_retries_in_a_row)
 
     return ret
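Since running_stable() now relies on the builtin range() (the salt.ext.six.moves import was dropped above), the counting logic is plain Python. A minimal standalone sketch of the "N successful checks in a row" loop it implements, with check() standing in for the real service/PID probing that is not shown in this hunk:

import time

def wait_until_stable(check, successful_retries_in_a_row=5,
                      max_retries=20, delay_between_retries=0.1):
    in_a_row = 0
    for retry in range(max_retries):
        # reset the streak on any failed check, otherwise extend it
        in_a_row = in_a_row + 1 if check() else 0
        if in_a_row == successful_retries_in_a_row:
            return 'up after {0} total retries, {1} in a row'.format(
                retry + 1, successful_retries_in_a_row)
        if delay_between_retries:
            time.sleep(delay_between_retries)
    return 'dead after {0} total retries'.format(max_retries)


if __name__ == '__main__':
    print(wait_until_stable(lambda: True))
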
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/etc-hosts/hosts.jinja 
new/salt-master/salt/etc-hosts/hosts.jinja
--- old/salt-master/salt/etc-hosts/hosts.jinja  2018-05-25 11:42:05.000000000 
+0200
+++ new/salt-master/salt/etc-hosts/hosts.jinja  2018-06-01 16:37:28.000000000 
+0200
@@ -9,7 +9,16 @@
 {%- endif -%}
 
 {%- macro nodes_entries(expr) -%}
-  {%- set nodes = salt['mine.get'](expr, 'network.interfaces', 'compound') %}
+  {# note regarding node removals:
+   # we need the "node_(addition|removal)_in_progress" nodes here, otherwise
+   #   - nodes being removed will be immediately banned from the cluster (with 
a message like:
+   #    'rejected connection from <NODE> (error tls: <NODE-IP> does not match 
any of DNSNames [...]')
+   #    and the cluster will become unhealthy
+   #   - nodes being added will not be able to join (with some similar TLS 
verification error)
+   # doing another /etc/hosts update just for one stale entry seem like an 
overkill,
+   # so the /etc/hosts cleanup will have to be delayed for some other moment...
+   #}
+  {%- set nodes = salt.caasp_nodes.get_with_expr(expr, 
grain='network.interfaces') %}
   {%- for id, ifaces in nodes.items() %}
     {%- set ip = salt.caasp_net.get_primary_ip(host=id, ifaces=ifaces) %}
     {%- if ip|length > 0 %}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/etcd/etcd.conf.jinja 
new/salt-master/salt/etcd/etcd.conf.jinja
--- old/salt-master/salt/etcd/etcd.conf.jinja   2018-05-25 11:42:05.000000000 
+0200
+++ new/salt-master/salt/etcd/etcd.conf.jinja   2018-06-01 16:37:28.000000000 
+0200
@@ -24,6 +24,7 @@
 ETCD_PEER_CLIENT_CERT_AUTH="true"
 # ETCD_PEER_AUTO_TLS=on
 
+{# note on node removal: we cannot skip nodes with node_removal_in_progress #}
 ETCD_INITIAL_CLUSTER="{{ salt.caasp_etcd.get_endpoints(with_id=True, 
port=2380) }}"
 ETCD_INITIAL_CLUSTER_TOKEN="{{ pillar['etcd']['token'] }}"
 ETCD_INITIAL_ADVERTISE_PEER_URLS="https://{{ this_addr }}:2380"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/haproxy/haproxy.cfg.jinja 
new/salt-master/salt/haproxy/haproxy.cfg.jinja
--- old/salt-master/salt/haproxy/haproxy.cfg.jinja      2018-05-25 
11:42:05.000000000 +0200
+++ new/salt-master/salt/haproxy/haproxy.cfg.jinja      2018-06-01 
16:37:28.000000000 +0200
@@ -1,8 +1,15 @@
-{%- if "kube-master" in salt['grains.get']('roles', []) -%}
-{%- set bind_ip = "0.0.0.0" -%}
-{%- else -%}
-{%- set bind_ip = "127.0.0.1" -%}
-{%- endif -%}
+{%- set this_roles = salt['grains.get']('roles', [])%}
+
+{%- set bind_ip = "0.0.0.0" if "kube-master" in this_roles else "127.0.0.1" -%}
+
+{%- set masters = salt.caasp_nodes.get_with_expr('G@roles:kube-master',
+                                                 
excluded_grains=['node_removal_in_progress'],
+                                                 grain='nodename') %}
+{% if not masters %}
+  {# fail early instead of generating a config file that is useless... #}
+  {% do salt.caasp_log.abort('No masters found when calculating backends for 
haproxy') %}
+{% endif %}
+
 global
         log /dev/log    local0
         log /dev/log    local1 notice
@@ -42,7 +49,7 @@
 backend default-backend
         option forwardfor
         option httpchk GET /healthz
-{% for minion_id, nodename in salt['mine.get']('roles:kube-master', 
'nodename', 'grain').items() %}
+{% for id, nodename in masters.items() %}
         server master-{{ nodename }} {{ nodename }}.{{ 
pillar['internal_infra_domain'] }}:{{ pillar['api']['int_ssl_port'] }} ssl crt 
{{ pillar['ssl']['kube_apiserver_proxy_bundle'] }} ca-file /etc/pki/ca.crt 
check check-ssl port {{ pillar['api']['int_ssl_port'] }} verify required
 {%- endfor %}
 
@@ -53,12 +60,12 @@
         option httpchk GET /healthz
         timeout server 0
         timeout tunnel 0
-{% for minion_id, nodename in salt['mine.get']('roles:kube-master', 
'nodename', 'grain').items() %}
+{% for id, nodename in masters.items() %}
         server master-{{ nodename }} {{ nodename }}.{{ 
pillar['internal_infra_domain'] }}:{{ pillar['api']['int_ssl_port'] }} ssl crt 
{{ pillar['ssl']['kube_apiserver_proxy_bundle'] }} ca-file /etc/pki/ca.crt 
check check-ssl port {{ pillar['api']['int_ssl_port'] }} verify required
 {%- endfor %}
 
 
-{% if "admin" in salt['grains.get']('roles', []) %}
+{% if "admin" in this_roles %}
 # Velum should be able to access Kube API and Dex service as well to get 
kubeconfig
 listen kubernetes-dex
         bind {{ bind_ip }}:{{ pillar['dex']['node_port'] }}
@@ -67,7 +74,7 @@
         balance roundrobin
         option redispatch
         option httpchk GET /healthz
-{% for minion_id, nodename in salt['mine.get']('roles:kube-master', 
'nodename', 'grain').items() %}
+{% for id, nodename in masters.items() %}
         server master-{{ nodename }} {{ nodename }}.{{ 
pillar['internal_infra_domain'] }}:{{ pillar['dex']['node_port'] }} check 
check-ssl port {{ pillar['dex']['node_port'] }} verify none
 {%- endfor %}
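The loops above now iterate over the map returned by caasp_nodes.get_with_expr(..., grain='nodename'): minion id to nodename, with node_removal_in_progress masters already filtered out, and the template aborts early when that map is empty. A rough standalone illustration of the data shape and the guard, with invented ids, nodenames, domain and port (the real template takes these from pillar):

masters = {
    'a1b2c3d4': 'master-0',
    'e5f6a7b8': 'master-1',
}

if not masters:
    # mirror the template's early abort instead of writing a backend-less config
    raise RuntimeError('No masters found when calculating backends for haproxy')

for _id, nodename in masters.items():
    # roughly what each rendered "server" line is built from
    print('server master-{0} {0}.infra.caasp.local:6443 check'.format(nodename))
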
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/orch/removal.sls 
new/salt-master/salt/orch/removal.sls
--- old/salt-master/salt/orch/removal.sls       2018-05-25 11:42:05.000000000 
+0200
+++ new/salt-master/salt/orch/removal.sls       2018-06-01 16:37:28.000000000 
+0200
@@ -325,8 +325,8 @@
 # remove the we-are-removing-some-node grain in the cluster
 remove-cluster-wide-removal-grain:
   salt.function:
-    - tgt: '{{ all_responsive_nodes_tgt }}'
-    - tgt_type: compound
+    - tgt: 'removal_in_progress:true'
+    - tgt_type: grain
     - name: grains.delval
     - arg:
       - removal_in_progress

