Hello community,

here is the log from the commit of package kubernetes-salt for openSUSE:Factory 
checked in at 2018-05-04 11:30:49
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/kubernetes-salt (Old)
 and      /work/SRC/openSUSE:Factory/.kubernetes-salt.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "kubernetes-salt"

Fri May  4 11:30:49 2018 rev:16 rq:603698 version:3.0.0+git_r750_8f19e53

Changes:
--------
--- /work/SRC/openSUSE:Factory/kubernetes-salt/kubernetes-salt.changes  2018-04-27 16:10:20.752038484 +0200
+++ /work/SRC/openSUSE:Factory/.kubernetes-salt.new/kubernetes-salt.changes     2018-05-04 11:30:54.696658103 +0200
@@ -1,0 +2,45 @@
+Thu May  3 10:02:57 UTC 2018 - containers-bugow...@suse.de
+
+- Commit 0294ed9 by Alvaro Saurin alvaro.sau...@gmail.com
+ Do not try to use the mine when we can get the same information with a
+ module.
+ 
+ (cherry picked from commit dfd3b8a6a65c7d969466b09a1f20536a525ae42a)
+ 
+ bsc#1091077
+
+
+-------------------------------------------------------------------
+Wed May  2 11:57:18 UTC 2018 - containers-bugow...@suse.de
+
+- Commit 17e9533 by Kiall Mac Innes ki...@macinnes.ie
+ Harden the waiting for CRI socket to become active
+ 
+ * Allow more time for the CRI socket to become active - 20 seconds
+ * Explicitly fail if the socket does not become active within this
+ time.
+ 
+ Related to bsc#1091419
+
+
+-------------------------------------------------------------------
+Sun Apr 29 13:31:42 UTC 2018 - containers-bugow...@suse.de
+
+- Commit c03b41d by Alvaro Saurin alvaro.sau...@gmail.com
+ Retry the `wait_for_http` when waiting for the API server. Use the same
+ cleanup.post-orchestration that the forced removal uses. Some other removal
+ orchestration fixes and improvements.
+ 
+ feature#node_removal
+
+
+-------------------------------------------------------------------
+Fri Apr 27 15:15:36 UTC 2018 - containers-bugow...@suse.de
+
+- Commit 03242db by Kiall Mac Innes ki...@macinnes.ie
+ Fix caasp_etcd.get_member_id error handling
+ 
+ caasp_etcd.get_member_id was referencing a variable that doesn't exist.
+
+
+-------------------------------------------------------------------

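The first entry above (commit 0294ed9) and the caasp_net.py hunk further down replace mine
lookups with direct execution-module calls. A minimal sketch of the local-vs-remote pattern,
not a copy of the patch (the function name is illustrative; __salt__ is the dunder that
Salt's loader injects into execution modules at runtime):

# Sketch only: prefer a direct module call over a mine lookup.
# Meant to run inside a custom execution module (e.g. caasp_net.py);
# __salt__ is provided by the Salt loader, not importable standalone.
def primary_iface(host=None):
    '''Return the name of the interface that carries the default route.'''
    if not host:
        # local minion: ask the network module directly, no mine involved
        return __salt__['network.default_route']()[0]['interface']
    # remote minion: look the value up via the caasp_grains helper,
    # as the patched caasp_net.py does
    all_routes = __salt__['caasp_grains.get'](host, 'network.default_route',
                                              type='glob')
    return all_routes[host][0]['interface']
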
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ kubernetes-salt.spec ++++++
--- /var/tmp/diff_new_pack.CtkU2t/_old  2018-05-04 11:30:55.264637263 +0200
+++ /var/tmp/diff_new_pack.CtkU2t/_new  2018-05-04 11:30:55.268637117 +0200
@@ -32,7 +32,7 @@
 
 Name:           kubernetes-salt
 %define gitrepo salt
-Version:        3.0.0+git_r742_8508870
+Version:        3.0.0+git_r750_8f19e53
 Release:        0
 BuildArch:      noarch
 Summary:        Production-Grade Container Scheduling and Management

++++++ master.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/pillar/cri.sls new/salt-master/pillar/cri.sls
--- old/salt-master/pillar/cri.sls      2018-04-26 11:56:10.000000000 +0200
+++ new/salt-master/pillar/cri.sls      2018-05-03 12:03:26.000000000 +0200
@@ -1,6 +1,6 @@
 cri:
   chosen: 'docker'
-  socket_timeout: 10
+  socket_timeout: 20
   docker:
     description: Docker open-source container engine
     package: docker
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/_modules/caasp_cri.py new/salt-master/salt/_modules/caasp_cri.py
--- old/salt-master/salt/_modules/caasp_cri.py  2018-04-26 11:56:10.000000000 +0200
+++ new/salt-master/salt/_modules/caasp_cri.py  2018-05-03 12:03:26.000000000 +0200
@@ -199,7 +199,7 @@
     '''
 
     socket = cri_runtime_endpoint()
-    timeout = int(__salt__['pillar.get']('cri:socket_timeout', '10'))
+    timeout = int(__salt__['pillar.get']('cri:socket_timeout', '20'))
     expire = time.time() + timeout
 
     while time.time() < expire:
@@ -207,6 +207,11 @@
             return
         time.sleep(0.3)
 
+    raise CommandExecutionError(
+        'CRI socket did not become ready',
+        info={'errors': ['CRI socket did not become ready']}
+    )
+
 
 def needs_docker():
     '''
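
To make the caasp_cri.py change above easier to follow: the pillar value cri:socket_timeout
(now defaulting to 20 seconds) bounds a polling loop, and the new block raises instead of
falling through silently when the socket never comes up. A minimal sketch of that pattern,
assuming a hypothetical _socket_ready() check in place of whatever test caasp_cri.py
actually performs:

# Sketch only, not the actual caasp_cri.py code.
import time

from salt.exceptions import CommandExecutionError


def wait_for_socket(socket_path, timeout=20):
    '''Poll until the CRI socket is usable, or fail explicitly.'''
    expire = time.time() + timeout
    while time.time() < expire:
        if _socket_ready(socket_path):   # hypothetical readiness check
            return
        time.sleep(0.3)
    # hardened behaviour: an explicit failure instead of a silent return
    raise CommandExecutionError(
        'CRI socket did not become ready',
        info={'errors': ['CRI socket did not become ready']}
    )
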
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/_modules/caasp_etcd.py new/salt-master/salt/_modules/caasp_etcd.py
--- old/salt-master/salt/_modules/caasp_etcd.py 2018-04-26 11:56:10.000000000 +0200
+++ new/salt-master/salt/_modules/caasp_etcd.py 2018-05-03 12:03:26.000000000 +0200
@@ -222,7 +222,7 @@
                 return member_line.split(':')[0]
 
     except Exception as e:
-        error('cannot get member ID for "%s": %s', e, this_nodename)
+        error('cannot get member ID for "%s": %s', e, target_nodename)
         error('output: %s', members_output)
 
     return ''
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/_modules/caasp_net.py new/salt-master/salt/_modules/caasp_net.py
--- old/salt-master/salt/_modules/caasp_net.py  2018-04-26 11:56:10.000000000 +0200
+++ new/salt-master/salt/_modules/caasp_net.py  2018-05-03 12:03:26.000000000 +0200
@@ -10,38 +10,41 @@
     return "caasp_net"
 
 
-def _get_local_id():
-    return __salt__['grains.get']('id')
-
-
-def get_iface_ip(iface, **kwargs):
+def get_iface_ip(iface, host=None, ifaces=None):
     '''
     given an 'iface' (and an optional 'host' and list of 'ifaces'),
     return the IP address associated with 'iface'
     '''
-    host = kwargs.pop('host', _get_local_id())
-    all_ifaces = __salt__['caasp_grains.get'](host, 'network.interfaces', type='glob')
-    ifaces = kwargs.pop('ifaces', all_ifaces[host])
+    if not ifaces:
+        if not host or host == get_nodename():
+            ifaces = __salt__['network.interfaces']()
+        else:
+            ifaces = __salt__['caasp_grains.get'](host, 'network.interfaces', type='glob')
+
+    iface = ifaces.get(iface)
+    ipv4addr = iface.get('inet', [{}])
+    return ipv4addr[0].get('address')
 
-    return ifaces.get(iface).get('inet', [{}])[0].get('address')
 
-
-def get_primary_iface(**kwargs):
+def get_primary_iface(host=None):
     '''
     (given some optional 'host')
     return the name of the primary iface (the iface associated with the default route)
     '''
-    host = kwargs.pop('host', _get_local_id())
-    all_routes = __salt__['caasp_grains.get'](host, 'network.default_route', type='glob')
-    return all_routes[host][0]['interface']
+    if not host or host == get_nodename():
+        default_route_lst = __salt__['network.default_route']()
+        return default_route_lst[0]['interface']
+    else:
+        all_routes = __salt__['caasp_grains.get'](host, 'network.default_route', type='glob')
+        return all_routes[host][0]['interface']
 
 
-def get_primary_ip(**kwargs):
+def get_primary_ip(host=None, ifaces=None):
     '''
     (given an optional minion 'host' and a list of its network interfaces, 'ifaces'),
     return the primary IP
     '''
-    return get_iface_ip(get_primary_iface(**kwargs), **kwargs)
+    return get_iface_ip(iface=get_primary_iface(host=host), host=host, ifaces=ifaces)
 
 
 def get_primary_ips_for(compound, **kwargs):
@@ -51,19 +54,15 @@
     '''
     res = []
     all_ifaces = __salt__['caasp_grains.get'](compound, 'network.interfaces')
-    for host in all_ifaces.keys():
-        res.append(get_primary_ip(host=host, **kwargs))
-    return res
+    return [get_primary_ip(host=host, **kwargs) for host in all_ifaces.keys()]
 
 
-def get_nodename(**kwargs):
+def get_nodename(host=None, **kwargs):
     '''
     (given some optional 'host')
     return the `nodename`
     '''
-    _not_provided = object()
-    host = kwargs.pop('host', _not_provided)
-    if host is _not_provided:
+    if not host:
         assert __opts__['__role'] != 'master'
         return __salt__['grains.get']('nodename')
     else:
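
For reference on the caasp_net.py hunk above: get_iface_ip() now walks the structure
returned by network.interfaces and takes the first IPv4 address. A small illustration with
made-up values (only the structure matches what a minion reports):

# Made-up data, same shape as __salt__['network.interfaces']() output.
ifaces = {
    'eth0': {
        'up': True,
        'inet': [{'address': '192.168.0.10',
                  'netmask': '255.255.255.0',
                  'broadcast': '192.168.0.255'}],
    },
}

iface = ifaces.get('eth0')
ipv4addr = iface.get('inet', [{}])      # list of IPv4 entries, may be empty
print(ipv4addr[0].get('address'))       # -> 192.168.0.10
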
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/_modules/caasp_nodes.py new/salt-master/salt/_modules/caasp_nodes.py
--- old/salt-master/salt/_modules/caasp_nodes.py        2018-04-26 11:56:10.000000000 +0200
+++ new/salt-master/salt/_modules/caasp_nodes.py        2018-05-03 12:03:26.000000000 +0200
@@ -467,6 +467,9 @@
     affected_roles.sort()
     affected_items.append('P@roles:(' + '|'.join(affected_roles) + ')')
 
+    # exclude some roles
+    affected_items.append('not G@roles:ca')
+
     if kwargs.get('exclude_in_progress', True):
         affected_items.append('not G@bootstrap_in_progress:true')
         affected_items.append('not G@update_in_progress:true')
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/cleanup/remove-post-orchestration.sls new/salt-master/salt/cleanup/remove-post-orchestration.sls
--- old/salt-master/salt/cleanup/remove-post-orchestration.sls  2018-04-26 11:56:10.000000000 +0200
+++ new/salt-master/salt/cleanup/remove-post-orchestration.sls  2018-05-03 12:03:26.000000000 +0200
@@ -1,6 +1,3 @@
-include:
-  - kubectl-config
-
 {%- set target = salt.caasp_pillar.get('target') %}
 {%- set forced = salt.caasp_pillar.get('forced', False) %}
 
@@ -10,9 +7,12 @@
 # k8s cluster
 ###############
 
-{%- set k8s_nodes = salt.caasp_nodes.get_with_expr('G@roles:kube-master', booted=True) %}
+{%- set k8s_nodes = salt.caasp_nodes.get_with_expr('P@roles:(kube-master|kube-minion)', booted=True) %}
 {%- if forced or target in k8s_nodes %}
 
+include:
+  - kubectl-config
+
 {%- from '_macros/kubectl.jinja' import kubectl with context %}
 
 {{ kubectl("remove-node",
@@ -32,3 +32,11 @@
   - nodename: {{ nodename }}
 
 {%- endif %}
+
+
+{%- if not (forced or target in k8s_nodes + etcd_members) %}
+{# Make sure we do not generate an empty file if target is not an etcd/master #}
+dummy_step:
+  cmd.run:
+    - name: "echo saltstack bug 14553"
+{%- endif %}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/kube-apiserver/init.sls new/salt-master/salt/kube-apiserver/init.sls
--- old/salt-master/salt/kube-apiserver/init.sls        2018-04-26 11:56:10.000000000 +0200
+++ new/salt-master/salt/kube-apiserver/init.sls        2018-05-03 12:03:26.000000000 +0200
@@ -53,27 +53,29 @@
       - sls:             ca-cert
       - caasp_retriable: {{ pillar['ssl']['kube_apiserver_crt'] }}
       - x509:            {{ pillar['paths']['service_account_key'] }}
-  # wait until the API server is actually up and running
-  http.wait_for_successful_query:
-    {% set api_server = "api." + pillar['internal_infra_domain']  -%}
-    {% set api_ssl_port = pillar['api']['int_ssl_port'] -%}
-    - name:       {{ 'https://' + api_server + ':' + api_ssl_port }}/healthz
-    - wait_for:   300
-    - ca_bundle:  {{ pillar['ssl']['ca_file'] }}
-    - status:     200
-    - watch:
-      - service:  kube-apiserver
 
-# Wait for the kube-apiserver to be answering on any location. Even if our local instance is already
-# up, it could happen that HAProxy did not yet realize it's up, so let's wait until HAProxy agrees
-# with us.
-kube-apiserver-up:
-  http.wait_for_successful_query:
-    {% set api_server = "api." + pillar['internal_infra_domain']  -%}
-    {% set api_ssl_port = pillar['api']['ssl_port'] -%}
-    - name:       {{ 'https://' + api_server + ':' + api_ssl_port }}/healthz
+#
+# Wait for (in order)
+# 1. the local ("internal") API server
+# 2. the API-through-haproxy, to be answering on any location. Even if our
+#    local instance is already up, it could happen that HAProxy did not
+#    yet realize it's up, so let's wait until HAProxy agrees with us.
+#
+{%- set api_server = 'api.' + pillar['internal_infra_domain'] %}
+
+{%- for port in ['int_ssl_port', 'ssl_port'] %}
+
+kube-apiserver-wait-port-{{ port }}:
+  caasp_retriable.retry:
+    - target:     http.wait_for_successful_query
+    - name:       {{ 'https://' + api_server + ':' + pillar['api'][port] }}/healthz
     - wait_for:   300
+    # retry just in case the API server returns a transient error
+    - retry:
+        attempts: 3
     - ca_bundle:  {{ pillar['ssl']['ca_file'] }}
     - status:     200
     - watch:
       - service:  kube-apiserver
+
+{% endfor %}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/orch/removal.sls new/salt-master/salt/orch/removal.sls
--- old/salt-master/salt/orch/removal.sls       2018-04-26 11:56:10.000000000 +0200
+++ new/salt-master/salt/orch/removal.sls       2018-05-03 12:03:26.000000000 +0200
@@ -8,14 +8,14 @@
 {#- This sends a "are you still there?" message to all #}
 {#- the nodes and wait for a response, so it takes some time. #}
 {#- Hopefully this list will not be too long... #}
+{%- set all_responsive_nodes_tgt = 'not G@roles:ca' %}
+
 {%- set nodes_down = salt.saltutil.runner('manage.down') %}
 {%- if not nodes_down %}
   {%- do salt.caasp_log.debug('all nodes seem to be up') %}
-  {%- set all_responsive_nodes_tgt = 'P@roles:(etcd|kube-master|kube-minion)' %}
 {%- else %}
  {%- do salt.caasp_log.debug('nodes "%s" seem to be down', nodes_down|join(',')) %}
-  {%- set all_responsive_nodes_tgt = 'not L@' + nodes_down|join(',')
-                                   + ' and P@roles:(etcd|kube-master|kube-minion)' %}
+  {%- set all_responsive_nodes_tgt = all_responsive_nodes_tgt + ' and not L@' + nodes_down|join(',') %}
 
   {%- if target in nodes_down %}
    {%- do salt.caasp_log.abort('target is unresponsive, forced removal must be used') %}
@@ -44,7 +44,7 @@
 # This will ensure the update-etc-hosts orchestration is not run.
 set-cluster-wide-removal-grain:
   salt.function:
-    - tgt: 'P@roles:(kube-master|kube-minion|etcd)'
+    - tgt: '{{ all_responsive_nodes_tgt }}'
     - tgt_type: compound
     - name: grains.setval
     - arg:
@@ -55,7 +55,7 @@
 # (ie, expired certs produce really funny errors)
 update-config:
   salt.state:
-    - tgt: '{{ all_responsive_nodes_tgt }}'
+    - tgt: 'P@roles:(kube-master|kube-minion|etcd) and {{ all_responsive_nodes_tgt }}'
     - tgt_type: compound
     - sls:
       - etc-hosts
@@ -70,7 +70,7 @@
 
 assign-removal-grain:
   salt.function:
-    - tgt: {{ target }}
+    - tgt: '{{ target }}'
     - name: grains.setval
     - arg:
       - node_removal_in_progress
@@ -114,7 +114,6 @@
       - saltutil.refresh_pillar
       - saltutil.refresh_grains
       - mine.update
-      - saltutil.sync_all
     - require:
       - update-config
       - assign-removal-grain
@@ -122,6 +121,16 @@
       - assign-{{ role }}-role-to-replacement
   {%- endfor %}
 
+update-modules:
+  salt.function:
+    - tgt: '{{ all_responsive_nodes_tgt }}'
+    - tgt_type: compound
+    - name: saltutil.sync_all
+    - kwarg:
+        refresh: True
+    - require:
+      - sync-all
+
 {##############################
  # replacement setup
  #############################}
@@ -133,11 +142,11 @@
     - tgt: '{{ replacement }}'
     - highstate: True
     - require:
-      - sync-all
+      - update-modules
 
 kubelet-setup:
   salt.state:
-    - tgt: {{ replacement }}
+    - tgt: '{{ replacement }}'
     - sls:
       - kubelet.configure-taints
       - kubelet.configure-labels
@@ -178,7 +187,7 @@
 
 stop-services-in-target:
   salt.state:
-    - tgt: {{ target }}
+    - tgt: '{{ target }}'
     - sls:
       - container-feeder.stop
   {%- if target in masters %}
@@ -193,15 +202,15 @@
       - etcd.stop
   {%- endif %}
     - require:
-      - sync-all
-    {%- if replacement %}
+      - update-modules
+  {%- if replacement %}
       - remove-addition-grain
-    {%- endif %}
+  {%- endif %}
 
 # remove any other configuration in the machines
 cleanups-in-target-before-rebooting:
   salt.state:
-    - tgt: {{ target }}
+    - tgt: '{{ target }}'
     - sls:
   {%- if target in masters %}
       - kube-apiserver.remove-pre-reboot
@@ -223,7 +232,7 @@
 # shutdown the node
 shutdown-target:
   salt.function:
-    - tgt: {{ target }}
+    - tgt: '{{ target }}'
     - name: cmd.run
     - arg:
       - sleep 15; systemctl poweroff
@@ -243,14 +252,9 @@
     - sls:
       - cleanup.remove-post-orchestration
     - require:
-      - sync-all
       - shutdown-target
-    {%- if replacement %}
-      - remove-addition-grain
-    {%- endif %}
 
-# remove the Salt key
-# (it will appear as "unaccepted")
+# remove the Salt key and the mine for the target
 remove-target-salt-key:
   salt.wheel:
     - name: key.reject
@@ -277,12 +281,14 @@
 # the etcd server we have just removed (but they would
 # keep working fine as long as we had >1 etcd servers)
 
-{%- set affected_tgt = salt.caasp_nodes.get_expr_affected_by(target,
-                                                             excluded=[replacement] + nodes_down,
-                                                             masters=masters,
-                                                             minions=minions,
-                                                             etcd_members=etcd_members) %}
-{%- do salt.caasp_log.debug('will high-state machines affected by removal: "%s"', affected_tgt) %}
+{%- set affected_expr = salt.caasp_nodes.get_expr_affected_by(target,
+                                                              excluded=[replacement] + nodes_down,
+                                                              masters=masters,
+                                                              minions=minions,
+                                                              etcd_members=etcd_members) %}
+
+{%- if affected_expr %}
+  {%- do salt.caasp_log.debug('will high-state machines affected by removal: %s', affected_expr) %}
 
 # make sure the cluster has up-to-date state
 sync-after-removal:
@@ -295,19 +301,32 @@
     - require:
       - remove-target-mine
 
+update-modules-after-removal:
+  salt.function:
+    - tgt: '{{ all_responsive_nodes_tgt }}'
+    - tgt_type: compound
+    - name: saltutil.sync_all
+    - kwarg:
+        refresh: True
+    - require:
+      - sync-after-removal
+
 highstate-affected:
   salt.state:
-    - tgt: '{{ affected_tgt }}'
+    - tgt: '{{ affected_expr }}'
     - tgt_type: compound
     - highstate: True
     - batch: 1
     - require:
-      - sync-after-removal
+      - update-modules-after-removal
+
+{%- endif %} {# affected_expr #}
 
 # remove the we-are-removing-some-node grain in the cluster
 remove-cluster-wide-removal-grain:
   salt.function:
-    - tgt: 'P@roles:(kube-master|kube-minion|etcd)'
+    - tgt: '{{ all_responsive_nodes_tgt }}'
+    - tgt_type: compound
     - name: grains.delval
     - arg:
       - removal_in_progress

