Hello community,

here is the log from the commit of package kubernetes-salt for openSUSE:Factory 
checked in at 2018-03-22 12:08:12
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/kubernetes-salt (Old)
 and      /work/SRC/openSUSE:Factory/.kubernetes-salt.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "kubernetes-salt"

Thu Mar 22 12:08:12 2018 rev:7 rq:589836 version:3.0.0+git_r657_c294782

Changes:
--------
--- /work/SRC/openSUSE:Factory/kubernetes-salt/kubernetes-salt.changes  
2018-03-19 23:38:22.813349004 +0100
+++ /work/SRC/openSUSE:Factory/.kubernetes-salt.new/kubernetes-salt.changes     
2018-03-22 12:12:13.138400408 +0100
@@ -1,0 +2,46 @@
+Wed Mar 21 17:10:41 UTC 2018 - [email protected]
+
+- Commit 0926982 by Kiall Mac Innes [email protected]
+ Add flannel readiness/liveness probe
+ 
+ This makes sure flannel has at least reached the point where it starts the
+ healthz API endpoint. However, that point in the flannel code is *very* early
+ and not all that useful for actual health checking. Additionally, as long as
+ the HTTP goroutine is running, healthz will *always* respond with a 200. It
+ performs no actual health checking.
+ 
+ Even still, let's include the probe. If flannel gets better health checking,
+ it will be enabled for us; on the other hand, if flannel doesn't get better
+ health checking, it's still *very slightly* useful to know that flannel has
+ at least reached this point in its code.
+
+
+-------------------------------------------------------------------
+Wed Mar 21 17:06:31 UTC 2018 - [email protected]
+
+- Commit 4259116 by Rafael Fernández López [email protected]
+ Wait for dex on the admin node before calling the orchestration done
+ 
+ When we finish the orchestration, all bits and pieces should be working as
+ expected. Wait for the haproxy on the admin node to correctly point to
+ dex before finishing the orchestration.
+
+
+-------------------------------------------------------------------
+Wed Mar 21 08:43:52 UTC 2018 - [email protected]
+
+- Commit 113a807 by Rafael Fernández López [email protected]
+ If no replacement is provided, do not ask for nonexistent states.
+ 
+ If no replacement is provided, `sync-all` was trying to refer to states that
+ didn't exist, because those states were also wrapped in a `replacement`
+ guard.
+ 
+ Commit f6d8787 by Rafael Fernández López [email protected]
+ Always set `replacement_provided` variable
+ 
+ Salt was complaining that this variable didn't exist in the `orch.removal`
+ orchestration when removing a master with no replacement provided.
+
+
+-------------------------------------------------------------------

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ kubernetes-salt.spec ++++++
--- /var/tmp/diff_new_pack.gkwDUO/_old  2018-03-22 12:12:14.214361845 +0100
+++ /var/tmp/diff_new_pack.gkwDUO/_new  2018-03-22 12:12:14.222361559 +0100
@@ -32,7 +32,7 @@
 
 Name:           kubernetes-salt
 %define gitrepo salt
-Version:        3.0.0+git_r650_e65e789
+Version:        3.0.0+git_r657_c294782
 Release:        0
 BuildArch:      noarch
 Summary:        Production-Grade Container Scheduling and Management

++++++ master.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/pillar/cni.sls 
new/salt-master/pillar/cni.sls
--- old/salt-master/pillar/cni.sls      2018-03-16 18:05:32.000000000 +0100
+++ new/salt-master/pillar/cni.sls      2018-03-21 18:10:02.000000000 +0100
@@ -3,6 +3,7 @@
   image:          'sles12/flannel:0.9.1'
   backend:        'vxlan'
   port:           '8472'    # UDP port to use for sending encapsulated 
packets. Defaults to kernel default, currently 8472.
+  healthz_port:   '8471'    # TCP port used for flannel healthchecks
 # log level for flanneld service
 # 0 - Generally useful for this to ALWAYS be visible to an operator.
 # 1 - A reasonable default log level if you don't want verbosity.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/addons/dex/init.sls 
new/salt-master/salt/addons/dex/init.sls
--- old/salt-master/salt/addons/dex/init.sls    2018-03-16 18:05:32.000000000 
+0100
+++ new/salt-master/salt/addons/dex/init.sls    2018-03-21 18:10:02.000000000 
+0100
@@ -40,18 +40,3 @@
 {{ kubectl("remove-old-dex-clusterrolebinding",
            "delete clusterrolebinding system:dex",
            onlyif="kubectl get clusterrolebinding system:dex") }}
-
-ensure_dex_running:
-  # Wait until the Dex API is actually up and running
-  http.wait_for_successful_query:
-    {% set dex_api_server = "api." + pillar['internal_infra_domain']  -%}
-    {% set dex_api_server_ext = pillar['api']['server']['external_fqdn'] -%}
-    {% set dex_api_port = pillar['dex']['node_port'] -%}
-    - name:       {{ 'https://' + dex_api_server + ':' + dex_api_port 
}}/.well-known/openid-configuration
-    - wait_for:   300
-    - ca_bundle:  {{ pillar['ssl']['ca_file'] }}
-    - status:     200
-    - header_dict:
-        Host: {{ dex_api_server_ext + ':' + dex_api_port }}
-    - watch:
-      - /etc/kubernetes/addons/dex/
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/addons/dex/wait.sls 
new/salt-master/salt/addons/dex/wait.sls
--- old/salt-master/salt/addons/dex/wait.sls    1970-01-01 01:00:00.000000000 
+0100
+++ new/salt-master/salt/addons/dex/wait.sls    2018-03-21 18:10:02.000000000 
+0100
@@ -0,0 +1,12 @@
+ensure-dex-running:
+  # Wait until the Dex API is actually up and running
+  http.wait_for_successful_query:
+    {% set dex_api_server = "api." + pillar['internal_infra_domain']  -%}
+    {% set dex_api_server_ext = pillar['api']['server']['external_fqdn'] -%}
+    {% set dex_api_port = pillar['dex']['node_port'] -%}
+    - name:       {{ 'https://' + dex_api_server + ':' + dex_api_port 
}}/.well-known/openid-configuration
+    - wait_for:   300
+    - ca_bundle:  {{ pillar['ssl']['ca_file'] }}
+    - status:     200
+    - header_dict:
+        Host: {{ dex_api_server_ext + ':' + dex_api_port }}
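For reference, this is roughly how the ensure-dex-running state above renders once
the Jinja variables are substituted. The internal domain, external FQDN and CA path
below are illustrative placeholders only, and 32000 is assumed to be the dex
node_port (matching the value haproxy previously hard-coded further down):

ensure-dex-running:
  # Wait until the Dex API is actually up and running
  http.wait_for_successful_query:
    - name:       https://api.infra.caasp.local:32000/.well-known/openid-configuration
    - wait_for:   300
    - ca_bundle:  /etc/pki/ca.crt
    - status:     200
    - header_dict:
        Host: kube-api.example.com:32000
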
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/cni/kube-flannel.yaml.jinja 
new/salt-master/salt/cni/kube-flannel.yaml.jinja
--- old/salt-master/salt/cni/kube-flannel.yaml.jinja    2018-03-16 
18:05:32.000000000 +0100
+++ new/salt-master/salt/cni/kube-flannel.yaml.jinja    2018-03-21 
18:10:02.000000000 +0100
@@ -103,8 +103,21 @@
           - "--kube-subnet-mgr"
           - "--v={{ pillar['flannel']['log_level'] }}"
           - "--iface=$(POD_IP)"
+          - "--healthz-ip=$(POD_IP)"
+          - "--healthz-port={{ pillar['flannel']['healthz_port'] }}"
         securityContext:
           privileged: true
+        ports:
+        - name: healthz
+          containerPort: {{ pillar['flannel']['healthz_port'] }}
+        readinessProbe:
+          httpGet:
+            path: /healthz
+            port: healthz
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: healthz
         env:
         - name: POD_IP
           valueFrom:
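For context, with the default pillar value healthz_port: '8471' added in
pillar/cni.sls above, the probe stanza of the rendered flannel container spec
would look roughly like this (a sketch; probe timeouts and thresholds are left
at the kubelet defaults):

        ports:
        - name: healthz
          containerPort: 8471
        readinessProbe:
          httpGet:
            path: /healthz
            port: healthz
        livenessProbe:
          httpGet:
            path: /healthz
            port: healthz
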
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/haproxy/haproxy.cfg.jinja 
new/salt-master/salt/haproxy/haproxy.cfg.jinja
--- old/salt-master/salt/haproxy/haproxy.cfg.jinja      2018-03-16 
18:05:32.000000000 +0100
+++ new/salt-master/salt/haproxy/haproxy.cfg.jinja      2018-03-21 
18:10:02.000000000 +0100
@@ -33,14 +33,15 @@
 {%- if "admin" in salt['grains.get']('roles', []) %}
 # Velum should be able to access Kube API and Dex service as well to get 
kubeconfig
 listen kubernetes-dex
-        bind {{ bind_ip }}:32000
+        bind {{ bind_ip }}:{{ pillar['dex']['node_port'] }}
         mode tcp
         default-server inter 10s fall 2
         balance roundrobin
         option redispatch
+        option httpchk GET /healthz
 
 {%- for minion_id, nodename in salt['mine.get']('roles:kube-master', 
'nodename', 'grain').items() %}
-        server master-{{ nodename }} {{ nodename }}.{{ 
pillar['internal_infra_domain'] }}:32000 check
+        server master-{{ nodename }} {{ nodename }}.{{ 
pillar['internal_infra_domain'] }}:{{ pillar['dex']['node_port'] }} check 
check-ssl port {{ pillar['dex']['node_port'] }} verify none
 {% endfor %}
 
 listen velum
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/orch/kubernetes.sls 
new/salt-master/salt/orch/kubernetes.sls
--- old/salt-master/salt/orch/kubernetes.sls    2018-03-16 18:05:32.000000000 
+0100
+++ new/salt-master/salt/orch/kubernetes.sls    2018-03-21 18:10:02.000000000 
+0100
@@ -210,6 +210,19 @@
     - require:
       - reboot-setup
 
+# Velum will connect to dex through the local haproxy instance in the admin 
node (because the
+# /etc/hosts include the external apiserver pointing to 127.0.0.1). Make sure 
that before calling
+# the orchestration done, we can access dex from the admin node as Velum would 
do.
+admin-wait-for-services:
+  salt.state:
+    - tgt: 'roles:admin'
+    - tgt_type: grain
+    - batch: {{ default_batch }}
+    - sls:
+      - addons.dex.wait
+    - require:
+      - services-setup
+
 # This flag indicates at least one bootstrap has completed at some
 # point in time on this node.
 set-bootstrap-complete-flag:
@@ -220,7 +233,7 @@
       - bootstrap_complete
       - true
     - require:
-      - services-setup
+      - admin-wait-for-services
 
 # Ensure the node is marked as finished bootstrapping
 clear-bootstrap-in-progress-flag:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/orch/removal.sls 
new/salt-master/salt/orch/removal.sls
--- old/salt-master/salt/orch/removal.sls       2018-03-16 18:05:32.000000000 
+0100
+++ new/salt-master/salt/orch/removal.sls       2018-03-21 18:10:02.000000000 
+0100
@@ -4,9 +4,7 @@
 # ... and we can provide an optional replacement node, and
 # this Salt code will always trust that node as a valid replacement
 {%- set replacement = salt['pillar.get']('replacement', '') %}
-{%- if replacement %}
-  {%- set replacement_provided = True %}
-{%- endif %}
+{%- set replacement_provided = (replacement != '') %}
 {%- set replacement_roles = [] %}
 
 
@@ -51,7 +49,7 @@
     {%- do salt.caasp_log.debug('CaaS: setting %s as the replacement for the 
etcd member %s', replacement, target) %}
     {%- do replacement_roles.append('etcd') %}
   {%- elif etcd_members|length > 1 %}
-    {%- do salt.caasp_log.warn('CaaS: numnber of etcd members will be reduced 
to %d, as no replacement for %s has been found (or provided)', 
etcd_members|length, target) %}
+    {%- do salt.caasp_log.warn('CaaS: number of etcd members will be reduced 
to %d, as no replacement for %s has been found (or provided)', 
etcd_members|length, target) %}
   {%- else %}
     {#- we need at least one etcd server #}
     {%- do salt.caasp_log.abort('CaaS: cannot remove etcd member %s: too few 
etcd members, and no replacement found or provided', target) %}
@@ -149,12 +147,18 @@
 {%- endif %} {# target in minions #}
 
 {#- other consistency checks... #}
+
 {%- if replacement %}
   {#- consistency check: if there is a replacement, it must have some (new) 
role(s) #}
   {%- if not replacement_roles %}
     {%- do salt.caasp_log.abort('CaaS: %s cannot be removed: too few etcd 
members, and no replacement found', target) %}
   {%- endif %}
-{%- endif %} {# replacement #}
+{%- else %}
+  {#- consistency check: if some of the previous checks set replacement to an 
empty string, clear replacement_roles #}
+  {%- set replacement_roles = [] %}
+{%- endif %}
+
+{#- end other consistency checks... #}
 
 {##############################
  # set grains
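A minimal Jinja sketch of why the replacement_provided change above matters: with
the old conditional, the variable is simply never defined when no replacement is
passed in, and later references make Salt complain that it does not exist; the new
expression always binds it to a boolean:

{#- old: only defined when a replacement was passed in #}
{%- if replacement %}
  {%- set replacement_provided = True %}
{%- endif %}

{#- new: always defined, True only for a non-empty replacement #}
{%- set replacement_provided = (replacement != '') %}
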
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/orch/update.sls 
new/salt-master/salt/orch/update.sls
--- old/salt-master/salt/orch/update.sls        2018-03-16 18:05:32.000000000 
+0100
+++ new/salt-master/salt/orch/update.sls        2018-03-21 18:10:02.000000000 
+0100
@@ -375,6 +375,19 @@
     - require:
       - cni-setup
 
+# Velum will connect to dex through the local haproxy instance in the admin 
node (because the
+# /etc/hosts include the external apiserver pointing to 127.0.0.1). Make sure 
that before calling
+# the orchestration done, we can access dex from the admin node as Velum would 
do.
+admin-wait-for-services:
+  salt.state:
+    - tgt: 'roles:admin'
+    - tgt_type: grain
+    - batch: 1
+    - sls:
+      - addons.dex.wait
+    - require:
+      - services-setup
+
 # Remove the now defuct caasp_fqdn grain (Remove for 4.0).
 remove-caasp-fqdn-grain:
   salt.function:
@@ -385,7 +398,7 @@
     - kwarg:
         destructive: True
     - require:
-      - services-setup
+      - admin-wait-for-services
 
 masters-remove-update-grain:
   salt.function:

