Hello community,

here is the log from the commit of package kubernetes-salt for openSUSE:Factory 
checked in at 2018-04-05 15:33:18
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/kubernetes-salt (Old)
 and      /work/SRC/openSUSE:Factory/.kubernetes-salt.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "kubernetes-salt"

Thu Apr  5 15:33:18 2018 rev:10 rq:593475 version:3.0.0+git_r670_185e941

Changes:
--------
--- /work/SRC/openSUSE:Factory/kubernetes-salt/kubernetes-salt.changes  2018-03-28 10:32:53.861773620 +0200
+++ /work/SRC/openSUSE:Factory/.kubernetes-salt.new/kubernetes-salt.changes     2018-04-05 15:33:21.936415765 +0200
@@ -1,0 +2,12 @@
+Tue Apr  3 10:26:04 UTC 2018 - containers-bugow...@suse.de
+
+- Commit c7ee6be by Rafael Fernández López eresli...@ereslibre.es
+ Wait for deployments during the orchestration time.
+ 
+ In addition to other checks, we should also consider the orchestration done
+ once the expected pods are running.
+ 
+ feature#deployment-stability
+
+
+-------------------------------------------------------------------

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ kubernetes-salt.spec ++++++
--- /var/tmp/diff_new_pack.4SJSge/_old  2018-04-05 15:33:23.420362132 +0200
+++ /var/tmp/diff_new_pack.4SJSge/_new  2018-04-05 15:33:23.424361987 +0200
@@ -32,7 +32,7 @@
 
 Name:           kubernetes-salt
 %define gitrepo salt
-Version:        3.0.0+git_r668_8e45600
+Version:        3.0.0+git_r670_185e941
 Release:        0
 BuildArch:      noarch
 Summary:        Production-Grade Container Scheduling and Management

++++++ master.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/_macros/kubectl.jinja new/salt-master/salt/_macros/kubectl.jinja
--- old/salt-master/salt/_macros/kubectl.jinja  2018-03-27 12:02:53.000000000 +0200
+++ new/salt-master/salt/_macros/kubectl.jinja  2018-04-03 12:26:19.000000000 +0200
@@ -84,3 +84,19 @@
                   watch=["file: " + dest] + kwargs.pop('watch', []),
                   **kwargs) }}
 {%- endmacro %}
+
+#####################################################################
+
+{% macro kubectl_wait_for_deployment(deployment, namespace = 'kube-system', timeout = 600) -%}
+wait-for-{{ deployment }}-deployment:
+  caasp_cmd.run:
+    - name: |-
+        desiredReplicas=$(kubectl --kubeconfig={{ pillar['paths']['kubeconfig'] }} get deployment {{ deployment }} --namespace={{ namespace }} --template {{ '{{.spec.replicas}}' }})
+        readyReplicas=$(kubectl --kubeconfig={{ pillar['paths']['kubeconfig'] }} get deployment {{ deployment }} --namespace={{ namespace }} --template {{ '{{.status.readyReplicas}}' }})
+        availableReplicas=$(kubectl --kubeconfig={{ pillar['paths']['kubeconfig'] }} get deployment {{ deployment }} --namespace={{ namespace }} --template {{ '{{.status.availableReplicas}}' }})
+        updatedReplicas=$(kubectl --kubeconfig={{ pillar['paths']['kubeconfig'] }} get deployment {{ deployment }} --namespace={{ namespace }} --template {{ '{{.status.updatedReplicas}}' }})
+        [ "$readyReplicas" == "$desiredReplicas" ] && [ "$availableReplicas" == "$desiredReplicas" ] && [ "$updatedReplicas" == "$desiredReplicas" ]
+    - retry:
+        attempts: {{ timeout }}
+        interval: 1
+{%- endmacro %}
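
A note on the macro above: the doubled braces in the --template arguments ({{ '{{.spec.replicas}}' }}) keep Jinja from consuming the Go-template markers, so kubectl receives the literal template {{.spec.replicas}} and friends. Rendered for a single deployment, the state looks roughly like this (a sketch; the kubeconfig path below is only a placeholder for whatever pillar['paths']['kubeconfig'] resolves to on a real cluster):

    wait-for-dex-deployment:
      caasp_cmd.run:
        - name: |-
            desiredReplicas=$(kubectl --kubeconfig=/etc/kubernetes/admin.conf get deployment dex --namespace=kube-system --template {{.spec.replicas}})
            readyReplicas=$(kubectl --kubeconfig=/etc/kubernetes/admin.conf get deployment dex --namespace=kube-system --template {{.status.readyReplicas}})
            availableReplicas=$(kubectl --kubeconfig=/etc/kubernetes/admin.conf get deployment dex --namespace=kube-system --template {{.status.availableReplicas}})
            updatedReplicas=$(kubectl --kubeconfig=/etc/kubernetes/admin.conf get deployment dex --namespace=kube-system --template {{.status.updatedReplicas}})
            [ "$readyReplicas" == "$desiredReplicas" ] && [ "$availableReplicas" == "$desiredReplicas" ] && [ "$updatedReplicas" == "$desiredReplicas" ]
        - retry:
            attempts: 600
            interval: 1

With interval: 1 the check is retried once per second, so the default timeout of 600 attempts gives each deployment roughly ten minutes to reach its desired number of ready, available and updated replicas before the state (and with it the orchestration) fails.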
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/addons/dex/deployment-wait.sls new/salt-master/salt/addons/dex/deployment-wait.sls
--- old/salt-master/salt/addons/dex/deployment-wait.sls 1970-01-01 01:00:00.000000000 +0100
+++ new/salt-master/salt/addons/dex/deployment-wait.sls 2018-04-03 12:26:19.000000000 +0200
@@ -0,0 +1,3 @@
+{% from '_macros/kubectl.jinja' import kubectl_wait_for_deployment with context %}
+
+{{ kubectl_wait_for_deployment('dex') }}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/addons/dns/deployment-wait.sls new/salt-master/salt/addons/dns/deployment-wait.sls
--- old/salt-master/salt/addons/dns/deployment-wait.sls 1970-01-01 01:00:00.000000000 +0100
+++ new/salt-master/salt/addons/dns/deployment-wait.sls 2018-04-03 12:26:19.000000000 +0200
@@ -0,0 +1,3 @@
+{% from '_macros/kubectl.jinja' import kubectl_wait_for_deployment with context %}
+
+{{ kubectl_wait_for_deployment('kube-dns') }}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/addons/tiller/deployment-wait.sls new/salt-master/salt/addons/tiller/deployment-wait.sls
--- old/salt-master/salt/addons/tiller/deployment-wait.sls      1970-01-01 01:00:00.000000000 +0100
+++ new/salt-master/salt/addons/tiller/deployment-wait.sls      2018-04-03 12:26:19.000000000 +0200
@@ -0,0 +1,13 @@
+{% if salt.caasp_pillar.get('addons:tiller', False) %}
+
+{% from '_macros/kubectl.jinja' import kubectl_wait_for_deployment with context %}
+
+{{ kubectl_wait_for_deployment('tiller-deploy') }}
+
+{% else %}
+
+dummy:
+  cmd.run:
+    - name: echo "Tiller addon not enabled in config"
+
+{% endif %}
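
Note the else branch above: when the Tiller addon is disabled, the file still renders a harmless echo state, presumably so that the salt.state orchestration step including addons.tiller.deployment-wait always has something to apply instead of tripping over an SLS that renders empty. A sketch of the pillar toggle being checked (the exact pillar file layout is an assumption; only the addons:tiller key comes from the code above):

    # hypothetical pillar snippet: enabling the Tiller addon makes the
    # kubectl_wait_for_deployment('tiller-deploy') state render instead of the dummy
    addons:
      tiller: true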
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/orch/kubernetes.sls new/salt-master/salt/orch/kubernetes.sls
--- old/salt-master/salt/orch/kubernetes.sls    2018-03-27 12:02:53.000000000 +0200
+++ new/salt-master/salt/orch/kubernetes.sls    2018-04-03 12:26:19.000000000 +0200
@@ -210,6 +210,17 @@
     - require:
       - reboot-setup
 
+# Wait for deployments to have the expected number of pods running.
+super-master-wait-for-services:
+  salt.state:
+    - tgt: {{ super_master }}
+    - sls:
+      - addons.dns.deployment-wait
+      - addons.tiller.deployment-wait
+      - addons.dex.deployment-wait
+    - require:
+      - services-setup
+
 # Velum will connect to dex through the local haproxy instance in the admin node (because the
 # /etc/hosts includes the external apiserver pointing to 127.0.0.1). Make sure that before calling
 # the orchestration done, we can access dex from the admin node as Velum would do.
@@ -221,7 +232,7 @@
     - sls:
       - addons.dex.wait
     - require:
-      - services-setup
+      - super-master-wait-for-services
 
 # This flag indicates at least one bootstrap has completed at some
 # point in time on this node.
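
The same change is made to both orchestrations (kubernetes.sls here, update.sls below): the dex reachability check used to require services-setup directly and now requires the new replica-count wait instead. The resulting chain, sketched with abbreviated names for states outside this diff:

    services-setup
      -> super-master-wait-for-services   (dns, tiller and dex deployments fully rolled out)
      -> addons.dex.wait                  (dex reachable from the admin node, as Velum sees it)

so the orchestration is only declared done once the expected pods are actually running.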
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/orch/update.sls new/salt-master/salt/orch/update.sls
--- old/salt-master/salt/orch/update.sls        2018-03-27 12:02:53.000000000 +0200
+++ new/salt-master/salt/orch/update.sls        2018-04-03 12:26:19.000000000 +0200
@@ -383,6 +383,17 @@
     - require:
       - cni-setup
 
+# Wait for deployments to have the expected number of pods running.
+super-master-wait-for-services:
+  salt.state:
+    - tgt: {{ super_master }}
+    - sls:
+      - addons.dns.deployment-wait
+      - addons.tiller.deployment-wait
+      - addons.dex.deployment-wait
+    - require:
+      - services-setup
+
 # Velum will connect to dex through the local haproxy instance in the admin node (because the
 # /etc/hosts includes the external apiserver pointing to 127.0.0.1). Make sure that before calling
 # the orchestration done, we can access dex from the admin node as Velum would do.
@@ -394,7 +405,7 @@
     - sls:
       - addons.dex.wait
     - require:
-      - services-setup
+      - super-master-wait-for-services
 
 # Remove the now defunct caasp_fqdn grain (Remove for 4.0).
 remove-caasp-fqdn-grain:

