Hello community,

here is the log from the commit of package kubernetes-salt for openSUSE:Factory checked in at 2018-06-27 10:20:35
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/kubernetes-salt (Old)
 and      /work/SRC/openSUSE:Factory/.kubernetes-salt.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "kubernetes-salt"

Wed Jun 27 10:20:35 2018 rev:28 rq:618553 version:4.0.0+git_r853_e2b520b

Changes:
--------
--- /work/SRC/openSUSE:Factory/kubernetes-salt/kubernetes-salt.changes  2018-06-20 15:35:02.454055999 +0200
+++ /work/SRC/openSUSE:Factory/.kubernetes-salt.new/kubernetes-salt.changes     2018-06-27 10:20:37.537778059 +0200
@@ -1,0 +2,29 @@
+Fri Jun 22 14:01:41 UTC 2018 - [email protected]
+
+- Commit 8a746bc by Flavio Castelli [email protected]
+ Do not install recommends
+ 
+ Instruct salt to not install recommended packages.
+ 
+ feature#do-not-install-recommends
+ 
+ Signed-off-by: Flavio Castelli <[email protected]>
+
+
+-------------------------------------------------------------------
+Thu Jun 21 09:48:51 UTC 2018 - [email protected]
+
+- Commit 97d8178 by Rafael Fernández López [email protected]
+ Call to `mine.update` after `saltutil.sync_pillar` has been called.
+ 
+ During an upgrade we want to call `mine.update` after
+ `saltutil.sync_pillar` has been called: because the `mine_functions`
+ reside in the pillar, we first want to make sure that is synced, and
+ update the mine afterwards. Otherwise, we risk a race condition when
+ the salt minion starts, which may or may not lead to an update
+ orchestration failure.
+ 
+ Fixes: bsc#1097478
+
+
+-------------------------------------------------------------------

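For context on the "Do not install recommends" entry above: Salt's
pkg.installed state accepts an install_recommends flag, which on
zypper-based systems such as openSUSE corresponds to installing without
recommended (weak) dependencies, i.e. zypper --no-recommends. A minimal
sketch (the state id and package name are illustrative):

    example-pkg:
      pkg.installed:
        - name: some-package            # illustrative package name
        - install_recommends: False     # skip recommended packages

The diffs below add exactly this flag to the pkg.installed states in the
salt tree.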
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ kubernetes-salt.spec ++++++
--- /var/tmp/diff_new_pack.BCZR4U/_old  2018-06-27 10:20:38.145755952 +0200
+++ /var/tmp/diff_new_pack.BCZR4U/_new  2018-06-27 10:20:38.149755807 +0200
@@ -32,7 +32,7 @@
 
 Name:           kubernetes-salt
 %define gitrepo salt
-Version:        4.0.0+git_r847_002982b
+Version:        4.0.0+git_r853_e2b520b
 Release:        0
 BuildArch:      noarch
 Summary:        Production-Grade Container Scheduling and Management

++++++ master.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/crio/init.sls new/salt-master/salt/crio/init.sls
--- old/salt-master/salt/crio/init.sls  2018-06-19 17:12:12.000000000 +0200
+++ new/salt-master/salt/crio/init.sls  2018-06-22 16:03:13.000000000 +0200
@@ -5,6 +5,7 @@
 crio:
   pkg.installed:
     - name: cri-o
+    - install_recommends: False
   file.managed:
     - name: /etc/crio/crio.conf
     - source: salt://crio/crio.conf.jinja
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/crypto/init.sls new/salt-master/salt/crypto/init.sls
--- old/salt-master/salt/crypto/init.sls        2018-06-19 17:12:12.000000000 +0200
+++ new/salt-master/salt/crypto/init.sls        2018-06-22 16:03:13.000000000 +0200
@@ -1,8 +1,9 @@
 python-M2Crypto:
-  pkg.installed
+  pkg.installed:
+    - install_recommends: False
 
 /etc/pki:
   file.directory:
     - user: root
     - group: root
-    - mode: 755
\ No newline at end of file
+    - mode: 755
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/etcd/init.sls new/salt-master/salt/etcd/init.sls
--- old/salt-master/salt/etcd/init.sls  2018-06-19 17:12:12.000000000 +0200
+++ new/salt-master/salt/etcd/init.sls  2018-06-22 16:03:13.000000000 +0200
@@ -10,6 +10,7 @@
 add-etcd-to-cluster:
   pkg.installed:
     - name: etcdctl
+    - install_recommends: False
   caasp_etcd.member_add:
     - retry:
         interval: 4
@@ -39,6 +40,7 @@
       - iptables
       - etcdctl
       - etcd
+    - install_recommends: False
   caasp_retriable.retry:
     - name: iptables-etcd
     - target: iptables.append
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/kube-apiserver/init.sls new/salt-master/salt/kube-apiserver/init.sls
--- old/salt-master/salt/kube-apiserver/init.sls        2018-06-19 17:12:12.000000000 +0200
+++ new/salt-master/salt/kube-apiserver/init.sls        2018-06-22 16:03:13.000000000 +0200
@@ -17,6 +17,7 @@
     - pkgs:
       - iptables
       - kubernetes-master
+    - install_recommends: False
   caasp_retriable.retry:
     - name: iptables-kube-apiserver
     - target: iptables.append
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/kube-controller-manager/init.sls new/salt-master/salt/kube-controller-manager/init.sls
--- old/salt-master/salt/kube-controller-manager/init.sls       2018-06-19 17:12:12.000000000 +0200
+++ new/salt-master/salt/kube-controller-manager/init.sls       2018-06-22 16:03:13.000000000 +0200
@@ -6,6 +6,7 @@
   pkg.installed:
     - pkgs:
       - kubernetes-master
+    - install_recommends: False
   file.managed:
     - name:       /etc/kubernetes/controller-manager
     - source:     salt://kube-controller-manager/controller-manager.jinja
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/kube-proxy/init.sls new/salt-master/salt/kube-proxy/init.sls
--- old/salt-master/salt/kube-proxy/init.sls    2018-06-19 17:12:12.000000000 +0200
+++ new/salt-master/salt/kube-proxy/init.sls    2018-06-22 16:03:13.000000000 +0200
@@ -25,6 +25,7 @@
       - iptables
       - conntrack-tools
       - kubernetes-node
+    - install_recommends: False
   file.managed:
     - name: /etc/kubernetes/proxy
     - source: salt://kube-proxy/proxy.jinja
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/kube-scheduler/init.sls new/salt-master/salt/kube-scheduler/init.sls
--- old/salt-master/salt/kube-scheduler/init.sls        2018-06-19 17:12:12.000000000 +0200
+++ new/salt-master/salt/kube-scheduler/init.sls        2018-06-22 16:03:13.000000000 +0200
@@ -6,6 +6,7 @@
   pkg.installed:
     - pkgs:
       - kubernetes-master
+    - install_recommends: False
   file.managed:
     - name: /etc/kubernetes/scheduler
     - source: salt://kube-scheduler/scheduler.jinja
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/kubelet/init.sls new/salt-master/salt/kubelet/init.sls
--- old/salt-master/salt/kubelet/init.sls       2018-06-19 17:12:12.000000000 +0200
+++ new/salt-master/salt/kubelet/init.sls       2018-06-22 16:03:13.000000000 +0200
@@ -59,6 +59,7 @@
       - iptables
       - kubernetes-client
       - kubernetes-node
+    - install_recommends: False
   file.managed:
     - name:     /etc/kubernetes/kubelet
     - source:   salt://kubelet/kubelet.jinja
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/kubelet/update-pre-orchestration.sh new/salt-master/salt/kubelet/update-pre-orchestration.sh
--- old/salt-master/salt/kubelet/update-pre-orchestration.sh    2018-06-19 17:12:12.000000000 +0200
+++ new/salt-master/salt/kubelet/update-pre-orchestration.sh    2018-06-22 16:03:13.000000000 +0200
@@ -7,6 +7,7 @@
 
 OLD_NODE_NAME="$1"
 NEW_NODE_NAME="$2"
+ROLE="$3"
 
 ##########################################################
 
@@ -67,6 +68,14 @@
   podCIDR: $(get_node_data .spec.podCIDR)
 EOF
 
+if [[ "$ROLE" == "master" ]]; then
+  cat << EOF >> /tmp/k8s-node-migration.yaml
+  taints:
+  - effect: NoSchedule
+    key: node-role.kubernetes.io/master
+EOF
+fi
+
 kubectl --request-timeout=1m create -f /tmp/k8s-node-migration.yaml 2>/dev/null
 
 rm /tmp/k8s-node-migration.yaml
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/kubelet/update-pre-orchestration.sls new/salt-master/salt/kubelet/update-pre-orchestration.sls
--- old/salt-master/salt/kubelet/update-pre-orchestration.sls   2018-06-19 17:12:12.000000000 +0200
+++ new/salt-master/salt/kubelet/update-pre-orchestration.sls   2018-06-22 16:03:13.000000000 +0200
@@ -11,7 +11,11 @@
     - source: salt://kubelet/update-pre-orchestration.sh
     - mode: 0755
   cmd.run:
-    - name: /tmp/kubelet-update-pre-orchestration.sh {{ grains['machine_id'] + "." + pillar['internal_infra_domain'] }} {{ grains['nodename'] }}
+{% if "kube-master" in salt['grains.get']('roles', []) %}
+    - name: /tmp/kubelet-update-pre-orchestration.sh {{ grains['machine_id'] + "." + pillar['internal_infra_domain'] }} {{ grains['nodename'] }} master
+{% else %}
+    - name: /tmp/kubelet-update-pre-orchestration.sh {{ grains['machine_id'] + "." + pillar['internal_infra_domain'] }} {{ grains['nodename'] }} worker
+{% endif %}
+{% endif %}
     - stateful: True
     - env:
       - KUBECONFIG: {{ pillar['paths']['kubeconfig'] }}
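After Jinja rendering on a kube-master node, the cmd.run state above
collapses to something like this sketch (the machine id, domain, and
nodename values are hypothetical):

    cmd.run:
      - name: /tmp/kubelet-update-pre-orchestration.sh 0123abcd.infra.caasp.local node-1 master

On all other nodes the role argument renders as "worker", matching the new
ROLE="$3" parameter added to update-pre-orchestration.sh above.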
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/kubernetes-common/init.sls new/salt-master/salt/kubernetes-common/init.sls
--- old/salt-master/salt/kubernetes-common/init.sls     2018-06-19 17:12:12.000000000 +0200
+++ new/salt-master/salt/kubernetes-common/init.sls     2018-06-22 16:03:13.000000000 +0200
@@ -2,6 +2,7 @@
   pkg.installed:
     - pkgs:
       - kubernetes-common
+    - install_recommends: False
 
 /etc/kubernetes/config:
   file.managed:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/orch/update.sls new/salt-master/salt/orch/update.sls
--- old/salt-master/salt/orch/update.sls        2018-06-19 17:12:12.000000000 +0200
+++ new/salt-master/salt/orch/update.sls        2018-06-22 16:03:13.000000000 +0200
@@ -57,10 +57,20 @@
     - names:
       - saltutil.refresh_pillar
       - saltutil.refresh_grains
-      - mine.update
     - require:
       - sync-pillar
 
+# This needs to be a separate step from `update-data`, so `saltutil.refresh_pillar` has been
+# called before this, discovering new mine functions defined in the pillar, before publishing
+# them on the mine.
+update-mine:
+  salt.function:
+    - tgt: '{{ is_responsive_node_tgt }}'
+    - tgt_type: compound
+    - name: mine.update
+    - require:
+      - update-data
+
 update-modules:
   salt.function:
     - name: saltutil.sync_all
@@ -69,7 +79,7 @@
     - kwarg:
         refresh: True
     - require:
-      - update-data
+      - update-mine
 
 # Generate sa key (we should refactor this as part of the ca highstate along with its counterpart
 # in orch/kubernetes.sls)
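For context on the `update-mine` reordering above: `mine_functions` can be
defined in pillar data, so `mine.update` only picks up newly added functions
once the pillar has been refreshed. A minimal pillar sketch (the file
location and function choice are illustrative):

    # pillar/mine.sls -- illustrative location
    mine_functions:
      network.ip_addrs:      # publish each minion's IP addresses to the mine
        - eth0

With definitions like this in the pillar, `saltutil.refresh_pillar` has to
run first so that the subsequent `mine.update` publishes the results of the
new functions.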
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/reboot/init.sls new/salt-master/salt/reboot/init.sls
--- old/salt-master/salt/reboot/init.sls        2018-06-19 17:12:12.000000000 +0200
+++ new/salt-master/salt/reboot/init.sls        2018-06-22 16:03:13.000000000 +0200
@@ -26,6 +26,7 @@
 set_max_holders_mutex:
   pkg.installed:
     - name: curl
+    - install_recommends: False
   cmd.run:
     - name: curl -L -X PUT {{ curl_args}} {{ reboot_uri }}/mutex?prevExist=false -d value="0"
     - onlyif: curl {{ curl_args}} {{ reboot_uri }}/mutex?prevExist=false | grep -i "key not found"
@@ -35,6 +36,7 @@
 set_max_holders_data:
   pkg.installed:
     - name: curl
+    - install_recommends: False
   cmd.run:
     - name:
         curl -L -X PUT {{ curl_args}} {{ reboot_uri }}/data?prevExist=false -d value='{ "max":"{{ max_holders }}", "holders":[] }'

