Hello community,

here is the log from the commit of package kubernetes-salt for openSUSE:Factory checked in at 2018-02-02 22:22:13
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/kubernetes-salt (Old)
 and      /work/SRC/openSUSE:Factory/.kubernetes-salt.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "kubernetes-salt"

Fri Feb  2 22:22:13 2018 rev:2 rq:571950 version:3.0.0+git_r549_76bcd68

Changes:
--------
--- /work/SRC/openSUSE:Factory/kubernetes-salt/kubernetes-salt.changes  2018-02-01 21:29:22.677103692 +0100
+++ /work/SRC/openSUSE:Factory/.kubernetes-salt.new/kubernetes-salt.changes     2018-02-02 22:22:14.461990881 +0100
@@ -1,0 +2,36 @@
+Fri Feb  2 09:41:56 UTC 2018 - containers-bugow...@suse.de
+
+- Commit 2eb40f1 by Jordi Massaguer Pla jmassaguer...@suse.de
+ replace sle12 with tumbleweed if the package is built on Tumbleweed
+
+
+-------------------------------------------------------------------
+Fri Feb  2 09:16:38 UTC 2018 - containers-bugow...@suse.de
+
+- Commit 37e99c4 by Alvaro Saurin alvaro.sau...@gmail.com
+ Use the same code convention for ids in the orchestration as all the other
+ ids. Cleanup some files when updating CNI.
+
+
+-------------------------------------------------------------------
+Thu Feb  1 15:53:55 UTC 2018 - containers-bugow...@suse.de
+
+- Commit cf53150 by Kiall Mac Innes ki...@macinnes.ie
+ No longer use machine-ids as node names
+ 
+ With CaaSP 3.0, we're introducing a requirement for machines to have
+ valid+unique hostnames in order to allow for the K8S CPIs to function
+ correctly.
+ 
+ This means our generated hostname is no longer needed, as our environment
+ requirements force operators to provision servers with unique hostnames.
+
+
+-------------------------------------------------------------------
+Thu Feb  1 13:06:16 UTC 2018 - containers-bugow...@suse.de
+
+- Commit 4ba7007 by Kiall Mac Innes ki...@macinnes.ie
+ Update dex binary name to caasp-dex
+
+
+-------------------------------------------------------------------

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ kubernetes-salt.spec ++++++
--- /var/tmp/diff_new_pack.GqdEai/_old  2018-02-02 22:22:15.177957455 +0100
+++ /var/tmp/diff_new_pack.GqdEai/_new  2018-02-02 22:22:15.177957455 +0100
@@ -15,11 +15,24 @@
 # Please submit bugfixes or comments via http://bugs.opensuse.org/
 #
 
+
+%if 0%{?suse_version} == 1315 && !0%{?is_opensuse}
+  %define _base_image sles12
+%endif
+
+%if 0%{?suse_version} == 1500 && !0%{?is_opensuse}
+  %define _base_image sles15
+%endif
+
+%if 0%{?is_opensuse} && 0%{?suse_version} > 1500
+  %define _base_image tumbleweed
+%endif
+
 %{!?tmpfiles_create:%global tmpfiles_create systemd-tmpfiles --create}
 
 Name:           kubernetes-salt
 %define gitrepo salt
-Version:        3.0.0+git_r542_d522c0a
+Version:        3.0.0+git_r549_76bcd68
 Release:        0
 BuildArch:      noarch
 Summary:        Production-Grade Container Scheduling and Management
@@ -28,8 +41,8 @@
 Url:            https://github.com/kubic-project/salt
 Source:         master.tar.gz
 BuildRequires:  systemd-rpm-macros
-Requires:       salt
 Requires:       python-m2crypto
+Requires:       salt
 
 %description
 Salt scripts for deploying a Kubernetes cluster
@@ -44,6 +57,21 @@
 mkdir -p %{buildroot}%{_datadir}/salt/kubernetes
 cp -R %{_builddir}/%{gitrepo}-master/*  %{buildroot}%{_datadir}/salt/kubernetes/
 
+# fix image name
+dir_name=%{buildroot}/%{_datadir}/salt/kubernetes
+files=$(grep "image:[ ]*sles12" $dir_name -r | cut -d: -f1 | uniq)
+files="$files $(grep "image:[ ]*'sles12" $dir_name -r | cut -d: -f1 | uniq)"
+
+for file in $files;do
+    echo "DEBUG: Replacing sles12 by %{_base_image} in $file"
+    if [ ! -f $file ];then
+        echo "ERROR: File not found $file"
+        exit -1
+    fi
+    sed -e "s%image:[ ]*sles12/\(.*\):%image: %{_base_image}/\1:%g" -i $file
+    sed -e "s%image:[ ]*'sles12/\(.*\):%image: '%{_base_image}/\1:%g" -i $file
+done
+
 %files
 %defattr(-,root,root)
 %dir %{_datadir}/salt
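
As an illustration of what the new %install scriptlet does, here is a minimal Python sketch of the same image-prefix rewrite; the manifest line and the "tumbleweed" value below are hypothetical examples, and the packaged implementation is the sed loop above.

import re

base_image = "tumbleweed"                    # stands in for %{_base_image}
line = "        - image: sles12/velum:3.0"   # hypothetical manifest line

# Mirrors the sed expression: s%image:[ ]*sles12/\(.*\):%image: <base>/\1:%g
rewritten = re.sub(r"image:\s*sles12/(.*):", "image: " + base_image + r"/\1:", line)
print(rewritten)   # "        - image: tumbleweed/velum:3.0"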

++++++ master.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/packaging/suse/make_spec.sh 
new/salt-master/packaging/suse/make_spec.sh
--- old/salt-master/packaging/suse/make_spec.sh 2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/packaging/suse/make_spec.sh 2018-02-02 10:42:57.000000000 +0100
@@ -38,6 +38,18 @@
 # Please submit bugfixes or comments via http://bugs.opensuse.org/
 #
 
+%if 0%{?suse_version} == 1315 && !0%{?is_opensuse}
+  %define _base_image sles12
+%endif
+
+%if 0%{?suse_version} == 1500 && !0%{?is_opensuse}
+  %define _base_image sles15
+%endif
+
+%if 0%{?is_opensuse} && 0%{?suse_version} > 1500
+  %define _base_image tumbleweed
+%endif
+
 %{!?tmpfiles_create:%global tmpfiles_create systemd-tmpfiles --create}
 
 Name:           $NAME
@@ -67,6 +79,21 @@
 mkdir -p %{buildroot}%{_datadir}/salt/kubernetes
 cp -R %{_builddir}/%{gitrepo}-${SAFE_BRANCH}/*  %{buildroot}%{_datadir}/salt/kubernetes/
 
+# fix image name
+dir_name=%{buildroot}/%{_datadir}/salt/kubernetes
+files=\$(grep "image:[ ]*sles12" \$dir_name -r | cut -d: -f1 | uniq)
+files="\$files \$(grep "image:[ ]*'sles12" \$dir_name -r | cut -d: -f1 | uniq)"
+
+for file in \$files;do
+    echo "DEBUG: Replacing sles12 by %{_base_image} in \$file"
+    if [ ! -f \$file ];then
+        echo "ERROR: File not found \$file"
+        exit -1
+    fi
+    sed -e "s%image:[ ]*sles12/\(.*\):%image: %{_base_image}/\1:%g" -i \$file
+    sed -e "s%image:[ ]*'sles12/\(.*\):%image: '%{_base_image}/\1:%g" -i \$file
+done
+
 %files
 %defattr(-,root,root)
 %dir %{_datadir}/salt
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/pillar/fqdn.sls 
new/salt-master/pillar/fqdn.sls
--- old/salt-master/pillar/fqdn.sls     2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/pillar/fqdn.sls     1970-01-01 01:00:00.000000000 +0100
@@ -1,7 +0,0 @@
-mine_functions:
-  fqdn:
-    - mine_function: grains.get
-    - fqdn
-  caasp_fqdn:
-    - mine_function: grains.get
-    - caasp_fqdn
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/pillar/mine.sls 
new/salt-master/pillar/mine.sls
--- old/salt-master/pillar/mine.sls     2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/pillar/mine.sls     2018-02-02 10:42:57.000000000 +0100
@@ -2,4 +2,9 @@
   network.ip_addrs: []
   network.interfaces: []
   network.default_route: []
-
+  nodename:
+    - mine_function: grains.get
+    - nodename
+  host:
+    - mine_function: grains.get
+    - host
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/pillar/top.sls 
new/salt-master/pillar/top.sls
--- old/salt-master/pillar/top.sls      2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/pillar/top.sls      2018-02-02 10:42:57.000000000 +0100
@@ -8,7 +8,6 @@
     - mine
     - docker
     - registries
-    - fqdn
     - schedule
   'roles:ca':
     - match: grain
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/_macros/certs.jinja 
new/salt-master/salt/_macros/certs.jinja
--- old/salt-master/salt/_macros/certs.jinja    2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/_macros/certs.jinja    2018-02-02 10:42:57.000000000 +0100
@@ -1,7 +1,10 @@
 
 {% macro alt_names(lst=[]) -%}
   {#- add all the names and IPs we know about -#}
-  {%- set altNames = ["DNS: " + grains['caasp_fqdn'] ] -%}
+  {%- set altNames = [
+    "DNS: " + grains['nodename'], "DNS: " + grains['nodename'] + "." + 
pillar['internal_infra_domain'],
+    "DNS: " + grains['machine_id'], "DNS: " + grains['machine_id'] + "." + 
pillar['internal_infra_domain']
+  ] -%}
   {#- append all the names/IPs provided (if not empty) -#}
   {%- for name in lst -%}
     {%- if name and name|length > 0 -%}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/_modules/caasp_net.py 
new/salt-master/salt/_modules/caasp_net.py
--- old/salt-master/salt/_modules/caasp_net.py  2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/_modules/caasp_net.py  2018-02-02 10:42:57.000000000 +0100
@@ -59,3 +59,9 @@
     for host in _get_mine(compound, 'network.interfaces', expr_form='compound').keys():
         res.append(get_primary_ip(host=host, **kwargs))
     return res
+
+
+def get_nodename(**kwargs):
+    host = kwargs.pop('host', _get_local_id())
+    
+    return _get_mine(host, 'nodename')[host]
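
The new get_nodename() helper looks up the nodename grain a minion has published to the Salt mine, keyed by minion id. A rough, self-contained sketch of that lookup with hypothetical minion ids and mine contents (the real module relies on the _get_mine() and _get_local_id() helpers defined elsewhere in caasp_net.py):

# Hypothetical mine contents: minion id -> published 'nodename' grain
mine_nodenames = {
    "3cbee5d98e154d0a8ce1b8f74e79bb70": "master-0",
    "a51a93f5d0e94b7ba25b439d0a96700b": "worker-1",
}

def lookup_nodename(host, mine=mine_nodenames):
    # Equivalent in spirit to _get_mine(host, 'nodename')[host]
    return mine[host]

print(lookup_nodename("3cbee5d98e154d0a8ce1b8f74e79bb70"))   # master-0
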
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/cert/init.sls 
new/salt-master/salt/cert/init.sls
--- old/salt-master/salt/cert/init.sls  2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/cert/init.sls  2018-02-02 10:42:57.000000000 +0100
@@ -3,7 +3,7 @@
 
 {% from '_macros/certs.jinja' import certs with context %}
 
-{{ certs("node:" + grains['caasp_fqdn'],
+{{ certs("node:" + grains['nodename'],
          pillar['ssl']['crt_file'],
          pillar['ssl']['key_file'],
          o = pillar['certificate_information']['subject_properties']['O']) }}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/cni/update-pre-orchestration.sh 
new/salt-master/salt/cni/update-pre-orchestration.sh
--- old/salt-master/salt/cni/update-pre-orchestration.sh        2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/cni/update-pre-orchestration.sh        2018-02-02 10:42:57.000000000 +0100
@@ -18,7 +18,7 @@
 }
 
 get_node_cidr() {
-       kubectl get no "$NODE_ID" --template="{{.spec.podCIDR}}"
+       kubectl get node "$NODE_ID" --template="{{.spec.podCIDR}}"
 }
 
 patch_node() {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/cni/update-pre-orchestration.sls 
new/salt-master/salt/cni/update-pre-orchestration.sls
--- old/salt-master/salt/cni/update-pre-orchestration.sls       2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/cni/update-pre-orchestration.sls       2018-02-02 10:42:57.000000000 +0100
@@ -6,12 +6,12 @@
  - kubectl-config
 
 # try to save the flannel subnet in the .spec.podCIDR (if not assigned yet)
-/tmp/update-pre-orchestration.sh:
+/tmp/cni-update-pre-orchestration.sh:
   file.managed:
     - source: salt://cni/update-pre-orchestration.sh
     - mode: 0755
   cmd.run:
-    - name: /tmp/update-pre-orchestration.sh {{ grains['caasp_fqdn'] }} {{ salt.caasp_net.get_primary_ip() }} {{ salt.caasp_pillar.get('flannel:backend', 'vxlan') }}
+    - name: /tmp/cni-update-pre-orchestration.sh {{ grains['machine_id'] + "." + pillar['internal_infra_domain'] }} {{ salt.caasp_net.get_primary_ip() }} {{ salt.caasp_pillar.get('flannel:backend', 'vxlan') }}
     - stateful: True
     - env:
       - KUBECONFIG: {{ pillar['paths']['kubeconfig'] }}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/cni/update-pre-reboot.sls 
new/salt-master/salt/cni/update-pre-reboot.sls
--- old/salt-master/salt/cni/update-pre-reboot.sls      2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/cni/update-pre-reboot.sls      2018-02-02 10:42:57.000000000 +0100
@@ -6,3 +6,11 @@
   # make sure that the service is disabled
   service.disabled:
     - name: flanneld
+
+remove-flannel-unit:
+  file.absent:
+    - name: /usr/lib/systemd/system/docker.service.d/flannel.conf
+
+remove-flannel-subnets:
+  file.absent:
+    - name: /var/run/flannel
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/dex/dex.yaml 
new/salt-master/salt/dex/dex.yaml
--- old/salt-master/salt/dex/dex.yaml   2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/dex/dex.yaml   2018-02-02 10:42:57.000000000 +0100
@@ -101,7 +101,7 @@
       containers:
       - image: sles12/caasp-dex:2.7.1
         name: dex
-        command: ["/usr/bin/dex", "serve", "/etc/dex/cfg/config.yaml"]
+        command: ["/usr/bin/caasp-dex", "serve", "/etc/dex/cfg/config.yaml"]
 
         ports:
         - name: https
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/etc-hosts/hosts.jinja 
new/salt-master/salt/etc-hosts/hosts.jinja
--- old/salt-master/salt/etc-hosts/hosts.jinja  2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/etc-hosts/hosts.jinja  2018-02-02 10:42:57.000000000 +0100
@@ -12,17 +12,17 @@
 ### admin nodes ###
 {%- set admins = salt['mine.get']('roles:admin', 'network.interfaces', 'grain') %}
 {%- for admin_id, ifaces in admins.items() %}
-{{ salt.caasp_net.get_primary_ip(host=admin_id, ifaces=ifaces) }} {{ admin_id }} {{ admin_id }}.{{ pillar['internal_infra_domain'] }}
+{{ salt.caasp_net.get_primary_ip(host=admin_id, ifaces=ifaces) }} {{ salt.caasp_net.get_nodename(host=admin_id) }} {{ salt.caasp_net.get_nodename(host=admin_id) }}.{{ pillar['internal_infra_domain'] }} {{ admin_id }} {{ admin_id }}.{{ pillar['internal_infra_domain'] }}
 {%- endfor %}
 
 ### kubernetes masters ###
 {%- set masters = salt['mine.get']('roles:kube-master', 'network.interfaces', 'grain') %}
 {%- for master_id, ifaces in masters.items() %}
-{{ salt.caasp_net.get_primary_ip(host=master_id, ifaces=ifaces) }} {{ master_id }} {{ master_id }}.{{ pillar['internal_infra_domain'] }}
+{{ salt.caasp_net.get_primary_ip(host=master_id, ifaces=ifaces) }} {{ salt.caasp_net.get_nodename(host=master_id) }} {{ salt.caasp_net.get_nodename(host=master_id) }}.{{ pillar['internal_infra_domain'] }} {{ master_id }} {{ master_id }}.{{ pillar['internal_infra_domain'] }}
 {%- endfor %}
 
 ### kubernetes workers ###
 {%- set minions = salt['mine.get']('roles:kube-minion', 'network.interfaces', 'grain') %}
 {%- for minion_id, ifaces in minions.items() %}
-{{ salt.caasp_net.get_primary_ip(host=minion_id, ifaces=ifaces) }} {{ minion_id }} {{ minion_id }}.{{ pillar['internal_infra_domain'] }}
+{{ salt.caasp_net.get_primary_ip(host=minion_id, ifaces=ifaces) }} {{ salt.caasp_net.get_nodename(host=minion_id) }} {{ salt.caasp_net.get_nodename(host=minion_id) }}.{{ pillar['internal_infra_domain'] }} {{ minion_id }} {{ minion_id }}.{{ pillar['internal_infra_domain'] }}
 {%- endfor %}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/etcd/etcd.conf.jinja 
new/salt-master/salt/etcd/etcd.conf.jinja
--- old/salt-master/salt/etcd/etcd.conf.jinja   2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/etcd/etcd.conf.jinja   2018-02-02 10:42:57.000000000 +0100
@@ -6,7 +6,7 @@
 
 ETCD_LISTEN_CLIENT_URLS="https://0.0.0.0:2379";
 ETCD_LISTEN_PEER_URLS="https://0.0.0.0:2380";
-ETCD_ADVERTISE_CLIENT_URLS="https://{{ grains['caasp_fqdn'] }}:2379"
+ETCD_ADVERTISE_CLIENT_URLS="https://{{ grains['nodename'] }}:2379"
 ETCD_CLIENT_CERT_AUTH="true"
 
 ETCD_CA_FILE={{ pillar['ssl']['ca_file'] }}
@@ -26,7 +26,7 @@
 ETCD_DISCOVERY_FALLBACK="proxy"
 
 ETCD_INITIAL_CLUSTER_TOKEN="{{ pillar['etcd']['token'] }}"
-ETCD_INITIAL_ADVERTISE_PEER_URLS="https://{{ grains['caasp_fqdn'] }}:2380"
+ETCD_INITIAL_ADVERTISE_PEER_URLS="https://{{ grains['nodename'] }}:2380"
 ETCD_INITIAL_CLUSTER_STATE="new"
 
 # set log level
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/etcd/etcdctl.conf.jinja 
new/salt-master/salt/etcd/etcdctl.conf.jinja
--- old/salt-master/salt/etcd/etcdctl.conf.jinja        2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/etcd/etcdctl.conf.jinja        2018-02-02 10:42:57.000000000 +0100
@@ -6,7 +6,7 @@
 # etcdctl:
 # set -a; source /etc/sysconfig/etcdctl; set +a
 
-ETCDCTL_ENDPOINT="https://{{ grains['caasp_fqdn'] }}:2379"
+ETCDCTL_ENDPOINT="https://{{ grains['nodename'] }}:2379"
 
 # etcd v2 style flags
 ETCDCTL_CA_FILE={{ pillar['ssl']['ca_file'] }}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/etcd/init.sls 
new/salt-master/salt/etcd/init.sls
--- old/salt-master/salt/etcd/init.sls  2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/etcd/init.sls  2018-02-02 10:42:57.000000000 +0100
@@ -58,7 +58,7 @@
         etcdctl --key-file {{ pillar['ssl']['key_file'] }} \
                 --cert-file {{ pillar['ssl']['crt_file'] }} \
                 --ca-file {{ pillar['ssl']['ca_file'] }} \
-                --endpoints https://{{ grains['caasp_fqdn'] }}:2379 \
+                --endpoints https://{{ grains['nodename'] }}:2379 \
                 cluster-health | grep "cluster is healthy"
     - retry:
         attempts: 10
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/haproxy/haproxy.cfg.jinja 
new/salt-master/salt/haproxy/haproxy.cfg.jinja
--- old/salt-master/salt/haproxy/haproxy.cfg.jinja      2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/haproxy/haproxy.cfg.jinja      2018-02-02 10:42:57.000000000 +0100
@@ -25,8 +25,8 @@
         default-server inter 10s fall 3
         balance roundrobin
 
-{%- for minion_id, _ in salt['mine.get']('roles:kube-master', 'network.interfaces', 'grain').items() %}
-        server master-{{ minion_id }} {{ minion_id }}.{{ pillar['internal_infra_domain'] }}:{{ pillar['api']['int_ssl_port'] }} check
+{%- for minion_id, nodename in salt['mine.get']('roles:kube-master', 'nodename', 'grain').items() %}
+        server master-{{ minion_id }} {{ nodename }}:{{ pillar['api']['int_ssl_port'] }} check
 {% endfor -%}
 
 {%- if "admin" in salt['grains.get']('roles', []) %}
@@ -37,8 +37,8 @@
         default-server inter 10s fall 3
         balance roundrobin
 
-{%- for minion_id, _ in salt['mine.get']('roles:kube-master', 'network.interfaces', 'grain').items() %}
-        server master-{{ minion_id }} {{ minion_id }}.{{ pillar['internal_infra_domain'] }}:32000 check
+{%- for minion_id, nodename in salt['mine.get']('roles:kube-master', 'nodename', 'grain').items() %}
+        server master-{{ minion_id }} {{ nodename }}:32000 check
 {% endfor %}
 
 listen velum
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/hostname/init.sls 
new/salt-master/salt/hostname/init.sls
--- old/salt-master/salt/hostname/init.sls      2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/hostname/init.sls      1970-01-01 01:00:00.000000000 +0100
@@ -1,3 +0,0 @@
-caasp_fqdn:
-  grains.present:
-    - value: {{ grains['id'] }}.{{ pillar['internal_infra_domain'] }}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/kube-apiserver/apiserver.jinja 
new/salt-master/salt/kube-apiserver/apiserver.jinja
--- old/salt-master/salt/kube-apiserver/apiserver.jinja 2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/kube-apiserver/apiserver.jinja 2018-02-02 10:42:57.000000000 +0100
@@ -16,7 +16,7 @@
 KUBE_ETCD_SERVERS="--etcd-cafile={{ pillar['ssl']['ca_file'] }} \
                    --etcd-certfile={{ pillar['ssl']['kube_apiserver_crt'] }} \
                    --etcd-keyfile={{ pillar['ssl']['kube_apiserver_key'] }} \
-                   --etcd-servers=https://{{ grains['caasp_fqdn'] }}:2379"
+                   --etcd-servers=https://{{ grains['nodename'] }}:2379"
 
 # Address range to use for services
 # [alvaro] should not be in the same range as the flannel network (https://github.com/coreos/flannel/issues/232)
@@ -27,7 +27,7 @@
 
 # Add your own!
 KUBE_API_ARGS="--advertise-address={{ salt.caasp_net.get_primary_ip() }} \
-               --apiserver-count={{ salt['mine.get']('roles:kube-master', 'caasp_fqdn', expr_form='grain').values()|length }} \
+               --apiserver-count={{ salt['mine.get']('roles:kube-master', 'nodename', expr_form='grain').values()|length }} \
 {%- if cloud_provider %}
                --cloud-provider={{ pillar['cloud']['provider'] }} \
   {%- if cloud_provider == 'openstack' %}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/kube-apiserver/init.sls 
new/salt-master/salt/kube-apiserver/init.sls
--- old/salt-master/salt/kube-apiserver/init.sls        2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/kube-apiserver/init.sls        2018-02-02 10:42:57.000000000 +0100
@@ -10,7 +10,7 @@
 {{ certs("kube-apiserver",
          pillar['ssl']['kube_apiserver_crt'],
          pillar['ssl']['kube_apiserver_key'],
-         cn = grains['caasp_fqdn'],
+         cn = grains['nodename'],
          o = pillar['certificate_information']['subject_properties']['O']) }}
 
 kube-apiserver:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/kube-proxy/proxy.jinja 
new/salt-master/salt/kube-proxy/proxy.jinja
--- old/salt-master/salt/kube-proxy/proxy.jinja 2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/kube-proxy/proxy.jinja 2018-02-02 10:42:57.000000000 +0100
@@ -6,7 +6,7 @@
 # Add your own!
 KUBE_PROXY_ARGS="\
     --cluster-cidr={{ pillar['cluster_cidr'] }} \
-    --hostname-override={{ grains['caasp_fqdn'] }} \
+    --hostname-override={{ grains['nodename'] }} \
     --kubeconfig={{ pillar['paths']['kube_proxy_config'] }} \
     --proxy-mode=iptables \
     {{ pillar['components']['proxy']['args'] }} \
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/kubelet/init.sls 
new/salt-master/salt/kubelet/init.sls
--- old/salt-master/salt/kubelet/init.sls       2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/kubelet/init.sls       2018-02-02 10:42:57.000000000 +0100
@@ -19,7 +19,7 @@
 {% endif %}
 
 {% from '_macros/certs.jinja' import certs with context %}
-{{ certs('node:' + grains['caasp_fqdn'],
+{{ certs('node:' + grains['nodename'],
          pillar['ssl']['kubelet_crt'],
          pillar['ssl']['kubelet_key'],
          o = 'system:nodes') }}
@@ -101,12 +101,12 @@
 {% if not "kube-master" in salt['grains.get']('roles', []) and 
salt['grains.get']('kubelet:should_uncordon', false) %}
   caasp_cmd.run:
     - name: |
-        kubectl uncordon {{ grains['caasp_fqdn'] }}
+        kubectl uncordon {{ grains['nodename'] }}
     - retry:
         attempts: 10
         interval: 3
         until: |
-          test "$(kubectl --kubeconfig={{ pillar['paths']['kubeconfig'] }} get nodes {{ grains['caasp_fqdn'] }} -o=jsonpath='{.spec.unschedulable}' 2>/dev/null)" != "true"
+          test "$(kubectl --kubeconfig={{ pillar['paths']['kubeconfig'] }} get nodes {{ grains['nodename'] }} -o=jsonpath='{.spec.unschedulable}' 2>/dev/null)" != "true"
     - require:
       - file: {{ pillar['paths']['kubeconfig'] }}
 {% endif %}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/kubelet/kubelet.jinja 
new/salt-master/salt/kubelet/kubelet.jinja
--- old/salt-master/salt/kubelet/kubelet.jinja  2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/kubelet/kubelet.jinja  2018-02-02 10:42:57.000000000 +0100
@@ -12,8 +12,8 @@
 # The port for the info server to serve on
 KUBELET_PORT="--port={{ pillar['kubelet']['port'] }}"
 
-# Use <machine_id>.<internal_infra_domain> matching the SSL certificates
-KUBELET_HOSTNAME="--hostname-override={{ grains['caasp_fqdn'] }}"
+# Ensure we match the machine hostname
+KUBELET_HOSTNAME="--hostname-override={{ grains['nodename'] }}"
 
 # Add your own!
 KUBELET_ARGS="\
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/kubelet/stop.sls 
new/salt-master/salt/kubelet/stop.sls
--- old/salt-master/salt/kubelet/stop.sls       2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/kubelet/stop.sls       2018-02-02 10:42:57.000000000 +0100
@@ -4,13 +4,13 @@
 include:
   - kubectl-config
 
-{% set should_uncordon = salt['cmd.run']("kubectl --kubeconfig=" + pillar['paths']['kubeconfig'] + " get nodes " + grains['caasp_fqdn'] + " -o=jsonpath='{.spec.unschedulable}' 2>/dev/null") != "true" %}
+{% set should_uncordon = salt['cmd.run']("kubectl --kubeconfig=" + pillar['paths']['kubeconfig'] + " get nodes " + grains['nodename'] + " -o=jsonpath='{.spec.unschedulable}' 2>/dev/null") != "true" %}
 
 # If this fails we should ignore it and proceed anyway as Kubernetes will recover
 drain-kubelet:
   cmd.run:
     - name: |
-        kubectl --kubeconfig={{ pillar['paths']['kubeconfig'] }} drain {{ grains['caasp_fqdn'] }} --ignore-daemonsets --grace-period=300 --timeout=340s
+        kubectl --kubeconfig={{ pillar['paths']['kubeconfig'] }} drain {{ grains['nodename'] }} --ignore-daemonsets --grace-period=300 --timeout=340s
     - check_cmd:
       - /bin/true
     - require:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/salt-master/salt/kubelet/update-post-start-services.sls 
new/salt-master/salt/kubelet/update-post-start-services.sls
--- old/salt-master/salt/kubelet/update-post-start-services.sls 1970-01-01 01:00:00.000000000 +0100
+++ new/salt-master/salt/kubelet/update-post-start-services.sls 2018-02-02 10:42:57.000000000 +0100
@@ -0,0 +1,10 @@
+# invoked by the "update" orchestration after starting
+# all the services after rebooting
+
+remove-old-node-entry:
+  cmd.run:
+    - name: kubectl delete node {{ grains['machine_id'] + "." + pillar['internal_infra_domain'] }}
+    - check_cmd:
+      - /bin/true
+    - onlyif:
+      - kubectl get node {{ grains['machine_id'] + "." + pillar['internal_infra_domain'] }}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/kubelet/update-pre-orchestration.sh 
new/salt-master/salt/kubelet/update-pre-orchestration.sh
--- old/salt-master/salt/kubelet/update-pre-orchestration.sh    1970-01-01 01:00:00.000000000 +0100
+++ new/salt-master/salt/kubelet/update-pre-orchestration.sh    2018-02-02 10:42:57.000000000 +0100
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+set -euo pipefail
+
+# Preseeds a node in Kubernetes with critical data migrated from
+# an old node.
+
+OLD_NODE_NAME="$1"
+NEW_NODE_NAME="$2"
+
+##########################################################
+
+log() { echo "[machine-id migration]: $1 " ; logger -t "machine-id-migration" "$1" ; }
+
+exit_changes() {
+       log "$2"
+       echo  # an empty line here so the next line will be the last.
+       echo "changed=$1 comment='"$2"'"
+       exit 0
+}
+
+get_node_data() {
+       local template="$1"
+       kubectl get node "$OLD_NODE_NAME" --template="{{$template}}"
+}
+
+##########################################################
+
+log "migrating $OLD_NODE_NAME to $NEW_NODE_NAME"
+
+kubectl get node $OLD_NODE_NAME || exit_changes "no" "$OLD_NODE_NAME does not exist, nothing to migrate"
+
+cat << EOF > /tmp/k8s-node-migration.yaml
+apiVersion: v1
+kind: Node
+metadata:
+  name: ${NEW_NODE_NAME}
+  annotations:
+    flannel.alpha.coreos.com/backend-data: '$(get_node_data 'index .metadata.annotations "flannel.alpha.coreos.com/backend-data"')'
+    flannel.alpha.coreos.com/backend-type: '$(get_node_data 'index .metadata.annotations "flannel.alpha.coreos.com/backend-type"')'
+    flannel.alpha.coreos.com/public-ip: $(get_node_data 'index .metadata.annotations "flannel.alpha.coreos.com/public-ip"')
+    flannel.alpha.coreos.com/kube-subnet-manager: "true"
+spec:
+  externalID: ${NEW_NODE_NAME}
+  podCIDR: $(get_node_data .spec.podCIDR)
+EOF
+
+kubectl create -f /tmp/k8s-node-migration.yaml 2>/dev/null
+
+rm /tmp/k8s-node-migration.yaml
+
+exit_changes "yes" "Node data migrated from $OLD_NODE_NAME to $NEW_NODE_NAME"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/salt-master/salt/kubelet/update-pre-orchestration.sls 
new/salt-master/salt/kubelet/update-pre-orchestration.sls
--- old/salt-master/salt/kubelet/update-pre-orchestration.sls   1970-01-01 01:00:00.000000000 +0100
+++ new/salt-master/salt/kubelet/update-pre-orchestration.sls   2018-02-02 10:42:57.000000000 +0100
@@ -0,0 +1,19 @@
+# invoked by the "update" orchestration right
+# before starting the real orchestration updating
+# and rebooting machines
+
+include:
+ - kubectl-config
+
+# Migrates critical data from the old K8S node, to a new one with updated names
+/tmp/kubelet-update-pre-orchestration.sh:
+  file.managed:
+    - source: salt://kubelet/update-pre-orchestration.sh
+    - mode: 0755
+  cmd.run:
+    - name: /tmp/kubelet-update-pre-orchestration.sh {{ grains['machine_id'] + "." + pillar['internal_infra_domain'] }} {{ grains['nodename'] }}
+    - stateful: True
+    - env:
+      - KUBECONFIG: {{ pillar['paths']['kubeconfig'] }}
+    - require:
+      - {{ pillar['paths']['kubeconfig'] }}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/ldap/init.sls 
new/salt-master/salt/ldap/init.sls
--- old/salt-master/salt/ldap/init.sls  2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/ldap/init.sls  2018-02-02 10:42:57.000000000 +0100
@@ -5,10 +5,10 @@
 {% set names = [salt.caasp_pillar.get('dashboard')] %}
 
 {% from '_macros/certs.jinja' import alt_names, certs with context %}
-{{ certs("ldap:" + grains['caasp_fqdn'],
+{{ certs("ldap:" + grains['nodename'],
          pillar['ssl']['ldap_crt'],
          pillar['ssl']['ldap_key'],
-         cn = grains['caasp_fqdn'],
+         cn = grains['nodename'],
          extra_alt_names = alt_names(names)) }}
 
 openldap_restart:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/motd/motd.jinja 
new/salt-master/salt/motd/motd.jinja
--- old/salt-master/salt/motd/motd.jinja        2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/motd/motd.jinja        2018-02-02 10:42:57.000000000 +0100
@@ -1,7 +1,7 @@
 Welcome!
 
 Machine ID: {{ salt['grains.get']('machine_id', 'ERROR') }}
-Internal FQDN: {{ salt['grains.get']('caasp_fqdn', 'ERROR') }}
+Hostname: {{ salt['grains.get']('nodename', 'ERROR') }}
 
 The roles of this node are:
 {%- for role in salt['grains.get']('roles', []) %}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/orch/kubernetes.sls 
new/salt-master/salt/orch/kubernetes.sls
--- old/salt-master/salt/orch/kubernetes.sls    2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/orch/kubernetes.sls    2018-02-02 10:42:57.000000000 +0100
@@ -6,7 +6,7 @@
 {%- set num_etcd_masters = salt.caasp_etcd.get_cluster_size() %}
 
 # Ensure the node is marked as bootstrapping
-set_bootstrap_in_progress_flag:
+set-bootstrap-in-progress-flag:
   salt.function:
     - tgt: '*'
     - name: grains.setval
@@ -14,99 +14,92 @@
       - bootstrap_in_progress
       - true
 
-sync_pillar:
+sync-pillar:
   salt.runner:
     - name: saltutil.sync_pillar
 
-disable_rebootmgr:
+disable-rebootmgr:
   salt.state:
     - tgt: 'roles:(admin|kube-(master|minion))'
     - tgt_type: grain_pcre
     - sls:
       - rebootmgr
     - require:
-      - salt: set_bootstrap_in_progress_flag
+      - set-bootstrap-in-progress-flag
 
-hostname_setup:
-  salt.state:
-    - tgt: 'roles:(admin|kube-(master|minion))'
-    - tgt_type: grain_pcre
-    - sls:
-      - hostname
-
-update_pillar:
+update-pillar:
   salt.function:
     - tgt: '*'
     - name: saltutil.refresh_pillar
     - require:
-      - salt: hostname_setup
+      - disable-rebootmgr
 
-update_grains:
+update-grains:
   salt.function:
     - tgt: '*'
     - name: saltutil.refresh_grains
     - require:
-      - salt: hostname_setup
+      - disable-rebootmgr
 
-update_mine:
+update-mine:
   salt.function:
     - tgt: '*'
     - name: mine.update
     - require:
-      - salt: update_pillar
-      - salt: update_grains
+      - update-pillar
+      - update-grains
 
-update_modules:
+update-modules:
   salt.function:
     - tgt: '*'
     - name: saltutil.sync_all
     - kwarg:
         refresh: True
 
-etc_hosts_setup:
+etc-hosts-setup:
   salt.state:
     - tgt: 'roles:(admin|kube-(master|minion))'
     - tgt_type: grain_pcre
     - sls:
       - etc-hosts
     - require:
-      - salt: update_mine
+      - update-mine
 
-ca_setup:
+ca-setup:
   salt.state:
     - tgt: 'roles:ca'
     - tgt_type: grain
     - highstate: True
     - require:
-      - salt: etc_hosts_setup
-      - salt: update_mine
+      - etc-hosts-setup
+      - update-mine
 
-generate_sa_key:
+generate-sa-key:
   salt.state:
     - tgt: 'roles:ca'
     - tgt_type: grain
     - sls:
       - kubernetes-common.generate-serviceaccount-key
     - require:
-      - salt: ca_setup
+      - ca-setup
 
-update_mine_again:
+update-mine-again:
   salt.function:
     - tgt: '*'
     - name: mine.update
     - require:
-      - salt: generate_sa_key
+      - generate-sa-key
 
-etcd_discovery_setup:
+etcd-discovery-setup:
   salt.state:
     - tgt: {{ super_master }}
     - sls:
       - etcd-discovery
     - require:
-      - salt: update_modules
+      - update-modules
 
 # setup {{ num_etcd_masters }} etcd masters
-etcd_setup:
+etcd-setup:
   salt.state:
     - tgt: 'roles:kube-(master|minion)'
     - tgt_type: grain_pcre
@@ -114,57 +107,57 @@
       - etcd
     - batch: {{ num_etcd_masters }}
     - require:
-      - salt: etcd_discovery_setup
+      - etcd-discovery-setup
 
-admin_setup:
+admin-setup:
   salt.state:
     - tgt: 'roles:admin'
     - tgt_type: grain
     - highstate: True
     - batch: {{ default_batch }}
     - require:
-      - salt: etcd_setup
+      - etcd-setup
 
-kube_master_setup:
+kube-master-setup:
   salt.state:
     - tgt: 'roles:kube-master'
     - tgt_type: grain
     - highstate: True
     - batch: {{ default_batch }}
     - require:
-      - salt: admin_setup
-      - salt: generate_sa_key
-      - salt: update_mine_again
+      - admin-setup
+      - generate-sa-key
+      - update-mine-again
 
-kube_minion_setup:
+kube-minion-setup:
   salt.state:
     - tgt: 'roles:kube-minion'
     - tgt_type: grain
     - highstate: True
     - batch: {{ default_batch }}
     - require:
-      - salt: kube_master_setup
+      - kube-master-setup
 
 # we must start CNI right after the masters/minions reach highstate,
 # as nodes will be NotReady until the CNI DaemonSet is loaded and running...
-cni_setup:
+cni-setup:
   salt.state:
     - tgt: {{ super_master }}
     - sls:
       - cni
     - require:
-      - salt: kube_master_setup
-      - salt: kube_minion_setup
+      - kube-master-setup
+      - kube-minion-setup
 
-reboot_setup:
+reboot-setup:
   salt.state:
     - tgt: {{ super_master }}
     - sls:
       - reboot
     - require:
-      - salt: cni_setup
+      - cni-setup
 
-services_setup:
+services-setup:
   salt.state:
     - tgt: {{ super_master }}
     - sls:
@@ -173,11 +166,11 @@
       - addons.tiller
       - dex
     - require:
-      - salt: reboot_setup
+      - reboot-setup
 
 # This flag indicates at least one bootstrap has completed at some
 # point in time on this node.
-set_bootstrap_complete_flag:
+set-bootstrap-complete-flag:
   salt.function:
     - tgt: '*'
     - name: grains.setval
@@ -185,10 +178,10 @@
       - bootstrap_complete
       - true
     - require:
-      - salt: services_setup
+      - services-setup
 
 # Ensure the node is marked as finished bootstrapping
-clear_bootstrap_in_progress_flag:
+clear-bootstrap-in-progress-flag:
   salt.function:
     - tgt: '*'
     - name: grains.setval
@@ -196,4 +189,4 @@
       - bootstrap_in_progress
       - false
     - require:
-      - salt: set_bootstrap_complete_flag
+      - set-bootstrap-complete-flag
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/orch/update-etc-hosts.sls 
new/salt-master/salt/orch/update-etc-hosts.sls
--- old/salt-master/salt/orch/update-etc-hosts.sls      2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/orch/update-etc-hosts.sls      2018-02-02 10:42:57.000000000 +0100
@@ -1,6 +1,6 @@
 {%- set updates_all_target = 'P@roles:(admin|kube-(master|minion)) and G@bootstrap_complete:true and not G@bootstrap_in_progress:true and not G@update_in_progress:true' %}
 
-{%- if salt.saltutil.runner('mine.get', tgt=updates_all_target, fun='caasp_fqdn', tgt_type='compound')|length > 0 %}
+{%- if salt.saltutil.runner('mine.get', tgt=updates_all_target, fun='nodename', tgt_type='compound')|length > 0 %}
 update_pillar:
   salt.function:
     - tgt: {{ updates_all_target }}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/orch/update.sls 
new/salt-master/salt/orch/update.sls
--- old/salt-master/salt/orch/update.sls        2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/orch/update.sls        2018-02-02 10:42:57.000000000 +0100
@@ -11,7 +11,7 @@
 
 # Generate sa key (we should refactor this as part of the ca highstate along with its counterpart
 # in orch/kubernetes.sls)
-generate_sa_key:
+generate-sa-key:
   salt.state:
     - tgt: 'roles:ca'
     - tgt_type: grain
@@ -19,38 +19,38 @@
       - kubernetes-common.generate-serviceaccount-key
 
 # Generic Updates
-sync_pillar:
+sync-pillar:
   salt.runner:
     - name: saltutil.sync_pillar
 
-update_pillar:
+update-pillar:
   salt.function:
     - tgt: '*'
     - name: saltutil.refresh_pillar
     - require:
-      - salt: generate_sa_key
+      - generate-sa-key
 
-update_grains:
+update-grains:
   salt.function:
     - tgt: '*'
     - name: saltutil.refresh_grains
 
-update_mine:
+update-mine:
   salt.function:
     - tgt: '*'
     - name: mine.update
     - require:
-       - salt: update_pillar
-       - salt: update_grains
+       - update-pillar
+       - update-grains
 
-update_modules:
+update-modules:
   salt.function:
     - name: saltutil.sync_modules
     - tgt: '*'
     - kwarg:
         refresh: True
     - require:
-      - salt: update_mine
+      - update-mine
 
 # Perform any migrations necessary before starting the update orchestration. All services and
 # machines should be running and we can migrate some data on the whole cluster and then proceed
@@ -62,8 +62,9 @@
     - batch: 3
     - sls:
       - cni.update-pre-orchestration
+      - kubelet.update-pre-orchestration
     - require:
-      - salt: update_modules
+      - update-modules
 
 # Get list of masters needing reboot
 {%- set masters = salt.saltutil.runner('mine.get', tgt='G@roles:kube-master and G@tx_update_reboot_needed:true', fun='network.interfaces', tgt_type='compound') %}
@@ -87,7 +88,7 @@
     - sls:
       - cni.update-pre-reboot
     - require:
-      - salt: {{ master_id }}-clean-shutdown
+      - {{ master_id }}-clean-shutdown
 
 # Reboot the node
 {{ master_id }}-reboot:
@@ -99,7 +100,7 @@
     - kwarg:
         bg: True
     - require:
-      - salt: {{ master_id }}-pre-reboot
+      - {{ master_id }}-pre-reboot
 
 # Wait for it to start again
 {{ master_id }}-wait-for-start:
@@ -109,7 +110,7 @@
     - id_list:
       - {{ master_id }}
     - require:
-      - salt: {{ master_id }}-reboot
+      - {{ master_id }}-reboot
 
 # Start services
 {{ master_id }}-start-services:
@@ -117,7 +118,7 @@
     - tgt: {{ master_id }}
     - highstate: True
     - require:
-      - salt: {{ master_id }}-wait-for-start
+      - {{ master_id }}-wait-for-start
 
 # Perform any migrations after services are started
 {{ master_id }}-post-start-services:
@@ -125,18 +126,20 @@
     - tgt: {{ master_id }}
     - sls:
       - cni.update-post-start-services
+      - kubelet.update-post-start-services
     - require:
-      - salt: {{ master_id }}-start-services
+      - {{ master_id }}-start-services
 
 {{ master_id }}-reboot-needed-grain:
   salt.function:
     - tgt: {{ master_id }}
-    - name: grains.setval
+    - name: grains.delval
     - arg:
       - tx_update_reboot_needed
-      - false
+    - kwarg:
+        destructive: True
     - require:
-      - salt: {{ master_id }}-post-start-services
+      - {{ master_id }}-post-start-services
 
 {% endfor %}
 
@@ -157,7 +160,7 @@
     - require:
       # wait until all the masters have been updated
 {%- for master_id in masters.keys() %}
-      - salt: {{ master_id }}-reboot-needed-grain
+      - {{ master_id }}-reboot-needed-grain
 {%- endfor %}
 {% endif %}
 
@@ -168,7 +171,7 @@
     - sls:
       - cni.update-pre-reboot
     - require:
-      - salt: {{ worker_id }}-clean-shutdown
+      - {{ worker_id }}-clean-shutdown
 
 # Reboot the node
 {{ worker_id }}-reboot:
@@ -180,7 +183,7 @@
     - kwarg:
         bg: True
     - require:
-      - salt: {{ worker_id }}-pre-reboot
+      - {{ worker_id }}-pre-reboot
 
 # Wait for it to start again
 {{ worker_id }}-wait-for-start:
@@ -190,7 +193,7 @@
     - id_list:
       - {{ worker_id }}
     - require:
-      - salt: {{ worker_id }}-reboot
+      - {{ worker_id }}-reboot
 
 # Start services
 {{ worker_id }}-start-services:
@@ -206,18 +209,20 @@
     - tgt: {{ worker_id }}
     - sls:
       - cni.update-post-start-services
+      - kubelet.update-post-start-services
     - require:
-      - salt: {{ worker_id }}-start-services
+      - {{ worker_id }}-start-services
 
 {{ worker_id }}-update-reboot-needed-grain:
   salt.function:
     - tgt: {{ worker_id }}
-    - name: grains.setval
+    - name: grains.delval
     - arg:
       - tx_update_reboot_needed
-      - false
+    - kwarg:
+        destructive: True
     - require:
-      - salt: {{ worker_id }}-update-post-start-services
+      - {{ worker_id }}-update-post-start-services
 
 # Ensure the node is marked as finished upgrading
 {{ worker_id }}-remove-update-grain:
@@ -229,7 +234,7 @@
     - kwarg:
         destructive: True
     - require:
-      - salt: {{ worker_id }}-update-reboot-needed-grain
+      - {{ worker_id }}-update-reboot-needed-grain
 
 {% endfor %}
 
@@ -244,7 +249,7 @@
 
 # we must start CNI right after the masters/minions reach highstate,
 # as nodes will be NotReady until the CNI DaemonSet is loaded and running...
-cni_setup:
+cni-setup:
   salt.state:
     - tgt: {{ super_master }}
     - sls:
@@ -253,13 +258,13 @@
     - require:
 # wait until all the machines in the cluster have been upgraded
 {%- for worker_id in workers.keys() %}
-      - salt: {{ worker_id }}-remove-update-grain
+      - {{ worker_id }}-remove-update-grain
 {%- endfor %}
 {% endif %}
 
 # (re-)apply all the manifests
 # this will perform a rolling-update for existing daemonsets
-services_setup:
+services-setup:
   salt.state:
     - tgt: {{ super_master }}
     - sls:
@@ -268,7 +273,19 @@
       - addons.tiller
       - dex
     - require:
-      - cni_setup
+      - cni-setup
+
+# Remove the now defunct caasp_fqdn grain (Remove for 4.0).
+remove-caasp-fqdn-grain:
+  salt.function:
+    - tgt: '*'
+    - name: grains.delval
+    - arg:
+      - caasp_fqdn
+    - kwarg:
+        destructive: True
+    - require:
+      - services-setup
 
 masters-remove-update-grain:
   salt.function:
@@ -280,4 +297,4 @@
     - kwarg:
         destructive: True
     - require:
-      - salt: services_setup
+      - remove-caasp-fqdn-grain
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/top.sls new/salt-master/salt/top.sls
--- old/salt-master/salt/top.sls        2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/top.sls        2018-02-02 10:42:57.000000000 +0100
@@ -4,7 +4,6 @@
     - ca
   'roles:(admin|kube-(master|minion))':
     - match: grain_pcre
-    - hostname
     - swap
     - etc-hosts
     - proxy
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/velum/init.sls 
new/salt-master/salt/velum/init.sls
--- old/salt-master/salt/velum/init.sls 2018-01-31 14:11:12.000000000 +0100
+++ new/salt-master/salt/velum/init.sls 2018-02-02 10:42:57.000000000 +0100
@@ -7,8 +7,8 @@
                 salt.caasp_pillar.get('dashboard')] %}
 
 {% from '_macros/certs.jinja' import alt_names, certs with context %}
-{{ certs("velum:" + grains['caasp_fqdn'],
+{{ certs("velum:" + grains['nodename'],
          pillar['ssl']['velum_crt'],
          pillar['ssl']['velum_key'],
-         cn = grains['caasp_fqdn'],
+         cn = grains['nodename'],
          extra_alt_names = alt_names(names)) }}

