Hello community,

here is the log from the commit of package kubernetes-salt for openSUSE:Factory 
checked in at 2018-04-16 12:48:46
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/kubernetes-salt (Old)
 and      /work/SRC/openSUSE:Factory/.kubernetes-salt.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "kubernetes-salt"

Mon Apr 16 12:48:46 2018 rev:13 rq:596327 version:3.0.0+git_r688_ac25f0d

Changes:
--------
--- /work/SRC/openSUSE:Factory/kubernetes-salt/kubernetes-salt.changes 2018-04-13 12:50:00.187046065 +0200
+++ /work/SRC/openSUSE:Factory/.kubernetes-salt.new/kubernetes-salt.changes    2018-04-16 12:48:51.829385650 +0200
@@ -1,0 +2,45 @@
+Fri Apr 13 12:11:18 UTC 2018 - containers-bugow...@suse.de
+
+- Commit 0e7d745 by Alvaro Saurin alvaro.sau...@gmail.com
+ Configure taints/labels on the replacement node. Fix typo.
+ 
+ feature#node_removal
+
+
+-------------------------------------------------------------------
+Fri Apr 13 11:44:45 UTC 2018 - containers-bugow...@suse.de
+
+- Commit 69d271d by Rafael Fernández López eresli...@ereslibre.es
+ Remove unneeded includes `ca-cert` and `cert` from `velum/init.sls` and
+ `ldap/init.sls`
+ 
+ feature#deployment-stability
+
+
+-------------------------------------------------------------------
+Fri Apr 13 11:04:32 UTC 2018 - containers-bugow...@suse.de
+
+- Commit 1de5846 by Kiall Mac Innes ki...@macinnes.ie
+ Add PodSecurityPolicy Support
+ 
+ Add support for PodSecurityPolicies, allowing us to disable use of the
+ hostPath volume type.
+ 
+ This change adds two PSPs:
+ 
+ * unprivileged (default; assigned to all users)
+ 
+ The unprivileged PodSecurityPolicy is intended to be a reasonable compromise
+ between the reality of Kubernetes workloads and suse:caasp:psp:privileged.
+ By default, we'll grant this PSP to all users and service accounts.
+ 
+ * privileged
+ 
+ The privileged PodSecurityPolicy is intended to be given only to trusted
+ workloads. It imposes as few restrictions as possible and should only be
+ assigned to highly trusted users.
+ 
+ Fixes bsc#1047535
+
+
+-------------------------------------------------------------------
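
The ClusterRoleBindings added below (for dex, kube-dns, and flannel) all follow the same pattern: grant a service account the `use` verb on one of the two policies through the suse:caasp:psp:privileged ClusterRole. As a sketch, an operator could grant the privileged policy to an additional workload the same way (the `my-agent` name is hypothetical, not part of this change):

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: suse:caasp:psp:my-agent   # hypothetical binding name
roleRef:
  kind: ClusterRole
  name: suse:caasp:psp:privileged
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: my-agent                  # hypothetical service account
  namespace: kube-system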

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ kubernetes-salt.spec ++++++
--- /var/tmp/diff_new_pack.d4vtxZ/_old  2018-04-16 12:48:53.741316100 +0200
+++ /var/tmp/diff_new_pack.d4vtxZ/_new  2018-04-16 12:48:53.745315954 +0200
@@ -32,7 +32,7 @@
 
 Name:           kubernetes-salt
 %define gitrepo salt
-Version:        3.0.0+git_r682_143b3e6
+Version:        3.0.0+git_r688_ac25f0d
 Release:        0
 BuildArch:      noarch
 Summary:        Production-Grade Container Scheduling and Management

++++++ master.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/pillar/params.sls new/salt-master/pillar/params.sls
--- old/salt-master/pillar/params.sls   2018-04-11 09:40:33.000000000 +0200
+++ new/salt-master/pillar/params.sls   2018-04-13 14:11:30.000000000 +0200
@@ -67,6 +67,7 @@
 
 # install the addons (ie, DNS)
 addons:
+  psp:    'true'
   dns:    'true'
   tiller: 'false'
 
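The new `addons:psp` key is read by `addons/psp/init.sls` (below), which treats a missing key as enabled. Assuming operator pillar overrides follow the same quoted-string convention as this file, disabling the addon would be a one-line change (a sketch, not part of this commit):

addons:
  psp:    'false'
  dns:    'true'
  tiller: 'false'
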
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/addons/dex/manifests/10-clusterrolebinding.yaml new/salt-master/salt/addons/dex/manifests/10-clusterrolebinding.yaml
--- old/salt-master/salt/addons/dex/manifests/10-clusterrolebinding.yaml       2018-04-11 09:40:33.000000000 +0200
+++ new/salt-master/salt/addons/dex/manifests/10-clusterrolebinding.yaml       2018-04-13 14:11:30.000000000 +0200
@@ -26,3 +26,18 @@
 - kind: Group
   name: "{{ pillar['ldap']['admin_group_name'] }}"
   apiGroup: rbac.authorization.k8s.io
+---
+# Allow Dex to use the suse:caasp:psp:privileged
+# PodSecurityPolicy.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: suse:caasp:psp:dex
+roleRef:
+  kind: ClusterRole
+  name: suse:caasp:psp:privileged
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: ServiceAccount
+  name: dex
+  namespace: kube-system
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/addons/dex/manifests/20-deployment.yaml new/salt-master/salt/addons/dex/manifests/20-deployment.yaml
--- old/salt-master/salt/addons/dex/manifests/20-deployment.yaml       2018-04-11 09:40:33.000000000 +0200
+++ new/salt-master/salt/addons/dex/manifests/20-deployment.yaml       2018-04-13 14:11:30.000000000 +0200
@@ -26,6 +26,7 @@
         checksum/secret: {{ salt.hashutil.digest_file("/etc/kubernetes/addons/dex/15-secret.yaml", "sha256") }}
     spec:
       serviceAccountName: dex
+
       tolerations:
       - key: node-role.kubernetes.io/master
         operator: Exists
@@ -90,9 +91,11 @@
           items:
           - key: config.yaml
             path: config.yaml
+
       - name: tls
         secret:
           secretName: dex-tls
+
       - name: ca
         hostPath:
           path: {{ pillar['ssl']['ca_file'] }}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/addons/dns/init.sls new/salt-master/salt/addons/dns/init.sls
--- old/salt-master/salt/addons/dns/init.sls    2018-04-11 09:40:33.000000000 +0200
+++ new/salt-master/salt/addons/dns/init.sls    2018-04-13 14:11:30.000000000 +0200
@@ -1,4 +1,4 @@
-{% if salt.caasp_pillar.get('addons:dns', False) %}
+{% if salt.caasp_pillar.get('addons:dns', True) %}
 
 include:
   - kube-apiserver
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/addons/dns/manifests/10-clusterrolebinding.yaml new/salt-master/salt/addons/dns/manifests/10-clusterrolebinding.yaml
--- old/salt-master/salt/addons/dns/manifests/10-clusterrolebinding.yaml       2018-04-11 09:40:33.000000000 +0200
+++ new/salt-master/salt/addons/dns/manifests/10-clusterrolebinding.yaml       2018-04-13 14:11:30.000000000 +0200
@@ -11,3 +11,18 @@
 - kind: ServiceAccount
   name: kube-dns
   namespace: kube-system
+---
+# Allow Kube DNS to use the suse:caasp:psp:privileged
+# PodSecurityPolicy.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: suse:caasp:psp:kube-dns
+roleRef:
+  kind: ClusterRole
+  name: suse:caasp:psp:privileged
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: ServiceAccount
+  name: kube-dns
+  namespace: kube-system
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/addons/psp/init.sls new/salt-master/salt/addons/psp/init.sls
--- old/salt-master/salt/addons/psp/init.sls    1970-01-01 01:00:00.000000000 +0100
+++ new/salt-master/salt/addons/psp/init.sls    2018-04-13 14:11:30.000000000 +0200
@@ -0,0 +1,18 @@
+{% if salt.caasp_pillar.get('addons:psp', True) %}
+
+include:
+  - kube-apiserver
+  - kubectl-config
+
+{% from '_macros/kubectl.jinja' import kubectl, kubectl_apply_template, kubectl_apply_dir_template with context %}
+
+{{ kubectl_apply_dir_template("salt://addons/psp/manifests/",
+                              "/etc/kubernetes/addons/psp/") }}
+
+{% else %}
+
+dummy:
+  cmd.run:
+    - name: echo "PSP addon not enabled in config"
+
+{% endif %}
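
Judging by its name and arguments, `kubectl_apply_dir_template` renders the manifests from `salt://addons/psp/manifests/` into `/etc/kubernetes/addons/psp/` and applies them on the targeted master. A rough hand-written equivalent as a Salt state (a sketch only; the real macro in `_macros/kubectl.jinja` likely does more, and the state id here is illustrative):

psp-addon-apply:
  cmd.run:
    - name: kubectl apply -f /etc/kubernetes/addons/psp/
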
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/addons/psp/manifests/10-podsecuritypolicy-privileged.yaml new/salt-master/salt/addons/psp/manifests/10-podsecuritypolicy-privileged.yaml
--- old/salt-master/salt/addons/psp/manifests/10-podsecuritypolicy-privileged.yaml     1970-01-01 01:00:00.000000000 +0100
+++ new/salt-master/salt/addons/psp/manifests/10-podsecuritypolicy-privileged.yaml     2018-04-13 14:11:30.000000000 +0200
@@ -0,0 +1,77 @@
+# The privileged PodSecurityPolicy is intended to be given
+# only to trusted workloads. It imposes as few restrictions as possible
+# and should only be assigned to highly trusted users.
+---
+apiVersion: extensions/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: suse.caasp.psp.privileged
+  annotations:
+    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
+    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
+spec:
+  # Privileged
+  privileged: true
+  # Volumes and File Systems
+  volumes:
+    # Kubernetes Pseudo Volume Types
+    - configMap
+    - secret
+    - emptyDir
+    - downwardAPI
+    - projected
+    - persistentVolumeClaim
+    # Kubernetes Host Volume Types
+    - hostPath
+    # Networked Storage
+    - nfs
+    - rbd
+    - cephFS
+    - glusterfs
+    - fc
+    - iscsi
+    # Cloud Volumes
+    - gcePersistentDisk
+    - awsElasticBlockStore
+    - azureDisk
+    - azureFile
+    - vsphereVolume
+  allowedFlexVolumes: []
+  #allowedHostPaths: []
+  readOnlyRootFilesystem: false
+  # Users and groups
+  runAsUser:
+    rule: RunAsAny
+  supplementalGroups:
+    rule: RunAsAny
+  fsGroup:
+    rule: RunAsAny
+  # Privilege Escalation
+  allowPrivilegeEscalation: true
+  defaultAllowPrivilegeEscalation: true
+  # Capabilities
+  allowedCapabilities:
+    - '*'
+  defaultAddCapabilities: []
+  requiredDropCapabilities: []
+  # Host namespaces
+  hostPID: true
+  hostIPC: true
+  hostNetwork: true
+  hostPorts:
+  - min: 0
+    max: 65535
+  seLinux:
+    # SELinux is unused in CaaSP
+    rule: 'RunAsAny'
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: suse:caasp:psp:privileged
+rules:
+  - apiGroups: ['extensions']
+    resources: ['podsecuritypolicies']
+    verbs: ['use']
+    resourceNames: ['suse.caasp.psp.privileged']
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/addons/psp/manifests/10-podsecuritypolicy-unprivileged.yaml new/salt-master/salt/addons/psp/manifests/10-podsecuritypolicy-unprivileged.yaml
--- old/salt-master/salt/addons/psp/manifests/10-podsecuritypolicy-unprivileged.yaml   1970-01-01 01:00:00.000000000 +0100
+++ new/salt-master/salt/addons/psp/manifests/10-podsecuritypolicy-unprivileged.yaml   2018-04-13 14:11:30.000000000 +0200
@@ -0,0 +1,82 @@
+# The unprivileged PodSecurityPolicy is intended to be a
+# reasonable compromise between the reality of Kubernetes workloads and
+# suse:caasp:psp:privileged. By default, we'll grant this PSP to all
+# users and service accounts.
+---
+apiVersion: extensions/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: suse.caasp.psp.unprivileged
+  annotations:
+    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
+    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
+    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
+    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
+spec:
+  # Privileged
+  privileged: false
+  # Volumes and File Systems
+  volumes:
+    # Kubernetes Pseudo Volume Types
+    - configMap
+    - secret
+    - emptyDir
+    - downwardAPI
+    - projected
+    - persistentVolumeClaim
+    # Networked Storage
+    - nfs
+    - rbd
+    - cephFS
+    - glusterfs
+    - fc
+    - iscsi
+    # Cloud Volumes
+    - gcePersistentDisk
+    - awsElasticBlockStore
+    - azureDisk
+    - azureFile
+    - vsphereVolume
+  allowedFlexVolumes: []
+  allowedHostPaths:
+    # Note: We don't allow hostPath volumes above, but set this to a path we
+    # control anyway as a belt+braces protection. /dev/null may be a better
+    # option, but the implications of pointing this towards a device are
+    # unclear.
+    - pathPrefix: /opt/kubernetes-hostpath-volumes
+  readOnlyRootFilesystem: false
+  # Users and groups
+  runAsUser:
+    rule: RunAsAny
+  supplementalGroups:
+    rule: RunAsAny
+  fsGroup:
+    rule: RunAsAny
+  # Privilege Escalation
+  allowPrivilegeEscalation: false
+  defaultAllowPrivilegeEscalation: false
+  # Capabilities
+  allowedCapabilities: []
+  defaultAddCapabilities: []
+  requiredDropCapabilities: []
+  # Host namespaces
+  hostPID: false
+  hostIPC: false
+  hostNetwork: false
+  hostPorts:
+  - min: 0
+    max: 65535
+  # SELinux
+  seLinux:
+    # SELinux is unused in CaaSP
+    rule: 'RunAsAny'
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: suse:caasp:psp:unprivileged
+rules:
+  - apiGroups: ['extensions']
+    resources: ['podsecuritypolicies']
+    verbs: ['use']
+    resourceNames: ['suse.caasp.psp.unprivileged']
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/addons/psp/manifests/20-clusterrolebinding.yaml new/salt-master/salt/addons/psp/manifests/20-clusterrolebinding.yaml
--- old/salt-master/salt/addons/psp/manifests/20-clusterrolebinding.yaml       1970-01-01 01:00:00.000000000 +0100
+++ new/salt-master/salt/addons/psp/manifests/20-clusterrolebinding.yaml       2018-04-13 14:11:30.000000000 +0200
@@ -0,0 +1,34 @@
+---
+# Allow all users and serviceaccounts to use the unprivileged
+# PodSecurityPolicy
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: suse:caasp:psp:default
+roleRef:
+  kind: ClusterRole
+  name: suse:caasp:psp:unprivileged
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: Group
+  apiGroup: rbac.authorization.k8s.io
+  name: system:serviceaccounts
+- kind: Group
+  apiGroup: rbac.authorization.k8s.io
+  name: system:authenticated
+
+---
+# Allow CaaSP nodes to use the privileged
+# PodSecurityPolicy.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: suse:caasp:psp:nodes
+roleRef:
+  kind: ClusterRole
+  name: suse:caasp:psp:privileged
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: Group
+  apiGroup: rbac.authorization.k8s.io
+  name: system:nodes
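
Because PSP access is ordinary RBAC, the same grant can also be scoped to a single namespace with a RoleBinding instead of a ClusterRoleBinding. A sketch (not part of this change; the namespace and subject are hypothetical):

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: psp-privileged      # hypothetical binding name
  namespace: example-ns     # hypothetical namespace
roleRef:
  kind: ClusterRole
  name: suse:caasp:psp:privileged
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: default
  namespace: example-ns
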
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/cni/kube-flannel-rbac.yaml.jinja new/salt-master/salt/cni/kube-flannel-rbac.yaml.jinja
--- old/salt-master/salt/cni/kube-flannel-rbac.yaml.jinja       2018-04-11 09:40:33.000000000 +0200
+++ new/salt-master/salt/cni/kube-flannel-rbac.yaml.jinja       2018-04-13 14:11:30.000000000 +0200
@@ -1,11 +1,4 @@
 ---
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: flannel
-  namespace: kube-system
-
----
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
@@ -43,4 +36,20 @@
 subjects:
 - kind: ServiceAccount
   name: flannel
+  namespace: kube-system
+
+---
+# Allow Flannel to use the suse:caasp:psp:privileged
+# PodSecurityPolicy.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: suse:caasp:psp:flannel
+roleRef:
+  kind: ClusterRole
+  name: suse:caasp:psp:privileged
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: ServiceAccount
+  name: flannel
   namespace: kube-system
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/cni/kube-flannel.yaml.jinja new/salt-master/salt/cni/kube-flannel.yaml.jinja
--- old/salt-master/salt/cni/kube-flannel.yaml.jinja    2018-04-11 09:40:33.000000000 +0200
+++ new/salt-master/salt/cni/kube-flannel.yaml.jinja    2018-04-13 14:11:30.000000000 +0200
@@ -4,12 +4,13 @@
 metadata:
   name: flannel
   namespace: kube-system
+
 ---
 kind: ConfigMap
 apiVersion: v1
 metadata:
   name: flannel-plugin-config-map
-  namespace: "kube-system"
+  namespace: kube-system
   labels:
     tier: node
     app: flannel
@@ -52,12 +53,13 @@
         "Type": "{{ salt.caasp_pillar.get('flannel:backend') }}"
       }
     }
+
 ---
 apiVersion: apps/v1beta2
 kind: DaemonSet
 metadata:
   name: kube-flannel
-  namespace: "kube-system"
+  namespace: kube-system
   labels:
     tier: node
     k8s-app: flannel
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/kube-apiserver/apiserver.jinja new/salt-master/salt/kube-apiserver/apiserver.jinja
--- old/salt-master/salt/kube-apiserver/apiserver.jinja 2018-04-11 09:40:33.000000000 +0200
+++ new/salt-master/salt/kube-apiserver/apiserver.jinja 2018-04-13 14:11:30.000000000 +0200
@@ -23,7 +23,7 @@
 KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range={{ pillar['services_cidr'] }}"
 
 # default admission control policies
-KUBE_ADMISSION_CONTROL="--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,NodeRestriction,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds"
+KUBE_ADMISSION_CONTROL="--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,NodeRestriction,PodSecurityPolicy,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds"
 
 # Add your own!
 KUBE_API_ARGS="--advertise-address={{ salt.caasp_net.get_primary_ip() }} \
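
With the PodSecurityPolicy admission plugin added above, any pod whose spec is not permitted by a policy bound to its user or service account is rejected at admission time. For example, a pod like the following would now be refused for subjects holding only suse:caasp:psp:unprivileged (the pod name and image are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: privileged-test     # hypothetical pod
spec:
  containers:
  - name: test
    image: opensuse/leap    # illustrative image
    securityContext:
      privileged: true      # denied: the unprivileged PSP sets privileged: false
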
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/ldap/init.sls new/salt-master/salt/ldap/init.sls
--- old/salt-master/salt/ldap/init.sls  2018-04-11 09:40:33.000000000 +0200
+++ new/salt-master/salt/ldap/init.sls  2018-04-13 14:11:30.000000000 +0200
@@ -1,7 +1,3 @@
-include:
-  - ca-cert
-  - cert
-
 {% set names = [salt.caasp_pillar.get('dashboard'), 'ldap.' + pillar['internal_infra_domain']] %}
 
 {% from '_macros/certs.jinja' import alt_names, certs with context %}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/orch/kubernetes.sls new/salt-master/salt/orch/kubernetes.sls
--- old/salt-master/salt/orch/kubernetes.sls    2018-04-11 09:40:33.000000000 +0200
+++ new/salt-master/salt/orch/kubernetes.sls    2018-04-13 14:11:30.000000000 +0200
@@ -185,29 +185,23 @@
       - kube-master-setup
       - kube-minion-setup
 
-# we must start CNI right after the masters/minions reach highstate,
-# as nodes will be NotReady until the CNI DaemonSet is loaded and running...
-cni-setup:
-  salt.state:
-    - tgt: {{ super_master }}
-    - sls:
-      - cni
-    - require:
-      - kubelet-setup
-
 reboot-setup:
   salt.state:
     - tgt: {{ super_master }}
     - sls:
       - reboot
     - require:
-      - cni-setup
+      - kubelet-setup
 
+# we must start CNI before any other pods, as nodes will be NotReady until
+# the CNI DaemonSet is loaded and running...
 services-setup:
   salt.state:
     - tgt: {{ super_master }}
     - sls:
       - addons
+      - addons.psp
+      - cni
       - addons.dns
       - addons.tiller
       - addons.dex
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/orch/removal.sls new/salt-master/salt/orch/removal.sls
--- old/salt-master/salt/orch/removal.sls       2018-04-11 09:40:33.000000000 +0200
+++ new/salt-master/salt/orch/removal.sls       2018-04-13 14:11:30.000000000 +0200
@@ -97,6 +97,15 @@
     - require:
       - sync-all
 
+kubelet-setup:
+  salt.state:
+    - tgt: {{ replacement }}
+    - sls:
+      - kubelet.configure-taints
+      - kubelet.configure-labels
+    - require:
+      - highstate-replacement
+
 set-bootstrap-complete-flag-in-replacement:
   salt.function:
     - tgt: {{ replacement }}
@@ -105,7 +114,7 @@
       - bootstrap_complete
       - true
     - require:
-      - highstate-replacement
+      - kubelet-setup
 
 # remove the we-are-adding-this-node grain
 remove-addition-grain:
@@ -255,6 +264,6 @@
     - kwarg:
         destructive: True
     - require:
-      - highstate-affected-{{ affected_roles|join('-and-') }}
+      - highstate-affected
 
 {% endif %}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/orch/update.sls new/salt-master/salt/orch/update.sls
--- old/salt-master/salt/orch/update.sls        2018-04-11 09:40:33.000000000 +0200
+++ new/salt-master/salt/orch/update.sls        2018-04-13 14:11:30.000000000 +0200
@@ -382,16 +382,6 @@
{%- set all_masters = salt.saltutil.runner('mine.get', tgt=is_master_tgt, fun='network.interfaces', tgt_type='compound').keys() %}
 {%- set super_master = all_masters|first %}
 
-# we must start CNI right after the masters/minions reach highstate,
-# as nodes will be NotReady until the CNI DaemonSet is loaded and running...
-cni-setup:
-  salt.state:
-    - tgt: '{{ super_master }}'
-    - sls:
-      - cni
-    - require:
-      - kubelet-setup
-
 # (re-)apply all the manifests
 # this will perform a rolling-update for existing daemonsets
 services-setup:
@@ -399,11 +389,13 @@
     - tgt: '{{ super_master }}'
     - sls:
       - addons
+      - addons.psp
+      - cni
       - addons.dns
       - addons.tiller
       - addons.dex
     - require:
-      - cni-setup
+      - kubelet-setup
 
 # Wait for deployments to have the expected number of pods running.
 super-master-wait-for-services:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/top.sls new/salt-master/salt/top.sls
--- old/salt-master/salt/top.sls        2018-04-11 09:40:33.000000000 +0200
+++ new/salt-master/salt/top.sls        2018-04-13 14:11:30.000000000 +0200
@@ -4,6 +4,7 @@
     - ca
   'roles:(admin|kube-master|kube-minion|etcd)':
     - match: grain_pcre
+    - ca-cert
     - swap
     - etc-hosts
     - proxy
@@ -25,7 +26,6 @@
     - kube-scheduler
   'roles:(kube-master|kube-minion|etcd)':
     - match: grain_pcre
-    - ca-cert
     - repositories
     - motd
     - users
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/velum/init.sls new/salt-master/salt/velum/init.sls
--- old/salt-master/salt/velum/init.sls 2018-04-11 09:40:33.000000000 +0200
+++ new/salt-master/salt/velum/init.sls 2018-04-13 14:11:30.000000000 +0200
@@ -1,7 +1,5 @@
 include:
   - etc-hosts
-  - ca-cert
-  - cert
 
 {% set names = [salt.caasp_pillar.get('dashboard_external_fqdn'),
                 salt.caasp_pillar.get('dashboard')] %}

