Hello community,

here is the log from the commit of package kubernetes-salt for openSUSE:Factory 
checked in at 2018-03-26 13:06:58
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/kubernetes-salt (Old)
 and      /work/SRC/openSUSE:Factory/.kubernetes-salt.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "kubernetes-salt"

Mon Mar 26 13:06:58 2018 rev:8 rq:590602 version:3.0.0+git_r666_603e9dc

Changes:
--------
--- /work/SRC/openSUSE:Factory/kubernetes-salt/kubernetes-salt.changes  2018-03-22 12:12:13.138400408 +0100
+++ /work/SRC/openSUSE:Factory/.kubernetes-salt.new/kubernetes-salt.changes     2018-03-26 13:07:02.288772975 +0200
@@ -1,0 +2,52 @@
+Thu Mar 22 16:53:56 UTC 2018 - containers-bugow...@suse.de
+
+- Commit 0901ff0 by Kiall Mac Innes ki...@macinnes.ie
+ Increase Kube-DNS replicas to 3
+ 
+ Having only a single Kube-DNS replica means that, during upgrades or other
+ failure scenarios, Kube-DNS will not be functional. A value of 3 matches what
+ we use for Dex.
+ 
+ Commit 2c42773 by Kiall Mac Innes ki...@macinnes.ie
+ Dex should not have cluster-admin
+ 
+ Dex does not require cluster admin access. Instead, it should use a new role
+ defined with just the permissions Dex requires.
+ 
+ Commit 38e654d by Kiall Mac Innes ki...@macinnes.ie
+ Kube-DNS should not have cluster-admin
+ 
+ The Kubernetes DNS service does not require cluster admin access. Instead, it
+ should use the built-in system:kube-dns role.
+ 
+ Commit 9dec359 by Kiall Mac Innes ki...@macinnes.ie
+ Remove duplicated Dex ClusterRoleBinding
+ 
+ The ClusterRoleBindings for Dex were duplicated - this removes the extra
+ copy.
+ 
+ Commit 0aebc0d by Kiall Mac Innes ki...@macinnes.ie
+ Match addons/{dns,tiller} patterns to addons/dex
+ 
+ This pattern is cleaner, and lets Kubernetes do more of the hard work related
+ to applying and updating manifest changes. This will be further extended to
+ CNI/flannel soon.
+
+
+-------------------------------------------------------------------
+Thu Mar 22 11:54:08 UTC 2018 - containers-bugow...@suse.de
+
+- Commit 3b3f0ae by Rafael Fernández López eresli...@ereslibre.es
+ Refresh modules before we call any `sls`, as they might use undiscovered
+ modules.
+ 
+ Commit 8b49308 by Rafael Fernández López eresli...@ereslibre.es
+ When we explicitly run the `haproxy` sls in the update, run `etc-hosts` too.
+ 
+ During a rename, `haproxy` might refuse to start because it cannot resolve
+ the new `nodename.infra.caasp.local` names in its configuration, as its
+ `/etc/hosts` file hasn't been updated yet.
+
+
+-------------------------------------------------------------------

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ kubernetes-salt.spec ++++++
--- /var/tmp/diff_new_pack.nF9Tw8/_old  2018-03-26 13:07:04.420696334 +0200
+++ /var/tmp/diff_new_pack.nF9Tw8/_new  2018-03-26 13:07:04.424696190 +0200
@@ -32,7 +32,7 @@
 
 Name:           kubernetes-salt
 %define gitrepo salt
-Version:        3.0.0+git_r657_c294782
+Version:        3.0.0+git_r666_603e9dc
 Release:        0
 BuildArch:      noarch
 Summary:        Production-Grade Container Scheduling and Management

++++++ master.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/pillar/params.sls 
new/salt-master/pillar/params.sls
--- old/salt-master/pillar/params.sls   2018-03-21 18:10:02.000000000 +0100
+++ new/salt-master/pillar/params.sls   2018-03-22 17:53:50.000000000 +0100
@@ -46,7 +46,7 @@
 dns:
   cluster_ip:     '172.24.0.2'
   domain:         'cluster.local'
-  replicas:       '1'
+  replicas:       '3'
 
 # user and group for running services and some other stuff...
 kube_user:        'kube'
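
The bumped `replicas` value above feeds the {{ pillar['dns']['replicas'] }} placeholder in the kube-dns Deployment manifest further down. As a rough check on a running cluster (illustrative commands, assuming kubectl is configured against it):

  kubectl -n kube-system get deploy kube-dns -o jsonpath='{.spec.replicas}'  # expect: 3
  kubectl -n kube-system get pods -l k8s-app=kube-dns                        # three kube-dns pods once rolled out
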
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/salt-master/salt/addons/dex/manifests/05-clusterrole.yaml 
new/salt-master/salt/addons/dex/manifests/05-clusterrole.yaml
--- old/salt-master/salt/addons/dex/manifests/05-clusterrole.yaml       
1970-01-01 01:00:00.000000000 +0100
+++ new/salt-master/salt/addons/dex/manifests/05-clusterrole.yaml       
2018-03-22 17:53:50.000000000 +0100
@@ -0,0 +1,12 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: suse:caasp:dex
+rules:
+- apiGroups: ["dex.coreos.com"]
+  resources: ["*"]
+  verbs: ["*"]
+- apiGroups: ["apiextensions.k8s.io"]
+  resources: ["customresourcedefinitions"]
+  verbs: ["create"]
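
The new suse:caasp:dex ClusterRole above limits Dex to its own dex.coreos.com resources plus CRD creation, replacing the cluster-admin binding removed below. An illustrative way to confirm the narrowed permissions (assuming the dex ServiceAccount in kube-system, as bound in the next manifest):

  kubectl auth can-i create customresourcedefinitions --as=system:serviceaccount:kube-system:dex   # expect: yes
  kubectl auth can-i delete nodes --as=system:serviceaccount:kube-system:dex                       # expect: no
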
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/salt-master/salt/addons/dex/manifests/10-clusterrolebinding.yaml 
new/salt-master/salt/addons/dex/manifests/10-clusterrolebinding.yaml
--- old/salt-master/salt/addons/dex/manifests/10-clusterrolebinding.yaml        
2018-03-21 18:10:02.000000000 +0100
+++ new/salt-master/salt/addons/dex/manifests/10-clusterrolebinding.yaml        
2018-03-22 17:53:50.000000000 +0100
@@ -4,25 +4,25 @@
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: suse:caasp:dex
+roleRef:
+  kind: ClusterRole
+  name: suse:caasp:dex
+  apiGroup: rbac.authorization.k8s.io
 subjects:
 - kind: ServiceAccount
   name: dex
   namespace: kube-system
-roleRef:
-  kind: ClusterRole
-  name: cluster-admin
-  apiGroup: rbac.authorization.k8s.io
 ---
 # Map the LDAP Administrators group to the Kubernetes cluster-admin role
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: suse:caasp:ldap-administrators
-subjects:
-- kind: Group
-  name: "{{ pillar['ldap']['admin_group_name'] }}"
-  apiGroup: rbac.authorization.k8s.io
 roleRef:
   kind: ClusterRole
   name: cluster-admin
   apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: Group
+  name: "{{ pillar['ldap']['admin_group_name'] }}"
+  apiGroup: rbac.authorization.k8s.io
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/salt-master/salt/addons/dex/manifests/10-rrolebinding.yaml 
new/salt-master/salt/addons/dex/manifests/10-rrolebinding.yaml
--- old/salt-master/salt/addons/dex/manifests/10-rrolebinding.yaml      
2018-03-21 18:10:02.000000000 +0100
+++ new/salt-master/salt/addons/dex/manifests/10-rrolebinding.yaml      
1970-01-01 01:00:00.000000000 +0100
@@ -1,28 +0,0 @@
----
-# Map the Dex SA to the Kubernetes cluster-admin role
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: suse:caasp:dex
-subjects:
-- kind: ServiceAccount
-  name: dex
-  namespace: kube-system
-roleRef:
-  kind: ClusterRole
-  name: cluster-admin
-  apiGroup: rbac.authorization.k8s.io
----
-# Map the LDAP Administrators group to the Kubernetes cluster-admin role
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: suse:caasp:ldap-administrators
-subjects:
-- kind: Group
-  name: "{{ pillar['ldap']['admin_group_name'] }}"
-  apiGroup: rbac.authorization.k8s.io
-roleRef:
-  kind: ClusterRole
-  name: cluster-admin
-  apiGroup: rbac.authorization.k8s.io
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/addons/dns/init.sls 
new/salt-master/salt/addons/dns/init.sls
--- old/salt-master/salt/addons/dns/init.sls    2018-03-21 18:10:02.000000000 
+0100
+++ new/salt-master/salt/addons/dns/init.sls    2018-03-22 17:53:50.000000000 
+0100
@@ -4,11 +4,11 @@
   - kube-apiserver
   - kubectl-config
 
-{% from '_macros/kubectl.jinja' import kubectl, kubectl_apply_template with context %}
+{% from '_macros/kubectl.jinja' import kubectl, kubectl_apply_dir_template with context %}
 
-{{ kubectl_apply_template("salt://addons/dns/kubedns.yaml.jinja",
-                          "/etc/kubernetes/addons/kubedns.yaml",
-                          check_cmd="kubectl get deploy kube-dns -n kube-system | grep kube-dns") }}
+
+{{ kubectl_apply_dir_template("salt://addons/dns/manifests/",
+                              "/etc/kubernetes/addons/dns/") }}
 
 # TODO: Transitional code, remove for CaaSP v4
 {{ kubectl("remove-old-kube-dns-clusterrolebinding",
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/addons/dns/kubedns-sa.yaml 
new/salt-master/salt/addons/dns/kubedns-sa.yaml
--- old/salt-master/salt/addons/dns/kubedns-sa.yaml     2018-03-21 
18:10:02.000000000 +0100
+++ new/salt-master/salt/addons/dns/kubedns-sa.yaml     1970-01-01 
01:00:00.000000000 +0100
@@ -1,7 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: kube-dns
-  namespace: kube-system
-  labels:
-    kubernetes.io/cluster-service: "true"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/addons/dns/kubedns.yaml.jinja 
new/salt-master/salt/addons/dns/kubedns.yaml.jinja
--- old/salt-master/salt/addons/dns/kubedns.yaml.jinja  2018-03-21 
18:10:02.000000000 +0100
+++ new/salt-master/salt/addons/dns/kubedns.yaml.jinja  1970-01-01 
01:00:00.000000000 +0100
@@ -1,203 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: kube-dns
-  namespace: kube-system
-  labels:
-    kubernetes.io/cluster-service: "true"
-
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
-  name: suse:caasp:kube-dns
-subjects:
-- kind: ServiceAccount
-  name: kube-dns
-  namespace: kube-system
-roleRef:
-  kind: ClusterRole
-  name: cluster-admin
-  apiGroup: rbac.authorization.k8s.io
-
----
-apiVersion: apps/v1beta2
-kind: Deployment
-metadata:
-  name: kube-dns
-  namespace: kube-system
-  labels:
-    k8s-app: kube-dns
-    kubernetes.io/cluster-service: "true"
-spec:
-  replicas: {{ pillar['dns']['replicas'] }}
-  strategy:
-    rollingUpdate:
-      maxSurge: 10%
-      maxUnavailable: 0
-  selector:
-    matchLabels:
-      k8s-app: kube-dns
-  template:
-    metadata:
-      labels:
-        k8s-app: kube-dns
-      annotations:
-        scheduler.alpha.kubernetes.io/critical-pod: ''
-    spec:
-      tolerations:
-      - key: node-role.kubernetes.io/master
-        operator: Exists
-        effect: NoSchedule
-      - key: "CriticalAddonsOnly"
-        operator: "Exists"
-      volumes:
-      - name: kube-dns-config
-        configMap:
-          name: kube-dns
-      containers:
-      - name: kubedns
-        image: sles12/kubedns:1.0.0
-        resources:
-          # TODO: Set memory limits when we've profiled the container for large
-          # clusters, then set request = limit to keep this container in
-          # guaranteed class. Currently, this container falls into the
-          # "burstable" category so the kubelet doesn't backoff from restarting it.
-          limits:
-            memory: 170Mi
-          requests:
-            cpu: 100m
-            memory: 70Mi
-        livenessProbe:
-          httpGet:
-            path: /healthcheck/kubedns
-            port: 10054
-            scheme: HTTP
-          initialDelaySeconds: 60
-          timeoutSeconds: 5
-          successThreshold: 1
-          failureThreshold: 5
-        readinessProbe:
-          httpGet:
-            path: /readiness
-            port: 8081
-            scheme: HTTP
-          # we poll on pod startup for the Kubernetes master service and
-          # only setup the /readiness HTTP server once that's available.
-          initialDelaySeconds: 3
-          timeoutSeconds: 5
-        args:
-        - --domain={{ pillar['dns']['domain'] }}
-        - --dns-port=10053
-        - --config-dir=/kube-dns-config
-        - --v=2
-        env:
-        - name: PROMETHEUS_PORT
-          value: "10055"
-        ports:
-        - containerPort: 10053
-          name: dns-local
-          protocol: UDP
-        - containerPort: 10053
-          name: dns-tcp-local
-          protocol: TCP
-        - containerPort: 10055
-          name: metrics
-          protocol: TCP
-        volumeMounts:
-        - name: kube-dns-config
-          mountPath: /kube-dns-config
-      - name: dnsmasq
-        image: sles12/dnsmasq-nanny:1.0.0 
-        livenessProbe:
-          httpGet:
-            path: /healthcheck/dnsmasq
-            port: 10054
-            scheme: HTTP
-          initialDelaySeconds: 60
-          timeoutSeconds: 5
-          successThreshold: 1
-          failureThreshold: 5
-        args:
-        - -v=2
-        - -logtostderr
-        - -configDir=/etc/k8s/dns/dnsmasq-nanny
-        - -restartDnsmasq=true
-        - --
-        - -k
-        - --cache-size=1000
-        - --log-facility=-
-        - --server=/{{ pillar['dns']['domain'] }}/127.0.0.1#10053
-        - --server=/in-addr.arpa/127.0.0.1#10053
-        - --server=/ip6.arpa/127.0.0.1#10053
-        ports:
-        - containerPort: 53
-          name: dns
-          protocol: UDP
-        - containerPort: 53
-          name: dns-tcp
-          protocol: TCP
-        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
-        resources:
-          requests:
-            cpu: 150m
-            memory: 20Mi
-        volumeMounts:
-        - name: kube-dns-config
-          mountPath: /etc/k8s/dns/dnsmasq-nanny
-      - name: sidecar
-        image: sles12/sidecar:1.0.0
-        livenessProbe:
-          httpGet:
-            path: /metrics
-            port: 10054
-            scheme: HTTP
-          initialDelaySeconds: 60
-          timeoutSeconds: 5
-          successThreshold: 1
-          failureThreshold: 5
-        args:
-        - --v=2
-        - --logtostderr
-        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ pillar['dns']['domain'] }},5,A
-        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ pillar['dns']['domain'] }},5,A
-        ports:
-        - containerPort: 10054
-          name: metrics
-          protocol: TCP
-        resources:
-          requests:
-            memory: 20Mi
-            cpu: 10m
-      dnsPolicy: Default  # Don't use cluster DNS.
-      serviceAccountName: kube-dns
-
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: kube-dns
-  namespace: kube-system
-  labels:
-    k8s-app: kube-dns
-    kubernetes.io/cluster-service: "true"
-    kubernetes.io/name: "KubeDNS"
-spec:
-  selector:
-    k8s-app: kube-dns
-  clusterIP: {{ pillar['dns']['cluster_ip'] }}
-  ports:
-  - name: dns
-    port: 53
-    protocol: UDP
-  - name: dns-tcp
-    port: 53
-    protocol: TCP
-
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: kube-dns
-  namespace: kube-system
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/salt-master/salt/addons/dns/manifests/05-serviceaccount.yaml 
new/salt-master/salt/addons/dns/manifests/05-serviceaccount.yaml
--- old/salt-master/salt/addons/dns/manifests/05-serviceaccount.yaml    
1970-01-01 01:00:00.000000000 +0100
+++ new/salt-master/salt/addons/dns/manifests/05-serviceaccount.yaml    
2018-03-22 17:53:50.000000000 +0100
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: kube-dns
+  namespace: kube-system
+  labels:
+    kubernetes.io/cluster-service: "true"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/salt-master/salt/addons/dns/manifests/10-clusterrolebinding.yaml 
new/salt-master/salt/addons/dns/manifests/10-clusterrolebinding.yaml
--- old/salt-master/salt/addons/dns/manifests/10-clusterrolebinding.yaml        
1970-01-01 01:00:00.000000000 +0100
+++ new/salt-master/salt/addons/dns/manifests/10-clusterrolebinding.yaml        
2018-03-22 17:53:50.000000000 +0100
@@ -0,0 +1,13 @@
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: suse:caasp:kube-dns
+roleRef:
+  kind: ClusterRole
+  name: system:kube-dns
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: ServiceAccount
+  name: kube-dns
+  namespace: kube-system
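
system:kube-dns is a default ClusterRole that Kubernetes bootstraps for the DNS addon, so no custom role is needed here. To see exactly what it grants on a given cluster (illustrative):

  kubectl get clusterrole system:kube-dns -o yaml
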
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/salt-master/salt/addons/dns/manifests/15-configmap.yaml 
new/salt-master/salt/addons/dns/manifests/15-configmap.yaml
--- old/salt-master/salt/addons/dns/manifests/15-configmap.yaml 1970-01-01 
01:00:00.000000000 +0100
+++ new/salt-master/salt/addons/dns/manifests/15-configmap.yaml 2018-03-22 
17:53:50.000000000 +0100
@@ -0,0 +1,6 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: kube-dns
+  namespace: kube-system
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/salt-master/salt/addons/dns/manifests/20-deployment.yaml 
new/salt-master/salt/addons/dns/manifests/20-deployment.yaml
--- old/salt-master/salt/addons/dns/manifests/20-deployment.yaml        
1970-01-01 01:00:00.000000000 +0100
+++ new/salt-master/salt/addons/dns/manifests/20-deployment.yaml        
2018-03-22 17:53:50.000000000 +0100
@@ -0,0 +1,151 @@
+---
+apiVersion: apps/v1beta2
+kind: Deployment
+metadata:
+  name: kube-dns
+  namespace: kube-system
+  labels:
+    k8s-app: kube-dns
+    kubernetes.io/cluster-service: "true"
+spec:
+  replicas: {{ pillar['dns']['replicas'] }}
+  strategy:
+    rollingUpdate:
+      maxSurge: 10%
+      maxUnavailable: 0
+  selector:
+    matchLabels:
+      k8s-app: kube-dns
+  template:
+    metadata:
+      labels:
+        k8s-app: kube-dns
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      tolerations:
+      - key: node-role.kubernetes.io/master
+        operator: Exists
+        effect: NoSchedule
+      - key: "CriticalAddonsOnly"
+        operator: "Exists"
+      volumes:
+      - name: kube-dns-config
+        configMap:
+          name: kube-dns
+      containers:
+      - name: kubedns
+        image: sles12/kubedns:1.0.0
+        resources:
+          # TODO: Set memory limits when we've profiled the container for large
+          # clusters, then set request = limit to keep this container in
+          # guaranteed class. Currently, this container falls into the
+          # "burstable" category so the kubelet doesn't backoff from restarting it.
+          limits:
+            memory: 170Mi
+          requests:
+            cpu: 100m
+            memory: 70Mi
+        livenessProbe:
+          httpGet:
+            path: /healthcheck/kubedns
+            port: 10054
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
+        readinessProbe:
+          httpGet:
+            path: /readiness
+            port: 8081
+            scheme: HTTP
+          # we poll on pod startup for the Kubernetes master service and
+          # only setup the /readiness HTTP server once that's available.
+          initialDelaySeconds: 3
+          timeoutSeconds: 5
+        args:
+        - --domain={{ pillar['dns']['domain'] }}
+        - --dns-port=10053
+        - --config-dir=/kube-dns-config
+        - --v=2
+        env:
+        - name: PROMETHEUS_PORT
+          value: "10055"
+        ports:
+        - containerPort: 10053
+          name: dns-local
+          protocol: UDP
+        - containerPort: 10053
+          name: dns-tcp-local
+          protocol: TCP
+        - containerPort: 10055
+          name: metrics
+          protocol: TCP
+        volumeMounts:
+        - name: kube-dns-config
+          mountPath: /kube-dns-config
+      - name: dnsmasq
+        image: sles12/dnsmasq-nanny:1.0.0 
+        livenessProbe:
+          httpGet:
+            path: /healthcheck/dnsmasq
+            port: 10054
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
+        args:
+        - -v=2
+        - -logtostderr
+        - -configDir=/etc/k8s/dns/dnsmasq-nanny
+        - -restartDnsmasq=true
+        - --
+        - -k
+        - --cache-size=1000
+        - --log-facility=-
+        - --server=/{{ pillar['dns']['domain'] }}/127.0.0.1#10053
+        - --server=/in-addr.arpa/127.0.0.1#10053
+        - --server=/ip6.arpa/127.0.0.1#10053
+        ports:
+        - containerPort: 53
+          name: dns
+          protocol: UDP
+        - containerPort: 53
+          name: dns-tcp
+          protocol: TCP
+        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
+        resources:
+          requests:
+            cpu: 150m
+            memory: 20Mi
+        volumeMounts:
+        - name: kube-dns-config
+          mountPath: /etc/k8s/dns/dnsmasq-nanny
+      - name: sidecar
+        image: sles12/sidecar:1.0.0
+        livenessProbe:
+          httpGet:
+            path: /metrics
+            port: 10054
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
+        args:
+        - --v=2
+        - --logtostderr
+        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ pillar['dns']['domain'] }},5,A
+        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ pillar['dns']['domain'] }},5,A
+        ports:
+        - containerPort: 10054
+          name: metrics
+          protocol: TCP
+        resources:
+          requests:
+            memory: 20Mi
+            cpu: 10m
+      dnsPolicy: Default  # Don't use cluster DNS.
+      serviceAccountName: kube-dns
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/salt-master/salt/addons/dns/manifests/25-service.yaml 
new/salt-master/salt/addons/dns/manifests/25-service.yaml
--- old/salt-master/salt/addons/dns/manifests/25-service.yaml   1970-01-01 
01:00:00.000000000 +0100
+++ new/salt-master/salt/addons/dns/manifests/25-service.yaml   2018-03-22 
17:53:50.000000000 +0100
@@ -0,0 +1,21 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: kube-dns
+  namespace: kube-system
+  labels:
+    k8s-app: kube-dns
+    kubernetes.io/cluster-service: "true"
+    kubernetes.io/name: "KubeDNS"
+spec:
+  selector:
+    k8s-app: kube-dns
+  clusterIP: {{ pillar['dns']['cluster_ip'] }}
+  ports:
+  - name: dns
+    port: 53
+    protocol: UDP
+  - name: dns-tcp
+    port: 53
+    protocol: TCP
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/addons/tiller/init.sls 
new/salt-master/salt/addons/tiller/init.sls
--- old/salt-master/salt/addons/tiller/init.sls 2018-03-21 18:10:02.000000000 
+0100
+++ new/salt-master/salt/addons/tiller/init.sls 2018-03-22 17:53:50.000000000 
+0100
@@ -4,11 +4,10 @@
   - kube-apiserver
   - kubectl-config
 
-{% from '_macros/kubectl.jinja' import kubectl, kubectl_apply_template with context %}
+{% from '_macros/kubectl.jinja' import kubectl, kubectl_apply_dir_template with context %}
 
-{{ kubectl_apply_template("salt://addons/tiller/tiller.yaml.jinja",
-                          "/etc/kubernetes/addons/tiller.yaml",
-                          check_cmd="kubectl get deploy tiller-deploy -n kube-system | grep tiller-deploy") }}
+{{ kubectl_apply_dir_template("salt://addons/tiller/manifests/",
+                              "/etc/kubernetes/addons/tiller/") }}
 
 # TODO: Transitional code, remove for CaaSP v4
 {{ kubectl("remove-old-tiller-clusterrolebinding",
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/salt-master/salt/addons/tiller/manifests/05-clusterrole.yaml 
new/salt-master/salt/addons/tiller/manifests/05-clusterrole.yaml
--- old/salt-master/salt/addons/tiller/manifests/05-clusterrole.yaml    
1970-01-01 01:00:00.000000000 +0100
+++ new/salt-master/salt/addons/tiller/manifests/05-clusterrole.yaml    
2018-03-22 17:53:50.000000000 +0100
@@ -0,0 +1,18 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: suse:caasp:tiller-user
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods/portforward
+  verbs:
+  - create
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - list
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/salt-master/salt/addons/tiller/manifests/05-serviceaccount.yaml 
new/salt-master/salt/addons/tiller/manifests/05-serviceaccount.yaml
--- old/salt-master/salt/addons/tiller/manifests/05-serviceaccount.yaml 
1970-01-01 01:00:00.000000000 +0100
+++ new/salt-master/salt/addons/tiller/manifests/05-serviceaccount.yaml 
2018-03-22 17:53:50.000000000 +0100
@@ -0,0 +1,8 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: tiller
+  namespace: kube-system
+  labels:
+    kubernetes.io/cluster-service: "true"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/salt-master/salt/addons/tiller/manifests/10-clusterrolebinding.yaml 
new/salt-master/salt/addons/tiller/manifests/10-clusterrolebinding.yaml
--- old/salt-master/salt/addons/tiller/manifests/10-clusterrolebinding.yaml     
1970-01-01 01:00:00.000000000 +0100
+++ new/salt-master/salt/addons/tiller/manifests/10-clusterrolebinding.yaml     
2018-03-22 17:53:50.000000000 +0100
@@ -0,0 +1,13 @@
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: suse:caasp:tiller
+roleRef:
+  kind: ClusterRole
+  name: cluster-admin
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: ServiceAccount
+  name: tiller
+  namespace: kube-system
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/salt-master/salt/addons/tiller/manifests/20-deployment.yaml 
new/salt-master/salt/addons/tiller/manifests/20-deployment.yaml
--- old/salt-master/salt/addons/tiller/manifests/20-deployment.yaml     
1970-01-01 01:00:00.000000000 +0100
+++ new/salt-master/salt/addons/tiller/manifests/20-deployment.yaml     
2018-03-22 17:53:50.000000000 +0100
@@ -0,0 +1,58 @@
+---
+apiVersion: apps/v1beta2
+kind: Deployment
+metadata:
+  creationTimestamp: null
+  labels:
+    app: helm
+    name: tiller
+    kubernetes.io/cluster-service: "true"
+  name: tiller-deploy
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      app: helm
+      name: tiller
+  strategy: {}
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        app: helm
+        name: tiller
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      tolerations:
+      - key: node-role.kubernetes.io/master
+        operator: Exists
+        effect: NoSchedule
+      - key: "CriticalAddonsOnly"
+        operator: "Exists"
+      containers:
+      - env:
+        - name: TILLER_NAMESPACE
+          value: kube-system
+        image: sles12/tiller:2.7.2
+        imagePullPolicy: IfNotPresent
+        livenessProbe:
+          httpGet:
+            path: /liveness
+            port: 44135
+          initialDelaySeconds: 1
+          timeoutSeconds: 1
+        name: tiller
+        ports:
+        - containerPort: 44134
+          name: tiller
+        readinessProbe:
+          httpGet:
+            path: /readiness
+            port: 44135
+          initialDelaySeconds: 1
+          timeoutSeconds: 1
+        resources: {}
+      nodeSelector:
+        beta.kubernetes.io/os: linux
+      serviceAccountName: tiller
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/salt-master/salt/addons/tiller/manifests/25-service.yaml 
new/salt-master/salt/addons/tiller/manifests/25-service.yaml
--- old/salt-master/salt/addons/tiller/manifests/25-service.yaml        
1970-01-01 01:00:00.000000000 +0100
+++ new/salt-master/salt/addons/tiller/manifests/25-service.yaml        
2018-03-22 17:53:50.000000000 +0100
@@ -0,0 +1,21 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  creationTimestamp: null
+  labels:
+    app: helm
+    name: tiller
+    kubernetes.io/cluster-service: "true"
+    kubernetes.io/name: "Tiller"
+  name: tiller
+  namespace: kube-system
+spec:
+  type: ClusterIP
+  ports:
+  - name: tiller
+    port: 44134
+    targetPort: tiller
+  selector:
+    app: helm
+    name: tiller
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/addons/tiller/tiller.yaml.jinja 
new/salt-master/salt/addons/tiller/tiller.yaml.jinja
--- old/salt-master/salt/addons/tiller/tiller.yaml.jinja        2018-03-21 
18:10:02.000000000 +0100
+++ new/salt-master/salt/addons/tiller/tiller.yaml.jinja        1970-01-01 
01:00:00.000000000 +0100
@@ -1,125 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: tiller
-  namespace: kube-system
-  labels:
-    kubernetes.io/cluster-service: "true"
-
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
-  name: suse:caasp:tiller
-subjects:
-- kind: ServiceAccount
-  name: tiller
-  namespace: kube-system
-roleRef:
-  kind: ClusterRole
-  name: cluster-admin
-  apiGroup: rbac.authorization.k8s.io
-
----
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRole
-metadata:
-  name: suse:caasp:tiller-user
-rules:
-- apiGroups:
-  - ""
-  resources:
-  - pods/portforward
-  verbs:
-  - create
-- apiGroups:
-  - ""
-  resources:
-  - pods
-  verbs:
-  - list
-
----
-apiVersion: apps/v1beta2
-kind: Deployment
-metadata:
-  creationTimestamp: null
-  labels:
-    app: helm
-    name: tiller
-    kubernetes.io/cluster-service: "true"
-  name: tiller-deploy
-  namespace: kube-system
-spec:
-  selector:
-    matchLabels:
-      app: helm
-      name: tiller
-  strategy: {}
-  template:
-    metadata:
-      creationTimestamp: null
-      labels:
-        app: helm
-        name: tiller
-      annotations:
-        scheduler.alpha.kubernetes.io/critical-pod: ''
-    spec:
-      tolerations:
-      - key: node-role.kubernetes.io/master
-        operator: Exists
-        effect: NoSchedule
-      - key: "CriticalAddonsOnly"
-        operator: "Exists"
-      containers:
-      - env:
-        - name: TILLER_NAMESPACE
-          value: kube-system
-        image: sles12/tiller:2.7.2
-        imagePullPolicy: IfNotPresent
-        livenessProbe:
-          httpGet:
-            path: /liveness
-            port: 44135
-          initialDelaySeconds: 1
-          timeoutSeconds: 1
-        name: tiller
-        ports:
-        - containerPort: 44134
-          name: tiller
-        readinessProbe:
-          httpGet:
-            path: /readiness
-            port: 44135
-          initialDelaySeconds: 1
-          timeoutSeconds: 1
-        resources: {}
-      nodeSelector:
-        beta.kubernetes.io/os: linux
-      serviceAccountName: tiller
-status: {}
-
----
-apiVersion: v1
-kind: Service
-metadata:
-  creationTimestamp: null
-  labels:
-    app: helm
-    name: tiller
-    kubernetes.io/cluster-service: "true"
-    kubernetes.io/name: "Tiller"
-  name: tiller
-  namespace: kube-system
-spec:
-  ports:
-  - name: tiller
-    port: 44134
-    targetPort: tiller
-  selector:
-    app: helm
-    name: tiller
-  type: ClusterIP
-status:
-  loadBalancer: {}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/haproxy/init.sls 
new/salt-master/salt/haproxy/init.sls
--- old/salt-master/salt/haproxy/init.sls       2018-03-21 18:10:02.000000000 
+0100
+++ new/salt-master/salt/haproxy/init.sls       2018-03-22 17:53:50.000000000 
+0100
@@ -53,7 +53,7 @@
 # Send a SIGTERM to haproxy when the config changes
 # TODO: There should be a better way to handle this, but currently, there is not. See
 # kubernetes/kubernetes#24957
-haproxy_restart:
+haproxy-restart:
   cmd.run:
     - name: |-
         haproxy_id=$(docker ps | grep -E "k8s_haproxy.*_kube-system_" | awk '{print $1}')
@@ -68,10 +68,12 @@
 {% if "admin" in salt['grains.get']('roles', []) %}
 # The admin node is serving the internal API with the pillars. Wait for it to come back
 # before going on with the orchestration/highstates.
-wait_for_haproxy:
-  cmd.run:
-    - name: |-
-        until $(docker ps | grep -E "k8s_haproxy.*_kube-system_" &> /dev/null); do
-            sleep 1
-        done
+wait-for-haproxy:
+  http.wait_for_successful_query:
+    - name:       https://localhost:444/internal-api/v1/pillar.json
+    - wait_for:   300
+    - status:     401
+    - verify_ssl: False
+    - onchanges:
+      - haproxy-restart
 {% endif %}
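
The replacement wait state polls the admin node's internal API through haproxy and accepts a 401, i.e. the proxy is up and forwarding again even though the request is unauthenticated. A rough manual equivalent (illustrative; -k mirrors verify_ssl: False):

  curl -k -s -o /dev/null -w '%{http_code}\n' https://localhost:444/internal-api/v1/pillar.json   # 401 once haproxy is back
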
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/salt-master/salt/orch/update.sls 
new/salt-master/salt/orch/update.sls
--- old/salt-master/salt/orch/update.sls        2018-03-21 18:10:02.000000000 
+0100
+++ new/salt-master/salt/orch/update.sls        2018-03-22 17:53:50.000000000 
+0100
@@ -1,11 +1,3 @@
-admin-apply-haproxy:
-  salt.state:
-    - tgt: 'roles:admin'
-    - tgt_type: grain
-    - batch: 1
-    - sls:
-      - haproxy
-
 # Ensure all nodes with updates are marked as upgrading. This will reduce the time window in which
 # the update-etc-hosts orchestration can run in between machine restarts.
 set-update-grain:
@@ -17,39 +9,33 @@
       - update_in_progress
       - true
 
-# Generate sa key (we should refactor this as part of the ca highstate along with its counterpart
-# in orch/kubernetes.sls)
-generate-sa-key:
-  salt.state:
-    - tgt: 'roles:ca'
-    - tgt_type: grain
-    - sls:
-      - kubernetes-common.generate-serviceaccount-key
-
 # Generic Updates
 sync-pillar:
   salt.runner:
     - name: saltutil.sync_pillar
+    - require:
+      - set-update-grain
 
 update-pillar:
   salt.function:
     - tgt: '*'
     - name: saltutil.refresh_pillar
     - require:
-      - generate-sa-key
+      - sync-pillar
 
 update-grains:
   salt.function:
     - tgt: '*'
     - name: saltutil.refresh_grains
+    - require:
+      - update-pillar
 
 update-mine:
   salt.function:
     - tgt: '*'
     - name: mine.update
     - require:
-       - update-pillar
-       - update-grains
+      - update-grains
 
 update-modules:
   salt.function:
@@ -60,13 +46,35 @@
     - require:
       - update-mine
 
+# Generate sa key (we should refactor this as part of the ca highstate along with its counterpart
+# in orch/kubernetes.sls)
+generate-sa-key:
+  salt.state:
+    - tgt: 'roles:ca'
+    - tgt_type: grain
+    - sls:
+      - kubernetes-common.generate-serviceaccount-key
+    - require:
+      - update-modules
+
+admin-apply-haproxy:
+  salt.state:
+    - tgt: 'roles:admin'
+    - tgt_type: grain
+    - batch: 1
+    - sls:
+      - etc-hosts
+      - haproxy
+    - require:
+      - generate-sa-key
+
 admin-setup:
   salt.state:
     - tgt: 'roles:admin'
     - tgt_type: grain
     - highstate: True
     - require:
-      - update-modules
+      - admin-apply-haproxy
 
 # Perform any migrations necessary before starting the update orchestration. All services and
 # machines should be running and we can migrate some data on the whole cluster and then proceed

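The reordered update orchestration now chains every step through require, so the pillar/grain/mine/module refreshes run before the sa key generation, the combined etc-hosts + haproxy apply, and the admin highstate. An illustrative way to kick it off from the salt master (assuming the usual orchestrate runner; the entry point is not shown in this diff):

  salt-run state.orchestrate orch.update
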
