Hello community,

here is the log from the commit of package kubernetes-salt for openSUSE:Factory checked in at 2018-02-13 10:31:39
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/kubernetes-salt (Old)
 and      /work/SRC/openSUSE:Factory/.kubernetes-salt.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "kubernetes-salt"

Tue Feb 13 10:31:39 2018 rev:4 rq:575829 version:3.0.0+git_r575_cbc22fb

Changes:
--------
--- /work/SRC/openSUSE:Factory/kubernetes-salt/kubernetes-salt.changes  2018-02-06 16:49:15.219788996 +0100
+++ /work/SRC/openSUSE:Factory/.kubernetes-salt.new/kubernetes-salt.changes     2018-02-13 10:31:41.396150322 +0100
@@ -1,0 +2,100 @@
+Mon Feb 12 15:21:42 UTC 2018 - containers-bugow...@suse.de
+
+- Commit cbc22fb by Alvaro Saurin alvaro.sau...@gmail.com
+ Make sure we do not crash on pillars that are not properly formatted.
+
+
+-------------------------------------------------------------------
+Mon Feb 12 13:38:51 UTC 2018 - containers-bugow...@suse.de
+
+- Commit c194707 by Alvaro Saurin alvaro.sau...@gmail.com
+ Remove the etcd discovery mechanism. Mark all the etcd members of the cluster
+ with the 'etcd' role before doing the update.
+
+
+-------------------------------------------------------------------
+Mon Feb 12 11:25:24 UTC 2018 - containers-bugow...@suse.de
+
+- Commit d85fb55 by Kiall Mac Innes ki...@macinnes.ie
+ Move haproxy config to /etc/caasp/haproxy
+ 
+ This avoids a conflict between the caasp-container-manifests package and the
+ haproxy package.
+
+
+-------------------------------------------------------------------
+Thu Feb  8 17:55:45 UTC 2018 - containers-bugow...@suse.de
+
+- Commit 37fccd3 by Flavio Castelli fcaste...@suse.com
+ Dex pods: introduce anti-affinity rule
+ 
+ Our dex deployment creates 3 pods running the dex service. There is a really
+ high chance (or even a certainty, for clusters made of 1 or 2 worker nodes)
+ that all the dex pods end up running on the same node.
+ 
+ This is bad from an HA perspective, and it also takes resources away from
+ small clusters.
+ 
+ With this change we force the kubernetes scheduler to always spread the dex
+ pods over different nodes.
+ 
+ On small clusters (1 or 2 nodes) the deployment will run with fewer replicas
+ until new nodes are added. This doesn't cause our orchestration to fail.
+ 
+ Adding new nodes at a later stage will allow the deployment to reach the
+ desired replica size without any intervention from us or the user.
+ 
+ Signed-off-by: Flavio Castelli <fcaste...@suse.com>
+
+
+-------------------------------------------------------------------
+Thu Feb  8 17:35:30 UTC 2018 - containers-bugow...@suse.de
+
+- Commit b578f87 by Kiall Mac Innes ki...@macinnes.ie
+ Dex: Avoid using the external_fqdn to reach dex
+ 
+ In some environments, the external_fqdn is unreachable from inside the
+ cluster - avoid using it where possible.
+
+
+-------------------------------------------------------------------
+Wed Feb  7 17:24:14 UTC 2018 - containers-bugow...@suse.de
+
+- Commit 6a11de3 by Kiall Mac Innes ki...@macinnes.ie
+ Use separate Dex clients for each actual client
+ 
+ Previously Velum, CaaSP CLI, and Kubernetes all shared a single Dex client.
+ From a security perspective, this was far from ideal.
+ 
+ Update Dex with 3 clients, one for each actual client. Both the Velum and
+ CaaSP CLI clients are allowed to issue tokens for the Kubernetes client.
+
+
+-------------------------------------------------------------------
+Wed Feb  7 10:12:48 UTC 2018 - containers-bugow...@suse.de
+
+- Commit 3d63b18 by Joachim Gleissner jgleiss...@suse.com
+ Add pillar root for public cloud specific config
+
+
+-------------------------------------------------------------------
+Tue Feb  6 17:49:24 UTC 2018 - containers-bugow...@suse.de
+
+- Commit e23fb43 by Flavio Castelli fcaste...@suse.com
+ Mark the haproxy as critical pod
+ 
+ Flag the haproxy pods providing connectivity to the API server as critical
+ ones.
+ 
+ This should force the kubelet and the scheduler to never get rid of them. If
+ these pods were killed to make room for other ones, the node would not be
+ able to talk to the API server, making it useless.
+ 
+ More details in the upstream docs:
+ 
+ https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
+ 
+ Signed-off-by: Flavio Castelli <fcaste...@suse.com>
+
+
+-------------------------------------------------------------------

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ kubernetes-salt.spec ++++++
--- /var/tmp/diff_new_pack.tCVscJ/_old  2018-02-13 10:31:43.104088780 +0100
+++ /var/tmp/diff_new_pack.tCVscJ/_new  2018-02-13 10:31:43.108088636 +0100
@@ -32,7 +32,7 @@
 
 Name:           kubernetes-salt
 %define gitrepo salt
-Version:        3.0.0+git_r561_e96818e
+Version:        3.0.0+git_r575_cbc22fb
 Release:        0
 BuildArch:      noarch
 Summary:        Production-Grade Container Scheduling and Management

++++++ master.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/config/master.d/50-master.conf new/salt-master/config/master.d/50-master.conf
--- old/salt-master/config/master.d/50-master.conf      2018-02-05 17:51:16.000000000 +0100
+++ new/salt-master/config/master.d/50-master.conf      2018-02-12 16:21:22.000000000 +0100
@@ -21,6 +21,8 @@
 pillar_roots:
   base:
     - /usr/share/salt/kubernetes/pillar
+  prod:
+    - /srv/pillar
 
 # The mechanism that provides custom modules to the master
 # is different from that which serves them to the minions.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/pillar/params.sls new/salt-master/pillar/params.sls
--- old/salt-master/pillar/params.sls   2018-02-05 17:51:16.000000000 +0100
+++ new/salt-master/pillar/params.sls   2018-02-12 16:21:22.000000000 +0100
@@ -90,7 +90,7 @@
 # - the discovery id is also unique for all the machines in the
 #   cluster (in fact, it can be the same as the token)
 # - if masters is null, we will determine the number of etcd members
-#   based on the number of nodes with the kube-master role applied
+#   based on the recommended member count (currently, 3)
 # - For an etcd cluster to be effective, the number of cluster members
 #   must be both odd and reasonably small, for example - 1,3,5 are
 #   valid while 2,4,6 are not. In addition, clusters larger than 5 are
@@ -100,9 +100,6 @@
 etcd:
   masters:        null
   token:          'k8s'
-  disco:
-    port:         '2379'
-    id:           'k8s'
 # set log level for etcd service
 # potential log levels are:
 # [ CRITICAL, ERROR, WARNING NOTICE, INFO, DEBUG ]
@@ -155,6 +152,9 @@
 
 dex:
   node_port: '32000'
+  client_secrets:
+    kubernetes: ''
+    velum: ''
 
 # configuration parameters for interacting with LDAP via Dex
 # these get filled in by velum during bootstrap. they're listed
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/_modules/caasp_docker.py new/salt-master/salt/_modules/caasp_docker.py
--- old/salt-master/salt/_modules/caasp_docker.py       2018-02-05 17:51:16.000000000 +0100
+++ new/salt-master/salt/_modules/caasp_docker.py       2018-02-12 16:21:22.000000000 +0100
@@ -40,44 +40,53 @@
 
     LOG.debug('Finding certificates in: %s', lst)
     for registry in lst:
-        url = registry.get('url')
-        cert = registry.get('cert', '')
-        if len(cert) > 0:
-
-            # parse the name as an URL or "host:port", and return <HOST>[:<PORT>]
-            hostname, port = _get_hostname_and_port(url)
-            host_port = hostname
-            if port:
-                host_port += ":" + str(port)
-
-            LOG.debug('Adding certificate for: %s', host_port)
-            certs[host_port] = cert
-
-            if port:
-                if port == default_port:
-                    # When using the standar port (5000), if the user introduces
-                    # "my-registry:5000" as a trusted registry, he/she will be able
-                    # to do "docker pull my-registry:5000/some/image" but not
-                    # "docker pull my-registry/some/image".
-                    # So we must also create the "ca.crt" for "my-registry"
-                    # as he/she could just access "docker pull my-registry/some/image",
-                    # and Docker would fail to find "my-registry/ca.crt"
-                    name = hostname
+        try:
+            url = registry.get('url')
+
+            cert = registry.get('cert', '')
+            if cert:
+
+                # parse the name as an URL or "host:port", and return <HOST>[:<PORT>]
+                hostname, port = _get_hostname_and_port(url)
+                host_port = hostname
+                if port:
+                    host_port += ":" + str(port)
+
+                LOG.debug('Adding certificate for: %s', host_port)
+                certs[host_port] = cert
+
+                if port:
+                    if port == default_port:
+                        # When using the standar port (5000), if the user introduces
+                        # "my-registry:5000" as a trusted registry, he/she will be able
+                        # to do "docker pull my-registry:5000/some/image" but not
+                        # "docker pull my-registry/some/image".
+                        # So we must also create the "ca.crt" for "my-registry"
+                        # as he/she could just access "docker pull my-registry/some/image",
+                        # and Docker would fail to find "my-registry/ca.crt"
+                        name = hostname
+                        LOG.debug(
+                            'Using default port: adding certificate for "%s" too', name)
+                        certs[name] = cert
+                else:
+                    # the same happens if the user introduced a certificate for
+                    # "my-registry": we must fix the "docker pull my-registry:5000/some/image" case...
+                    name = hostname + ':' + str(default_port)
                     LOG.debug(
-                        'Using default port: adding certificate for "%s" too', name)
+                        'Adding certificate for default port, "%s", too', name)
                     certs[name] = cert
-            else:
-                # the same happens if the user introduced a certificate for
-                # "my-registry": we must fix the "docker pull my-registry:5000/some/image" case...
-                name = hostname + ':' + str(default_port)
-                LOG.debug('Adding certificate for default port, "%s", too', name)
-                certs[name] = cert
-
-        mirrors = registry.get('mirrors', [])
-        if len(mirrors) > 0:
-            LOG.debug('Looking recursively for certificates in mirrors')
-            certs_mirrors = get_registries_certs(mirrors,
-                                                 default_port=default_port)
-            certs.update(certs_mirrors)
+
+        except Exception as e:
+            LOG.error('Could not parse certificate: %s', e)
+
+        try:
+            mirrors = registry.get('mirrors', [])
+            if mirrors:
+                LOG.debug('Looking recursively for certificates in mirrors')
+                certs_mirrors = get_registries_certs(mirrors,
+                                                     default_port=default_port)
+                certs.update(certs_mirrors)
+        except Exception as e:
+            LOG.error('Could not parse mirrors: %s', e)
 
     return certs
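
For readers skimming the diff: the change above amounts to wrapping each registry entry in its own try/except, so one badly formatted pillar entry is logged and skipped instead of aborting the whole scan. A minimal standalone sketch of that pattern (the registry entries and the trimmed-down function body are illustrative, not the actual module, which also handles ports and mirrors):

    import logging

    LOG = logging.getLogger(__name__)

    def get_certs_tolerantly(registries):
        # collect url -> certificate, skipping malformed pillar entries
        certs = {}
        for registry in registries:
            try:
                cert = registry.get('cert', '')
                if cert:
                    certs[registry['url']] = cert
            except Exception as e:
                # a badly formatted entry only costs us a log line
                LOG.error('Could not parse certificate: %s', e)
        return certs

    # the second entry is malformed (a string, not a dict) and is simply skipped
    print(get_certs_tolerantly([{'url': 'my-registry:5000', 'cert': 'PEM...'}, 'oops']))
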
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/_modules/caasp_etcd.py new/salt-master/salt/_modules/caasp_etcd.py
--- old/salt-master/salt/_modules/caasp_etcd.py 2018-02-05 17:51:16.000000000 +0100
+++ new/salt-master/salt/_modules/caasp_etcd.py 2018-02-12 16:21:22.000000000 +0100
@@ -7,14 +7,24 @@
 # minimum number of etcd masters we recommend
 MIN_RECOMMENDED_MEMBER_COUNT = 3
 
+# port where etcd listens for clients
+ETCD_CLIENT_PORT = 2379
+
 
 def __virtual__():
     return "caasp_etcd"
 
 
 # Grain used for getting nodes
-# make sure 'network.interfaces' is in the pillar/mine.sls
-_GRAIN = 'network.interfaces'
+_GRAIN_NAME = 'nodename'
+
+
+class OnlyOnMasterException(Exception):
+    pass
+
+
+class NoEtcdServersException(Exception):
+    pass
 
 
 def _optimal_etcd_number(num_nodes):
@@ -28,24 +38,29 @@
         return 1
 
 
+def _get_grain_on_master(expr, grain=_GRAIN_NAME, type='grain'):
+    return __salt__['saltutil.runner']('mine.get',
+                                       tgt=expr,
+                                       fun=grain, tgt_type=type)
+
+
 def _get_num_kube(expr):
     """
     Get the number of kubernetes nodes that in the cluster that match "expr"
     """
     log.debug("Finding nodes that match '%s' in the cluster", expr)
-    nodes = __salt__['mine.get'](expr, _GRAIN, expr_form='grain').values()
+    nodes = __salt__['mine.get'](expr, _GRAIN_NAME, expr_form='grain').values()
     # 'mine.get' is not available in the master, so it will return nothing
     # in that case, we can try again with saltutil.runner... uh?
     if not nodes:
         log.debug("... using 'saltutil.runner' for getting the '%s' nodes", 
expr)
-        nodes = __salt__['saltutil.runner']('mine.get',
-                                            tgt=expr, fun=_GRAIN, 
tgt_type='grain').values()
+        nodes = _get_grain_on_master(expr).values()
     return len(nodes)
 
 
 def get_cluster_size():
     """
-    Determines the etcd discovery cluster size
+    Determines the optimal/desired (but possible) etcd cluster size
 
     Determines the desired number of cluster members, defaulting to
     the value supplied in the etcd:masters pillar, falling back to
@@ -87,3 +102,87 @@
     member_count = max(1, member_count)
     log.debug("using member count = %d", member_count)
     return member_count
+
+
+def get_additional_etcd_members():
+    '''
+    Get a list of nodes that are not running etcd members
+    and they should.
+    '''
+    if __opts__['__role'] != 'master':
+        log.error(
+            'get_additional_etcd_members should only be called in the Salt master', expr)
+        raise OnlyOnMasterException()
+
+    # machine IDs in the cluster that are currently etcd servers
+    current_etcd_members = _get_grain_on_master(
+        'G@roles:etcd', type='compound').keys()
+    num_current_etcd_members = len(current_etcd_members)
+
+    # the number of etcd masters that should be in the cluster
+    num_wanted_etcd_members = get_cluster_size()
+    #... and the number we are missing
+    num_additional_etcd_members = num_wanted_etcd_members - num_current_etcd_members
+    log.debug(
+        'get_additional_etcd_members: curr:{} wanted:{} -> {} missing'.format(num_current_etcd_members, num_wanted_etcd_members, num_additional_etcd_members))
+
+    new_etcd_members = []
+
+    if num_additional_etcd_members > 0:
+
+        masters_no_etcd = _get_grain_on_master(
+            'G@roles:kube-master and not G@roles:etcd', type='compound').keys()
+
+        # get k8s masters until we complete the etcd cluster
+        masters_and_etcd = masters_no_etcd[:num_additional_etcd_members]
+        new_etcd_members = new_etcd_members + masters_and_etcd
+        num_additional_etcd_members = num_additional_etcd_members - \
+            len(masters_and_etcd)
+        log.debug(
+            'get_additional_etcd_members: taking {} masters -> {} missing'.format(len(masters_and_etcd), num_additional_etcd_members))
+
+        # if we have run out of k8s masters and we do not have
+        # enough etcd members, go for the k8s workers too...
+        if num_additional_etcd_members > 0:
+            workers_no_etcd = _get_grain_on_master(
+                'G@roles:kube-minion and not G@roles:etcd', type='compound').keys()
+
+            workers_and_etcd = workers_no_etcd[:num_additional_etcd_members]
+            new_etcd_members = new_etcd_members + workers_and_etcd
+            num_additional_etcd_members = num_additional_etcd_members - \
+                len(workers_and_etcd)
+            log.debug(
+                'get_additional_etcd_members: taking {} minions -> {} missing'.format(len(workers_and_etcd), num_additional_etcd_members))
+
+            # TODO: if num_additional_etcd_members is still >0,
+            #       fail/raise/message/something...
+            if num_additional_etcd_members > 0:
+                log.error(
+                    'get_additional_etcd_members: cannot satisfy the {} members missing'.format(num_additional_etcd_members))
+
+    return new_etcd_members
+
+
+def get_endpoints(skip_this=False, etcd_members=[]):
+    """
+    Build a comma-separated list of etcd endpoints
+    """
+    this_name = __salt__['grains.get'](_GRAIN_NAME)
+
+    # build the list of etcd masters
+    if len(etcd_members) == 0:
+        etcd_members = __salt__["mine.get"](
+            "G@roles:etcd", _GRAIN_NAME, expr_form="compound").values()
+
+    etcd_members_urls = []
+    for name in etcd_members:
+        if skip_this and name == this_name:
+            continue
+        url = "https://{}:{}".format(name, ETCD_CLIENT_PORT)
+        etcd_members_urls.append(url)
+
+    if len(etcd_members) == 0:
+        log.error("no etcd members available!!")
+        raise NoEtcdServersException()
+
+    return ",".join(etcd_members_urls)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/cni/update-post-start-services.sls new/salt-master/salt/cni/update-post-start-services.sls
--- old/salt-master/salt/cni/update-post-start-services.sls     2018-02-05 17:51:16.000000000 +0100
+++ new/salt-master/salt/cni/update-post-start-services.sls     2018-02-12 16:21:22.000000000 +0100
@@ -1,10 +1,9 @@
 # invoked by the "update" orchestration after starting
 # all the services after rebooting
 
-# CNI does not use the docker0 bridge: remote it
+# CNI does not use the docker0 bridge: remove it
 remove-docker-iface:
   cmd.run:
     - name: ip link delete docker0
-    - check_cmd:
-      - /bin/true
+    - onlyif: grep -q docker0 /proc/net/dev
     # TODO: maybe we should restart dockerd...
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/dex/dex.yaml new/salt-master/salt/dex/dex.yaml
--- old/salt-master/salt/dex/dex.yaml   2018-02-05 17:51:16.000000000 +0100
+++ new/salt-master/salt/dex/dex.yaml   2018-02-12 16:21:22.000000000 +0100
@@ -69,13 +69,30 @@
       skipApprovalScreen: true
 
     staticClients:
+    - id: kubernetes
+      redirectURIs:
+      - 'urn:ietf:wg:oauth:2.0:oob'
+      name: "Kubernetes"
+      secret: "{{ pillar['dex']['client_secrets']['kubernetes'] }}"
+      trustedPeers:
+      - caasp-cli
+      - velum
+
     - id: caasp-cli
       redirectURIs:
+      - 'urn:ietf:wg:oauth:2.0:oob'
       - 'http://127.0.0.1'
-      - 'https://{{ pillar['dashboard'] }}/oidc/done'
-      - 'https://{{ pillar['dashboard_external_fqdn'] }}/oidc/done'
+      - 'http://localhost'
       name: "CaaSP CLI"
       secret: "swac7qakes7AvucH8bRucucH"
+      public: true
+
+    - id: velum
+      redirectURIs:
+      - 'https://{{ pillar['dashboard'] }}/oidc/done'
+      - 'https://{{ pillar['dashboard_external_fqdn'] }}/oidc/done'
+      name: "Velum"
+      secret: "{{ pillar['dex']['client_secrets']['velum'] }}"
 ---
 apiVersion: apps/v1beta2
 kind: Deployment
@@ -101,6 +118,19 @@
       tolerations:
       - key: "CriticalAddonsOnly"
         operator: "Exists"
+
+      # ensure dex pods are running on different hosts
+      affinity:
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: app
+                operator: In
+                values:
+                - dex
+            topologyKey: "kubernetes.io/hostname"
+
       containers:
       - image: sles12/caasp-dex:2.7.1
         name: dex
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/dex/init.sls new/salt-master/salt/dex/init.sls
--- old/salt-master/salt/dex/init.sls   2018-02-05 17:51:16.000000000 +0100
+++ new/salt-master/salt/dex/init.sls   2018-02-12 16:21:22.000000000 +0100
@@ -54,12 +54,15 @@
 ensure_dex_running:
   # Wait until the Dex API is actually up and running
   http.wait_for_successful_query:
-    {% set dex_api_server = pillar['api']['server']['external_fqdn'] -%}
+    {% set dex_api_server = "api." + pillar['internal_infra_domain']  -%}
+    {% set dex_api_server_ext = pillar['api']['server']['external_fqdn'] -%}
     {% set dex_api_port = pillar['dex']['node_port'] -%}
     - name:       {{ 'https://' + dex_api_server + ':' + dex_api_port }}/.well-known/openid-configuration
     - wait_for:   300
     - ca_bundle:  {{ pillar['ssl']['ca_file'] }}
     - status:     200
+    - header_dict:
+        Host: {{ dex_api_server_ext + ':' + dex_api_port }}
     - watch:
       - /root/dex.yaml
       - /root/roles.yaml
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/etcd/etcd.conf.jinja new/salt-master/salt/etcd/etcd.conf.jinja
--- old/salt-master/salt/etcd/etcd.conf.jinja   2018-02-05 17:51:16.000000000 +0100
+++ new/salt-master/salt/etcd/etcd.conf.jinja   2018-02-12 16:21:22.000000000 +0100
@@ -1,12 +1,15 @@
 # etcd configuration
 # see https://coreos.com/etcd/docs/latest/op-guide/configuration.html
 
+{%- set this_id = grains['id'] %}
+{%- set this_addr = grains['nodename'] %}
+
 ETCD_DATA_DIR="/var/lib/etcd/"
-ETCD_NAME="{{ grains['id'] }}"
+ETCD_NAME="{{ this_id }}"
 
 ETCD_LISTEN_CLIENT_URLS="https://0.0.0.0:2379";
 ETCD_LISTEN_PEER_URLS="https://0.0.0.0:2380";
-ETCD_ADVERTISE_CLIENT_URLS="https://{{ grains['nodename'] }}:2379"
+ETCD_ADVERTISE_CLIENT_URLS="https://{{ this_addr }}:2379"
 ETCD_CLIENT_CERT_AUTH="true"
 
 ETCD_CA_FILE={{ pillar['ssl']['ca_file'] }}
@@ -21,13 +24,17 @@
 ETCD_PEER_CLIENT_CERT_AUTH="true"
 # ETCD_PEER_AUTO_TLS=on
 
-# discovery
-ETCD_DISCOVERY="http://{{ pillar['dashboard'] }}:{{ 
pillar['etcd']['disco']['port'] }}/v2/keys/_etcd/registry/{{ 
pillar['etcd']['token'] }}"
-ETCD_DISCOVERY_FALLBACK="proxy"
+{%- set etcd_members_tuples = salt['mine.get']('G@roles:etcd', 'nodename', 
expr_form='compound').items() %}
+{%- set etcd_members_urls = [] %}
+{%- for id, addr in etcd_members_tuples %}
+  {%- do etcd_members_urls.append(id + "=https://"; + addr + ":2380") %}
+{%- endfor %}
+
+ETCD_INITIAL_CLUSTER="{{ etcd_members_urls|join(',') }}"
+# ETCD_INITIAL_CLUSTER_STATE="existing"
 
 ETCD_INITIAL_CLUSTER_TOKEN="{{ pillar['etcd']['token'] }}"
-ETCD_INITIAL_ADVERTISE_PEER_URLS="https://{{ grains['nodename'] }}:2380"
-ETCD_INITIAL_CLUSTER_STATE="new"
+ETCD_INITIAL_ADVERTISE_PEER_URLS="https://{{ this_addr }}:2380"
 
 # set log level
 {% if pillar['etcd']['log_level'] == 'DEBUG' -%}
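
The Jinja block above replaces etcd discovery with a static ETCD_INITIAL_CLUSTER list built from the mine's (minion id, nodename) pairs. The same string construction in plain Python, with made-up member data purely for illustration:

    def initial_cluster(members):
        # members: {minion_id: nodename}, as returned by mine.get on the 'nodename' grain
        return ",".join("{}=https://{}:2380".format(mid, addr)
                        for mid, addr in members.items())

    # e.g. "master-0=https://master-0.example.net:2380,worker-1=https://worker-1.example.net:2380"
    print(initial_cluster({"master-0": "master-0.example.net",
                           "worker-1": "worker-1.example.net"}))
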
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/etcd/init.sls new/salt-master/salt/etcd/init.sls
--- old/salt-master/salt/etcd/init.sls  2018-02-05 17:51:16.000000000 +0100
+++ new/salt-master/salt/etcd/init.sls  2018-02-12 16:21:22.000000000 +0100
@@ -34,6 +34,7 @@
     - connstate: NEW
     # TODO: add "- source: <local-subnet>"
     - dports:
+        - 2379
         - 2380
     - proto: tcp
   caasp_service.running_stable:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/etcd/update-pre-reboot.sls new/salt-master/salt/etcd/update-pre-reboot.sls
--- old/salt-master/salt/etcd/update-pre-reboot.sls     1970-01-01 01:00:00.000000000 +0100
+++ new/salt-master/salt/etcd/update-pre-reboot.sls     2018-02-12 16:21:22.000000000 +0100
@@ -0,0 +1,37 @@
+{% set roles = salt['grains.get']('roles', []) %}
+{% set has_etcd_role = ("etcd" in roles) %}
+
+{% set is_etcd_member = salt['file.directory_exists' ]('/var/lib/etcd/member') and
+                    not salt['file.directory_exists' ]('/var/lib/etcd/proxy') %}
+
+{%- if is_etcd_member and not has_etcd_role -%}
+  # this is really running a member of the etcd cluster but it doesn't
+  # have the 'etcd' role: set the 'etcd' role so we are sure it will be
+  # running etcd after the update
+
+add-etcd-role:
+  grains.append:
+    - name: roles
+    - value: etcd
+
+{% elif not has_etcd_role %}
+  # make sure there is nothing left in /var/lib/etcd
+
+cleanup-old-etcd-stuff:
+  cmd.run:
+    - name: rm -rf /var/lib/etcd/*
+
+uninstall-etcd:
+  # we cannot remove the etcd package, so we can only
+  # make sure that the service is disabled
+  service.disabled:
+    - name: etcd
+
+{%- else %}
+
+{# See https://github.com/saltstack/salt/issues/14553 #}
+dummy_step:
+  cmd.run:
+    - name: "echo saltstack bug 14553"
+
+{%- endif %}
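
The new update-pre-reboot state tags a node with the 'etcd' role when it is actually serving as a cluster member (it has a member/ directory and is not a proxy). The same check, sketched in Python using the paths from the state file (the roles list is an example input):

    import os

    def should_get_etcd_role(roles, etcd_dir="/var/lib/etcd"):
        # a real etcd member has a member/ directory and no proxy/ directory
        is_member = (os.path.isdir(os.path.join(etcd_dir, "member"))
                     and not os.path.isdir(os.path.join(etcd_dir, "proxy")))
        return is_member and "etcd" not in roles

    print(should_get_etcd_role(["kube-master"]))
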
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/etcd-discovery/init.sls new/salt-master/salt/etcd-discovery/init.sls
--- old/salt-master/salt/etcd-discovery/init.sls        2018-02-05 17:51:16.000000000 +0100
+++ new/salt-master/salt/etcd-discovery/init.sls        1970-01-01 01:00:00.000000000 +0100
@@ -1,22 +0,0 @@
-{% set disco = salt.caasp_pillar.get('etcd:disco:id') %}
-{% if disco %}
-
-  {% set etcd_base = "http://" + pillar['dashboard'] + ":" + pillar['etcd']['disco']['port'] %}
-  {% set etcd_size_uri = etcd_base + "/v2/keys/_etcd/registry/" + pillar['etcd']['disco']['id'] + "/_config/size" %}
-
-# set the cluster size in the private Discovery registry
-etcd-discovery-setup:
-  pkg.installed:
-    - name: curl
-  # wait for etcd before trying to set anything...
-  http.wait_for_successful_query:
-    - name:       {{ etcd_base }}/health
-    - wait_for:   300
-    - status:     200
-  cmd.run:
-    - name: curl -L -X PUT {{ etcd_size_uri }} -d value={{ salt.caasp_etcd.get_cluster_size() }}
-    - onlyif: curl {{ etcd_size_uri }} | grep '"message":"Key not found"'
-    - require:
-      - pkg: curl
-
-{% endif %}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/haproxy/haproxy.yaml.jinja new/salt-master/salt/haproxy/haproxy.yaml.jinja
--- old/salt-master/salt/haproxy/haproxy.yaml.jinja     2018-02-05 17:51:16.000000000 +0100
+++ new/salt-master/salt/haproxy/haproxy.yaml.jinja     2018-02-12 16:21:22.000000000 +0100
@@ -6,6 +6,8 @@
   namespace: kube-system
   labels:
     name: haproxy
+  annotations:
+    scheduler.alpha.kubernetes.io/critical-pod: ''
 spec:
   restartPolicy: Always
   hostNetwork: true
@@ -13,6 +15,8 @@
     - key: node-role.kubernetes.io/master
       operator: Exists
       effect: NoSchedule
+    - key: "CriticalAddonsOnly"
+      operator: "Exists"
   containers:
     - name: haproxy
       image: sles12/haproxy:1.6.0
@@ -39,7 +43,7 @@
   volumes:
     - name: haproxy-cfg
       hostPath:
-        path: /etc/haproxy
+        path: /etc/caasp/haproxy
 {% if "admin" in salt['grains.get']('roles', []) %}
     - name: etc-hosts
       hostPath:
@@ -53,4 +57,4 @@
     - name: velum-unix-socket
       hostPath:
         path: /var/run/puma
-{% endif %}
\ No newline at end of file
+{% endif %}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/haproxy/init.sls new/salt-master/salt/haproxy/init.sls
--- old/salt-master/salt/haproxy/init.sls       2018-02-05 17:51:16.000000000 +0100
+++ new/salt-master/salt/haproxy/init.sls       2018-02-12 16:21:22.000000000 +0100
@@ -1,4 +1,12 @@
-/etc/haproxy/haproxy.cfg:
+/etc/caasp/haproxy:
+  file.directory:
+    - name: /etc/caasp/haproxy
+    - user:     root
+    - group:    root
+    - dir_mode: 755
+    - makedirs: True
+
+/etc/caasp/haproxy/haproxy.cfg:
   file.managed:
     - source: salt://haproxy/haproxy.cfg.jinja
     - template: jinja
@@ -48,4 +56,4 @@
             docker kill -s HUP $haproxy_id
         fi
     - onchanges:
-      - file: /etc/haproxy/haproxy.cfg
+      - file: /etc/caasp/haproxy/haproxy.cfg
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/kube-apiserver/apiserver.jinja new/salt-master/salt/kube-apiserver/apiserver.jinja
--- old/salt-master/salt/kube-apiserver/apiserver.jinja 2018-02-05 17:51:16.000000000 +0100
+++ new/salt-master/salt/kube-apiserver/apiserver.jinja 2018-02-12 16:21:22.000000000 +0100
@@ -16,7 +16,7 @@
 KUBE_ETCD_SERVERS="--etcd-cafile={{ pillar['ssl']['ca_file'] }} \
                    --etcd-certfile={{ pillar['ssl']['kube_apiserver_crt'] }} \
                    --etcd-keyfile={{ pillar['ssl']['kube_apiserver_key'] }} \
-                   --etcd-servers=https://{{ grains['nodename'] }}:2379"
+                   --etcd-servers={{ salt.caasp_etcd.get_endpoints() }}"
 
 # Address range to use for services
 # [alvaro] should not be in the same range as the flannel network (https://github.com/coreos/flannel/issues/232)
@@ -47,7 +47,7 @@
                --runtime-config=admissionregistration.k8s.io/v1alpha1 \
                --authorization-mode=Node,RBAC \
                --oidc-issuer-url=https://{{ pillar['api']['server']['external_fqdn'] }}:{{ pillar['dex']['node_port'] }} \
-               --oidc-client-id=caasp-cli \
+               --oidc-client-id=kubernetes \
                --oidc-ca-file={{ pillar['ssl']['ca_file'] }} \
                --oidc-username-claim=email \
                --oidc-groups-claim=groups"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/kube-apiserver/init.sls new/salt-master/salt/kube-apiserver/init.sls
--- old/salt-master/salt/kube-apiserver/init.sls        2018-02-05 17:51:16.000000000 +0100
+++ new/salt-master/salt/kube-apiserver/init.sls        2018-02-12 16:21:22.000000000 +0100
@@ -2,7 +2,6 @@
   - repositories
   - ca-cert
   - cert
-  - etcd
   - kubernetes-common
   - kubernetes-common.serviceaccount-key
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/kubelet/init.sls new/salt-master/salt/kubelet/init.sls
--- old/salt-master/salt/kubelet/init.sls       2018-02-05 17:51:16.000000000 +0100
+++ new/salt-master/salt/kubelet/init.sls       2018-02-12 16:21:22.000000000 +0100
@@ -2,7 +2,6 @@
   - repositories
   - ca-cert
   - cert
-  - etcd
   - kubernetes-common
   - kubectl-config
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/orch/kubernetes.sls new/salt-master/salt/orch/kubernetes.sls
--- old/salt-master/salt/orch/kubernetes.sls    2018-02-05 17:51:16.000000000 +0100
+++ new/salt-master/salt/orch/kubernetes.sls    2018-02-12 16:21:22.000000000 +0100
@@ -1,9 +1,12 @@
+{%- set default_batch = 5 %}
+
+{# machine IDs that have the master roles assigned #}
 {%- set masters = salt.saltutil.runner('mine.get', tgt='G@roles:kube-master', fun='network.interfaces', tgt_type='compound').keys() %}
 {%- set super_master = masters|first %}
 
-{%- set default_batch = 5 %}
-
-{%- set num_etcd_masters = salt.caasp_etcd.get_cluster_size() %}
+{# the number of etcd masters that should be in the cluster #}
+{%- set num_etcd_members = salt.caasp_etcd.get_cluster_size() %}
+{%- set additional_etcd_members = salt.caasp_etcd.get_additional_etcd_members() %}
 
 # Ensure the node is marked as bootstrapping
 set-bootstrap-in-progress-flag:
@@ -14,9 +17,28 @@
       - bootstrap_in_progress
       - true
 
+{% if additional_etcd_members|length > 0 %}
+# Mark some machines as new etcd members
+set-etcd-roles:
+  salt.function:
+    - tgt: {{ additional_etcd_members|join(',') }}
+    - tgt_type: list
+    - name: grains.append
+    - arg:
+      - roles
+      - etcd
+    - require:
+      - set-bootstrap-in-progress-flag
+{% endif %}
+
 sync-pillar:
   salt.runner:
     - name: saltutil.sync_pillar
+    - require:
+      - set-bootstrap-in-progress-flag
+{%- if additional_etcd_members|length > 0 %}
+      - set-etcd-roles
+{%- endif %}
 
 disable-rebootmgr:
   salt.state:
@@ -25,7 +47,7 @@
     - sls:
       - rebootmgr
     - require:
-      - set-bootstrap-in-progress-flag
+      - sync-pillar
 
 update-pillar:
   salt.function:
@@ -55,6 +77,8 @@
     - name: saltutil.sync_all
     - kwarg:
         refresh: True
+    - require:
+      - update-mine
 
 etc-hosts-setup:
   salt.state:
@@ -63,7 +87,7 @@
     - sls:
       - etc-hosts
     - require:
-      - update-mine
+      - update-modules
 
 ca-setup:
   salt.state:
@@ -72,7 +96,6 @@
     - highstate: True
     - require:
       - etc-hosts-setup
-      - update-mine
 
 generate-sa-key:
   salt.state:
@@ -90,24 +113,16 @@
     - require:
       - generate-sa-key
 
-etcd-discovery-setup:
-  salt.state:
-    - tgt: {{ super_master }}
-    - sls:
-      - etcd-discovery
-    - require:
-      - update-modules
-
-# setup {{ num_etcd_masters }} etcd masters
+# setup {{ num_etcd_members }} etcd masters
 etcd-setup:
   salt.state:
-    - tgt: 'roles:kube-(master|minion)'
-    - tgt_type: grain_pcre
+    - tgt: 'roles:etcd'
+    - tgt_type: grain
     - sls:
       - etcd
-    - batch: {{ num_etcd_masters }}
+    - batch: {{ num_etcd_members }}
     - require:
-      - etcd-discovery-setup
+      - update-mine-again
 
 admin-setup:
   salt.state:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/orch/update.sls new/salt-master/salt/orch/update.sls
--- old/salt-master/salt/orch/update.sls        2018-02-05 17:51:16.000000000 +0100
+++ new/salt-master/salt/orch/update.sls        2018-02-12 16:21:22.000000000 +0100
@@ -87,6 +87,7 @@
     - tgt: {{ master_id }}
     - sls:
       - cni.update-pre-reboot
+      - etcd.update-pre-reboot
     - require:
       - {{ master_id }}-clean-shutdown
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/reboot/init.sls new/salt-master/salt/reboot/init.sls
--- old/salt-master/salt/reboot/init.sls        2018-02-05 17:51:16.000000000 +0100
+++ new/salt-master/salt/reboot/init.sls        2018-02-12 16:21:22.000000000 +0100
@@ -2,12 +2,16 @@
 # Configuration for the reboot manager
 ##################################################
 
-include:
-  - etcd
+{%- set etcd_members = salt['mine.get']('G@roles:etcd', 'nodename', expr_form='compound').values() %}
+{%- set etcd_server = etcd_members|first %}
 
-{% set reboot_uri = "http://127.0.0.1:2379/v2/keys/"; + 
pillar['reboot']['directory'] + "/" +
+{% set reboot_uri = "https://"; + etcd_server + ":2379/v2/keys/" + 
pillar['reboot']['directory'] + "/" +
          pillar['reboot']['group'] %}
 
+{% set curl_args = " --cacert " + pillar['ssl']['ca_file'] +
+                   " --cert " + pillar['ssl']['crt_file'] +
+                   " --key " + pillar['ssl']['key_file'] %}
+
 # `max_holders` contains the maximum number of lock holders for the cluster. It
 # must comply with the optimal cluster size as defined here:
 #   https://coreos.com/etcd/docs/latest/v2/admin_guide.html
@@ -23,10 +27,8 @@
   pkg.installed:
     - name: curl
   cmd.run:
-    - name: curl -L -X PUT {{ reboot_uri }}/mutex?prevExist=false -d value="0"
-    - onlyif: curl {{ reboot_uri }}/mutex?prevExist=false | grep -i "key not found"
-    - watch:
-      - etcd
+    - name: curl -L -X PUT {{ curl_args}} {{ reboot_uri }}/mutex?prevExist=false -d value="0"
+    - onlyif: curl {{ curl_args}} {{ reboot_uri }}/mutex?prevExist=false | grep -i "key not found"
 
 # Initialize the `data` key, which is JSON data with: the maximum number of
 # holders, and a list of current holders.
@@ -34,9 +36,8 @@
   pkg.installed:
     - name: curl
   cmd.run:
-    - name: >-
-        curl -L -X PUT {{ reboot_uri }}/data?prevExist=false -d value='{ "max":"{{ max_holders }}", "holders":[] }'
-    - onlyif: curl {{ reboot_uri }}/data?prevExist=false | grep -i "key not found"
+    - name:
+        curl -L -X PUT {{ curl_args}} {{ reboot_uri }}/data?prevExist=false -d value='{ "max":"{{ max_holders }}", "holders":[] }'
+    - onlyif: curl {{ curl_args}} {{ reboot_uri }}/data?prevExist=false | grep -i "key not found"
     - watch:
       - cmd: set_max_holders_mutex
-      - etcd
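
The reboot-lock bootstrap above now talks to a real etcd member over TLS instead of the local discovery proxy; the curl invocations are just authenticated PUTs against the etcd v2 keys API with prevExist=false. A rough equivalent in Python, assuming the requests library; the hostname, certificate paths, and the directory/group defaults are placeholders (the real values come from the reboot and ssl pillars):

    import requests

    def init_reboot_mutex(etcd_server, ca, crt, key, directory="caasp", group="default"):
        # create <directory>/<group>/mutex with value "0" only if it does not exist yet,
        # authenticating with the node's TLS client certificate
        url = "https://{}:2379/v2/keys/{}/{}/mutex".format(etcd_server, directory, group)
        resp = requests.put(url, params={"prevExist": "false"},
                            data={"value": "0"},
                            verify=ca, cert=(crt, key))
        return resp.status_code

    # e.g. init_reboot_mutex("master-0.example.net",
    #                        "/etc/pki/ca.crt", "/etc/pki/minion.crt", "/etc/pki/minion.key")
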
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/top.sls new/salt-master/salt/top.sls
--- old/salt-master/salt/top.sls        2018-02-05 17:51:16.000000000 +0100
+++ new/salt-master/salt/top.sls        2018-02-12 16:21:22.000000000 +0100
@@ -21,12 +21,14 @@
     - motd
     - users
     - cert
-    - etcd
     - docker
     - container-feeder
     - kubectl-config
     - kubelet
     - kube-proxy
+  'roles:etcd':
+    - match: grain
+    - etcd
   'roles:kube-master':
     - match: grain
     - kube-apiserver

