Hello community,

here is the log from the commit of package salt for openSUSE:Factory checked in at 2019-05-03 22:37:55
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/salt (Old)
 and      /work/SRC/openSUSE:Factory/.salt.new.5148 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "salt"

Fri May  3 22:37:55 2019 rev:83 rq:700350 version:2019.2.0

Changes:
--------
--- /work/SRC/openSUSE:Factory/salt/salt.changes        2019-04-15 11:51:54.894533129 +0200
+++ /work/SRC/openSUSE:Factory/.salt.new.5148/salt.changes      2019-05-03 22:37:57.482680698 +0200
@@ -1,0 +2,50 @@
+Fri May  3 09:42:06 UTC 2019 - mdinca <[email protected]>
+
+- Include aliases in FQDNs grain (bsc#1121439)
+
+-------------------------------------------------------------------
+Thu May  2 16:18:45 UTC 2019 - mdinca <[email protected]>
+
+- Fix issue preventing the syndic from starting
+- Update year on spec copyright notice
+
+- Added:
+  * fix-syndic-start-issue.patch
+
+-------------------------------------------------------------------
+Tue Apr 30 11:51:59 UTC 2019 - [email protected]
+
+- Use ThreadPool from multiprocessing.pool to avoid leaks
+  when calculating FQDNs
+- Do not report patches as installed on RHEL systems when not all
+  the related packages are installed (bsc#1128061)
+
+- Added:
+  * use-threadpool-from-multiprocessing.pool-to-avoid-le.patch
+  * do-not-report-patches-as-installed-when-not-all-the-.patch
+
+-------------------------------------------------------------------
+Fri Apr 26 10:00:01 UTC 2019 - [email protected]
+
+- Update to 2019.2.0 complete (FATE#327138, bsc#1133523)
+- Fix batch/batch-async related issues
+- Calculate FQDNs in parallel to avoid blocking (bsc#1129079)
+- Incorporate virt.volume_infos fixes (PR#131)
+- Re-add patch because of increased offset due to previous patch removal
+- Remove patch adding the root parameter to the zypper module
+- Fix handling of the -t parameter in the mount module
+
+- Added:
+  * mount-fix-extra-t-parameter.patch
+  * add-batch_presence_ping_timeout-and-batch_presence_p.patch
+  * fix-async-batch-race-conditions.patch
+  * calculate-fqdns-in-parallel-to-avoid-blockings-bsc-1.patch
+
+- Modified:
+  * don-t-call-zypper-with-more-than-one-no-refresh.patch
+  * add-virt.volume_infos-and-virt.volume_delete.patch
+
+- Removed:
+  * zypper-add-root-configuration-parameter.patch
+
+-------------------------------------------------------------------

Old:
----
  zypper-add-root-configuration-parameter.patch

New:
----
  add-batch_presence_ping_timeout-and-batch_presence_p.patch
  calculate-fqdns-in-parallel-to-avoid-blockings-bsc-1.patch
  do-not-report-patches-as-installed-when-not-all-the-.patch
  fix-async-batch-race-conditions.patch
  fix-syndic-start-issue.patch
  mount-fix-extra-t-parameter.patch
  use-threadpool-from-multiprocessing.pool-to-avoid-le.patch

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ salt.spec ++++++
--- /var/tmp/diff_new_pack.5ZApBc/_old  2019-05-03 22:38:02.786691716 +0200
+++ /var/tmp/diff_new_pack.5ZApBc/_new  2019-05-03 22:38:02.822691791 +0200
@@ -1,7 +1,7 @@
 #
 # spec file for package salt
 #
-# Copyright (c) 2018 SUSE LINUX GmbH, Nuernberg, Germany.
+# Copyright (c) 2019 SUSE LINUX GmbH, Nuernberg, Germany.
 #
 # All modifications and additions to the file contributed by third parties
 # remain the property of their copyright owners, unless otherwise agreed
@@ -141,22 +141,34 @@
 Patch38:        remove-arch-from-name-when-pkg.list_pkgs-is-called-w.patch
 # PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/51119
 Patch39:        fix-issue-2068-test.patch
-# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/50125
-Patch40:        zypper-add-root-configuration-parameter.patch
 # PATCH_FIX_OPENSUSE: Temporary fix allowing "id_" and "force" params while upstrem figures it out
-Patch41:        temporary-fix-extend-the-whitelist-of-allowed-comman.patch
+Patch40:        temporary-fix-extend-the-whitelist-of-allowed-comman.patch
 # PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/51382
-Patch42:        don-t-call-zypper-with-more-than-one-no-refresh.patch
+Patch41:        don-t-call-zypper-with-more-than-one-no-refresh.patch
 # PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/50109
 # PATCH_FIX_OPENSUSE https://github.com/openSUSE/salt/pull/121
-Patch43:        add-virt.all_capabilities.patch
+Patch42:        add-virt.all_capabilities.patch
 # PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/51691
-Patch44:        add-virt.volume_infos-and-virt.volume_delete.patch
+Patch43:        add-virt.volume_infos-and-virt.volume_delete.patch
 # PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/51384
-Patch45:        include-aliases-in-the-fqdns-grains.patch
+Patch44:        include-aliases-in-the-fqdns-grains.patch
 # PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/50546
 # PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/51863
-Patch46:        async-batch-implementation.patch
+Patch45:        async-batch-implementation.patch
+# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/51905
+Patch46:        mount-fix-extra-t-parameter.patch
+# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/52527
+Patch47:        calculate-fqdns-in-parallel-to-avoid-blockings-bsc-1.patch
+#PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/139
+Patch48:       fix-async-batch-race-conditions.patch
+#PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/141
+Patch49:       add-batch_presence_ping_timeout-and-batch_presence_p.patch
+#PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/52657
+Patch50:       do-not-report-patches-as-installed-when-not-all-the-.patch
+# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/52527
+Patch51:       use-threadpool-from-multiprocessing.pool-to-avoid-le.patch
+# PATCH-FIX_UPSTREAM https://github.com/saltstack/salt/pull/52519 (partial porting)
+Patch52:       fix-syndic-start-issue.patch
 
 # BuildRoot:      %{_tmppath}/%{name}-%{version}-build
 BuildRoot:      %{_tmppath}/%{name}-%{version}-build
@@ -661,6 +673,12 @@
 %patch44 -p1
 %patch45 -p1
 %patch46 -p1
+%patch47 -p1
+%patch48 -p1
+%patch49 -p1
+%patch50 -p1
+%patch51 -p1
+%patch52 -p1
 
 %build
 %if 0%{?build_py2}

++++++ _lastrevision ++++++
--- /var/tmp/diff_new_pack.5ZApBc/_old  2019-05-03 22:38:03.446693087 +0200
+++ /var/tmp/diff_new_pack.5ZApBc/_new  2019-05-03 22:38:03.462693121 +0200
@@ -1 +1 @@
-65afa65b0d69f90c1cd716474cdddcdc98751274
\ No newline at end of file
+8d79ae9a816ab27810786c5a4a60021af08ec366
\ No newline at end of file

++++++ add-batch_presence_ping_timeout-and-batch_presence_p.patch ++++++
From 902a3527415807448be0aa28a651374a189d102c Mon Sep 17 00:00:00 2001
From: Marcelo Chiaradia <[email protected]>
Date: Thu, 4 Apr 2019 13:57:38 +0200
Subject: [PATCH] Add 'batch_presence_ping_timeout' and
 'batch_presence_ping_gather_job_timeout' parameters for synchronous batching

---
 salt/cli/batch.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/salt/cli/batch.py b/salt/cli/batch.py
index 4bd07f584a..ce239215cb 100644
--- a/salt/cli/batch.py
+++ b/salt/cli/batch.py
@@ -83,6 +83,9 @@ def batch_get_opts(
         if key not in opts:
             opts[key] = val
 
+    opts['batch_presence_ping_timeout'] = kwargs.get('batch_presence_ping_timeout', opts['timeout'])
+    opts['batch_presence_ping_gather_job_timeout'] = kwargs.get('batch_presence_ping_gather_job_timeout', opts['gather_job_timeout'])
+
     return opts
 
 
@@ -119,7 +122,7 @@ class Batch(object):
         args = [self.opts['tgt'],
                 'test.ping',
                 [],
-                self.opts['timeout'],
+                self.opts.get('batch_presence_ping_timeout', self.opts['timeout']),
                 ]
 
         selected_target_option = self.opts.get('selected_target_option', None)
@@ -130,7 +133,7 @@ class Batch(object):
 
         self.pub_kwargs['yield_pub_data'] = True
         ping_gen = self.local.cmd_iter(*args,
-                                       gather_job_timeout=self.opts['gather_job_timeout'],
+                                       gather_job_timeout=self.opts.get('batch_presence_ping_gather_job_timeout', self.opts['gather_job_timeout']),
                                        **self.pub_kwargs)
 
         # Broadcast to targets
-- 
2.20.1
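
For reference, the fallback pattern this patch introduces can be exercised on its own. The sketch below is a minimal stand-in (the name batch_get_opts_sketch and the sample values are illustrative, not part of the shipped package):

# Minimal sketch of the timeout fallback above: the new presence-ping
# options default to the generic timeouts when not passed explicitly.
def batch_get_opts_sketch(opts, **kwargs):
    opts = dict(opts)  # work on a copy instead of mutating the caller's dict
    opts['batch_presence_ping_timeout'] = kwargs.get(
        'batch_presence_ping_timeout', opts['timeout'])
    opts['batch_presence_ping_gather_job_timeout'] = kwargs.get(
        'batch_presence_ping_gather_job_timeout', opts['gather_job_timeout'])
    return opts

# Presence pings time out after 5s while the job keeps its 60s timeout.
print(batch_get_opts_sketch({'timeout': 60, 'gather_job_timeout': 10},
                            batch_presence_ping_timeout=5))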


++++++ add-virt.volume_infos-and-virt.volume_delete.patch ++++++
--- /var/tmp/diff_new_pack.5ZApBc/_old  2019-05-03 22:38:04.246694749 +0200
+++ /var/tmp/diff_new_pack.5ZApBc/_new  2019-05-03 22:38:04.266694791 +0200
@@ -1,4 +1,4 @@
-From 2536ee56bd0060c024994f97388f9975ccbe1ee1 Mon Sep 17 00:00:00 2001
+From 5e202207d02d2bf4860cc5487ed19f9d835993d1 Mon Sep 17 00:00:00 2001
 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= <[email protected]>
 Date: Fri, 15 Feb 2019 17:28:00 +0100
 Subject: [PATCH] Add virt.volume_infos() and virt.volume_delete()
@@ -11,15 +11,15 @@
 
 virt.volume_delete() allows removing a given volume.
 ---
- salt/modules/virt.py            | 113 ++++++++++++++++++++
- tests/unit/modules/test_virt.py | 184 ++++++++++++++++++++++++++++++++
- 2 files changed, 297 insertions(+)
+ salt/modules/virt.py            | 126 +++++++++++++++++++++
+ tests/unit/modules/test_virt.py | 195 ++++++++++++++++++++++++++++++++
+ 2 files changed, 321 insertions(+)
 
 diff --git a/salt/modules/virt.py b/salt/modules/virt.py
-index 0921122a8a..4a301f289c 100644
+index 0921122a8a..17039444c4 100644
 --- a/salt/modules/virt.py
 +++ b/salt/modules/virt.py
-@@ -4988,3 +4988,116 @@ def pool_list_volumes(name, **kwargs):
+@@ -4988,3 +4988,129 @@ def pool_list_volumes(name, **kwargs):
          return pool.listVolumes()
      finally:
          conn.close()
@@ -34,6 +34,19 @@
 +    return pool_obj.storageVolLookupByName(vol)
 +
 +
++def _is_valid_volume(vol):
++    '''
++    Checks whether a volume is valid for further use since those may have disappeared since
++    the last pool refresh.
++    '''
++    try:
++        # Getting info on an invalid volume raises error
++        vol.info()
++        return True
++    except libvirt.libvirtError as err:
++        return False
++
++
 +def _get_all_volumes_paths(conn):
 +    '''
 +    Extract the path and backing stores path of all volumes.
@@ -42,17 +55,17 @@
 +    '''
 +    volumes = [vol for l in [obj.listAllVolumes() for obj in conn.listAllStoragePools()] for vol in l]
 +    return {vol.path(): [path.text for path in ElementTree.fromstring(vol.XMLDesc()).findall('.//backingStore/path')]
-+            for vol in volumes}
++            for vol in volumes if _is_valid_volume(vol)}
 +
 +
-+def volume_infos(pool, volume, **kwargs):
++def volume_infos(pool=None, volume=None, **kwargs):
 +    '''
 +    Provide details on a storage volume. If no volume name is provided, the infos
 +    all the volumes contained in the pool are provided. If no pool is provided,
 +    the infos of the volumes of all pools are output.
 +
-+    :param pool: libvirt storage pool name
-+    :param volume: name of the volume to get infos from
++    :param pool: libvirt storage pool name (default: ``None``)
++    :param volume: name of the volume to get infos from (default: ``None``)
 +    :param connection: libvirt connection URI, overriding defaults
 +    :param username: username to connect with, overriding defaults
 +    :param password: password to connect with, overriding defaults
@@ -102,7 +115,7 @@
 +        pools = [obj for obj in conn.listAllStoragePools() if pool is None or obj.name() == pool]
 +        vols = {pool_obj.name(): {vol.name(): _volume_extract_infos(vol)
 +                                  for vol in pool_obj.listAllVolumes()
-+                                  if volume is None or vol.name() == volume}
++                                  if (volume is None or vol.name() == volume) and _is_valid_volume(vol)}
 +                for pool_obj in pools}
 +        return {pool_name: volumes for (pool_name, volumes) in vols.items() if volumes}
 +    except libvirt.libvirtError as err:
@@ -137,10 +150,10 @@
 +    finally:
 +        conn.close()
 diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py
-index bd34962a6a..55005f1d04 100644
+index bd34962a6a..14e51e1e2a 100644
 --- a/tests/unit/modules/test_virt.py
 +++ b/tests/unit/modules/test_virt.py
-@@ -2698,3 +2698,187 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
+@@ -2698,3 +2698,198 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
          self.mock_conn.storagePoolLookupByName.return_value = mock_pool
          # pylint: enable=no-member
          self.assertEqual(names, virt.pool_list_volumes('default'))
@@ -203,6 +216,13 @@
 +                'name': 'pool1',
 +                'volumes': [
 +                    {
++                        'key': '/key/of/vol0bad',
++                        'name': 'vol0bad',
++                        'path': '/path/to/vol0bad.qcow2',
++                        'info': None,
++                        'backingStore': None
++                    },
++                    {
 +                        'key': '/key/of/vol1',
 +                        'name': 'vol1',
 +                        'path': '/path/to/vol1.qcow2',
@@ -230,23 +250,27 @@
 +                mock_volume.name.return_value = vol_data['name']
 +                mock_volume.key.return_value = vol_data['key']
 +                mock_volume.path.return_value = '/path/to/{0}.qcow2'.format(vol_data['name'])
-+                mock_volume.info.return_value = vol_data['info']
-+                backing_store = '''
-+                    <backingStore>
-+                      <format>qcow2</format>
-+                      <path>{0}</path>
-+                    </backingStore>
-+                '''.format(vol_data['backingStore']) if vol_data['backingStore'] else '<backingStore/>'
-+                mock_volume.XMLDesc.return_value = '''
-+                    <volume type='file'>
-+                      <name>{0}</name>
-+                      <target>
-+                        <format>qcow2</format>
-+                        <path>/path/to/{0}.qcow2</path>
-+                      </target>
-+                      {1}
-+                    </volume>
-+                '''.format(vol_data['name'], backing_store)
++                if vol_data['info']:
++                    mock_volume.info.return_value = vol_data['info']
++                    backing_store = '''
++                        <backingStore>
++                          <format>qcow2</format>
++                          <path>{0}</path>
++                        </backingStore>
++                    '''.format(vol_data['backingStore']) if vol_data['backingStore'] else '<backingStore/>'
++                    mock_volume.XMLDesc.return_value = '''
++                        <volume type='file'>
++                          <name>{0}</name>
++                          <target>
++                            <format>qcow2</format>
++                            <path>/path/to/{0}.qcow2</path>
++                          </target>
++                          {1}
++                        </volume>
++                    '''.format(vol_data['name'], backing_store)
++                else:
++                    mock_volume.info.side_effect = self.mock_libvirt.libvirtError('No such volume')
++                    mock_volume.XMLDesc.side_effect = self.mock_libvirt.libvirtError('No such volume')
 +                mock_volumes.append(mock_volume)
 +                # pylint: enable=no-member
 +            mock_pool.listAllVolumes.return_value = mock_volumes  # pylint: disable=no-member
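
The probing guard added by this revision can be illustrated standalone. In the sketch below, RuntimeError stands in for libvirt.libvirtError and the Volume class is a made-up stand-in for libvirt's volume objects:

# Minimal sketch of the stale-volume guard: probe each volume and drop
# the ones that disappeared since the last pool refresh.
class Volume(object):
    def __init__(self, name, stale=False):
        self.name, self.stale = name, stale

    def info(self):
        if self.stale:
            raise RuntimeError('volume disappeared since last pool refresh')
        return (0, 1024, 1024)  # type, capacity, allocation

def is_valid(vol):
    try:
        vol.info()  # probing an invalid volume raises
        return True
    except RuntimeError:
        return False

vols = [Volume('vol1'), Volume('vol0bad', stale=True)]
print([v.name for v in vols if is_valid(v)])  # ['vol1']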

++++++ calculate-fqdns-in-parallel-to-avoid-blockings-bsc-1.patch ++++++
From 722b9395a6489da7626e6a388c78bf8e8812190e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
 <[email protected]>
Date: Fri, 12 Apr 2019 16:47:03 +0100
Subject: [PATCH] Calculate FQDNs in parallel to avoid blockings (bsc#1129079)

Fix pylint issue
---
 salt/grains/core.py | 31 ++++++++++++++++++++++++++-----
 1 file changed, 26 insertions(+), 5 deletions(-)

diff --git a/salt/grains/core.py b/salt/grains/core.py
index 05a9d5035d..796458939d 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
@@ -20,11 +20,14 @@ import platform
 import logging
 import locale
 import uuid
+import time
 import zlib
 from errno import EACCES, EPERM
 import datetime
 import warnings
 
+from multiprocessing.dummy import Pool as ThreadPool
+
 # pylint: disable=import-error
 try:
     import dateutil.tz
@@ -2200,13 +2203,10 @@ def fqdns():
     grains = {}
     fqdns = set()
 
-    addresses = salt.utils.network.ip_addrs(include_loopback=False, interface_data=_get_interfaces())
-    addresses.extend(salt.utils.network.ip_addrs6(include_loopback=False, interface_data=_get_interfaces()))
-    err_message = 'Exception during resolving address: %s'
-    for ip in addresses:
+    def _lookup_fqdn(ip):
         try:
             name, aliaslist, addresslist = socket.gethostbyaddr(ip)
-            fqdns.update([socket.getfqdn(name)] + [als for als in aliaslist if salt.utils.network.is_fqdn(als)])
+            return [socket.getfqdn(name)] + [als for als in aliaslist if salt.utils.network.is_fqdn(als)]
         except socket.herror as err:
             if err.errno == 0:
                # No FQDN for this IP address, so we don't need to know this all the time.
@@ -2216,6 +2216,27 @@ def fqdns():
         except (socket.error, socket.gaierror, socket.timeout) as err:
             log.error(err_message, err)
 
+    start = time.time()
+
+    addresses = salt.utils.network.ip_addrs(include_loopback=False, interface_data=_get_interfaces())
+    addresses.extend(salt.utils.network.ip_addrs6(include_loopback=False, interface_data=_get_interfaces()))
+    err_message = 'Exception during resolving address: %s'
+
+    # Create a ThreadPool to process the underlying calls to 'socket.gethostbyaddr' in parallel.
+    # This avoid blocking the execution when the "fqdn" is not defined for certains IP addresses, which was causing
+    # that "socket.timeout" was reached multiple times secuencially, blocking execution for several seconds.
+    pool = ThreadPool(8)
+    results = pool.map(_lookup_fqdn, addresses)
+    pool.close()
+    pool.join()
+
+    for item in results:
+        if item:
+            fqdns.update(item)
+
+    elapsed = time.time() - start
+    log.debug('Elapsed time getting FQDNs: {} seconds'.format(elapsed))
+
     return {"fqdns": sorted(list(fqdns))}
 
 
-- 
2.17.1
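
The parallelization pattern can be tried in isolation; the sketch below is illustrative only (RFC 5737 example addresses, pool size of 8 as in the patch) and is not the patched grain:

# Minimal sketch of parallel reverse-DNS lookups with a thread pool, so a
# handful of timing-out addresses no longer block the whole run serially.
import socket
from multiprocessing.dummy import Pool as ThreadPool

def lookup_fqdn(ip):
    try:
        name, aliaslist, _ = socket.gethostbyaddr(ip)
        return [socket.getfqdn(name)] + aliaslist
    except (socket.herror, socket.gaierror, socket.timeout, OSError):
        return None  # no FQDN for this address

addresses = ['192.0.2.1', '198.51.100.7']  # documentation-range examples
pool = ThreadPool(8)
results = pool.map(lookup_fqdn, addresses)
pool.close()
pool.join()

fqdns = set()
for item in results:
    if item:
        fqdns.update(item)
print(sorted(fqdns))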

++++++ do-not-report-patches-as-installed-when-not-all-the-.patch ++++++
From 769c9e85499bc9912b050fff7d3105690f1d7c7b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
 <[email protected]>
Date: Wed, 13 Mar 2019 16:14:07 +0000
Subject: [PATCH] Do not report patches as installed when not all the
 related packages are installed (bsc#1128061)

Co-authored-by: Mihai Dinca <[email protected]>
---
 salt/modules/yumpkg.py | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py
index 4f26a41670..5ec3835574 100644
--- a/salt/modules/yumpkg.py
+++ b/salt/modules/yumpkg.py
@@ -3212,12 +3212,18 @@ def _get_patches(installed_only=False):
     for line in salt.utils.itertools.split(ret, os.linesep):
         inst, advisory_id, sev, pkg = re.match(r'([i|\s]) ([^\s]+) +([^\s]+) +([^\s]+)',
                                                line).groups()
-        if inst != 'i' and installed_only:
-            continue
-        patches[advisory_id] = {
-            'installed': True if inst == 'i' else False,
-            'summary': pkg
-        }
+        if not advisory_id in patches:
+            patches[advisory_id] = {
+                'installed': True if inst == 'i' else False,
+                'summary': [pkg]
+            }
+        else:
+            patches[advisory_id]['summary'].append(pkg)
+            if inst != 'i':
+                patches[advisory_id]['installed'] = False
+
+    if installed_only:
+        patches = {k: v for k, v in patches.items() if v['installed']}
     return patches
 
 
-- 
2.20.1
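
The aggregation logic is easy to verify standalone. In the sketch below the listing lines are made up, but the regular expression and the grouping mirror the patched _get_patches():

# Minimal sketch: a patch only counts as installed once every related
# package line carries the 'i' flag; sample listing lines are invented.
import re

listing = [
    'i SUSE-2019-123 important pkg-a',
    '  SUSE-2019-123 important pkg-b',  # one related package missing
    'i SUSE-2019-456 moderate pkg-c',
]

patches = {}
for line in listing:
    inst, advisory_id, sev, pkg = re.match(
        r'([i|\s]) ([^\s]+) +([^\s]+) +([^\s]+)', line).groups()
    if advisory_id not in patches:
        patches[advisory_id] = {'installed': inst == 'i', 'summary': [pkg]}
    else:
        patches[advisory_id]['summary'].append(pkg)
        if inst != 'i':
            patches[advisory_id]['installed'] = False

print(patches['SUSE-2019-123']['installed'])  # False: pkg-b not installed
print(patches['SUSE-2019-456']['installed'])  # True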


++++++ don-t-call-zypper-with-more-than-one-no-refresh.patch ++++++
--- /var/tmp/diff_new_pack.5ZApBc/_old  2019-05-03 22:38:04.914696137 +0200
+++ /var/tmp/diff_new_pack.5ZApBc/_new  2019-05-03 22:38:04.926696161 +0200
@@ -1,4 +1,4 @@
-From 1c3f8f32d475701e8b7fab64b8cb9dcd44b587d4 Mon Sep 17 00:00:00 2001
+From 5e0fe08c6afd75a7d65d6ccd6cf6b4b197fb1064 Mon Sep 17 00:00:00 2001
 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= <[email protected]>
 Date: Tue, 29 Jan 2019 09:44:03 +0100
 Subject: [PATCH] Don't call zypper with more than one --no-refresh
@@ -11,23 +11,23 @@
  2 files changed, 2 insertions(+), 2 deletions(-)
 
 diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py
-index c442337c58..bab9e22dec 100644
+index 92e7052020..7ac0df26c6 100644
 --- a/salt/modules/zypperpkg.py
 +++ b/salt/modules/zypperpkg.py
-@@ -291,7 +291,7 @@ class _Zypper(object):
+@@ -282,7 +282,7 @@ class _Zypper(object):
          self.__called = True
          if self.__xml:
              self.__cmd.append('--xmlout')
 -        if not self.__refresh:
 +        if not self.__refresh and '--no-refresh' not in args:
              self.__cmd.append('--no-refresh')
-         if self.__root:
-             self.__cmd.extend(['--root', self.__root])
+ 
+         self.__cmd.extend(args)
 diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py
-index e7474ff777..9d109a431d 100644
+index f586c23fd0..5c5091a570 100644
 --- a/tests/unit/modules/test_zypperpkg.py
 +++ b/tests/unit/modules/test_zypperpkg.py
-@@ -141,7 +141,7 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
+@@ -138,7 +138,7 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
              self.assertEqual(zypper.__zypper__.call('foo'), stdout_xml_snippet)
              self.assertEqual(len(sniffer.calls), 1)
  
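
The guard itself is a one-liner and can be shown in isolation (build_cmd is a hypothetical stand-in for the _Zypper command assembly, not the real class):

# Minimal sketch: --no-refresh is only appended when the caller has not
# already passed it, so zypper never sees the flag twice.
def build_cmd(args, refresh=False):
    cmd = ['zypper', '--xmlout']
    if not refresh and '--no-refresh' not in args:
        cmd.append('--no-refresh')
    cmd.extend(args)
    return cmd

print(build_cmd(['foo']))                  # flag added exactly once
print(build_cmd(['--no-refresh', 'foo']))  # caller's flag kept, not doubled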

++++++ fix-async-batch-race-conditions.patch ++++++
From 33c5e10c2912f584243d29c764c2c6cca86edf4a Mon Sep 17 00:00:00 2001
From: Mihai Dinca <[email protected]>
Date: Thu, 11 Apr 2019 15:57:59 +0200
Subject: [PATCH] Fix async batch race conditions

Close batching when there is no next batch
---
 salt/cli/batch_async.py            | 80 +++++++++++++++---------------
 tests/unit/cli/test_batch_async.py | 35 ++++++-------
 2 files changed, 54 insertions(+), 61 deletions(-)

diff --git a/salt/cli/batch_async.py b/salt/cli/batch_async.py
index 3160d46d8b..9c20b2fc6e 100644
--- a/salt/cli/batch_async.py
+++ b/salt/cli/batch_async.py
@@ -37,14 +37,14 @@ class BatchAsync(object):
          - tag: salt/batch/<batch-jid>/start
          - data: {
              "available_minions": self.minions,
-             "down_minions": self.down_minions
+             "down_minions": targeted_minions - presence_ping_minions
            }
 
     When the batch ends, an `done` event is fired:
         - tag: salt/batch/<batch-jid>/done
         - data: {
              "available_minions": self.minions,
-             "down_minions": self.down_minions,
+             "down_minions": targeted_minions - presence_ping_minions
              "done_minions": self.done_minions,
              "timedout_minions": self.timedout_minions
          }
@@ -67,7 +67,7 @@ class BatchAsync(object):
         self.eauth = batch_get_eauth(clear_load['kwargs'])
         self.metadata = clear_load['kwargs'].get('metadata', {})
         self.minions = set()
-        self.down_minions = set()
+        self.targeted_minions = set()
         self.timedout_minions = set()
         self.done_minions = set()
         self.active = set()
@@ -108,8 +108,7 @@ class BatchAsync(object):
                 minion = data['id']
                 if op == 'ping_return':
                     self.minions.add(minion)
-                    self.down_minions.remove(minion)
-                    if not self.down_minions:
+                    if self.targeted_minions == self.minions:
                         self.event.io_loop.spawn_callback(self.start_batch)
                 elif op == 'find_job_return':
                     self.find_job_returned.add(minion)
@@ -120,9 +119,6 @@ class BatchAsync(object):
                         # call later so that we maybe gather more returns
                        self.event.io_loop.call_later(self.batch_delay, self.schedule_next)
 
-        if self.initialized and self.done_minions == self.minions.difference(self.timedout_minions):
-            self.end_batch()
-
     def _get_next(self):
         to_run = self.minions.difference(
             self.done_minions).difference(
@@ -135,16 +131,13 @@ class BatchAsync(object):
         return set(list(to_run)[:next_batch_size])
 
     @tornado.gen.coroutine
-    def check_find_job(self, minions):
-        did_not_return = minions.difference(self.find_job_returned)
-        if did_not_return:
-            for minion in did_not_return:
-                if minion in self.find_job_returned:
-                    self.find_job_returned.remove(minion)
-                if minion in self.active:
-                    self.active.remove(minion)
-                self.timedout_minions.add(minion)
-        running = minions.difference(did_not_return).difference(self.done_minions).difference(self.timedout_minions)
+    def check_find_job(self, batch_minions):
+        timedout_minions = batch_minions.difference(self.find_job_returned).difference(self.done_minions)
+        self.timedout_minions = self.timedout_minions.union(timedout_minions)
+        self.active = self.active.difference(self.timedout_minions)
+        running = batch_minions.difference(self.done_minions).difference(self.timedout_minions)
+        if timedout_minions:
+            self.event.io_loop.call_later(self.batch_delay, self.schedule_next)
         if running:
             self.event.io_loop.add_callback(self.find_job, running)
 
@@ -183,7 +176,7 @@ class BatchAsync(object):
             jid=self.ping_jid,
             metadata=self.metadata,
             **self.eauth)
-        self.down_minions = set(ping_return['minions'])
+        self.targeted_minions = set(ping_return['minions'])
 
     @tornado.gen.coroutine
     def start_batch(self):
@@ -192,36 +185,43 @@ class BatchAsync(object):
             self.initialized = True
             data = {
                 "available_minions": self.minions,
-                "down_minions": self.down_minions,
+                "down_minions": self.targeted_minions.difference(self.minions),
                 "metadata": self.metadata
             }
             self.event.fire_event(data, "salt/batch/{0}/start".format(self.batch_jid))
             yield self.schedule_next()
 
     def end_batch(self):
-        data = {
-            "available_minions": self.minions,
-            "down_minions": self.down_minions,
-            "done_minions": self.done_minions,
-            "timedout_minions": self.timedout_minions,
-            "metadata": self.metadata
-        }
-        self.event.fire_event(data, "salt/batch/{0}/done".format(self.batch_jid))
-        self.event.remove_event_handler(self.__event_handler)
+        left = self.minions.symmetric_difference(self.done_minions.union(self.timedout_minions))
+        if not left:
+            data = {
+                "available_minions": self.minions,
+                "down_minions": self.targeted_minions.difference(self.minions),
+                "done_minions": self.done_minions,
+                "timedout_minions": self.timedout_minions,
+                "metadata": self.metadata
+            }
+            self.event.fire_event(data, "salt/batch/{0}/done".format(self.batch_jid))
+            self.event.remove_event_handler(self.__event_handler)
 
     @tornado.gen.coroutine
     def schedule_next(self):
         next_batch = self._get_next()
         if next_batch:
-            yield self.local.run_job_async(
-                next_batch,
-                self.opts['fun'],
-                self.opts['arg'],
-                'list',
-                raw=self.opts.get('raw', False),
-                ret=self.opts.get('return', ''),
-                gather_job_timeout=self.opts['gather_job_timeout'],
-                jid=self.batch_jid,
-                metadata=self.metadata)
-            self.event.io_loop.call_later(self.opts['timeout'], self.find_job, set(next_batch))
             self.active = self.active.union(next_batch)
+            try:
+                yield self.local.run_job_async(
+                    next_batch,
+                    self.opts['fun'],
+                    self.opts['arg'],
+                    'list',
+                    raw=self.opts.get('raw', False),
+                    ret=self.opts.get('return', ''),
+                    gather_job_timeout=self.opts['gather_job_timeout'],
+                    jid=self.batch_jid,
+                    metadata=self.metadata)
+                self.event.io_loop.call_later(self.opts['timeout'], self.find_job, set(next_batch))
+            except Exception as ex:
+                self.active = self.active.difference(next_batch)
+        else:
+            self.end_batch()
diff --git a/tests/unit/cli/test_batch_async.py b/tests/unit/cli/test_batch_async.py
index f65b6a06c3..d519157d92 100644
--- a/tests/unit/cli/test_batch_async.py
+++ b/tests/unit/cli/test_batch_async.py
@@ -75,8 +75,8 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
             self.batch.local.run_job_async.call_args[0],
             ('*', 'test.ping', [], 'glob')
         )
-        # assert down_minions == all minions matched by tgt
-        self.assertEqual(self.batch.down_minions, set(['foo', 'bar']))
+        # assert targeted_minions == all minions matched by tgt
+        self.assertEqual(self.batch.targeted_minions, set(['foo', 'bar']))
 
     @tornado.testing.gen_test
     def test_batch_start_on_gather_job_timeout(self):
@@ -121,7 +121,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
         self.assertEqual(len(self.batch.schedule_next.mock_calls), 1)
 
     def test_batch_fire_done_event(self):
+        self.batch.targeted_minions = {'foo', 'baz', 'bar'}
         self.batch.minions = set(['foo', 'bar'])
+        self.batch.done_minions = {'foo'}
+        self.batch.timedout_minions = {'bar'}
         self.batch.event = MagicMock()
         self.batch.metadata = {'mykey': 'myvalue'}
         self.batch.end_batch()
@@ -130,9 +133,9 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
             (
                 {
                     'available_minions': set(['foo', 'bar']),
-                    'done_minions': set(),
-                    'down_minions': set(),
-                    'timedout_minions': set(),
+                    'done_minions': self.batch.done_minions,
+                    'down_minions': {'baz'},
+                    'timedout_minions': self.batch.timedout_minions,
                     'metadata': self.batch.metadata
                 },
                 "salt/batch/1235/done"
@@ -212,7 +215,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
         self.assertEqual(self.batch._get_next(), set())
 
     def test_batch__event_handler_ping_return(self):
-        self.batch.down_minions = {'foo'}
+        self.batch.targeted_minions = {'foo'}
         self.batch.event = MagicMock(
             unpack=MagicMock(return_value=('salt/job/1234/ret/foo', {'id': 'foo'})))
         self.batch.start()
@@ -222,7 +225,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
         self.assertEqual(self.batch.done_minions, set())
 
     def test_batch__event_handler_call_start_batch_when_all_pings_return(self):
-        self.batch.down_minions = {'foo'}
+        self.batch.targeted_minions = {'foo'}
         self.batch.event = MagicMock(
             unpack=MagicMock(return_value=('salt/job/1234/ret/foo', {'id': 'foo'})))
         self.batch.start()
@@ -232,7 +235,7 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
             (self.batch.start_batch,))
 
     def test_batch__event_handler_not_call_start_batch_when_not_all_pings_return(self):
-        self.batch.down_minions = {'foo', 'bar'}
+        self.batch.targeted_minions = {'foo', 'bar'}
         self.batch.event = MagicMock(
             unpack=MagicMock(return_value=('salt/job/1234/ret/foo', {'id': 'foo'})))
         self.batch.start()
@@ -260,20 +263,10 @@ class AsyncBatchTestCase(AsyncTestCase, TestCase):
         self.assertEqual(self.batch.find_job_returned, {'foo'})
 
     @tornado.testing.gen_test
-    def test_batch__event_handler_end_batch(self):
-        self.batch.event = MagicMock(
-            unpack=MagicMock(return_value=('salt/job/not-my-jid/ret/foo', {'id': 'foo'})))
-        future = tornado.gen.Future()
-        future.set_result({'minions': ['foo', 'bar', 'baz']})
-        self.batch.local.run_job_async.return_value = future
-        self.batch.start()
-        self.batch.initialized = True
-        self.assertEqual(self.batch.down_minions, {'foo', 'bar', 'baz'})
+    def test_batch_schedule_next_end_batch_when_no_next(self):
         self.batch.end_batch = MagicMock()
-        self.batch.minions = {'foo', 'bar', 'baz'}
-        self.batch.done_minions = {'foo', 'bar'}
-        self.batch.timedout_minions = {'baz'}
-        self.batch._BatchAsync__event_handler(MagicMock())
+        self.batch._get_next = MagicMock(return_value={})
+        self.batch.schedule_next()
         self.assertEqual(len(self.batch.end_batch.mock_calls), 1)
 
     @tornado.testing.gen_test
-- 
2.20.1
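
The end-of-batch condition this patch introduces is pure set arithmetic and can be checked in isolation (a minimal sketch with made-up minion sets, not the BatchAsync class):

# Minimal sketch of the new end_batch() guard: the batch only ends once
# every available minion is either done or timed out.
targeted = {'foo', 'bar', 'baz'}
available = {'foo', 'bar'}   # minions that answered the presence ping
done = {'foo'}
timedout = {'bar'}

left = available.symmetric_difference(done.union(timedout))
if not left:
    event = {
        'available_minions': available,
        'down_minions': targeted.difference(available),  # {'baz'}
        'done_minions': done,
        'timedout_minions': timedout,
    }
    print(event)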


++++++ fix-syndic-start-issue.patch ++++++
From 0b15fe1ecc3ed468714a5a8d84787ab23ac6144e Mon Sep 17 00:00:00 2001
From: Mihai Dinca <[email protected]>
Date: Thu, 2 May 2019 10:50:17 +0200
Subject: [PATCH] Fix syndic start issue

---
 salt/utils/event.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/salt/utils/event.py b/salt/utils/event.py
index d2700bd2a0..160cba9bde 100644
--- a/salt/utils/event.py
+++ b/salt/utils/event.py
@@ -879,7 +879,7 @@ class SaltEvent(object):
         self.subscriber.callbacks.add(event_handler)
         if not self.subscriber.reading:
             # This will handle reconnects
-            self.subscriber.read_async()
+            return self.subscriber.read_async()
 
     def __del__(self):
         # skip exceptions in destroy-- since destroy() doesn't cover interpreter
-- 
2.20.1
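
The fix is a single `return`, but it changes what the caller gets back: set_event_handler() now hands the future from read_async() to the caller, so the syndic start path can wait on it. A minimal sketch of the difference, using plain asyncio instead of Salt's Tornado loop (illustrative only, Python 3.7+):

# Minimal sketch: returning the awaitable gives the caller a handle to
# wait on; calling read_async() without returning it (the old code) did not.
import asyncio

async def read_async():
    await asyncio.sleep(0.01)  # stand-in for subscribing to the event bus
    return 'subscriber reading'

def set_event_handler():
    return read_async()  # the fix: hand the awaitable back to the caller

print(asyncio.run(set_event_handler()))  # 'subscriber reading'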


++++++ mount-fix-extra-t-parameter.patch ++++++
From 215d8d9c8f872b510a1c3fbb19ab4e91bc96bb64 Mon Sep 17 00:00:00 2001
From: Alberto Planas <[email protected]>
Date: Thu, 28 Feb 2019 15:45:28 +0100
Subject: [PATCH] mount: fix extra -t parameter

If 'fstype' parameter is not set in Linux environments, salt will
build a mount command with an empty -t value, making the command
fail.
---
 salt/modules/mount.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/salt/modules/mount.py b/salt/modules/mount.py
index 4ba370e5b3..e807b1729e 100644
--- a/salt/modules/mount.py
+++ b/salt/modules/mount.py
@@ -1218,7 +1218,8 @@ def mount(name, device, mkmnt=False, fstype='', opts='defaults', user=None, util
         if fstype:
             args += ' -v {0}'.format(fstype)
     else:
-        args += ' -t {0}'.format(fstype)
+        if fstype:
+            args += ' -t {0}'.format(fstype)
     cmd = 'mount {0} {1} {2} '.format(args, device, name)
     out = __salt__['cmd.run_all'](cmd, runas=user, python_shell=False)
     if out['retcode']:
-- 
2.20.1
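
Standalone, the guard looks like this (a minimal sketch of the argument assembly, not the full mount module):

# Minimal sketch: only emit -t when fstype is non-empty; without the
# guard the command ends in a bare '-t' and mount fails to parse it.
def build_mount_args(fstype='', opts='defaults'):
    args = '-o {0}'.format(opts)
    if fstype:
        args += ' -t {0}'.format(fstype)
    return args

print(build_mount_args())       # '-o defaults'
print(build_mount_args('xfs'))  # '-o defaults -t xfs'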


++++++ use-threadpool-from-multiprocessing.pool-to-avoid-le.patch ++++++
From cd8e175738f7742fbb7c9e9d329039371bc0e579 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?=
 <[email protected]>
Date: Tue, 30 Apr 2019 10:51:42 +0100
Subject: [PATCH] Use ThreadPool from multiprocessing.pool to avoid
 leakings

---
 salt/grains/core.py | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/salt/grains/core.py b/salt/grains/core.py
index 796458939d..fec7b204bc 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
@@ -26,7 +26,7 @@ from errno import EACCES, EPERM
 import datetime
 import warnings
 
-from multiprocessing.dummy import Pool as ThreadPool
+from multiprocessing.pool import ThreadPool
 
 # pylint: disable=import-error
 try:
@@ -2225,10 +2225,14 @@ def fqdns():
     # Create a ThreadPool to process the underlying calls to 'socket.gethostbyaddr' in parallel.
     # This avoid blocking the execution when the "fqdn" is not defined for certains IP addresses, which was causing
     # that "socket.timeout" was reached multiple times secuencially, blocking execution for several seconds.
-    pool = ThreadPool(8)
-    results = pool.map(_lookup_fqdn, addresses)
-    pool.close()
-    pool.join()
+
+    try:
+       pool = ThreadPool(8)
+       results = pool.map(_lookup_fqdn, addresses)
+       pool.close()
+       pool.join()
+    except Exception as exc:
+       log.error("Exception while creating a ThreadPool for resolving FQDNs: 
%s", exc)
 
     for item in results:
         if item:
-- 
2.17.1
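
The pattern in isolation (a minimal sketch, not the patched grain; pre-initializing results before the try block is an assumption added here so the final loop stays safe if pool creation fails):

# Minimal sketch: multiprocessing.pool.ThreadPool with explicit
# close()/join() so worker threads are not leaked between runs.
from multiprocessing.pool import ThreadPool

def resolve_all(addresses, lookup):
    results = []  # safe default in case the pool cannot be created
    try:
        pool = ThreadPool(8)
        results = pool.map(lookup, addresses)
        pool.close()
        pool.join()
    except Exception as exc:
        print('Exception while creating a ThreadPool: %s' % exc)
    return [item for item in results if item]

print(resolve_all(['192.0.2.1'], lambda ip: [ip]))  # [['192.0.2.1']]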


