Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package drbd for openSUSE:Factory checked in 
at 2022-08-23 14:29:53
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/drbd (Old)
 and      /work/SRC/openSUSE:Factory/.drbd.new.2083 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "drbd"

Tue Aug 23 14:29:53 2022 rev:98 rq:998746 version:9.0.30~1+git.8e9c0812

Changes:
--------
--- /work/SRC/openSUSE:Factory/drbd/drbd.changes        2022-07-18 
18:34:42.953823921 +0200
+++ /work/SRC/openSUSE:Factory/.drbd.new.2083/drbd.changes      2022-08-23 
14:30:16.483691366 +0200
@@ -1,0 +2,12 @@
+Tue Aug 23 00:13:00 UTC 2022 - Heming Zhao <heming.z...@suse.com>
+
+- drbd: build error against kernel v5.19 (bsc#1202600)
+  - add patch:
+    + bsc-1202600_01-remove-QUEUE_FLAG_DISCARD.patch
+    + bsc-1202600_02-dax-introduce-DAX_RECOVERY_WRITE-dax-access-mode.patch
+    + bsc-1202600_03-block-decouple-REQ_OP_SECURE_ERASE-from-REQ_OP_DISCA.patch
+    + bsc-1202600_04-remove-assign_p_sizes_qlim.patch
+  - For rpmbuild warning, modify symlink /usr/sbin/rcdrbd to relative path
+    + drbd.spec
+
+-------------------------------------------------------------------

New:
----
  bsc-1202600_01-remove-QUEUE_FLAG_DISCARD.patch
  bsc-1202600_02-dax-introduce-DAX_RECOVERY_WRITE-dax-access-mode.patch
  bsc-1202600_03-block-decouple-REQ_OP_SECURE_ERASE-from-REQ_OP_DISCA.patch
  bsc-1202600_04-remove-assign_p_sizes_qlim.patch

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ drbd.spec ++++++
--- /var/tmp/diff_new_pack.XIhqNb/_old  2022-08-23 14:30:17.263692996 +0200
+++ /var/tmp/diff_new_pack.XIhqNb/_new  2022-08-23 14:30:17.275693021 +0200
@@ -52,6 +52,10 @@
 Patch16:        bsc-1201335_06-bdi.patch
 Patch17:        bsc-1201335_07-write-same.patch
 Patch18:        bsc-1201335_08-bio_clone_fast.patch
+Patch19:        bsc-1202600_01-remove-QUEUE_FLAG_DISCARD.patch
+Patch20:        
bsc-1202600_02-dax-introduce-DAX_RECOVERY_WRITE-dax-access-mode.patch
+Patch21:        
bsc-1202600_03-block-decouple-REQ_OP_SECURE_ERASE-from-REQ_OP_DISCA.patch
+Patch22:        bsc-1202600_04-remove-assign_p_sizes_qlim.patch
 Patch99:        suse-coccinelle.patch
 #https://github.com/openSUSE/rpmlint-checks/blob/master/KMPPolicyCheck.py
 BuildRequires:  coccinelle >= 1.0.8
@@ -103,6 +107,10 @@
 %patch16 -p1
 %patch17 -p1
 %patch18 -p1
+%patch19 -p1
+%patch20 -p1
+%patch21 -p1
+%patch22 -p1
 %patch99 -p1
 
 mkdir source
@@ -142,7 +150,7 @@
 done
 
 mkdir -p %{buildroot}/%{_sbindir}
-ln -s -f %{_sbindir}/service %{buildroot}/%{_sbindir}/rc%{name}
+ln -s service %{buildroot}/%{_sbindir}/rc%{name}
 rm -f drbd.conf
 
 %files

++++++ bsc-1202600_01-remove-QUEUE_FLAG_DISCARD.patch ++++++
/*
   This patch is related with following upstream kernel commit.
   This patch uses QUEUE_FLAG_STABLE_WRITES to replace QUEUE_FLAG_DISCARD.
 */

commit 70200574cc229f6ba038259e8142af2aa09e6976
Author: Christoph Hellwig <h...@lst.de>
Date:   Fri Apr 15 06:52:55 2022 +0200

    block: remove QUEUE_FLAG_DISCARD

    Just use a non-zero max_discard_sectors as an indicator for discard
    support, similar to what is done for write zeroes.

    The only places where needs special attention is the RAID5 driver,
    which must clear discard support for security reasons by default,
    even if the default stacking rules would allow for it.

    Signed-off-by: Christoph Hellwig <h...@lst.de>
    Reviewed-by: Martin K. Petersen <martin.peter...@oracle.com>
    Acked-by: Christoph Böhmwalder <christoph.boehmwal...@linbit.com> [drbd]
    Acked-by: Jan Höppner <hoepp...@linux.ibm.com> [s390]
    Acked-by: Coly Li <col...@suse.de> [bcache]
    Acked-by: David Sterba <dste...@suse.com> [btrfs]
    Reviewed-by: Chaitanya Kulkarni <k...@nvidia.com>
    Link: https://lore.kernel.org/r/20220415045258.199825-25-...@lst.de
    Signed-off-by: Jens Axboe <ax...@kernel.dk>
---

diff -Nupr a/drbd/drbd-kernel-compat/tests/have_blk_queue_flag_set.c 
b/drbd/drbd-kernel-compat/tests/have_blk_queue_flag_set.c
--- a/drbd/drbd-kernel-compat/tests/have_blk_queue_flag_set.c   2022-08-22 
18:14:20.639382230 +0800
+++ b/drbd/drbd-kernel-compat/tests/have_blk_queue_flag_set.c   2022-08-22 
18:14:43.819285373 +0800
@@ -3,5 +3,5 @@
 
 void dummy(struct request_queue *q)
 {
-       blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+       blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
 }
diff -Nupr a/drbd/drbd_main.c b/drbd/drbd_main.c
--- a/drbd/drbd_main.c  2022-08-23 08:04:26.097721587 +0800
+++ b/drbd/drbd_main.c  2022-08-23 08:04:41.449655955 +0800
@@ -1574,7 +1574,7 @@ static void assign_p_sizes_qlim(struct d
                p->qlim->alignment_offset = 
cpu_to_be32(queue_alignment_offset(q));
                p->qlim->io_min = cpu_to_be32(queue_io_min(q));
                p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
-               p->qlim->discard_enabled = blk_queue_discard(q);
+               p->qlim->discard_enabled = 
!!bdev_max_discard_sectors(device->ldev->backing_bdev);
                p->qlim->discard_zeroes_data = queue_discard_zeroes_data(q);
                p->qlim->write_same_capable = 
!!q->limits.max_write_same_sectors;
        } else {
diff -Nupr a/drbd/drbd_nl.c b/drbd/drbd_nl.c
--- a/drbd/drbd_nl.c    2022-08-23 08:04:26.101721570 +0800
+++ b/drbd/drbd_nl.c    2022-08-23 08:04:41.453655938 +0800
@@ -1967,13 +1967,14 @@ static unsigned int drbd_max_discard_sec
 static void decide_on_discard_support(struct drbd_device *device,
                        struct request_queue *q,
                        struct request_queue *b,
-                       bool discard_zeroes_if_aligned)
+                       bool discard_zeroes_if_aligned,
+                       struct drbd_backing_dev *bdev)
 {
        /* q = drbd device queue (device->rq_queue)
         * b = backing device queue 
(device->ldev->backing_bdev->bd_disk->queue),
         *     or NULL if diskless
         */
-       bool can_do = b ? blk_queue_discard(b) : true;
+       bool can_do = b ? bdev_max_discard_sectors(bdev->backing_bdev) : true;
 
        if (can_do && b && !queue_discard_zeroes_data(b) && 
!discard_zeroes_if_aligned) {
                can_do = false;
@@ -1992,23 +1993,12 @@ static void decide_on_discard_support(st
                 * topology on all peers. */
                blk_queue_discard_granularity(q, 512);
                q->limits.max_discard_sectors = 
drbd_max_discard_sectors(device->resource);
-               blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+               q->limits.max_write_zeroes_sectors =
+                       drbd_max_discard_sectors(device->resource);
        } else {
-               blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
                blk_queue_discard_granularity(q, 0);
                q->limits.max_discard_sectors = 0;
-       }
-}
-
-static void fixup_discard_if_not_supported(struct request_queue *q)
-{
-       /* To avoid confusion, if this queue does not support discard, clear
-        * max_discard_sectors, which is what lsblk -D reports to the user.
-        * Older kernels got this wrong in "stack limits".
-        * */
-       if (!blk_queue_discard(q)) {
-               blk_queue_max_discard_sectors(q, 0);
-               blk_queue_discard_granularity(q, 0);
+               q->limits.max_write_zeroes_sectors = 0;
        }
 }
 
@@ -2116,7 +2106,7 @@ static void drbd_setup_queue_param(struc
        blk_queue_max_hw_sectors(q, max_hw_sectors);
        /* This is the workaround for "bio would need to, but cannot, be split" 
*/
        blk_queue_segment_boundary(q, PAGE_SIZE-1);
-       decide_on_discard_support(device, q, b, discard_zeroes_if_aligned);
+       decide_on_discard_support(device, q, b, discard_zeroes_if_aligned, 
bdev);
        decide_on_write_same_support(device, q, b, o, disable_write_same);
 
        if (b) {
@@ -2127,7 +2117,6 @@ static void drbd_setup_queue_param(struc
                blk_queue_update_readahead(q);
 #endif
        }
-       fixup_discard_if_not_supported(q);
        fixup_write_zeroes(device, q);
 }
 
@@ -2233,13 +2222,14 @@ static void sanitize_disk_conf(struct dr
                               struct drbd_backing_dev *nbc)
 {
        struct request_queue * const q = nbc->backing_bdev->bd_disk->queue;
+       struct block_device *bdev = nbc->backing_bdev;
 
        if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
                disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
        if (disk_conf->al_extents > drbd_al_extents_max(nbc))
                disk_conf->al_extents = drbd_al_extents_max(nbc);
 
-       if (!blk_queue_discard(q) ||
+       if (!bdev_max_discard_sectors(bdev) ||
            (!queue_discard_zeroes_data(q) && 
!disk_conf->discard_zeroes_if_aligned)) {
                if (disk_conf->rs_discard_granularity) {
                        disk_conf->rs_discard_granularity = 0; /* disable 
feature */
@@ -2261,7 +2251,7 @@ static void sanitize_disk_conf(struct dr
                /* compat:
                 * old kernel has 0 granularity means "unknown" means one 
sector.
                 * current kernel has 0 granularity means "discard not 
supported".
-                * Not supported is checked above already with 
!blk_queue_discard(q).
+                * Not supported is checked above already with 
!bdev_max_discard_sectors().
                 */
                unsigned int ql_dg = q->limits.discard_granularity ?: 512;
 
diff -Nupr a/drbd/drbd_receiver.c b/drbd/drbd_receiver.c
--- a/drbd/drbd_receiver.c      2022-08-23 08:04:26.105721553 +0800
+++ b/drbd/drbd_receiver.c      2022-08-23 08:25:31.188262629 +0800
@@ -1686,11 +1686,10 @@ int drbd_issue_discard_or_zero_out(struc
 
 static bool can_do_reliable_discards(struct drbd_device *device)
 {
-       struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
        struct disk_conf *dc;
        bool can_do;
 
-       if (!blk_queue_discard(q))
+       if (!bdev_max_discard_sectors(device->ldev->backing_bdev))
                return false;
 
        if (queue_discard_zeroes_data(q))

++++++ bsc-1202600_02-dax-introduce-DAX_RECOVERY_WRITE-dax-access-mode.patch 
++++++
/* This patch is related with following upstream kernel commit. */

From e511c4a3d2a1f64aafc1f5df37a2ffcf7ef91b55 Mon Sep 17 00:00:00 2001
From: Jane Chu <jane....@oracle.com>
Date: Fri, 13 May 2022 15:10:58 -0700
Subject: [PATCH] dax: introduce DAX_RECOVERY_WRITE dax access mode

Up till now, dax_direct_access() is used implicitly for normal
access, but for the purpose of recovery write, dax range with
poison is requested.  To make the interface clear, introduce
        enum dax_access_mode {
                DAX_ACCESS,
                DAX_RECOVERY_WRITE,
        }
where DAX_ACCESS is used for normal dax access, and
DAX_RECOVERY_WRITE is used for dax recovery write.

Suggested-by: Dan Williams <dan.j.willi...@intel.com>
Signed-off-by: Jane Chu <jane....@oracle.com>
Reviewed-by: Christoph Hellwig <h...@lst.de>
Cc: Mike Snitzer <snit...@redhat.com>
Reviewed-by: Vivek Goyal <vgo...@redhat.com>
Link: 
https://lore.kernel.org/r/165247982851.52965.11024212198889762949.st...@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.willi...@intel.com>
---

diff -Nupr a/drbd/drbd_dax_pmem.c b/drbd/drbd_dax_pmem.c
--- a/drbd/drbd_dax_pmem.c      2022-08-22 16:26:24.786369807 +0800
+++ b/drbd/drbd_dax_pmem.c      2022-08-22 16:26:47.706276429 +0800
@@ -86,7 +86,7 @@ static int map_superblock_for_dax(struct
        int id;
 
        id = dax_read_lock();
-       len = dax_direct_access(dax_dev, pgoff, want, &kaddr, &pfn_unused);
+       len = dax_direct_access(dax_dev, pgoff, want, DAX_ACCESS, &kaddr, 
&pfn_unused);
        dax_read_unlock(id);
 
        if (len < want)
@@ -147,7 +147,7 @@ int drbd_dax_map(struct drbd_backing_dev
        int id;
 
        id = dax_read_lock();
-       len = dax_direct_access(dax_dev, pgoff, want, &kaddr, &pfn_unused);
+       len = dax_direct_access(dax_dev, pgoff, want, DAX_ACCESS, &kaddr, 
&pfn_unused);
        dax_read_unlock(id);
 
        if (len < want)

++++++ 
bsc-1202600_03-block-decouple-REQ_OP_SECURE_ERASE-from-REQ_OP_DISCA.patch ++++++
/* This patch is related with following upstream kernel commit. */

From 44abff2c0b970ae3d310b97617525dc01f248d7c Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <h...@lst.de>
Date: Fri, 15 Apr 2022 06:52:57 +0200
Subject: [PATCH] block: decouple REQ_OP_SECURE_ERASE from REQ_OP_DISCARD
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Secure erase is a very different operation from discard in that it is
a data integrity operation vs hint.  Fully split the limits and helper
infrastructure to make the separation more clear.

Signed-off-by: Christoph Hellwig <h...@lst.de>
Reviewed-by: Martin K. Petersen <martin.peter...@oracle.com>
Acked-by: Christoph Böhmwalder <christoph.boehmwal...@linbit.com> [drbd]
Acked-by: Ryusuke Konishi <konishi.ryus...@gmail.com> [nifs2]
Acked-by: Jaegeuk Kim <jaeg...@kernel.org> [f2fs]
Acked-by: Coly Li <col...@suse.de> [bcache]
Acked-by: David Sterba <dste...@suse.com> [btrfs]
Acked-by: Chao Yu <c...@kernel.org>
Reviewed-by: Chaitanya Kulkarni <k...@nvidia.com>
Link: https://lore.kernel.org/r/20220415045258.199825-27-...@lst.de
Signed-off-by: Jens Axboe <ax...@kernel.dk>
---

diff -Nupr a/drbd/drbd_receiver.c b/drbd/drbd_receiver.c
--- a/drbd/drbd_receiver.c      2022-08-22 16:34:38.332333231 +0800
+++ b/drbd/drbd_receiver.c      2022-08-22 16:36:22.651901537 +0800
@@ -1658,7 +1658,8 @@ int drbd_issue_discard_or_zero_out(struc
                start = tmp;
        }
        while (nr_sectors >= max_discard_sectors) {
-               err |= blkdev_issue_discard(bdev, start, max_discard_sectors, 
GFP_NOIO, 0);
+               err |= blkdev_issue_discard(bdev, start, max_discard_sectors,
+                                           GFP_NOIO);
                nr_sectors -= max_discard_sectors;
                start += max_discard_sectors;
        }
@@ -1670,7 +1671,7 @@ int drbd_issue_discard_or_zero_out(struc
                nr = nr_sectors;
                nr -= (unsigned int)nr % granularity;
                if (nr) {
-                       err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO, 
0);
+                       err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO);
                        nr_sectors -= nr;
                        start += nr;
                }

++++++ bsc-1202600_04-remove-assign_p_sizes_qlim.patch ++++++
/* This patch is related with following upstream kernel commit. */

From 40349d0e16cedd0de561f59752c3249780fb749b Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <h...@lst.de>
Date: Fri, 15 Apr 2022 06:52:35 +0200
Subject: [PATCH] drbd: remove assign_p_sizes_qlim
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fold each branch into its only caller.

Signed-off-by: Christoph Hellwig <h...@lst.de>
Acked-by: Christoph Böhmwalder <christoph.boehmwal...@linbit.com>
Link: https://lore.kernel.org/r/20220415045258.199825-5-...@lst.de
Signed-off-by: Jens Axboe <ax...@kernel.dk>
---

diff -Nupr a/drbd/drbd_main.c b/drbd/drbd_main.c
--- a/drbd/drbd_main.c  2022-08-22 23:16:08.082402396 +0800
+++ b/drbd/drbd_main.c  2022-08-22 23:22:31.292772570 +0800
@@ -1565,31 +1565,6 @@ int drbd_attach_peer_device(struct drbd_
        return err;
 }
 
-/* communicated if (agreed_features & DRBD_FF_WSAME) */
-static void assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p, 
struct request_queue *q)
-{
-       if (q) {
-               p->qlim->physical_block_size = 
cpu_to_be32(queue_physical_block_size(q));
-               p->qlim->logical_block_size = 
cpu_to_be32(queue_logical_block_size(q));
-               p->qlim->alignment_offset = 
cpu_to_be32(queue_alignment_offset(q));
-               p->qlim->io_min = cpu_to_be32(queue_io_min(q));
-               p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
-               p->qlim->discard_enabled = 
!!bdev_max_discard_sectors(device->ldev->backing_bdev);
-               p->qlim->discard_zeroes_data = queue_discard_zeroes_data(q);
-               p->qlim->write_same_capable = 
!!q->limits.max_write_same_sectors;
-       } else {
-               q = device->rq_queue;
-               p->qlim->physical_block_size = 
cpu_to_be32(queue_physical_block_size(q));
-               p->qlim->logical_block_size = 
cpu_to_be32(queue_logical_block_size(q));
-               p->qlim->alignment_offset = 0;
-               p->qlim->io_min = cpu_to_be32(queue_io_min(q));
-               p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
-               p->qlim->discard_enabled = 0;
-               p->qlim->discard_zeroes_data = 0;
-               p->qlim->write_same_capable = 0;
-       }
-}
-
 int drbd_send_sizes(struct drbd_peer_device *peer_device,
                    uint64_t u_size_diskless, enum dds_flags flags)
 {
@@ -1610,7 +1585,9 @@ int drbd_send_sizes(struct drbd_peer_dev
 
        memset(p, 0, packet_size);
        if (get_ldev_if_state(device, D_NEGOTIATING)) {
-               struct request_queue *q = 
bdev_get_queue(device->ldev->backing_bdev);
+               struct block_device *bdev = device->ldev->backing_bdev;
+               struct request_queue *q = bdev_get_queue(bdev);
+
                d_size = drbd_get_max_capacity(device, device->ldev, false);
                rcu_read_lock();
                u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
@@ -1618,14 +1595,32 @@ int drbd_send_sizes(struct drbd_peer_dev
                q_order_type = drbd_queue_order_type(device);
                max_bio_size = queue_max_hw_sectors(q) << 9;
                max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
-               assign_p_sizes_qlim(device, p, q);
+               p->qlim->physical_block_size =
+                       cpu_to_be32(bdev_physical_block_size(bdev));
+               p->qlim->logical_block_size =
+                       cpu_to_be32(bdev_logical_block_size(bdev));
+               p->qlim->alignment_offset =
+                       cpu_to_be32(bdev_alignment_offset(bdev));
+               p->qlim->io_min = cpu_to_be32(bdev_io_min(bdev));
+               p->qlim->io_opt = cpu_to_be32(bdev_io_opt(bdev));
+               p->qlim->discard_enabled = !!bdev_max_discard_sectors(bdev);
                put_ldev(device);
        } else {
+               struct request_queue *q = device->rq_queue;
+
+               p->qlim->physical_block_size =
+                       cpu_to_be32(queue_physical_block_size(q));
+               p->qlim->logical_block_size =
+                       cpu_to_be32(queue_logical_block_size(q));
+               p->qlim->alignment_offset = 0;
+               p->qlim->io_min = cpu_to_be32(queue_io_min(q));
+               p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
+               p->qlim->discard_enabled = 0;
+
                d_size = 0;
                u_size = u_size_diskless;
                q_order_type = QUEUE_ORDERED_NONE;
                max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per 
peer_request */
-               assign_p_sizes_qlim(device, p, NULL);
        }
 
        if (peer_device->connection->agreed_pro_version <= 94)

Reply via email to