The commit is pushed to "branch-rh9-5.14.0-427.44.1.vz9.80.x-ovz" and will 
appear at g...@bitbucket.org:openvz/vzkernel.git
after rh9-5.14.0-427.44.1.vz9.80.6
------>
commit 82040b9a30ca738170689738aa53ab617ccb6169
Author: Alexander Atanasov <alexander.atana...@virtuozzo.com>
Date:   Fri Jan 24 17:36:37 2025 +0200

    dm-ploop: fix how current flags are managed inside threads
    
    Currently, threads do:
    
    old_flags = current->flags;
    current->flags |= ploop flags;
    ...
    current->flags = old_flags;
    
    This can clobber flags the process acquired in the meantime.
    
    To fix this, use the current_restore_flags(..) macro, which removes
    only our flags and preserves any other flags.
    
    https://virtuozzo.atlassian.net/browse/VSTOR-98626
    Signed-off-by: Alexander Atanasov <alexander.atana...@virtuozzo.com>
    
    ======
    Patchset description:
    ploop: optimisations and scaling
    
    Ploop processes requests in different threads in parallel
    where possible, which results in a significant improvement in
    performance and makes further optimisations possible.
    
    Known bugs:
      - delayed metadata writeback is not working and is missing error handling
         - patch to disable it until fixed
      - fast path is not working - causes rcu lockups - patch to disable it
    
    Further improvements:
      - optimize md pages lookups
    
    Alexander Atanasov (50):
      dm-ploop: md_pages map all pages at creation time
      dm-ploop: Use READ_ONCE/WRITE_ONCE to access md page data
      dm-ploop: fsync after all pios are sent
      dm-ploop: move md status to use proper bitops
      dm-ploop: convert wait_list and wb_batch_llist to use lockless lists
      dm-ploop: convert enospc handling to use lockless lists
      dm-ploop: convert suspended_pios list to use lockless list
      dm-ploop: convert the rest of the lists to use llist variant
      dm-ploop: combine processing of pios thru prepare list and remove
        fsync worker
      dm-ploop: move from wq to kthread
      dm-ploop: move preparations of pios into the caller from worker
      dm-ploop: fast path execution for reads
      dm-ploop: do not use a wrapper for set_bit to make a page writeback
      dm-ploop: BAT use only one list for writeback
      dm-ploop: make md writeback timeout to be per page
      dm-ploop: add interface to disable bat writeback delay
      dm-ploop: convert wb_batch_list to lockless variant
      dm-ploop: convert high_prio to status
      dm-ploop: split cow processing into two functions
      dm-ploop: convert md page rw lock to spin lock
      dm-ploop: convert bat_rwlock to bat_lock spinlock
      dm-ploop: prepare bat updates under bat_lock
      dm-ploop: make ploop_bat_write_complete ready for parallel pio
        completion
      dm-ploop: make ploop_submit_metadata_writeback return number of
        requests sent
      dm-ploop: introduce pio runner threads
      dm-ploop: add pio list ids to be used when passing pios to runners
      dm-ploop: process pios via runners
      dm-ploop: disable metadata writeback delay
      dm-ploop: disable fast path
      dm-ploop: use lockless lists for chained cow updates list
      dm-ploop: use lockless lists for data ready pios
      dm-ploop: give runner threads better name
      dm-ploop: resize operation - add holes bitmap locking
      dm-ploop: remove unnecessary operations
      dm-ploop: use filp per thread
      dm-ploop: catch if we try to advance pio past bio end
      dm-ploop: support REQ_FUA for data pios
      dm-ploop: proplerly access nr_bat_entries
      dm-ploop: fix locking and improve error handling when submitting pios
      dm-ploop: fix how ENOTBLK is handled
      dm-ploop: sync when suspended or stopping
      dm-ploop: rework bat completion logic
      dm-ploop: rework logic in pio processing
      dm-ploop: end fsync pios in parallel
      dm-ploop: make filespace preallocations async
      dm-ploop: resubmit enospc pios from dispatcher thread
      dm-ploop: dm-ploop: simplify discard completion
      dm-ploop: use GFP_ATOMIC instead of GFP_NOIO
      dm-ploop: fix locks used in mixed context
      dm-ploop: fix how current flags are managed inside threads
    
    Andrey Zhadchenko (13):
      dm-ploop: do not flush after metadata writes
      dm-ploop: set IOCB_DSYNC on all FUA requests
      dm-ploop: remove extra ploop_cluster_is_in_top_delta()
      dm-ploop: introduce per-md page locking
      dm-ploop: reduce BAT accesses on discard completion
      dm-ploop: simplify llseek
      dm-ploop: speed up ploop_prepare_bat_update()
      dm-ploop: make new allocations immediately visible in BAT
      dm-ploop: drop ploop_cluster_is_in_top_delta()
      dm-ploop: do not wait for BAT update for non-FUA requests
      dm-ploop: add delay for metadata writeback
      dm-ploop: submit all postponed metadata on REQ_OP_FLUSH
      dm-ploop: handle REQ_PREFLUSH
    
    Feature: dm-ploop: ploop target driver
---
 drivers/md/dm-ploop-map.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/md/dm-ploop-map.c b/drivers/md/dm-ploop-map.c
index c6c46b49bccf..7efbaedd69e0 100644
--- a/drivers/md/dm-ploop-map.c
+++ b/drivers/md/dm-ploop-map.c
@@ -2150,6 +2150,7 @@ void ploop_resubmit_enospc_pios(struct ploop *ploop)
                ploop_submit_embedded_pios(ploop, 
llist_reverse_order(enospc_pending));
 }
 
+#define PLOOP_PFLAGS (PF_IO_THREAD|PF_LOCAL_THROTTLE|PF_MEMALLOC_NOIO)
 void do_ploop_run_work(struct ploop *ploop)
 {
        LLIST_HEAD(deferred_pios);
@@ -2163,7 +2164,7 @@ void do_ploop_run_work(struct ploop *ploop)
        int npios;
        int force_md_update;
 
-       current->flags |= PF_IO_THREAD|PF_LOCAL_THROTTLE|PF_MEMALLOC_NOIO;
+       current->flags |= PLOOP_PFLAGS;
 
        if (ploop->submit_enospc) {
                ploop->submit_enospc = false;
@@ -2201,10 +2202,10 @@ void do_ploop_run_work(struct ploop *ploop)
         * put stick in the wheel - wait for all pios to be executed before 
doing metadata
         * we could gain a bit more iops if we can skip this wait but for now 
we can not
         */
-       current->flags = old_flags;
+       current_restore_flags(old_flags, PLOOP_PFLAGS);
        wait_event_interruptible(ploop->dispatcher_wq_data,
                                 (!ploop_runners_have_pending(ploop)));
-       current->flags |= PF_IO_THREAD|PF_LOCAL_THROTTLE|PF_MEMALLOC_NOIO;
+       current->flags |= PLOOP_PFLAGS;
 
        /* if we have a flush we must sync md data too */
        force_md_update = !!llflush_pios | ploop->force_md_writeback;
@@ -2212,25 +2213,25 @@ void do_ploop_run_work(struct ploop *ploop)
 
        if (npios) {
                /* wait for metadata writeback to complete */
-               current->flags = old_flags;
+               current_restore_flags(old_flags, PLOOP_PFLAGS);
                /* First wait all pios to be processed */
                wait_event_interruptible(ploop->dispatcher_wq_data,
                                         (!ploop_runners_have_pending(ploop)));
-               current->flags |= 
PF_IO_THREAD|PF_LOCAL_THROTTLE|PF_MEMALLOC_NOIO;
+               current->flags |= PLOOP_PFLAGS;
        }
        if (llflush_pios || npios) {
                /* Now process fsync pios after we have done all other */
                npios = process_ploop_fsync_work(ploop, llflush_pios);
                /* Since dispatcher is single thread no other work can be 
queued */
                if (npios) {
-                       current->flags = old_flags;
+                       current_restore_flags(old_flags, PLOOP_PFLAGS);
                        wait_event_interruptible(ploop->dispatcher_wq_fsync,
                                        
!atomic_read(&ploop->kt_worker->fsync_pios));
-                       current->flags |= 
PF_IO_THREAD|PF_LOCAL_THROTTLE|PF_MEMALLOC_NOIO;
+                       current->flags |= PLOOP_PFLAGS;
                }
        }
 
-       current->flags = old_flags;
+       current_restore_flags(old_flags, PLOOP_PFLAGS);
 }
 
 void do_ploop_work(struct work_struct *ws)
@@ -2253,7 +2254,7 @@ int ploop_allocator(void *data)
                if (ploop->prealloc_size) {
                        __set_current_state(TASK_RUNNING);
                        file = ploop_top_delta(ploop)->file;
-                       current->flags |= 
PF_IO_THREAD|PF_LOCAL_THROTTLE|PF_MEMALLOC_NOIO;
+                       current->flags |= PLOOP_PFLAGS;
                        ret = ploop_preallocate_cluster(ploop, file);
                        if (ret) {
                                /*
@@ -2266,7 +2267,7 @@ int ploop_allocator(void *data)
                                               ploop_device_name(ploop), ret);
                        }
                        wake_up_interruptible(&ploop->dispatcher_wq_prealloc);
-                       current->flags = old_flags;
+                       current_restore_flags(old_flags, PLOOP_PFLAGS);
                }
                if (kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
@@ -2291,7 +2292,7 @@ int ploop_pio_runner(void *data)
        int did_process_fsync = 0;
 
        for (;;) {
-               current->flags = old_flags;
+               current_restore_flags(old_flags, PLOOP_PFLAGS);
                set_current_state(TASK_INTERRUPTIBLE);
 
 check_for_more:
@@ -2314,8 +2315,7 @@ int ploop_pio_runner(void *data)
                        continue;
                }
                __set_current_state(TASK_RUNNING);
-               old_flags = current->flags;
-               current->flags |= 
PF_IO_THREAD|PF_LOCAL_THROTTLE|PF_MEMALLOC_NOIO;
+               current->flags |= PLOOP_PFLAGS;
 
                llist_for_each_safe(pos, t, llwork) {
                        pio = list_entry((struct list_head *)pos, typeof(*pio), 
list);
_______________________________________________
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel

Reply via email to