The branch main has been updated by imp:

URL: 
https://cgit.FreeBSD.org/src/commit/?id=20f8814cd3fa6b7cda704e07dec4da6cb9647e37

commit 20f8814cd3fa6b7cda704e07dec4da6cb9647e37
Author:     Warner Losh <[email protected]>
AuthorDate: 2023-11-13 14:23:53 +0000
Commit:     Warner Losh <[email protected]>
CommitDate: 2023-11-13 14:23:53 +0000

    busdma: On systems that use subr_busdma_bounce, measure deferred time
    
    Measure the total deferred time (from the time we decide to defer until
    we try again) for busdma_load requests. On systems that don't ever
    defer, there is no performance change. Add new sysctl
    hw.busdma.zoneX.total_deferred_time to report this (in
    microseconds).
    
    Normally, deferrals don't happen in modern hardware... Except there's a
    lot of buggy hardware that can't cope with memory > 4GB or that can't
    cross a 4GB boundary (or even more restrictive values), necessitating
    bouncing. This will measure the effect on the I/Os of this deferral.
    
    Sponsored by:           Netflix
    Reviewed by:            gallatin, mav
    Differential Revision:  https://reviews.freebsd.org/D42550
---
 sys/arm64/arm64/busdma_bounce.c |  1 +
 sys/kern/subr_busdma_bounce.c   | 10 +++++++++-
 sys/riscv/riscv/busdma_bounce.c |  1 +
 sys/x86/x86/busdma_bounce.c     |  1 +
 4 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/sys/arm64/arm64/busdma_bounce.c b/sys/arm64/arm64/busdma_bounce.c
index c1028f35ba7f..ee84d03a44d4 100644
--- a/sys/arm64/arm64/busdma_bounce.c
+++ b/sys/arm64/arm64/busdma_bounce.c
@@ -98,6 +98,7 @@ struct bus_dmamap {
        struct memdesc         mem;
        bus_dmamap_callback_t *callback;
        void                  *callback_arg;
+       __sbintime_t           queued_time;
        STAILQ_ENTRY(bus_dmamap) links;
        u_int                   flags;
 #define        DMAMAP_COHERENT         (1 << 0)
diff --git a/sys/kern/subr_busdma_bounce.c b/sys/kern/subr_busdma_bounce.c
index f5f27dae9b05..76f50b2abf38 100644
--- a/sys/kern/subr_busdma_bounce.c
+++ b/sys/kern/subr_busdma_bounce.c
@@ -76,6 +76,7 @@ struct bounce_zone {
 #ifdef dmat_domain
        int             domain;
 #endif
+       sbintime_t      total_deferred_time;
        bus_size_t      alignment;
        bus_addr_t      lowaddr;
        char            zoneid[8];
@@ -119,6 +120,7 @@ _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t 
map, int flags)
                        bz = dmat->bounce_zone;
                        STAILQ_INSERT_TAIL(&bz->bounce_map_waitinglist, map,
                            links);
+                       map->queued_time = sbinuptime();
                        mtx_unlock(&bounce_lock);
                        return (EINPROGRESS);
                }
@@ -239,7 +241,10 @@ alloc_bounce_zone(bus_dma_tag_t dmat)
            "domain", CTLFLAG_RD, &bz->domain, 0,
            "memory domain");
 #endif
-
+       SYSCTL_ADD_SBINTIME_USEC(busdma_sysctl_tree(bz),
+           SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
+           "total_deferred_time", CTLFLAG_RD, &bz->total_deferred_time,
+           "Cumulative time busdma requests are deferred (us)");
        if (start_thread) {
                if (kproc_create(busdma_thread, NULL, NULL, 0, 0, "busdma") !=
                    0)
@@ -436,6 +441,7 @@ busdma_thread(void *dummy __unused)
        STAILQ_HEAD(, bus_dmamap) callbacklist;
        bus_dma_tag_t dmat;
        struct bus_dmamap *map, *nmap;
+       struct bounce_zone *bz;
 
        thread_lock(curthread);
        sched_class(curthread, PRI_ITHD);
@@ -452,8 +458,10 @@ busdma_thread(void *dummy __unused)
 
                STAILQ_FOREACH_SAFE(map, &callbacklist, links, nmap) {
                        dmat = map->dmat;
+                       bz = dmat->bounce_zone;
                        dmat_lockfunc(dmat)(dmat_lockfuncarg(dmat),
                            BUS_DMA_LOCK);
+                       bz->total_deferred_time += (sbinuptime() - 
map->queued_time);
                        bus_dmamap_load_mem(map->dmat, map, &map->mem,
                            map->callback, map->callback_arg, BUS_DMA_WAITOK);
                        dmat_lockfunc(dmat)(dmat_lockfuncarg(dmat),
diff --git a/sys/riscv/riscv/busdma_bounce.c b/sys/riscv/riscv/busdma_bounce.c
index 6b50dc527b66..2aa6c0d0b371 100644
--- a/sys/riscv/riscv/busdma_bounce.c
+++ b/sys/riscv/riscv/busdma_bounce.c
@@ -95,6 +95,7 @@ struct bus_dmamap {
        struct memdesc         mem;
        bus_dmamap_callback_t *callback;
        void                  *callback_arg;
+       __sbintime_t           queued_time;
        STAILQ_ENTRY(bus_dmamap) links;
        u_int                   flags;
 #define        DMAMAP_COULD_BOUNCE     (1 << 0)
diff --git a/sys/x86/x86/busdma_bounce.c b/sys/x86/x86/busdma_bounce.c
index b9943ad3792e..6ae02752a805 100644
--- a/sys/x86/x86/busdma_bounce.c
+++ b/sys/x86/x86/busdma_bounce.c
@@ -90,6 +90,7 @@ struct bus_dmamap {
        struct memdesc         mem;
        bus_dmamap_callback_t *callback;
        void                  *callback_arg;
+       __sbintime_t           queued_time;
        STAILQ_ENTRY(bus_dmamap) links;
 #ifdef KMSAN
        struct memdesc         kmsan_mem;

Reply via email to