Re: [PATCH v10 4/7] softmmu: Support concurrent bounce buffers

2024-05-07 Thread Peter Xu
On Tue, May 07, 2024 at 07:34:28AM -0700, Mattias Nissler wrote:
> When DMA memory can't be directly accessed, as is the case when
> running the device model in a separate process without shareable DMA
> file descriptors, bounce buffering is used.
> 
> It is not uncommon for device models to request mapping of several DMA
> regions at the same time. Examples include:
>  * net devices, e.g. when transmitting a packet that is split across
>several TX descriptors (observed with igb)
>  * USB host controllers, when handling a packet with multiple data TRBs
>(observed with xhci)
> 
> Previously, QEMU only provided a single bounce buffer per AddressSpace
> and would fail DMA map requests while the buffer was already in use. In
> turn, this would cause DMA failures that ultimately manifest as hardware
> errors from the guest perspective.
> 
> This change allocates DMA bounce buffers dynamically instead of
> supporting only a single buffer. Thus, multiple DMA mappings work
> correctly even when RAM can't be mmap()-ed.
> 
> The total bounce buffer allocation size is limited individually for each
> AddressSpace. The default limit is 4096 bytes, matching the previous
> maximum buffer size. A new x-max-bounce-buffer-size parameter is
> provided to configure the limit for PCI devices.
> 
> Signed-off-by: Mattias Nissler 

Acked-by: Peter Xu 

-- 
Peter Xu




[PATCH v10 4/7] softmmu: Support concurrent bounce buffers

2024-05-07 Thread Mattias Nissler
When DMA memory can't be directly accessed, as is the case when
running the device model in a separate process without shareable DMA
file descriptors, bounce buffering is used.

It is not uncommon for device models to request mapping of several DMA
regions at the same time. Examples include:
 * net devices, e.g. when transmitting a packet that is split across
   several TX descriptors (observed with igb)
 * USB host controllers, when handling a packet with multiple data TRBs
   (observed with xhci)

Previously, QEMU only provided a single bounce buffer per AddressSpace
and would fail DMA map requests while the buffer was already in use. In
turn, this would cause DMA failures that ultimately manifest as hardware
errors from the guest perspective.
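
For illustration, here is a minimal sketch of the pattern those devices rely
on, written against QEMU's pci_dma_map()/pci_dma_unmap() helpers. The helper
name and descriptor layout are made up for this example; it is not code from
this patch. The point is that both mappings must be live at the same time,
which the old single-buffer scheme could not provide when RAM is indirect:

    /* Hypothetical device-model helper; assumes "hw/pci/pci_device.h"
     * and "sysemu/dma.h". */
    static void tx_from_two_descriptors(PCIDevice *dev,
                                        dma_addr_t addr0, dma_addr_t len0,
                                        dma_addr_t addr1, dma_addr_t len1)
    {
        dma_addr_t l0 = len0, l1 = len1;
        void *buf0, *buf1;

        /* Both fragments must be mapped at once to assemble the packet. */
        buf0 = pci_dma_map(dev, addr0, &l0, DMA_DIRECTION_TO_DEVICE);
        buf1 = pci_dma_map(dev, addr1, &l1, DMA_DIRECTION_TO_DEVICE);
        if (!buf0 || !buf1) {
            /* With the old code, buf1 was NULL whenever buf0 had claimed
             * the single per-AddressSpace bounce buffer. */
            goto out;
        }

        /* ... hand buf0/buf1 to the backend ... */

    out:
        if (buf1) {
            pci_dma_unmap(dev, buf1, l1, DMA_DIRECTION_TO_DEVICE, 0);
        }
        if (buf0) {
            pci_dma_unmap(dev, buf0, l0, DMA_DIRECTION_TO_DEVICE, 0);
        }
    }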

This change allocates DMA bounce buffers dynamically instead of
supporting only a single buffer. Thus, multiple DMA mappings work
correctly even when RAM can't be mmap()-ed.
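
The corresponding physmem.c changes are further down in the diff. As a rough
sketch of the scheme (not the verbatim implementation), every mapping now gets
its own heap-allocated buffer, and its size is accounted against the
per-AddressSpace budget described in the next paragraph:

    /* Sketch only: per-mapping bounce buffer with atomic size accounting. */
    typedef struct BounceBuffer {
        MemoryRegion *mr;    /* region backing the bounced guest address */
        hwaddr addr;         /* guest address being bounced */
        size_t len;
        uint8_t buffer[];    /* payload storage follows the header */
    } BounceBuffer;

    static void *bounce_buffer_try_alloc(AddressSpace *as, MemoryRegion *mr,
                                         hwaddr addr, size_t len)
    {
        /* Reserve the space up front so concurrent map calls stay within
         * the limit. */
        size_t prev = qatomic_fetch_add(&as->bounce_buffer_size, len);

        if (prev + len > as->max_bounce_buffer_size) {
            qatomic_sub(&as->bounce_buffer_size, len); /* over budget, undo */
            return NULL;  /* caller falls back to the map-client retry path */
        }

        BounceBuffer *bounce = g_malloc(sizeof(*bounce) + len);
        bounce->mr = mr;
        bounce->addr = addr;
        bounce->len = len;
        return bounce->buffer;
    }

On unmap, the buffer is freed and its length subtracted from
bounce_buffer_size again, which is what lets several mappings coexist up to
the configured limit.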

The total bounce buffer allocation size is limited individually for each
AddressSpace. The default limit is 4096 bytes, matching the previous
maximum buffer size. A new x-max-bounce-buffer-size parameter is
provided to configure the limit for PCI devices.
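
As a usage example (device and value picked only for illustration; the
property is registered on the PCIDevice base class and therefore available on
any PCI device):

    -device igb,netdev=net0,x-max-bounce-buffer-size=65536

The setting only matters when the device's DMA actually goes through bounce
buffers, e.g. when the device model runs in a separate process without
shareable DMA file descriptors.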

Signed-off-by: Mattias Nissler 
---
 hw/pci/pci.c|  8 
 include/exec/memory.h   | 14 +++
 include/hw/pci/pci_device.h |  3 ++
 system/memory.c |  5 ++-
 system/physmem.c| 82 ++---
 5 files changed, 76 insertions(+), 36 deletions(-)

diff --git a/hw/pci/pci.c b/hw/pci/pci.c
index 324c1302d2..d6f4944cbd 100644
--- a/hw/pci/pci.c
+++ b/hw/pci/pci.c
@@ -85,6 +85,8 @@ static Property pci_props[] = {
                     QEMU_PCIE_ERR_UNC_MASK_BITNR, true),
     DEFINE_PROP_BIT("x-pcie-ari-nextfn-1", PCIDevice, cap_present,
                     QEMU_PCIE_ARI_NEXTFN_1_BITNR, false),
+    DEFINE_PROP_SIZE32("x-max-bounce-buffer-size", PCIDevice,
+                       max_bounce_buffer_size, DEFAULT_MAX_BOUNCE_BUFFER_SIZE),
     DEFINE_PROP_END_OF_LIST()
 };
 
@@ -1204,6 +1206,8 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev,
                        "bus master container", UINT64_MAX);
     address_space_init(&pci_dev->bus_master_as,
                        &pci_dev->bus_master_container_region, pci_dev->name);
+    pci_dev->bus_master_as.max_bounce_buffer_size =
+        pci_dev->max_bounce_buffer_size;
 
     if (phase_check(PHASE_MACHINE_READY)) {
         pci_init_bus_master(pci_dev);
@@ -2633,6 +2637,10 @@ static void pci_device_class_init(ObjectClass *klass, void *data)
     k->unrealize = pci_qdev_unrealize;
     k->bus_type = TYPE_PCI_BUS;
     device_class_set_props(k, pci_props);
+    object_class_property_set_description(
+        klass, "x-max-bounce-buffer-size",
+        "Maximum buffer size allocated for bounce buffers used for mapped "
+        "access to indirect DMA memory");
 }
 
 static void pci_device_class_base_init(ObjectClass *klass, void *data)
diff --git a/include/exec/memory.h b/include/exec/memory.h
index d417d7f363..451879efbd 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -1117,13 +1117,7 @@ typedef struct AddressSpaceMapClient {
     QLIST_ENTRY(AddressSpaceMapClient) link;
 } AddressSpaceMapClient;
 
-typedef struct {
-    MemoryRegion *mr;
-    void *buffer;
-    hwaddr addr;
-    hwaddr len;
-    bool in_use;
-} BounceBuffer;
+#define DEFAULT_MAX_BOUNCE_BUFFER_SIZE (4096)
 
 /**
  * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
@@ -1143,8 +1137,10 @@ struct AddressSpace {
     QTAILQ_HEAD(, MemoryListener) listeners;
     QTAILQ_ENTRY(AddressSpace) address_spaces_link;
 
-    /* Bounce buffer to use for this address space. */
-    BounceBuffer bounce;
+    /* Maximum DMA bounce buffer size used for indirect memory map requests */
+    size_t max_bounce_buffer_size;
+    /* Total size of bounce buffers currently allocated, atomically accessed */
+    size_t bounce_buffer_size;
     /* List of callbacks to invoke when buffers free up */
     QemuMutex map_client_list_lock;
     QLIST_HEAD(, AddressSpaceMapClient) map_client_list;
diff --git a/include/hw/pci/pci_device.h b/include/hw/pci/pci_device.h
index d3dd0f64b2..253b48a688 100644
--- a/include/hw/pci/pci_device.h
+++ b/include/hw/pci/pci_device.h
@@ -160,6 +160,9 @@ struct PCIDevice {
     /* ID of standby device in net_failover pair */
     char *failover_pair_id;
     uint32_t acpi_index;
+
+    /* Maximum DMA bounce buffer size used for indirect memory map requests */
+    uint32_t max_bounce_buffer_size;
 };
 
 static inline int pci_intx(PCIDevice *pci_dev)
diff --git a/system/memory.c b/system/memory.c
index 642a449f8c..c288ed354a 100644
--- a/system/memory.c
+++ b/system/memory.c
@@ -3174,7 +3174,8 @@ void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
     as->ioeventfds = NULL;
     QTAILQ_INIT(&as->listeners);
     QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
-