Add the BAR1 user interface for CPU access to GPU video memory through
the BAR1 aperture.

Signed-off-by: Joel Fernandes <[email protected]>
---
 drivers/gpu/nova-core/driver.rs      |   1 -
 drivers/gpu/nova-core/mm/bar_user.rs | 195 +++++++++++++++++++++++++++
 drivers/gpu/nova-core/mm/mod.rs      |   1 +
 3 files changed, 196 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/nova-core/mm/bar_user.rs

diff --git a/drivers/gpu/nova-core/driver.rs b/drivers/gpu/nova-core/driver.rs
index f30ffa45cf13..d8b2e967ba4c 100644
--- a/drivers/gpu/nova-core/driver.rs
+++ b/drivers/gpu/nova-core/driver.rs
@@ -42,7 +42,6 @@ pub(crate) struct NovaCore {
 const GPU_DMA_BITS: u32 = 47;
 
 pub(crate) type Bar0 = pci::Bar<BAR0_SIZE>;
-#[expect(dead_code)]
 pub(crate) type Bar1 = pci::Bar<BAR1_SIZE>;
 
 kernel::pci_device_table!(
diff --git a/drivers/gpu/nova-core/mm/bar_user.rs b/drivers/gpu/nova-core/mm/bar_user.rs
new file mode 100644
index 000000000000..288dec0ae920
--- /dev/null
+++ b/drivers/gpu/nova-core/mm/bar_user.rs
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! BAR1 user interface for CPU access to GPU virtual memory.
+//!
+//! BAR1 provides a PCIe aperture for CPU access to GPU video memory through
+//! the GPU's MMU. The [`BarUser`] struct owns a VMM and provides BAR1-specific
+//! mapping operations with automatic cleanup.
+//!
+//! [`BarUser::map()`] returns a [`BarAccess`] object that provides read/write
+//! accessors to the mapped region. When [`BarAccess`] is dropped, the pages
+//! are automatically unmapped and the virtual range is freed.
+//!
+//! Some uses of BAR1 are:
+//! - USERD writes: CPU submits work by writing GP_PUT to userspace doorbell.
+//! - User-space mmap: Applications access GPU buffers via mmap().
+//!
+//! # Example
+//!
+//! ```ignore
+//! use crate::mm::bar_user::BarUser;
+//!
+//!     fn setup_bar1(mm: &mut GpuMm, bar1: &Bar1, pdb_addr: VramAddress) -> Result<()> {
+//!     let mut bar_user = BarUser::new(pdb_addr, MmuVersion::V2, 0x1000_0000)?;
+//!
+//!     // Map discontiguous physical pages to contiguous virtual range.
+//!     let pfns = [Pfn::new(0x100), Pfn::new(0x500), Pfn::new(0x200)];
+//!     let access = bar_user.map(mm, bar1, &pfns, true)?;
+//!
+//!     // Access the mapped region (offset is within the mapped range).
+//!     access.try_write32(0xDEAD_BEEF, 0x0)?;  // Page 0, offset 0
+//!     access.try_write32(0xCAFE_BABE, 0x1000)?;  // Page 1, offset 0
+//!
+//!     let val = access.try_read32(0x0)?;
+//!     assert_eq!(val, 0xDEAD_BEEF);
+//!
+//!     // Pages unmapped when `access` is dropped.
+//!     Ok(())
+//! }
+//! ```
+
+use kernel::{
+    gpu::buddy::AllocatedBlocks,
+    prelude::*,
+    sync::Arc, //
+};
+
+use crate::{
+    driver::Bar1,
+    mm::{
+        pagetable::MmuVersion,
+        vmm::Vmm,
+        GpuMm,
+        Pfn,
+        Vfn,
+        VirtualAddress,
+        VramAddress,
+        PAGE_SIZE, //
+    },
+};
+
+/// BAR1 user interface for virtual memory mappings.
+///
+/// Owns a VMM instance with virtual address tracking and provides
+/// BAR1-specific mapping and cleanup operations.
+pub(crate) struct BarUser {
+    vmm: Vmm,
+}
+
+impl BarUser {
+    /// Create a new [`BarUser`] with virtual address tracking.
+    pub(crate) fn new(
+        pdb_addr: VramAddress,
+        mmu_version: MmuVersion,
+        va_size: u64,
+    ) -> Result<Self> {
+        Ok(Self {
+            vmm: Vmm::new(pdb_addr, mmu_version, va_size)?,
+        })
+    }
+
+    /// Map a list of physical frame numbers to a contiguous virtual range.
+    ///
+    /// Allocates a contiguous virtual range from the VMM's virtual address range
+    /// allocator, maps each PFN to consecutive VFNs, and returns a [`BarAccess`] object
+    /// for accessing the mapped region.
+    ///
+    /// The mappings are automatically unmapped and the virtual range is freed
+    /// when the returned [`BarAccess`] is dropped.
+    pub(crate) fn map<'a>(
+        &'a mut self,
+        mm: &'a mut GpuMm,
+        bar: &'a Bar1,
+        pfns: &[Pfn],
+        writable: bool,
+    ) -> Result<BarAccess<'a>> {
+        let num_pages = pfns.len();
+        if num_pages == 0 {
+            return Err(EINVAL);
+        }
+
+        // Allocate contiguous virtual range.
+        let (vfn_start, vfn_alloc) = self.vmm.alloc_vfn_range(num_pages)?;
+
+        // Map each PFN to its corresponding VFN.
+        for (i, &pfn) in pfns.iter().enumerate() {
+            let vfn = Vfn::new(vfn_start.raw() + i as u64);
+            self.vmm.map_page(mm, vfn, pfn, writable)?;
+        }
+
+        Ok(BarAccess {
+            vmm: &mut self.vmm,
+            mm,
+            bar,
+            vfn_start,
+            num_pages,
+            _vfn_alloc: vfn_alloc,
+        })
+    }
+}
+
+/// Access object for a mapped BAR1 region.
+///
+/// Provides read/write accessors to the mapped region. When dropped, automatically
+/// unmaps all pages and frees the virtual range.
+pub(crate) struct BarAccess<'a> {
+    vmm: &'a mut Vmm,
+    mm: &'a mut GpuMm,
+    bar: &'a Bar1,
+    vfn_start: Vfn,
+    num_pages: usize,
+    /// Holds the virtual range allocation; freed when [`BarAccess`] is dropped.
+    _vfn_alloc: Arc<AllocatedBlocks>,
+}
+
+impl<'a> BarAccess<'a> {
+    /// Get the base virtual address of this mapping.
+    pub(crate) fn base(&self) -> VirtualAddress {
+        VirtualAddress::from(self.vfn_start)
+    }
+
+    /// Get the total size of the mapped region in bytes.
+    pub(crate) fn size(&self) -> usize {
+        self.num_pages * PAGE_SIZE
+    }
+
+    /// Get the starting virtual frame number.
+    pub(crate) fn vfn_start(&self) -> Vfn {
+        self.vfn_start
+    }
+
+    /// Get the number of pages in this mapping.
+    pub(crate) fn num_pages(&self) -> usize {
+        self.num_pages
+    }
+
+    /// Translate an offset within this mapping to a BAR1 aperture offset.
+    fn bar_offset(&self, offset: usize) -> Result<usize> {
+        if offset >= self.size() {
+            return Err(EINVAL);
+        }
+        Ok(self.vfn_start.raw() as usize * PAGE_SIZE + offset)
+    }
+
+    // Fallible accessors with runtime bounds checking.
+
+    /// Read a 32-bit value at the given offset.
+    pub(crate) fn try_read32(&self, offset: usize) -> Result<u32> {
+        self.bar.try_read32(self.bar_offset(offset)?)
+    }
+
+    /// Write a 32-bit value at the given offset.
+    pub(crate) fn try_write32(&self, value: u32, offset: usize) -> Result {
+        self.bar.try_write32(value, self.bar_offset(offset)?)
+    }
+
+    /// Read a 64-bit value at the given offset.
+    pub(crate) fn try_read64(&self, offset: usize) -> Result<u64> {
+        self.bar.try_read64(self.bar_offset(offset)?)
+    }
+
+    /// Write a 64-bit value at the given offset.
+    pub(crate) fn try_write64(&self, value: u64, offset: usize) -> Result {
+        self.bar.try_write64(value, self.bar_offset(offset)?)
+    }
+}
+
+impl Drop for BarAccess<'_> {
+    fn drop(&mut self) {
+        // Unmap all pages in this access range.
+        for i in 0..self.num_pages {
+            let vfn = Vfn::new(self.vfn_start.raw() + i as u64);
+            let _ = self.vmm.unmap_page(self.mm, vfn);
+        }
+    }
+}
diff --git a/drivers/gpu/nova-core/mm/mod.rs b/drivers/gpu/nova-core/mm/mod.rs
index 53d726eb7296..449c2dea3e07 100644
--- a/drivers/gpu/nova-core/mm/mod.rs
+++ b/drivers/gpu/nova-core/mm/mod.rs
@@ -4,6 +4,7 @@
 
 #![expect(dead_code)]
 
+pub(crate) mod bar_user;
 pub(crate) mod pagetable;
 pub(crate) mod pramin;
 pub(crate) mod tlb;
-- 
2.34.1

Reply via email to