Add an optional structure, controlled by TARGET_PAGE_ENTRY_EXTRA, that allows arbitrary extra data to be saved in the TLB for a given page. Set it with tlb_set_page_with_extra() and fetch it with probe_access_extra().
Signed-off-by: Richard Henderson <richard.hender...@linaro.org> --- include/exec/cpu-defs.h | 5 +++ include/exec/exec-all.h | 26 +++++++++++++-- include/qemu/typedefs.h | 1 + accel/tcg/cputlb.c | 73 ++++++++++++++++++++++++++++++----------- 4 files changed, 84 insertions(+), 21 deletions(-) diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h index ba3cd32a1e..f14586e219 100644 --- a/include/exec/cpu-defs.h +++ b/include/exec/cpu-defs.h @@ -76,6 +76,10 @@ typedef uint64_t target_ulong; #if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG) +#ifndef TARGET_PAGE_ENTRY_EXTRA +struct PageEntryExtra { }; +#endif + /* use a fully associative victim tlb of 8 entries */ #define CPU_VTLB_SIZE 8 @@ -148,6 +152,7 @@ typedef struct CPUIOTLBEntry { */ hwaddr addr; MemTxAttrs attrs; + PageEntryExtra extra; } CPUIOTLBEntry; /* diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index 311e5fb422..2c036de3d8 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -259,11 +259,12 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu, unsigned bits); /** - * tlb_set_page_with_attrs: + * tlb_set_page_with_extra: * @cpu: CPU to add this TLB entry for * @vaddr: virtual address of page to add entry for * @paddr: physical address of the page * @attrs: memory transaction attributes + * @extra: cpu specific extra information * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits) * @mmu_idx: MMU index to insert TLB entry for * @size: size of the page in bytes @@ -279,11 +280,25 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu, * At most one entry for a given virtual address is permitted. Only a * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only * used by tlb_flush_page. + * + * The @extra information is target-specific, and may be retrieved + * by calling probe_access_extra(). 
+ */ +void tlb_set_page_with_extra(CPUState *cpu, target_ulong vaddr, hwaddr paddr, + MemTxAttrs attrs, PageEntryExtra extra, + int prot, int mmu_idx, target_ulong size); + +/** + * tlb_set_page_with_attrs: + * + * This function is equivalent to calling tlb_set_page_with_extra() + * with an @extra argument of all zeros. */ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, hwaddr paddr, MemTxAttrs attrs, int prot, int mmu_idx, target_ulong size); -/* tlb_set_page: +/** + * tlb_set_page: * * This function is equivalent to calling tlb_set_page_with_attrs() * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided @@ -435,6 +450,13 @@ int probe_access_flags(CPUArchState *env, target_ulong addr, MMUAccessType access_type, int mmu_idx, bool nonfault, void **phost, uintptr_t retaddr); +#ifdef CONFIG_SOFTMMU +int probe_access_extra(CPUArchState *env, target_ulong addr, + MMUAccessType access_type, int mmu_idx, + bool nonfault, void **phost, MemTxAttrs *pattrs, + PageEntryExtra *pextra, uintptr_t retaddr); +#endif + #define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */ /* Estimated block size for TB allocation. */ diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h index 42f4ceb701..a4de3bb07c 100644 --- a/include/qemu/typedefs.h +++ b/include/qemu/typedefs.h @@ -82,6 +82,7 @@ typedef struct NodeInfo NodeInfo; typedef struct NumaNodeMem NumaNodeMem; typedef struct Object Object; typedef struct ObjectClass ObjectClass; +typedef struct PageEntryExtra PageEntryExtra; typedef struct PCIBridge PCIBridge; typedef struct PCIBus PCIBus; typedef struct PCIDevice PCIDevice; diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index f90f4312ea..05555961c9 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -1095,16 +1095,21 @@ static void tlb_add_large_page(CPUArchState *env, int mmu_idx, env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask; } -/* Add a new TLB entry. 
At most one entry for a given virtual address + * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the + * supplied size is only used by tlb_flush_page. + * + * Called from TCG-generated code, which is under an RCU read-side + * critical section. + * + * The @extra data is copied into the iotlb entry for this page; it + * can be retrieved later with probe_access_extra(). All locking of + * env_tlb(env)->c.lock is handled internally by this function. + */ -void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, - hwaddr paddr, MemTxAttrs attrs, int prot, - int mmu_idx, target_ulong size) +void tlb_set_page_with_extra(CPUState *cpu, target_ulong vaddr, hwaddr paddr, + MemTxAttrs attrs, PageEntryExtra extra, + int prot, int mmu_idx, target_ulong size) { CPUArchState *env = cpu->env_ptr; CPUTLB *tlb = env_tlb(env); @@ -1238,6 +1243,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, */ desc->iotlb[index].addr = iotlb - vaddr_page; desc->iotlb[index].attrs = attrs; + desc->iotlb[index].extra = extra; /* Now calculate the new entry */ tn.addend = addend - vaddr_page; @@ -1272,7 +1278,23 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, qemu_spin_unlock(&tlb->c.lock); } -/* Add a new TLB entry, but without specifying the memory +/* + * Add a new TLB entry, specifying the memory transaction + * attributes to be used. + */ +void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, + hwaddr paddr, MemTxAttrs attrs, int prot, + int mmu_idx, target_ulong size) +{ + PageEntryExtra extra; + + memset(&extra, 0, sizeof(extra)); + tlb_set_page_with_extra(cpu, vaddr, paddr, attrs, extra, + prot, mmu_idx, size); +} + +/* + * Add a new TLB entry, but without specifying the memory * transaction attributes to be used.
*/ void tlb_set_page(CPUState *cpu, target_ulong vaddr, @@ -1633,25 +1655,38 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr, return flags; } +int probe_access_extra(CPUArchState *env, target_ulong addr, + MMUAccessType access_type, int mmu_idx, + bool nonfault, void **phost, MemTxAttrs *pattrs, + PageEntryExtra *pextra, uintptr_t retaddr) +{ + int flags = probe_access_internal(env, addr, 0, access_type, mmu_idx, + nonfault, phost, retaddr); + + if (likely(!(flags & TLB_INVALID_MASK))) { + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; + + /* Handle clean RAM pages. */ + if (unlikely(flags & TLB_NOTDIRTY)) { + notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr); + flags &= ~TLB_NOTDIRTY; + } + *pattrs = iotlbentry->attrs; + *pextra = iotlbentry->extra; + } + return flags; +} + int probe_access_flags(CPUArchState *env, target_ulong addr, MMUAccessType access_type, int mmu_idx, bool nonfault, void **phost, uintptr_t retaddr) { - int flags; + MemTxAttrs attrs; + PageEntryExtra extra; - flags = probe_access_internal(env, addr, 0, access_type, mmu_idx, - nonfault, phost, retaddr); - - /* Handle clean RAM pages. */ - if (unlikely(flags & TLB_NOTDIRTY)) { - uintptr_t index = tlb_index(env, mmu_idx, addr); - CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; - - notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr); - flags &= ~TLB_NOTDIRTY; - } - - return flags; + return probe_access_extra(env, addr, access_type, mmu_idx, nonfault, + phost, &attrs, &extra, retaddr); } void *probe_access(CPUArchState *env, target_ulong addr, int size, -- 2.34.1