From: Joan Lledó <jlle...@member.fsf.org>

Given a task and an address, it returns the pager used to map that
address in that task.

* include/mach/mach4.defs: new interface declaration: vm_pager
* vm/memory_object_proxy.h: add declaration for proxy hash functions
* vm/memory_object_proxy.c: implement proxy hash functions
  * To track the proxies so they can be looked up when needed
* vm/vm_map.c: implementation for the vm_pager RPC
* vm/vm_object.c: implement vm_object_pager
  * new util function to get the pager from a vm_object
* vm/vm_object.h: declare vm_object_pager
* vm/vm_user.c: update functions to work with the proxies hash
  * vm_map: inserts the given proxy in the hash when mapping
  * vm_deallocate: removes the proxy from the hash when unmapping
---
 include/mach/mach4.defs  |   6 ++
 vm/memory_object_proxy.c | 120 +++++++++++++++++++++++++++++++++++++++
 vm/memory_object_proxy.h |   6 ++
 vm/vm_map.c              |  46 +++++++++++++++
 vm/vm_object.c           |  31 ++++++++++
 vm/vm_object.h           |   1 +
 vm/vm_user.c             |  13 ++++-
 7 files changed, 222 insertions(+), 1 deletion(-)

diff --git a/include/mach/mach4.defs b/include/mach/mach4.defs
index 98af5905..195d6292 100644
--- a/include/mach/mach4.defs
+++ b/include/mach/mach4.defs
@@ -110,3 +110,9 @@ routine memory_object_create_proxy(
 		start		: vm_offset_array_t;
 		len		: vm_offset_array_t;
 	out	proxy		: mach_port_t);
+
+/* Get the pager where the given address is mapped to */
+routine vm_pager(
+		target_task	: vm_task_t;
+		address		: vm_address_t;
+	out	pager		: mach_port_t);

diff --git a/vm/memory_object_proxy.c b/vm/memory_object_proxy.c
index b55a17f1..fa09aa47 100644
--- a/vm/memory_object_proxy.c
+++ b/vm/memory_object_proxy.c
@@ -41,6 +41,7 @@
 #include <mach/notify.h>
 #include <mach/vm_prot.h>
 #include <kern/printf.h>
+#include <kern/queue.h>
 #include <kern/slab.h>
 #include <ipc/ipc_port.h>
 #include <ipc/ipc_space.h>
@@ -62,12 +63,131 @@ struct memory_object_proxy
 };
 typedef struct memory_object_proxy *memory_object_proxy_t;
 
+/*
+ * A hash table of ports for struct memory_object_proxy backed objects.
+ */ + +#define MEMORY_OBJECT_PROXY_HASH_COUNT 127 + +struct memory_object_proxy_entry { + queue_chain_t links; + vm_map_t map; + vm_offset_t address; + ipc_port_t proxy; +}; +typedef struct memory_object_proxy_entry *memory_object_proxy_entry_t; + +/* + * Indexed by port name, each element contains a queue of all + * memory_object_proxy_entry_t which name shares the same hash + */ +queue_head_t memory_object_proxy_hashtable[MEMORY_OBJECT_PROXY_HASH_COUNT]; +struct kmem_cache memory_object_proxy_hash_cache; +decl_simple_lock_data(, + memory_object_proxy_hash_lock) + +#define memory_object_proxy_hash(name_port) \ + (((vm_offset_t)(name_port) & 0xffffff) % MEMORY_OBJECT_PROXY_HASH_COUNT) + +static +void memory_object_proxy_hash_init(void) +{ + int i; + vm_size_t size; + + size = sizeof(struct memory_object_proxy_entry); + kmem_cache_init(&memory_object_proxy_hash_cache, + "memory_object_proxy_entry", size, 0, NULL, 0); + for (i = 0; i < MEMORY_OBJECT_PROXY_HASH_COUNT; i++) + queue_init(&memory_object_proxy_hashtable[i]); + simple_lock_init(&memory_object_proxy_hash_lock); +} + +void memory_object_proxy_hash_insert( + const vm_map_t map, + const vm_offset_t address, + const ipc_port_t proxy) +{ + memory_object_proxy_entry_t new_entry; + + new_entry = (memory_object_proxy_entry_t) kmem_cache_alloc( + &memory_object_proxy_hash_cache + ); + new_entry->map = map; + new_entry->address = address; + new_entry->proxy = proxy; + + simple_lock(&memory_object_proxy_hash_lock); + queue_enter(&memory_object_proxy_hashtable[ + memory_object_proxy_hash(map + address) + ], new_entry, memory_object_proxy_entry_t, links); + simple_unlock(&memory_object_proxy_hash_lock); +} + +void memory_object_proxy_hash_delete( + const vm_map_t map, + const vm_offset_t address) +{ + queue_t bucket; + memory_object_proxy_entry_t tmp_entry; + memory_object_proxy_entry_t entry = NULL; + + bucket = &memory_object_proxy_hashtable[ + memory_object_proxy_hash(map + address) + ]; + + 
simple_lock(&memory_object_proxy_hash_lock); + for (tmp_entry = (memory_object_proxy_entry_t)queue_first(bucket); + !queue_end(bucket, &tmp_entry->links); + tmp_entry = + (memory_object_proxy_entry_t)queue_next(&tmp_entry->links)) { + if (tmp_entry->map == map && tmp_entry->address == address) { + entry = tmp_entry; + queue_remove(bucket, + tmp_entry, memory_object_proxy_entry_t, links); + break; + } + } + simple_unlock(&memory_object_proxy_hash_lock); + if (entry) + kmem_cache_free( + &memory_object_proxy_hash_cache, (vm_offset_t)entry + ); +} + +ipc_port_t memory_object_proxy_hash_lookup( + const vm_map_t map, + const vm_offset_t address) +{ + queue_t bucket; + memory_object_proxy_entry_t entry; + ipc_port_t proxy; + + bucket = &memory_object_proxy_hashtable[ + memory_object_proxy_hash(map + address) + ]; + + simple_lock(&memory_object_proxy_hash_lock); + for (entry = (memory_object_proxy_entry_t)queue_first(bucket); + !queue_end(bucket, &entry->links); + entry = (memory_object_proxy_entry_t)queue_next(&entry->links)) { + if (entry->map == map && entry->address == address) { + proxy = entry->proxy; + simple_unlock(&memory_object_proxy_hash_lock); + return proxy; + } + } + simple_unlock(&memory_object_proxy_hash_lock); + return (IP_NULL); +} void memory_object_proxy_init (void) { kmem_cache_init (&memory_object_proxy_cache, "memory_object_proxy", sizeof (struct memory_object_proxy), 0, NULL, 0); + + memory_object_proxy_hash_init(); } /* Lookup a proxy memory object by its port. 
   */

diff --git a/vm/memory_object_proxy.h b/vm/memory_object_proxy.h
index 8b3f2025..2805aa67 100644
--- a/vm/memory_object_proxy.h
+++ b/vm/memory_object_proxy.h
@@ -36,4 +36,10 @@ extern kern_return_t memory_object_proxy_lookup (ipc_port_t port,
						 vm_offset_t *start,
						 vm_offset_t *len);
 
+extern void memory_object_proxy_hash_insert(const vm_map_t, const vm_offset_t,
+                                            const ipc_port_t);
+extern void memory_object_proxy_hash_delete(const vm_map_t, const vm_offset_t);
+extern ipc_port_t memory_object_proxy_hash_lookup(const vm_map_t,
+                                                  const vm_offset_t);
+
 #endif /* _VM_MEMORY_OBJECT_PROXY_H_ */

diff --git a/vm/vm_map.c b/vm/vm_map.c
index a687d365..50137d3e 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -50,6 +50,7 @@
 #include <vm/vm_fault.h>
 #include <vm/vm_map.h>
 #include <vm/vm_object.h>
+#include <vm/memory_object_proxy.h>
 #include <vm/vm_page.h>
 #include <vm/vm_resident.h>
 #include <vm/vm_kern.h>
@@ -4806,6 +4807,51 @@ kern_return_t vm_region(
 	return(KERN_SUCCESS);
 }
 
+/*
+ * vm_pager:
+ *
+ * User call to obtain the pager for the object the address is mapped in.
+ */
+
+kern_return_t vm_pager(
+	vm_map_t map,
+	vm_offset_t address,
+	ipc_port_t *pager)	/* OUT */
+{
+	vm_map_entry_t tmp_entry;
+	vm_map_entry_t entry;
+	ipc_port_t proxy;
+
+	if (map == VM_MAP_NULL)
+		return(KERN_INVALID_ARGUMENT);
+
+	if((proxy = memory_object_proxy_hash_lookup(map,
+						    trunc_page(address)))) {
+		*pager = ipc_port_make_send(proxy);
+		return KERN_SUCCESS;
+	}
+
+	vm_map_lock_read(map);
+	if (!vm_map_lookup_entry(map, address, &tmp_entry)) {
+		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
+			vm_map_unlock_read(map);
+			return(KERN_NO_SPACE);
+		}
+	} else {
+		entry = tmp_entry;
+	}
+
+	if (entry->is_sub_map) {
+		*pager = IP_NULL;
+	} else {
+		*pager = vm_object_pager(entry->object.vm_object);
+	}
+
+	vm_map_unlock_read(map);
+
+	return(KERN_SUCCESS);
+}
+
 /*
  *	Routine:	vm_map_simplify
  *

diff --git a/vm/vm_object.c b/vm/vm_object.c
index bbc1d6e2..d8d85322 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -2809,6 +2809,37 @@ ipc_port_t vm_object_name(
 	return p;
 }
 
+/*
+ *	Routine:	vm_object_pager
+ *	Purpose:
+ *		Returns a naked send right to the port associated
+ *		with this object.
+ */
+ipc_port_t vm_object_pager(
+	vm_object_t object)
+{
+	ipc_port_t p;
+
+	if (object == VM_OBJECT_NULL)
+		return IP_NULL;
+
+	vm_object_lock(object);
+
+	while (object->shadow != VM_OBJECT_NULL) {
+		vm_object_t new_object = object->shadow;
+		vm_object_lock(new_object);
+		vm_object_unlock(object);
+		object = new_object;
+	}
+
+	p = object->pager;
+	if (p != IP_NULL)
+		p = ipc_port_make_send(p);
+	vm_object_unlock(object);
+
+	return p;
+}
+
 /*
  * Attach a set of physical pages to an object, so that they can
  * be mapped by mapping the object.  Typically used to map IO memory.
diff --git a/vm/vm_object.h b/vm/vm_object.h
index 80d449a0..83f10eb1 100644
--- a/vm/vm_object.h
+++ b/vm/vm_object.h
@@ -202,6 +202,7 @@ extern void		vm_object_collapse(vm_object_t);
 extern vm_object_t	vm_object_lookup(struct ipc_port *);
 extern vm_object_t	vm_object_lookup_name(struct ipc_port *);
 extern struct ipc_port	*vm_object_name(vm_object_t);
+extern struct ipc_port	*vm_object_pager(vm_object_t);
 extern void		vm_object_remove(vm_object_t);
 
 extern boolean_t	vm_object_copy_temporary(

diff --git a/vm/vm_user.c b/vm/vm_user.c
index 8da8dea1..29870158 100644
--- a/vm/vm_user.c
+++ b/vm/vm_user.c
@@ -110,6 +110,8 @@ kern_return_t vm_deallocate(
 	if (size == (vm_offset_t) 0)
 		return(KERN_SUCCESS);
 
+	memory_object_proxy_hash_delete(map, trunc_page(start));
+
 	return(vm_map_remove(map, trunc_page(start),
			     round_page(start+size)));
 }
@@ -320,6 +322,7 @@ kern_return_t vm_map(
 {
 	vm_object_t	object;
 	kern_return_t	result;
+	boolean_t	is_proxy = FALSE;
 
 	if ((target_map == VM_MAP_NULL) ||
	    (cur_protection & ~VM_PROT_ALL) ||
@@ -380,6 +383,8 @@ kern_return_t vm_map(
 		if ((object = vm_object_enter(real_memobj, size, FALSE))
		    == VM_OBJECT_NULL)
 			return KERN_INVALID_ARGUMENT;
+
+		is_proxy = TRUE;
 	}
 
 	/*
@@ -413,8 +418,14 @@ kern_return_t vm_map(
 			object, offset, copy,
 			cur_protection, max_protection,
 			inheritance
-			)) != KERN_SUCCESS)
+			)) != KERN_SUCCESS) {
 		vm_object_deallocate(object);
+	} else if (is_proxy){
+		memory_object_proxy_hash_insert(target_map,
+						trunc_page(*address),
+						memory_object);
+	}
+
 	return(result);
 }
-- 
2.31.1