Hi,

This patch modifies tmpfs to keep a reference to each memory object
created by the user (by mapping it into tmpfs's own address space), so
the objects don't get immediately terminated at the end of the current
operation.
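
For reference, the trick is simply to keep a page of the object mapped
(with no access) in our own address space, so the kernel sees a live
mapping and doesn't terminate the object.  A rough standalone sketch of
holding and dropping such a reference, with made-up helper names that
are not part of the patch:

#include <mach.h>

/* Hypothetical helpers illustrating the technique; the patch below
   does the equivalent inline in tmpfs/node.c.  */

/* Hold a reference to the memory object MEMOBJ by mapping one page
   of it, with no access, into our own address space.  The mapping
   address is returned through *REF so it can be released later.  */
static kern_return_t
hold_memobj_ref (mach_port_t memobj, vm_address_t *ref)
{
  *ref = 0;
  return vm_map (mach_task_self (), ref, vm_page_size, 0, 1,
                 memobj, 0, 0, VM_PROT_NONE, VM_PROT_NONE,
                 VM_INHERIT_NONE);
}

/* Drop the reference: remove the mapping, then release our send
   right on the object's port.  */
static void
drop_memobj_ref (mach_port_t memobj, vm_address_t ref)
{
  vm_deallocate (mach_task_self (), ref, vm_page_size);
  mach_port_deallocate (mach_task_self (), memobj);
}

The page is never actually touched; mapping it with VM_PROT_NONE is
enough to keep the object referenced for the lifetime of the node.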

Used in conjunction with "mach_defpager: fix external objects
interface" and "tmpfs: add padding to tmpfs_dirent structure", it
should put tmpfs in working condition.

diff -du hurd-deb.orig/tmpfs/node.c hurd/tmpfs/node.c
--- hurd-deb.orig/tmpfs/node.c	2010-05-06 11:37:31.000000000 +0200
+++ hurd/tmpfs/node.c	2010-05-18 13:43:27.000000000 +0200
@@ -61,8 +61,10 @@
   switch (np->dn->type)
     {
     case DT_REG:
-      if (np->dn->u.reg.memobj != MACH_PORT_NULL)
+      if (np->dn->u.reg.memobj != MACH_PORT_NULL) {
+	vm_deallocate (mach_task_self (), np->dn->u.reg.memref, 4096);
 	mach_port_deallocate (mach_task_self (), np->dn->u.reg.memobj);
+      }	
       break;
     case DT_DIR:
       assert (np->dn->u.dir.entries == 0);
@@ -489,7 +491,7 @@
     {
       error_t err = default_pager_object_create (default_pager,
 						 &np->dn->u.reg.memobj,
-						 np->allocsize);
+						 vm_page_size);
       if (err)
 	{
 	  errno = err;
@@ -500,6 +502,13 @@
 	 past the specified size of the file.  */
       err = default_pager_object_set_size (np->dn->u.reg.memobj,
 					   np->allocsize);
+      assert_perror (err);
+      
+      /* XXX we need to keep a reference to the object, or GNU Mach
+	 could try to terminate it while cleaning object cache */
+      vm_map (mach_task_self (), &np->dn->u.reg.memref, 4096, 0, 1,
+	      np->dn->u.reg.memobj, 0, 0, VM_PROT_NONE, VM_PROT_NONE,
+	      VM_INHERIT_NONE);
     }
 
   /* XXX always writable */
diff -du hurd-deb.orig/tmpfs/tmpfs.h hurd/tmpfs/tmpfs.h
--- hurd-deb.orig/tmpfs/tmpfs.h	2010-05-06 11:37:31.000000000 +0200
+++ hurd/tmpfs/tmpfs.h	2010-05-18 16:13:02.000000000 +0200
@@ -47,6 +47,7 @@
     struct
     {
       mach_port_t memobj;
+      vm_address_t memref;
       unsigned int allocpages;	/* largest size while memobj was live */
     } reg;
     struct
