On Wed, 2006-12-27 at 10:25 +0100, Gilles Chanteperdrix wrote:
> M. Koehrer wrote:
> > Hi Philippe,
> >
> > I agree. To fix the root cause is actually the very best to do!
> > This eases the life of users and developers.
> >
> > Regards
> >
> > Mathias
>
> Hi Mathias,
>
> here comes a workaround for the COW issue on Linux 2.6.19. The patch
> relies on a new VM_NOCOW flag which should be set for real-time
> applications if you use Xenomai trunk.
>
> It would be nice if you could test it.
>
Here is the same patch backported to 2.6.14 for the ppc people who
experienced the same kind of issues. When enabling the nucleus debug
option (full, with queue checks), no more warning should appear in the
kernel log about threads being switched to secondary mode while the
latency test runs. The last patch is to be applied against a Xenomai
2.3.x tree to activate the feature. Feedback would be highly appreciated if you
want this patch to be merged in. TIA,
--- 2.6.14/include/linux/mm.h 2005-10-28 02:02:08.000000000 +0200
+++ 2.6.14-ipipe/include/linux/mm.h 2006-12-26 19:38:10.000000000 +0100
@@ -162,6 +162,7 @@
#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
#define VM_MAPPED_COPY 0x01000000 /* T if mapped copy of data (nommu mmap) */
+#define VM_NOCOW 0x10000000 /* Disable COW mapping for the vma */
#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
--- 2.6.14/mm/memory.c 2006-12-26 19:46:40.000000000 +0100
+++ 2.6.14-ipipe/mm/memory.c 2006-12-26 19:46:54.000000000 +0100
@@ -341,10 +341,10 @@
* but may be dropped within p[mg]d_alloc() and pte_alloc_map().
*/
-static inline void
+static inline int
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
- pte_t *dst_pte, pte_t *src_pte, unsigned long vm_flags,
- unsigned long addr)
+ pte_t *dst_pte, pte_t *src_pte, unsigned long vm_flags,
+ unsigned long addr, struct vm_area_struct *vma)
{
pte_t pte = *src_pte;
struct page *page;
@@ -362,7 +362,7 @@
}
}
set_pte_at(dst_mm, addr, dst_pte, pte);
- return;
+ return 0;
}
pfn = pte_pfn(pte);
@@ -377,7 +377,7 @@
if (!page || PageReserved(page)) {
set_pte_at(dst_mm, addr, dst_pte, pte);
- return;
+ return 0;
}
/*
@@ -385,6 +385,27 @@
* in the parent and the child
*/
if ((vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE) {
+#ifdef CONFIG_IPIPE
+ if (((vm_flags|src_mm->def_flags) & (VM_NOCOW|VM_LOCKED)) == (VM_NOCOW|VM_LOCKED)) {
+ struct page *old_page = page;
+ page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
+ if (!page)
+ return -ENOMEM;
+
+ copy_user_highpage(page, old_page, addr);
+ pte = mk_pte(page, vma->vm_page_prot);
+
+ if (vm_flags & VM_SHARED)
+ pte = pte_mkclean(pte);
+ pte = pte_mkold(pte);
+ inc_mm_counter(dst_mm, rss);
+ if (PageAnon(page))
+ inc_mm_counter(dst_mm, anon_rss);
+ set_pte_at(dst_mm, addr, dst_pte, pte);
+ page_dup_rmap(page);
+ return 0;
+ }
+#endif /* CONFIG_IPIPE */
ptep_set_wrprotect(src_mm, addr, src_pte);
pte = *src_pte;
}
@@ -402,6 +423,8 @@
inc_mm_counter(dst_mm, anon_rss);
set_pte_at(dst_mm, addr, dst_pte, pte);
page_dup_rmap(page);
+
+ return 0;
}
static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
@@ -433,7 +456,8 @@
progress++;
continue;
}
- copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vm_flags, addr);
+ if (copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vm_flags, addr, vma))
+ return -ENOMEM;
progress += 8;
} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
spin_unlock(&src_mm->page_table_lock);
--- ksrc/nucleus/shadow.c.orig 2006-12-27 11:42:50.000000000 +0100
+++ ksrc/nucleus/shadow.c 2006-12-27 11:42:55.000000000 +0100
@@ -839,6 +839,8 @@
#ifdef CONFIG_MMU
if (!(current->mm->def_flags & VM_LOCKED))
send_sig(SIGXCPU, current, 1);
+ else
+ current->mm->def_flags |= VM_NOCOW;
#endif /* CONFIG_MMU */
current->cap_effective |=
--
Philippe.
_______________________________________________
Xenomai-help mailing list
[email protected]
https://mail.gna.org/listinfo/xenomai-help