Re: [Xenomai-core] Kernel crash with Xenomai (caused by fork?)

2008-03-31 Thread Gilles Chanteperdrix
Tomas Kalibera wrote:
  
  Crashed on the very same line as before
  Tomas

Ok. Let us look for unbalanced kmap_atomics then. Try this patch instead.

-- 


Gilles.
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 1c3bf95..a78494e 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -1,6 +1,11 @@
 #include linux/highmem.h
 #include linux/module.h
 
+static struct {
+   const char *file;
+   unsigned line;
+} last_km_user0 [NR_CPUS];
+
 void *kmap(struct page *page)
 {
might_sleep();
@@ -26,7 +31,8 @@ void kunmap(struct page *page)
  * However when holding an atomic kmap is is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *_kmap_atomic_prot(struct page *page, enum km_type type,
+   pgprot_t prot, const char *file, unsigned line)
 {
enum fixed_addresses idx;
unsigned long vaddr;
@@ -39,7 +45,17 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, 
pgprot_t prot)
 
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-   BUG_ON(!pte_none(*(kmap_pte-idx)));
+   if (!pte_none(*(kmap_pte-idx))) {
+   if (type == KM_USER0)
+   printk(KM_USER0 already mapped at %s:%d\n,
+  last_km_user0[smp_processor_id()].file,
+  last_km_user0[smp_processor_id()].line);
+   BUG();
+   } else if (type == KM_USER0) {
+   last_km_user0[smp_processor_id()].file = file;
+   last_km_user0[smp_processor_id()].line = line;
+   }
+
set_pte(kmap_pte-idx, mk_pte(page, prot));
arch_flush_lazy_mmu_mode();
 
@@ -70,6 +86,10 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
BUG_ON(vaddr = (unsigned long)high_memory);
 #endif
}
+   if (type == KM_USER0) {
+   last_km_user0[smp_processor_id()].file = NULL;
+   last_km_user0[smp_processor_id()].line = 0;
+   }
 
arch_flush_lazy_mmu_mode();
pagefault_enable();
@@ -78,7 +98,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 /* This is the same as kmap_atomic() but can map memory that doesn't
  * have a struct page associated with it.
  */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *_kmap_atomic_pfn(unsigned long pfn, enum km_type type,
+  const char *file, unsigned line)
 {
enum fixed_addresses idx;
unsigned long vaddr;
@@ -87,6 +108,16 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+   if (!pte_none(*(kmap_pte-idx))) {
+   if (type == KM_USER0)
+   printk(KM_USER0 already mapped at %s:%d\n,
+  last_km_user0[smp_processor_id()].file,
+  last_km_user0[smp_processor_id()].line);
+   BUG();
+   } else if (type == KM_USER0) {
+   last_km_user0[smp_processor_id()].file = file;
+   last_km_user0[smp_processor_id()].line = line;
+   }
set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
arch_flush_lazy_mmu_mode();
 
diff --git a/include/asm-x86/highmem.h b/include/asm-x86/highmem.h
index 13cdcd6..57b89f7 100644
--- a/include/asm-x86/highmem.h
+++ b/include/asm-x86/highmem.h
@@ -68,10 +68,16 @@ extern void FASTCALL(kunmap_high(struct page *page));
 
 void *kmap(struct page *page);
 void kunmap(struct page *page);
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
+void *_kmap_atomic_prot(struct page *page, enum km_type type,
+   pgprot_t prot, const char *file, unsigned line);
+#define kmap_atomic_prot(page, type, prot) \
+   _kmap_atomic_prot(page, type, prot, __FILE__, __LINE__)
 void *kmap_atomic(struct page *page, enum km_type type);
 void kunmap_atomic(void *kvaddr, enum km_type type);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+void *_kmap_atomic_pfn(unsigned long pfn, enum km_type type,
+  const char *file, unsigned line);
+#define kmap_atomic_pfn(pfn, type) \
+   _kmap_atomic_pfn(pfn, type, __FILE__, __LINE__)
 struct page *kmap_atomic_to_page(void *ptr);
 
 #ifndef CONFIG_PARAVIRT
___
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core


Re: [Xenomai-core] Kernel crash with Xenomai (caused by fork?)

2008-03-31 Thread Gilles Chanteperdrix
Gilles Chanteperdrix wrote:
  Tomas Kalibera wrote:

Crashed on the very same line as before
Tomas
  
  Ok. Let us look for unbalanced kmap_atomics then. Try this patch instead.

Just when I hit the reply button, I realize that I forgot something. So,
try this one instead.

-- 


Gilles.
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 1c3bf95..97a5242 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -1,6 +1,11 @@
 #include linux/highmem.h
 #include linux/module.h
 
+static struct {
+   const char *file;
+   unsigned line;
+} last_km_user0 [NR_CPUS];
+
 void *kmap(struct page *page)
 {
might_sleep();
@@ -26,7 +31,8 @@ void kunmap(struct page *page)
  * However when holding an atomic kmap is is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *_kmap_atomic_prot(struct page *page, enum km_type type,
+   pgprot_t prot, const char *file, unsigned line)
 {
enum fixed_addresses idx;
unsigned long vaddr;
@@ -39,16 +45,27 @@ void *kmap_atomic_prot(struct page *page, enum km_type 
type, pgprot_t prot)
 
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-   BUG_ON(!pte_none(*(kmap_pte-idx)));
+   if (!pte_none(*(kmap_pte-idx))) {
+   if (type == KM_USER0)
+   printk(KM_USER0 already mapped at %s:%d\n,
+  last_km_user0[smp_processor_id()].file,
+  last_km_user0[smp_processor_id()].line);
+   BUG();
+   } else if (type == KM_USER0) {
+   last_km_user0[smp_processor_id()].file = file;
+   last_km_user0[smp_processor_id()].line = line;
+   }
+
set_pte(kmap_pte-idx, mk_pte(page, prot));
arch_flush_lazy_mmu_mode();
 
return (void *)vaddr;
 }
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *_kmap_atomic(struct page *page, enum km_type type,
+  const char *file, unsigned line)
 {
-   return kmap_atomic_prot(page, type, kmap_prot);
+   return _kmap_atomic_prot(page, type, kmap_prot, file, line);
 }
 
 void kunmap_atomic(void *kvaddr, enum km_type type)
@@ -70,6 +87,10 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
BUG_ON(vaddr = (unsigned long)high_memory);
 #endif
}
+   if (type == KM_USER0) {
+   last_km_user0[smp_processor_id()].file = NULL;
+   last_km_user0[smp_processor_id()].line = 0;
+   }
 
arch_flush_lazy_mmu_mode();
pagefault_enable();
@@ -78,7 +99,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 /* This is the same as kmap_atomic() but can map memory that doesn't
  * have a struct page associated with it.
  */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *_kmap_atomic_pfn(unsigned long pfn, enum km_type type,
+  const char *file, unsigned line)
 {
enum fixed_addresses idx;
unsigned long vaddr;
@@ -87,6 +109,16 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+   if (!pte_none(*(kmap_pte-idx))) {
+   if (type == KM_USER0)
+   printk(KM_USER0 already mapped at %s:%d\n,
+  last_km_user0[smp_processor_id()].file,
+  last_km_user0[smp_processor_id()].line);
+   BUG();
+   } else if (type == KM_USER0) {
+   last_km_user0[smp_processor_id()].file = file;
+   last_km_user0[smp_processor_id()].line = line;
+   }
set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
arch_flush_lazy_mmu_mode();
 
diff --git a/include/asm-x86/highmem.h b/include/asm-x86/highmem.h
index 13cdcd6..db09f27 100644
--- a/include/asm-x86/highmem.h
+++ b/include/asm-x86/highmem.h
@@ -68,10 +68,19 @@ extern void FASTCALL(kunmap_high(struct page *page));
 
 void *kmap(struct page *page);
 void kunmap(struct page *page);
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
-void *kmap_atomic(struct page *page, enum km_type type);
+void *_kmap_atomic_prot(struct page *page, enum km_type type,
+   pgprot_t prot, const char *file, unsigned line);
+#define kmap_atomic_prot(page, type, prot) \
+   _kmap_atomic_prot(page, type, prot, __FILE__, __LINE__)
+void *_kmap_atomic(struct page *page, enum km_type type,
+  const char *file, unsigned line);
+#define kmap_atomic(page, type) \
+   _kmap_atomic(page, type, __FILE__, __LINE__)
 void kunmap_atomic(void *kvaddr, enum km_type type);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+void 

Re: [Xenomai-core] Kernel crash with Xenomai (caused by fork?)

2008-03-31 Thread Tomas Kalibera


I added a missing underscore and re-tried, and none of the debug 
messages was printed. I added another one to make sure that there is not 
a problem with getting printk messages to the serial console. The 
resulting highmem_32.c and the output is attached.


T


Gilles Chanteperdrix wrote:

Gilles Chanteperdrix wrote:
  Tomas Kalibera wrote:

Crashed on the very same line as before

Tomas
  
  Ok. Let us look for unbalanced kmap_atomics then. Try this patch instead.


Just when I hit the reply button, I realize that I forgot something. So,
try this one instead.

  


#include <linux/highmem.h>
#include <linux/module.h>

/*
 * Per-CPU record of the call site (file:line) of the most recent
 * KM_USER0 atomic mapping.  When an unbalanced kmap_atomic is caught
 * below (the fixmap pte is already in use), this lets us print who
 * mapped the slot last.  file == NULL means no KM_USER0 mapping is
 * currently recorded for that CPU.
 */
static struct {
	const char *file;
	unsigned line;
} last_km_user0 [NR_CPUS];

/*
 * Map a page into the kernel's persistent kmap area.  Lowmem pages
 * already have a permanent kernel virtual address and are returned
 * directly; only highmem pages need an entry in the kmap pool.
 * May sleep, so it must not be called from atomic context.
 */
void *kmap(struct page *page)
{
	might_sleep();
	return PageHighMem(page) ? kmap_high(page) : page_address(page);
}

/*
 * Undo a kmap().  Calling this from interrupt context is a bug.
 * Lowmem pages were never actually mapped by kmap(), so there is
 * nothing to release for them.
 */
void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (PageHighMem(page))
		kunmap_high(page);
}

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */

/*
 * Debug variant of kmap_atomic_prot(): @file/@line identify the caller
 * (supplied by the kmap_atomic_prot() macro via __FILE__/__LINE__).
 * For KM_USER0 mappings the call site is recorded per CPU so that, if
 * this slot's fixmap pte is found already in use (an unbalanced
 * kmap_atomic), we can report who mapped it last before BUG()ing.
 */
void *_kmap_atomic_prot(struct page *page, enum km_type type,
			pgprot_t prot, const char *file, unsigned line)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	if (!pte_none(*(kmap_pte-idx))) {
		/* Slot already mapped: identify the previous mapper, then die. */
		if (type == KM_USER0) {
			printk("KM_USER0 already mapped at %s:%u\n",
			       last_km_user0[smp_processor_id()].file,
			       last_km_user0[smp_processor_id()].line);
		} else {
			printk("type is NOT KM_USER0\n");
		}
		BUG();
	} else if (type == KM_USER0) {
		/* Remember this call site for future diagnostics. */
		last_km_user0[smp_processor_id()].file = file;
		last_km_user0[smp_processor_id()].line = line;
	}

	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

/*
 * Debug variant of kmap_atomic(): simply kmap_atomic_prot() with the
 * default kmap protection, forwarding the recorded call site.
 */
void *_kmap_atomic(struct page *page, enum km_type type,
		   const char *file, unsigned line)
{
	void *vaddr;

	vaddr = _kmap_atomic_prot(page, type, kmap_prot, file, line);
	return vaddr;
}

void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr  PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they'll try to access this pte
	 * without first remap it.  Keeping stale mappings around is a bad idea
	 * also, in case the page changes cacheability attributes or becomes
	 * a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);
	else {
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr  PAGE_OFFSET);
		BUG_ON(vaddr = (unsigned long)high_memory);
#endif
	}
	if (type == KM_USER0) {
		last_km_user0[smp_processor_id()].file = NULL;
		last_km_user0[smp_processor_id()].line = 0;
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}

/* This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 *
 * Debug variant: @file/@line identify the caller; KM_USER0 call sites
 * are recorded per CPU so an unbalanced mapping can be diagnosed.
 */
void *_kmap_atomic_pfn(unsigned long pfn, enum km_type type,
		   const char *file, unsigned line)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	if (!pte_none(*(kmap_pte-idx))) {
		/* Slot already mapped: report the previous mapper, then die. */
		if (type == KM_USER0)
			printk("KM_USER0 already mapped at %s:%u\n",
			       last_km_user0[smp_processor_id()].file,
			       last_km_user0[smp_processor_id()].line);
		BUG();
	} else if (type == KM_USER0) {
		/* Remember this call site for future diagnostics. */
		last_km_user0[smp_processor_id()].file = file;
		last_km_user0[smp_processor_id()].line = line;
	}
	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
	arch_flush_lazy_mmu_mode();

	return (void*) vaddr;
}

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr  FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

/*
 * Module exports.  Note that the debug entry point _kmap_atomic is
 * exported (not kmap_atomic): callers reach it through the
 * kmap_atomic() macro in highmem.h, which appends __FILE__/__LINE__.
 */
EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(_kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
EXPORT_SYMBOL(kmap_atomic_to_page);
[  255.285392] [ cut here ]
[  255.289992] kernel BUG at arch/x86/mm/highmem_32.c:56!
[  255.295107] invalid opcode:  [#1] PREEMPT SMP 
[  255.299901] Modules linked in: rfcomm l2cap bluetooth ppdev sbp2 ipv6 
parport_pc lp parport pcspkr iTCO_wdt iTCO_vendor_se
[  255.327057] 
[  255.328538] Pid: 4986, comm: ovmtask Not tainted (2.6.24.3xenomaip3 #2)
[  255.335123] EIP: 0060:[c011a966] EFLAGS: 00010286 CPU: 0
[  255.340588] EIP is at _kmap_atomic_prot+0xa6/0x120
[  255.345356] EAX: 0027 EBX: c2b27520 ECX:  

Re: [Xenomai-core] Kernel crash with Xenomai (caused by fork?)

2008-03-31 Thread Gilles Chanteperdrix
Tomas Kalibera wrote:
  
  I added a missing underscore and re-tried, and none of the debug 
  messages was printed. I added another one to make sure that there is not 
  a problem with getting printk messages to the serial console. The 
  resulting highmem_32.c and the output is attached.
  
  T

The interesting part of the output is the printk which occurs right
before the first bug, what happens afterwards is of little use. Do you
get any output before the first bug ?

-- 


Gilles.

___
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core