Re: v2.6.23-rc1-rt6-dw1
On Sun, 2007-07-29 at 22:20 -0700, Arjan van de Ven wrote: > > > > +/* > > + * trigger a reschedule on all other CPUs: > > + */ > > +extern void smp_send_reschedule_allbutself(void); > > + > > +/* > > + * trigger a reschedule on all other CPUs: > > + */ > > +extern void smp_send_reschedule_allbutself(void); > > + > > just to make sure the C compiler doesn't forget it ? > or maybe the parser is suffering from memory lapse ? ;) In terms of this whole patch, I should have been a little clearer about its meaning .. It represents code getting moved inside the -rt series.. For example the code above was originally in the, preempt-realtime-core.patch and I moved it to the preempt-realtime-i386.patch. So I'm not actually adding any code.. I wasn't trying to put Ingo's code up for evaluation either, since it's all still in development .. Daniel - To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to [EMAIL PROTECTED] More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: v2.6.23-rc1-rt6-dw1
On Sun, 2007-07-29 at 22:20 -0700, Arjan van de Ven wrote: +/* + * trigger a reschedule on all other CPUs: + */ +extern void smp_send_reschedule_allbutself(void); + +/* + * trigger a reschedule on all other CPUs: + */ +extern void smp_send_reschedule_allbutself(void); + just to make sure the C compiler doesn't forget it ? or maybe the parser is suffering from memory lapse ? ;) In terms of this whole patch, I should have been a little clearer about its meaning .. It represents code getting moved inside the -rt series.. For example the code above was originally in the, preempt-realtime-core.patch and I moved it to the preempt-realtime-i386.patch. So I'm not actually adding any code.. I wasn't trying to put Ingo's code up for evaluation either, since it's all still in development .. Daniel - To unsubscribe from this list: send the line unsubscribe linux-kernel in the body of a message to [EMAIL PROTECTED] More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: v2.6.23-rc1-rt6-dw1
> > +/* > + * trigger a reschedule on all other CPUs: > + */ > +extern void smp_send_reschedule_allbutself(void); > + > +/* > + * trigger a reschedule on all other CPUs: > + */ > +extern void smp_send_reschedule_allbutself(void); > + just to make sure the C compiler doesn't forget it ? or maybe the parser is suffering from memory lapse ? ;) > > -#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP > +#if defined(CONFIG_DEBUG_SPINLOCK_SLEEP) || defined(CONFIG_DEBUG_PREEMPT) why this change? the change is for spinlock debugging.. why add preempt to this? >void __might_sleep(char *file, int line); > # define might_sleep() \ > do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0) > @@ -198,6 +198,7 @@ extern void add_taint(unsigned); > /* Values used for system_state */ > extern enum system_states { > SYSTEM_BOOTING, > + SYSTEM_BOOTING_SCHEDULER_OK, what is this used for? your patch doesn't add users of this... > SYSTEM_RUNNING, > SYSTEM_HALT, > SYSTEM_POWER_OFF, > --- linux-2.6.22.orig/init/main.c > +++ linux-2.6.22/init/main.c > @@ -438,6 +438,8 @@ static void noinline __init_refok rest_i > { > int pid; > > + system_state = SYSTEM_BOOTING_SCHEDULER_OK; > + > kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND); > numa_default_policy(); > pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES); > --- linux-2.6.22.orig/lib/kernel_lock.c > +++ linux-2.6.22/lib/kernel_lock.c > @@ -35,22 +35,25 @@ DECLARE_MUTEX(kernel_sem); > * about recursion, both due to the down() and due to the enabling of > * preemption. schedule() will re-check the preemption flag after > * reacquiring the semaphore. > + * > + * Called with interrupts disabled. > */ > int __lockfunc __reacquire_kernel_lock(void) > { > struct task_struct *task = current; > int saved_lock_depth = task->lock_depth; > > + local_irq_enable(); eh... if interrupts are off here that's a bad bug. Why work around it? 
> BUG_ON(saved_lock_depth < 0); > > task->lock_depth = -1; > - __preempt_enable_no_resched(); > > down(_sem); > > - preempt_disable(); > task->lock_depth = saved_lock_depth; > > + local_irq_disable(); > + same here.. > int __lockfunc __reacquire_kernel_lock(void) > { > - while (!_raw_spin_trylock(_flag)) { > - if (test_thread_flag(TIF_NEED_RESCHED)) > - return -EAGAIN; > - cpu_relax(); > - } > + local_irq_enable(); > + _raw_spin_lock(_flag); > + local_irq_disable(); are you sure you want this semantics change? - To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to [EMAIL PROTECTED] More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: v2.6.23-rc1-rt6-dw1
> > +/* > + * trigger a reschedule on all other CPUs: > + */ > +extern void smp_send_reschedule_allbutself(void); > + > +/* > + * trigger a reschedule on all other CPUs: > + */ > +extern void smp_send_reschedule_allbutself(void); > + > > /* > * Prepare machine for booting other CPUs. > @@ -97,6 +107,7 @@ static inline int up_smp_call_function(v > 0; \ > }) > static inline void smp_send_reschedule(int cpu) { } > +static inline void smp_send_reschedule_allbutself(void) { } > #define num_booting_cpus() 1 > #define smp_prepare_boot_cpu() do {} while (0) > #define smp_call_function_single(cpuid, func, info, retry, wait) \ > :: end preempt-realtime-i386.patch :: > :: start preempt-realtime-sched.patch :: > --- linux-2.6.22.orig/include/linux/kernel.h > +++ linux-2.6.22/include/linux/kernel.h > @@ -88,7 +88,7 @@ extern int cond_resched(void); > # define might_resched() do { } while (0) > #endif > > -#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP > +#if defined(CONFIG_DEBUG_SPINLOCK_SLEEP) || defined(CONFIG_DEBUG_PREEMPT) >void __might_sleep(char *file, int line); > # define might_sleep() \ > do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0) > @@ -198,6 +198,7 @@ extern void add_taint(unsigned); > /* Values used for system_state */ > extern enum system_states { > SYSTEM_BOOTING, > + SYSTEM_BOOTING_SCHEDULER_OK, > SYSTEM_RUNNING, > SYSTEM_HALT, > SYSTEM_POWER_OFF, > --- linux-2.6.22.orig/init/main.c > +++ linux-2.6.22/init/main.c > @@ -438,6 +438,8 @@ static void noinline __init_refok rest_i > { > int pid; > > + system_state = SYSTEM_BOOTING_SCHEDULER_OK; > + > kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND); > numa_default_policy(); > pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES); > --- linux-2.6.22.orig/lib/kernel_lock.c > +++ linux-2.6.22/lib/kernel_lock.c > @@ -35,22 +35,25 @@ DECLARE_MUTEX(kernel_sem); > * about recursion, both due to the down() and due to the enabling of > * preemption. 
schedule() will re-check the preemption flag after > * reacquiring the semaphore. > + * > + * Called with interrupts disabled. > */ > int __lockfunc __reacquire_kernel_lock(void) > { > struct task_struct *task = current; > int saved_lock_depth = task->lock_depth; > > + local_irq_enable(); > BUG_ON(saved_lock_depth < 0); > > task->lock_depth = -1; > - __preempt_enable_no_resched(); > > down(_sem); > > - preempt_disable(); > task->lock_depth = saved_lock_depth; > > + local_irq_disable(); > + > return 0; > } > > @@ -67,11 +70,15 @@ void __lockfunc lock_kernel(void) > struct task_struct *task = current; > int depth = task->lock_depth + 1; > > - if (likely(!depth)) > + if (likely(!depth)) { > /* >* No recursion worries - we set up lock_depth _after_ >*/ > down(_sem); > +#ifdef CONFIG_DEBUG_RT_MUTEXES > + current->last_kernel_lock = __builtin_return_address(0); > +#endif > + } > > task->lock_depth = depth; > } > @@ -82,8 +89,12 @@ void __lockfunc unlock_kernel(void) > > BUG_ON(task->lock_depth < 0); > > - if (likely(--task->lock_depth < 0)) > + if (likely(--task->lock_depth == -1)) { > +#ifdef CONFIG_DEBUG_RT_MUTEXES > + current->last_kernel_lock = NULL; > +#endif > up(_sem); > + } > } > > #else > @@ -116,11 +127,9 @@ static __cacheline_aligned_in_smp DEFIN > */ > int __lockfunc __reacquire_kernel_lock(void) > { > - while (!_raw_spin_trylock(_flag)) { > - if (test_thread_flag(TIF_NEED_RESCHED)) > - return -EAGAIN; > - cpu_relax(); > - } > + local_irq_enable(); > + _raw_spin_lock(_flag); > + local_irq_disable(); > preempt_disable(); > return 0; > } > :: end preempt-realtime-sched.patch :: > :: start preempt-realtime-core.patch :: > diff -u linux-rt-rebase.q/include/linux/kernel.h > linux-2.6.22/include/linux/kernel.h > --- linux-rt-rebase.q/include/linux/kernel.h > +++ linux-2.6.22/include/linux/kernel.h 2007-07-27 01:32:15.0 > + > @@ -88,7 +88,7 @@ > # define might_resched() do { } while (0) > #endif > > -#if defined(CONFIG_DEBUG_SPINLOCK_SLEEP) || 
defined(CONFIG_DEBUG_PREEMPT) > +#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP >void __might_sleep(char *file, int line); > # define might_sleep() \ > do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0) > @@ -210,7 +210,6 @@ > /* Values used for system_state */ > extern enum system_states { > SYSTEM_BOOTING, > - SYSTEM_BOOTING_SCHEDULER_OK, > SYSTEM_RUNNING, > SYSTEM_HALT, > SYSTEM_POWER_OFF, > reverted: > --- linux-rt-rebase.q/include/linux/smp.h > +++ linux-rt-rebase.q.orig/include/linux/smp.h >
v2.6.23-rc1-rt6-dw1
I've released another version of the real-time tree. It's just a slightly more bisectable tree, and it includes the fixes I've just released. You can download a combined patch from, ftp://source.mvista.com/pub/dwalker/rt/patch-2.6.23-rc1-rt6-dw1 Or a broken out set from, ftp://source.mvista.com/pub/dwalker/rt/patch-2.6.23-rc1-rt6-dw1.tar.gz Below I've included a per patch delta (as good as interdiff can make). In the hopes that it will help integration. The changes are additions to preempt-realtime-sched.patch which are extracted from patches later in the series. This allows preempt-realtime-sched.patch to compile standalone .. There was also a stray hunk in the version.patch which I moved to another location. :: start series :: --- tmp/patches/series 2007-07-26 14:47:21.0 -0700 +++ patches/series 2007-07-29 19:48:07.0 -0700 @@ -670,3 +670,12 @@ schedule_on_each_cpu-enhance.patch schedule_on_each_cpu-enhance-rt.patch version.patch +add-warn-on-rcu-read-unlock-imbalance.patch +preempt-hardirqs-selects-preempt-softirqs.patch +initialize-last_tick-in-calc_load.patch +softirq-raise-wakeup-fix.patch +preempt-realtime-net-mismerge.patch +locking-cleanup.patch +pickop-rt_lock_h.patch +pickop-spinlock-rwlocks.patch +pickop-seqlocks.patch :: end series :: :: start preempt-realtime-i386.patch :: --- linux-2.6.22.orig/include/linux/smp.h 2007-07-27 00:20:49.0 + +++ linux-2.6.22/include/linux/smp.h2007-07-27 00:20:50.0 + @@ -33,6 +33,16 @@ extern void smp_send_stop(void); */ extern void smp_send_reschedule(int cpu); +/* + * trigger a reschedule on all other CPUs: + */ +extern void smp_send_reschedule_allbutself(void); + +/* + * trigger a reschedule on all other CPUs: + */ +extern void smp_send_reschedule_allbutself(void); + /* * Prepare machine for booting other CPUs. 
@@ -97,6 +107,7 @@ static inline int up_smp_call_function(v 0; \ }) static inline void smp_send_reschedule(int cpu) { } +static inline void smp_send_reschedule_allbutself(void) { } #define num_booting_cpus() 1 #define smp_prepare_boot_cpu() do {} while (0) #define smp_call_function_single(cpuid, func, info, retry, wait) \ :: end preempt-realtime-i386.patch :: :: start preempt-realtime-sched.patch :: --- linux-2.6.22.orig/include/linux/kernel.h +++ linux-2.6.22/include/linux/kernel.h @@ -88,7 +88,7 @@ extern int cond_resched(void); # define might_resched() do { } while (0) #endif -#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP +#if defined(CONFIG_DEBUG_SPINLOCK_SLEEP) || defined(CONFIG_DEBUG_PREEMPT) void __might_sleep(char *file, int line); # define might_sleep() \ do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0) @@ -198,6 +198,7 @@ extern void add_taint(unsigned); /* Values used for system_state */ extern enum system_states { SYSTEM_BOOTING, + SYSTEM_BOOTING_SCHEDULER_OK, SYSTEM_RUNNING, SYSTEM_HALT, SYSTEM_POWER_OFF, --- linux-2.6.22.orig/init/main.c +++ linux-2.6.22/init/main.c @@ -438,6 +438,8 @@ static void noinline __init_refok rest_i { int pid; + system_state = SYSTEM_BOOTING_SCHEDULER_OK; + kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND); numa_default_policy(); pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES); --- linux-2.6.22.orig/lib/kernel_lock.c +++ linux-2.6.22/lib/kernel_lock.c @@ -35,22 +35,25 @@ DECLARE_MUTEX(kernel_sem); * about recursion, both due to the down() and due to the enabling of * preemption. schedule() will re-check the preemption flag after * reacquiring the semaphore. + * + * Called with interrupts disabled. 
*/ int __lockfunc __reacquire_kernel_lock(void) { struct task_struct *task = current; int saved_lock_depth = task->lock_depth; + local_irq_enable(); BUG_ON(saved_lock_depth < 0); task->lock_depth = -1; - __preempt_enable_no_resched(); down(_sem); - preempt_disable(); task->lock_depth = saved_lock_depth; + local_irq_disable(); + return 0; } @@ -67,11 +70,15 @@ void __lockfunc lock_kernel(void) struct task_struct *task = current; int depth = task->lock_depth + 1; - if (likely(!depth)) + if (likely(!depth)) { /* * No recursion worries - we set up lock_depth _after_ */ down(_sem); +#ifdef CONFIG_DEBUG_RT_MUTEXES + current->last_kernel_lock = __builtin_return_address(0); +#endif + } task->lock_depth = depth; } @@ -82,8 +89,12 @@ void __lockfunc unlock_kernel(void) BUG_ON(task->lock_depth < 0); - if (likely(--task->lock_depth < 0)) + if (likely(--task->lock_depth == -1)) { +#ifdef CONFIG_DEBUG_RT_MUTEXES + current->last_kernel_lock = NULL; +#endif
v2.6.23-rc1-rt6-dw1
I've released another version of the real-time tree. It's just a slightly more bisectable tree, and it includes the fixes I've just released. You can download a combined patch from, ftp://source.mvista.com/pub/dwalker/rt/patch-2.6.23-rc1-rt6-dw1 Or a broken out set from, ftp://source.mvista.com/pub/dwalker/rt/patch-2.6.23-rc1-rt6-dw1.tar.gz Below I've included a per patch delta (as good as interdiff can make). In the hopes that it will help integration. The changes are additions to preempt-realtime-sched.patch which are extracted from patches later in the series. This allows preempt-realtime-sched.patch to compile standalone .. There was also a stray hunk in the version.patch which I moved to another location. :: start series :: --- tmp/patches/series 2007-07-26 14:47:21.0 -0700 +++ patches/series 2007-07-29 19:48:07.0 -0700 @@ -670,3 +670,12 @@ schedule_on_each_cpu-enhance.patch schedule_on_each_cpu-enhance-rt.patch version.patch +add-warn-on-rcu-read-unlock-imbalance.patch +preempt-hardirqs-selects-preempt-softirqs.patch +initialize-last_tick-in-calc_load.patch +softirq-raise-wakeup-fix.patch +preempt-realtime-net-mismerge.patch +locking-cleanup.patch +pickop-rt_lock_h.patch +pickop-spinlock-rwlocks.patch +pickop-seqlocks.patch :: end series :: :: start preempt-realtime-i386.patch :: --- linux-2.6.22.orig/include/linux/smp.h 2007-07-27 00:20:49.0 + +++ linux-2.6.22/include/linux/smp.h2007-07-27 00:20:50.0 + @@ -33,6 +33,16 @@ extern void smp_send_stop(void); */ extern void smp_send_reschedule(int cpu); +/* + * trigger a reschedule on all other CPUs: + */ +extern void smp_send_reschedule_allbutself(void); + +/* + * trigger a reschedule on all other CPUs: + */ +extern void smp_send_reschedule_allbutself(void); + /* * Prepare machine for booting other CPUs. 
@@ -97,6 +107,7 @@ static inline int up_smp_call_function(v 0; \ }) static inline void smp_send_reschedule(int cpu) { } +static inline void smp_send_reschedule_allbutself(void) { } #define num_booting_cpus() 1 #define smp_prepare_boot_cpu() do {} while (0) #define smp_call_function_single(cpuid, func, info, retry, wait) \ :: end preempt-realtime-i386.patch :: :: start preempt-realtime-sched.patch :: --- linux-2.6.22.orig/include/linux/kernel.h +++ linux-2.6.22/include/linux/kernel.h @@ -88,7 +88,7 @@ extern int cond_resched(void); # define might_resched() do { } while (0) #endif -#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP +#if defined(CONFIG_DEBUG_SPINLOCK_SLEEP) || defined(CONFIG_DEBUG_PREEMPT) void __might_sleep(char *file, int line); # define might_sleep() \ do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0) @@ -198,6 +198,7 @@ extern void add_taint(unsigned); /* Values used for system_state */ extern enum system_states { SYSTEM_BOOTING, + SYSTEM_BOOTING_SCHEDULER_OK, SYSTEM_RUNNING, SYSTEM_HALT, SYSTEM_POWER_OFF, --- linux-2.6.22.orig/init/main.c +++ linux-2.6.22/init/main.c @@ -438,6 +438,8 @@ static void noinline __init_refok rest_i { int pid; + system_state = SYSTEM_BOOTING_SCHEDULER_OK; + kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND); numa_default_policy(); pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES); --- linux-2.6.22.orig/lib/kernel_lock.c +++ linux-2.6.22/lib/kernel_lock.c @@ -35,22 +35,25 @@ DECLARE_MUTEX(kernel_sem); * about recursion, both due to the down() and due to the enabling of * preemption. schedule() will re-check the preemption flag after * reacquiring the semaphore. + * + * Called with interrupts disabled. 
*/ int __lockfunc __reacquire_kernel_lock(void) { struct task_struct *task = current; int saved_lock_depth = task-lock_depth; + local_irq_enable(); BUG_ON(saved_lock_depth 0); task-lock_depth = -1; - __preempt_enable_no_resched(); down(kernel_sem); - preempt_disable(); task-lock_depth = saved_lock_depth; + local_irq_disable(); + return 0; } @@ -67,11 +70,15 @@ void __lockfunc lock_kernel(void) struct task_struct *task = current; int depth = task-lock_depth + 1; - if (likely(!depth)) + if (likely(!depth)) { /* * No recursion worries - we set up lock_depth _after_ */ down(kernel_sem); +#ifdef CONFIG_DEBUG_RT_MUTEXES + current-last_kernel_lock = __builtin_return_address(0); +#endif + } task-lock_depth = depth; } @@ -82,8 +89,12 @@ void __lockfunc unlock_kernel(void) BUG_ON(task-lock_depth 0); - if (likely(--task-lock_depth 0)) + if (likely(--task-lock_depth == -1)) { +#ifdef CONFIG_DEBUG_RT_MUTEXES + current-last_kernel_lock = NULL; +#endif
Re: v2.6.23-rc1-rt6-dw1
+/* + * trigger a reschedule on all other CPUs: + */ +extern void smp_send_reschedule_allbutself(void); + +/* + * trigger a reschedule on all other CPUs: + */ +extern void smp_send_reschedule_allbutself(void); + /* * Prepare machine for booting other CPUs. @@ -97,6 +107,7 @@ static inline int up_smp_call_function(v 0; \ }) static inline void smp_send_reschedule(int cpu) { } +static inline void smp_send_reschedule_allbutself(void) { } #define num_booting_cpus() 1 #define smp_prepare_boot_cpu() do {} while (0) #define smp_call_function_single(cpuid, func, info, retry, wait) \ :: end preempt-realtime-i386.patch :: :: start preempt-realtime-sched.patch :: --- linux-2.6.22.orig/include/linux/kernel.h +++ linux-2.6.22/include/linux/kernel.h @@ -88,7 +88,7 @@ extern int cond_resched(void); # define might_resched() do { } while (0) #endif -#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP +#if defined(CONFIG_DEBUG_SPINLOCK_SLEEP) || defined(CONFIG_DEBUG_PREEMPT) void __might_sleep(char *file, int line); # define might_sleep() \ do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0) @@ -198,6 +198,7 @@ extern void add_taint(unsigned); /* Values used for system_state */ extern enum system_states { SYSTEM_BOOTING, + SYSTEM_BOOTING_SCHEDULER_OK, SYSTEM_RUNNING, SYSTEM_HALT, SYSTEM_POWER_OFF, --- linux-2.6.22.orig/init/main.c +++ linux-2.6.22/init/main.c @@ -438,6 +438,8 @@ static void noinline __init_refok rest_i { int pid; + system_state = SYSTEM_BOOTING_SCHEDULER_OK; + kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND); numa_default_policy(); pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES); --- linux-2.6.22.orig/lib/kernel_lock.c +++ linux-2.6.22/lib/kernel_lock.c @@ -35,22 +35,25 @@ DECLARE_MUTEX(kernel_sem); * about recursion, both due to the down() and due to the enabling of * preemption. schedule() will re-check the preemption flag after * reacquiring the semaphore. + * + * Called with interrupts disabled. 
*/ int __lockfunc __reacquire_kernel_lock(void) { struct task_struct *task = current; int saved_lock_depth = task-lock_depth; + local_irq_enable(); BUG_ON(saved_lock_depth 0); task-lock_depth = -1; - __preempt_enable_no_resched(); down(kernel_sem); - preempt_disable(); task-lock_depth = saved_lock_depth; + local_irq_disable(); + return 0; } @@ -67,11 +70,15 @@ void __lockfunc lock_kernel(void) struct task_struct *task = current; int depth = task-lock_depth + 1; - if (likely(!depth)) + if (likely(!depth)) { /* * No recursion worries - we set up lock_depth _after_ */ down(kernel_sem); +#ifdef CONFIG_DEBUG_RT_MUTEXES + current-last_kernel_lock = __builtin_return_address(0); +#endif + } task-lock_depth = depth; } @@ -82,8 +89,12 @@ void __lockfunc unlock_kernel(void) BUG_ON(task-lock_depth 0); - if (likely(--task-lock_depth 0)) + if (likely(--task-lock_depth == -1)) { +#ifdef CONFIG_DEBUG_RT_MUTEXES + current-last_kernel_lock = NULL; +#endif up(kernel_sem); + } } #else @@ -116,11 +127,9 @@ static __cacheline_aligned_in_smp DEFIN */ int __lockfunc __reacquire_kernel_lock(void) { - while (!_raw_spin_trylock(kernel_flag)) { - if (test_thread_flag(TIF_NEED_RESCHED)) - return -EAGAIN; - cpu_relax(); - } + local_irq_enable(); + _raw_spin_lock(kernel_flag); + local_irq_disable(); preempt_disable(); return 0; } :: end preempt-realtime-sched.patch :: :: start preempt-realtime-core.patch :: diff -u linux-rt-rebase.q/include/linux/kernel.h linux-2.6.22/include/linux/kernel.h --- linux-rt-rebase.q/include/linux/kernel.h +++ linux-2.6.22/include/linux/kernel.h 2007-07-27 01:32:15.0 + @@ -88,7 +88,7 @@ # define might_resched() do { } while (0) #endif -#if defined(CONFIG_DEBUG_SPINLOCK_SLEEP) || defined(CONFIG_DEBUG_PREEMPT) +#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP void __might_sleep(char *file, int line); # define might_sleep() \ do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0) @@ -210,7 +210,6 @@ /* Values used for system_state */ extern enum system_states { 
SYSTEM_BOOTING, - SYSTEM_BOOTING_SCHEDULER_OK, SYSTEM_RUNNING, SYSTEM_HALT, SYSTEM_POWER_OFF, reverted: --- linux-rt-rebase.q/include/linux/smp.h +++ linux-rt-rebase.q.orig/include/linux/smp.h @@ -33,16 +33,6 @@ */ extern void smp_send_reschedule(int cpu); -/* - * trigger a reschedule on all other CPUs: - */ -extern void
Re: v2.6.23-rc1-rt6-dw1
+/* + * trigger a reschedule on all other CPUs: + */ +extern void smp_send_reschedule_allbutself(void); + +/* + * trigger a reschedule on all other CPUs: + */ +extern void smp_send_reschedule_allbutself(void); + just to make sure the C compiler doesn't forget it ? or maybe the parser is suffering from memory lapse ? ;) -#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP +#if defined(CONFIG_DEBUG_SPINLOCK_SLEEP) || defined(CONFIG_DEBUG_PREEMPT) why this change? the change is for spinlock debugging.. why add preempt to this? void __might_sleep(char *file, int line); # define might_sleep() \ do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0) @@ -198,6 +198,7 @@ extern void add_taint(unsigned); /* Values used for system_state */ extern enum system_states { SYSTEM_BOOTING, + SYSTEM_BOOTING_SCHEDULER_OK, what is this used for? your patch doesn't add users of this... SYSTEM_RUNNING, SYSTEM_HALT, SYSTEM_POWER_OFF, --- linux-2.6.22.orig/init/main.c +++ linux-2.6.22/init/main.c @@ -438,6 +438,8 @@ static void noinline __init_refok rest_i { int pid; + system_state = SYSTEM_BOOTING_SCHEDULER_OK; + kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND); numa_default_policy(); pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES); --- linux-2.6.22.orig/lib/kernel_lock.c +++ linux-2.6.22/lib/kernel_lock.c @@ -35,22 +35,25 @@ DECLARE_MUTEX(kernel_sem); * about recursion, both due to the down() and due to the enabling of * preemption. schedule() will re-check the preemption flag after * reacquiring the semaphore. + * + * Called with interrupts disabled. */ int __lockfunc __reacquire_kernel_lock(void) { struct task_struct *task = current; int saved_lock_depth = task-lock_depth; + local_irq_enable(); eh... if interrupts are off here that's a bad bug. Why work around it? BUG_ON(saved_lock_depth 0); task-lock_depth = -1; - __preempt_enable_no_resched(); down(kernel_sem); - preempt_disable(); task-lock_depth = saved_lock_depth; + local_irq_disable(); + same here.. 
int __lockfunc __reacquire_kernel_lock(void) { - while (!_raw_spin_trylock(kernel_flag)) { - if (test_thread_flag(TIF_NEED_RESCHED)) - return -EAGAIN; - cpu_relax(); - } + local_irq_enable(); + _raw_spin_lock(kernel_flag); + local_irq_disable(); are you sure you want this semantics change? - To unsubscribe from this list: send the line unsubscribe linux-kernel in the body of a message to [EMAIL PROTECTED] More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/