Modified: trunk/arch/blackfin/Kconfig (6464 => 6465)
--- trunk/arch/blackfin/Kconfig 2009-05-24 21:31:33 UTC (rev 6464)
+++ trunk/arch/blackfin/Kconfig 2009-05-25 04:27:22 UTC (rev 6465)
@@ -241,12 +241,6 @@
depends on SMP
default y
-config TICK_SOURCE_SYSTMR0
- bool
- select BFIN_GPTIMERS
- depends on SMP
- default y
-
config BF_REV_MIN
int
default 0 if (BF51x || BF52x || (BF54x && !BF54xM))
Modified: trunk/arch/blackfin/include/asm/pda.h (6464 => 6465)
--- trunk/arch/blackfin/include/asm/pda.h 2009-05-24 21:31:33 UTC (rev 6464)
+++ trunk/arch/blackfin/include/asm/pda.h 2009-05-25 04:27:22 UTC (rev 6465)
@@ -64,8 +64,6 @@
extern struct blackfin_pda cpu_pda[];
-void reserve_pda(void);
-
#endif /* __ASSEMBLY__ */
#endif /* _ASM_BLACKFIN_PDA_H */
Modified: trunk/arch/blackfin/kernel/time.c (6464 => 6465)
--- trunk/arch/blackfin/kernel/time.c 2009-05-24 21:31:33 UTC (rev 6464)
+++ trunk/arch/blackfin/kernel/time.c 2009-05-25 04:27:22 UTC (rev 6465)
@@ -31,7 +31,7 @@
#endif
};
-#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
+#if defined(CONFIG_TICKSOURCE_GPTMR0) || defined(CONFIG_IPIPE)
void __init setup_system_timer0(void)
{
/* Power down the core timer, just to play safe. */
@@ -74,7 +74,7 @@
static void __init
time_sched_init(irqreturn_t(*timer_routine) (int, void *))
{
-#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
+#if defined(CONFIG_TICKSOURCE_GPTMR0) || defined(CONFIG_IPIPE)
setup_system_timer0();
bfin_timer_irq.handler = timer_routine;
setup_irq(IRQ_TIMER0, &bfin_timer_irq);
@@ -94,7 +94,7 @@
unsigned long offset;
unsigned long clocks_per_jiffy;
-#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
+#if defined(CONFIG_TICKSOURCE_GPTMR0) || defined(CONFIG_IPIPE)
clocks_per_jiffy = bfin_read_TIMER0_PERIOD();
offset = bfin_read_TIMER0_COUNTER() / \
(((clocks_per_jiffy + 1) * HZ) / USEC_PER_SEC);
@@ -133,7 +133,7 @@
static long last_rtc_update;
write_seqlock(&xtime_lock);
-#if defined(CONFIG_TICK_SOURCE_SYSTMR0) && !defined(CONFIG_IPIPE)
+#if defined(CONFIG_TICKSOURCE_GPTMR0) && !defined(CONFIG_IPIPE)
/*
* TIMIL0 is latched in __ipipe_grab_irq() when the I-Pipe is
* enabled.
@@ -159,7 +159,7 @@
/* Do it again in 60s. */
last_rtc_update = xtime.tv_sec - 600;
}
-#if defined(CONFIG_TICK_SOURCE_SYSTMR0) && !defined(CONFIG_IPIPE)
+#if defined(CONFIG_TICKSOURCE_GPTMR0) && !defined(CONFIG_IPIPE)
set_gptimer_status(0, TIMER_STATUS_TIMIL0);
}
#endif
Modified: trunk/arch/blackfin/mach-bf561/smp.c (6464 => 6465)
--- trunk/arch/blackfin/mach-bf561/smp.c 2009-05-24 21:31:33 UTC (rev 6464)
+++ trunk/arch/blackfin/mach-bf561/smp.c 2009-05-25 04:27:22 UTC (rev 6465)
@@ -133,7 +133,7 @@
int ret;
ret = request_irq(IRQ_SUPPLE_0, handler, IRQF_DISABLED,
- "SMP interrupt", handler);
+ "Supplemental Interrupt0", handler);
if (ret)
panic("Cannot request supplemental interrupt 0 for IPI service");
}
Modified: trunk/arch/blackfin/mach-common/ints-priority.c (6464 => 6465)
--- trunk/arch/blackfin/mach-common/ints-priority.c 2009-05-24 21:31:33 UTC (rev 6464)
+++ trunk/arch/blackfin/mach-common/ints-priority.c 2009-05-25 04:27:22 UTC (rev 6465)
@@ -1052,7 +1052,7 @@
set_irq_chained_handler(irq, bfin_demux_error_irq);
break;
#endif
-#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
+#if defined(CONFIG_TICKSOURCE_GPTMR0) || defined(CONFIG_IPIPE)
case IRQ_TIMER0:
set_irq_handler(irq, handle_percpu_irq);
break;
Modified: trunk/arch/blackfin/mach-common/smp.c (6464 => 6465)
--- trunk/arch/blackfin/mach-common/smp.c 2009-05-24 21:31:33 UTC (rev 6464)
+++ trunk/arch/blackfin/mach-common/smp.c 2009-05-25 04:27:22 UTC (rev 6465)
@@ -369,9 +369,6 @@
bfin_write_ILAT(ilat);
CSYNC();
- /* Reserve the PDA space for the secondary CPU. */
- reserve_pda();
-
/* Enable interrupt levels IVG7-15. IARs have been already
* programmed by the boot CPU. */
bfin_irq_flags |= IMASK_IVG15 |
Modified: trunk/arch/blackfin/mm/blackfin_sram.h (6464 => 6465)
--- trunk/arch/blackfin/mm/blackfin_sram.h 2009-05-24 21:31:33 UTC (rev 6464)
+++ trunk/arch/blackfin/mm/blackfin_sram.h 2009-05-25 04:27:22 UTC (rev 6465)
@@ -30,7 +30,6 @@
#ifndef __BLACKFIN_SRAM_H__
#define __BLACKFIN_SRAM_H__
-extern void bfin_sram_init(void);
extern void *l1sram_alloc(size_t);
#endif
Modified: trunk/arch/blackfin/mm/init.c (6464 => 6465)
--- trunk/arch/blackfin/mm/init.c 2009-05-24 21:31:33 UTC (rev 6464)
+++ trunk/arch/blackfin/mm/init.c 2009-05-25 04:27:22 UTC (rev 6465)
@@ -108,12 +108,6 @@
#endif
}
-void __cpuinit reserve_pda(void)
-{
- printk(KERN_INFO "PDA for CPU%u reserved at %p\n", smp_processor_id(),
- &cpu_pda[smp_processor_id()]);
-}
-
void __init mem_init(void)
{
unsigned int codek = 0, datak = 0, initk = 0;
@@ -155,19 +149,6 @@
initk, codek, datak, DMA_UNCACHED_REGION >> 10, (reservedpages << (PAGE_SHIFT-10)));
}
-static int __init sram_init(void)
-{
- /* Initialize the blackfin L1 Memory. */
- bfin_sram_init();
-
- /* Reserve the PDA space for the boot CPU right after we
- * initialized the scratch memory allocator.
- */
- reserve_pda();
- return 0;
-}
-pure_initcall(sram_init);
-
static void __init free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
unsigned long addr;
Modified: trunk/arch/blackfin/mm/sram-alloc.c (6464 => 6465)
--- trunk/arch/blackfin/mm/sram-alloc.c 2009-05-24 21:31:33 UTC (rev 6464)
+++ trunk/arch/blackfin/mm/sram-alloc.c 2009-05-25 04:27:22 UTC (rev 6465)
@@ -83,6 +83,14 @@
static void __init l1sram_init(void)
{
unsigned int cpu;
+ unsigned long reserve;
+
+#ifdef CONFIG_SMP
+ reserve = 0;
+#else
+ reserve = sizeof(struct l1_scratch_task_info);
+#endif
+
for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
per_cpu(free_l1_ssram_head, cpu).next =
kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
@@ -91,8 +99,8 @@
return;
}
- per_cpu(free_l1_ssram_head, cpu).next->paddr = (void *)get_l1_scratch_start_cpu(cpu);
- per_cpu(free_l1_ssram_head, cpu).next->size = L1_SCRATCH_LENGTH;
+ per_cpu(free_l1_ssram_head, cpu).next->paddr = (void *)get_l1_scratch_start_cpu(cpu) + reserve;
+ per_cpu(free_l1_ssram_head, cpu).next->size = L1_SCRATCH_LENGTH - reserve;
per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
per_cpu(free_l1_ssram_head, cpu).next->next = NULL;
@@ -223,7 +231,7 @@
spin_lock_init(&l2_sram_lock);
}
-void __init bfin_sram_init(void)
+static int __init bfin_sram_init(void)
{
sram_piece_cache = kmem_cache_create("sram_piece_cache",
sizeof(struct sram_piece),
@@ -233,7 +241,10 @@
l1_data_sram_init();
l1_inst_sram_init();
l2_sram_init();
+
+ return 0;
}
+pure_initcall(bfin_sram_init);
/* SRAM allocate function */
static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,