Witam

robię:

$./builder --with preemptrt kernel-desktop.spec

Dostaję:

[...]
+ echo Patch #0 (kernel-desktop-preempt-rt.patch):
Patch #0 (kernel-desktop-preempt-rt.patch):
+ patch -p1 -s
+ < /home/users/tomlee/rpm/SOURCES/kernel-desktop-preempt-rt.patch
1 out of 21 hunks FAILED -- saving rejects to file kernel/softirq.c.rej
błąd: Błędny status wyjścia z /var/tmp/rpm-tmp.84493 (%prep)

W załączniku przesyłam softirq.c.rej

-- 
Pozdrawiam
Tomasz Woźniak

Amatorzy zbudowali Arkę Noego - profesjonaliści Tytanika.
***************
*** 565,610 ****
  }
  #endif /* CONFIG_HOTPLUG_CPU */
  
- static int __cpuinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
  {
-       int hotcpu = (unsigned long)hcpu;
        struct task_struct *p;
  
        switch (action) {
        case CPU_UP_PREPARE:
-               BUG_ON(per_cpu(tasklet_vec, hotcpu).list);
-               BUG_ON(per_cpu(tasklet_hi_vec, hotcpu).list);
-               p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
-               if (IS_ERR(p)) {
-                       printk("ksoftirqd for %i failed\n", hotcpu);
-                       return NOTIFY_BAD;
                }
-               kthread_bind(p, hotcpu);
-               per_cpu(ksoftirqd, hotcpu) = p;
-               break;
        case CPU_ONLINE:
-               wake_up_process(per_cpu(ksoftirqd, hotcpu));
                break;
  #ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
-               if (!per_cpu(ksoftirqd, hotcpu))
-                       break;
                /* Unbind so it can run.  Fall thru. */
-               kthread_bind(per_cpu(ksoftirqd, hotcpu),
-                            any_online_cpu(cpu_online_map));
        case CPU_DEAD:
-               p = per_cpu(ksoftirqd, hotcpu);
-               per_cpu(ksoftirqd, hotcpu) = NULL;
-               kthread_stop(p);
                takeover_tasklets(hotcpu);
                break;
  #endif /* CONFIG_HOTPLUG_CPU */
-       }
        return NOTIFY_OK;
  }
  
  static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
  };
--- 820,893 ----
  }
  #endif /* CONFIG_HOTPLUG_CPU */
  
+ static const char *softirq_names [] =
+ {
+   [HI_SOFTIRQ]                = "high",
+   [TIMER_SOFTIRQ]     = "timer",
+   [NET_TX_SOFTIRQ]    = "net-tx",
+   [NET_RX_SOFTIRQ]    = "net-rx",
+   [BLOCK_SOFTIRQ]     = "block",
+   [TASKLET_SOFTIRQ]   = "tasklet",
+ #ifdef CONFIG_HIGH_RES_TIMERS
+   [HRTIMER_SOFTIRQ]   = "hrtimer",
+ #endif
+   [RCU_SOFTIRQ]               = "rcu",
+ };
+ 
+ static __cpuinit int cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
  {
+       int hotcpu = (unsigned long)hcpu, i;
        struct task_struct *p;
  
        switch (action) {
        case CPU_UP_PREPARE:
+               /* We may have tasklets already scheduled on
+                  processor 0, so don't check there. */
+               if (hotcpu != 0) {
+                       BUG_ON(per_cpu(tasklet_vec, hotcpu).list);
+                       BUG_ON(per_cpu(tasklet_hi_vec, hotcpu).list);
                }
+               for (i = 0; i < MAX_SOFTIRQ; i++) {
+                       per_cpu(ksoftirqd[i].nr, hotcpu) = i;
+                       per_cpu(ksoftirqd[i].cpu, hotcpu) = hotcpu;
+                       p = kthread_create(ksoftirqd, &per_cpu(ksoftirqd[i], hotcpu),
+                                          "softirq-%s/%d", softirq_names[i], hotcpu);
+                       if (IS_ERR(p)) {
+                               printk("ksoftirqd %d for %i failed\n", i, hotcpu);
+                               return NOTIFY_BAD;
+                       }
+                       kthread_bind(p, hotcpu);
+                       per_cpu(ksoftirqd[i].tsk, hotcpu) = p;
+               }
+               break;
        case CPU_ONLINE:
+               for (i = 0; i < MAX_SOFTIRQ; i++)
+                       wake_up_process(per_cpu(ksoftirqd[i].tsk, hotcpu));
                break;
+ 
  #ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
                /* Unbind so it can run.  Fall thru. */
+               for (i = 0; i < MAX_SOFTIRQ; i++)
+                       if (!per_cpu(ksoftirqd[i].tsk, hotcpu))
+                               continue;
+                       kthread_bind(per_cpu(ksoftirqd[i], hotcpu).tsk, any_online_cpu(cpu_online_map));
        case CPU_DEAD:
+               for (i = 0; i < MAX_SOFTIRQ; i++) {
+                       p = per_cpu(ksoftirqd[i], hotcpu).tsk;
+                       per_cpu(ksoftirqd[i], hotcpu).tsk = NULL;
+                       kthread_stop(p);
+               }
                takeover_tasklets(hotcpu);
                break;
  #endif /* CONFIG_HOTPLUG_CPU */
+       }
        return NOTIFY_OK;
  }
  
+ 
  static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
  };


_______________________________________________
pld-users-pl mailing list
[email protected]
http://lists.pld-linux.org/mailman/listinfo/pld-users-pl

Odpowiedź listem elektroniczym