On Thu, 13 Jul 2000, Stefano Curtarolo wrote:

> Ok, I am waiting  !!

Ok...here it is.  Again, the i386-centric folk decided not to change
things for all archs...what else is new, right? :-)

At any rate, this should be it.  I haven't compiled it yet, but it should
be ok now.  I have NO idea if this will actually work, but if it does on
i386, then it should here too :-)  This is the complete diff, btw, so it
supersedes the other one that I sent.

C

diff -ruN linux-2.4.0-test3/arch/alpha/kernel/time.c linux-patched/arch/alpha/kernel/time.c
--- linux-2.4.0-test3/arch/alpha/kernel/time.c  Mon Apr 24 16:39:34 2000
+++ linux-patched/arch/alpha/kernel/time.c      Thu Jul 13 11:00:46 2000
@@ -45,7 +45,7 @@
 #include "irq_impl.h"
 
 extern rwlock_t xtime_lock;
-extern volatile unsigned long lost_ticks;      /* kernel/sched.c */
+extern unsigned long wall_jiffies;
 
 static int set_rtc_mmss(unsigned long);
 
@@ -312,7 +312,7 @@
        sec = xtime.tv_sec;
        usec = xtime.tv_usec;
        partial_tick = state.partial_tick;
-       lost = lost_ticks;
+       lost = jiffies - wall_jiffies;
 
        read_unlock_irqrestore(&xtime_lock, flags);
 
@@ -363,12 +363,12 @@
           time.  Without this, a full-tick error is possible.  */
 
 #ifdef CONFIG_SMP
-       delta_usec = lost_ticks * (1000000 / HZ);
+       delta_usec = (jiffies - wall_jiffies) * (1000000 / HZ);
 #else
        delta_usec = rpcc() - state.last_time;
        delta_usec = (delta_usec * state.scaled_ticks_per_cycle 
                      + state.partial_tick
-                     + (lost_ticks << FIX_SHIFT)) * 15625;
+                     + ((jiffies - wall_jiffies) << FIX_SHIFT)) * 15625;
        delta_usec = ((delta_usec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;
 #endif
 
diff -ruN linux-2.4.0-test3/drivers/block/md.c linux-patched/drivers/block/md.c
--- linux-2.4.0-test3/drivers/block/md.c        Fri Jul  7 18:53:58 2000
+++ linux-patched/drivers/block/md.c    Thu Jul 13 10:26:50 2000
@@ -2908,7 +2908,7 @@
         * many dirty RAID5 blocks.
         */
        current->policy = SCHED_OTHER;
-       current->priority = 40;
+       current->nice = 40;
 //     md_unlock_kernel();
 
        up(thread->sem);
@@ -3336,7 +3336,7 @@
        /*
         * Resync has low priority.
         */
-       current->priority = 1;
+       current->nice = 1;
 
        is_mddev_idle(mddev); /* this also initializes IO event counters */
        for (m = 0; m < SYNC_MARKS; m++) {
@@ -3412,7 +3412,7 @@
                currspeed = (j-mddev->resync_mark_cnt)/((jiffies-mddev->resync_mark)/HZ +1) +1;
 
                if (currspeed > sysctl_speed_limit_min) {
-                       current->priority = 1;
+                       current->nice = 1;
 
                        if ((currspeed > sysctl_speed_limit_max) ||
                                        !is_mddev_idle(mddev)) {
@@ -3422,7 +3422,7 @@
                                        goto repeat;
                        }
                } else
-                       current->priority = 40;
+                       current->nice = 40;
        }
        fsync_dev(read_disk);
        printk(KERN_INFO "md: md%d: sync done.\n",mdidx(mddev));
diff -ruN linux-2.4.0-test3/kernel/timer.c linux-patched/kernel/timer.c
--- linux-2.4.0-test3/kernel/timer.c    Mon Jul 10 17:35:19 2000
+++ linux-patched/kernel/timer.c        Thu Jul 13 10:26:50 2000
@@ -577,7 +577,7 @@
                        p->counter = 0;
                        p->need_resched = 1;
                }
-               if (p->priority < DEF_PRIORITY)
+               if (p->nice < DEF_COUNTER)
                        kstat.cpu_nice += user_tick;
                else
                        kstat.cpu_user += user_tick;

Reply via email to