BTW, this is the first time I have worked with porting forward a kernel patch, so be gentle.
CPUutil patch against the June 9th cpuutil release
--- util/Makefile	2004-06-09 08:12:16.000000000 -0500
+++ zutil/Makefile	2005-01-15 17:45:06.000000000 -0600
@@ -4,10 +4,6 @@ FLAGS=-Wall -O3
2.6:
- sed -e 's/NRSETCPUCAP/274/g' <cap.in > cap.c
- make cap
-
-2.6.6:
sed -e 's/NRSETCPUCAP/283/g' <cap.in >cap.c
 	make cap

cpucap patch against the June 9th cpucap release for linux 2.6.9
--- linux-2.6.9/arch/i386/kernel/entry.S	2004-11-16 00:50:04.000000000 -0600
+++ linux-2.6.9x/arch/i386/kernel/entry.S	2005-01-15 17:16:22.000000000 -0600
@@ -902,6 +902,7 @@ ENTRY(sys_call_table)
.long sys_mq_timedreceive /* 280 */
.long sys_mq_notify
.long sys_mq_getsetattr
+ .long sys_cpucap /* cpucap */
.long sys_ni_syscall /* reserved for kexec */
 	.long sys_waitid

--- linux-2.6.9/include/asm-i386/unistd.h	2004-10-18 16:54:37.000000000 -0500
+++ linux-2.6.9x/include/asm-i386/unistd.h	2005-01-15 17:17:44.000000000 -0600
@@ -288,10 +288,11 @@
 #define __NR_mq_timedreceive	(__NR_mq_open+3)
 #define __NR_mq_notify		(__NR_mq_open+4)
 #define __NR_mq_getsetattr	(__NR_mq_open+5)
-#define __NR_sys_kexec_load	283
-#define __NR_waitid		284
+#define __NR_cpucap		283
+#define __NR_sys_kexec_load	284
+#define __NR_waitid		285
-#define NR_syscalls 285
+#define NR_syscalls 286
/* user-visible error numbers are in the range -1 - -124: see <asm-i386/errno.h> */
--- linux-2.6.9/include/linux/init_task.h	2004-10-18 16:53:13.000000000 -0500
+++ linux-2.6.9x/include/linux/init_task.h	2005-01-15 17:18:47.000000000 -0600
@@ -112,6 +112,9 @@ extern struct group_info init_groups;
.proc_lock = SPIN_LOCK_UNLOCKED, \
.switch_lock = SPIN_LOCK_UNLOCKED, \
.journal_info = NULL, \
+ .cap_timer = 0, \
+ .cap_left = -1, \
+ .cap_limit = HZ \
}
--- linux-2.6.9/include/linux/sched.h	2004-10-18 16:53:13.000000000 -0500
+++ linux-2.6.9x/include/linux/sched.h	2005-01-15 17:20:15.000000000 -0600
@@ -562,6 +562,11 @@ struct task_struct {
/* journalling filesystem info */
void *journal_info;
+/* cpucap */
+ unsigned long cap_timer;
+ int cap_left;
+ int cap_limit;
+ /* VM state */
struct reclaim_state *reclaim_state;
@@ -621,6 +626,8 @@ do { if (atomic_dec_and_test(&(tsk)->usa
#define PF_SWAPOFF 0x00080000 /* I am in swapoff */
#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
#define PF_SYNCWRITE 0x00200000 /* I am doing a sync write */
+#define PF_CPUCAP 0x80000000 /* cpucap */
+#ifdef CONFIG_SMP
extern int set_cpus_allowed(task_t *p, cpumask_t new_mask);
--- linux-2.6.9/kernel/sched.c 2004-10-18 16:54:55.000000000 -0500
+++ linux-2.6.9x/kernel/sched.c 2005-01-15 17:26:32.000000000 -0600
@@ -1310,6 +1310,7 @@ void fastcall sched_fork(task_t *p)
*/
local_irq_disable();
p->time_slice = (current->time_slice + 1) >> 1;
+ p->cap_left = (current->cap_left + 1) >> 1;
/*
* The remainder of the first timeslice might be recovered by
* the parent if the child exits early enough.
@@ -1317,7 +1318,9 @@ void fastcall sched_fork(task_t *p)
p->first_time_slice = 1;
current->time_slice >>= 1;
p->timestamp = sched_clock();
- if (unlikely(!current->time_slice)) {
+ current->cap_left >>= 1;
+	if (!current->time_slice ||
+	    ((p->flags & PF_CPUCAP) && (!current->cap_left))) {
/*
* This case is rare, it happens when the parent has only
* a single jiffy left from its timeslice. Taking the
@@ -2447,16 +2450,27 @@ void scheduler_tick(int user_ticks, int
}
goto out_unlock;
}
+ --p->cap_left;
if (!--p->time_slice) {
dequeue_task(p, rq->active);
set_tsk_need_resched(p);
p->prio = effective_prio(p);
p->time_slice = task_timeslice(p);
p->first_time_slice = 0;
+ if (p->flags & PF_CPUCAP) {
+ if (p->cap_left > 0){
+ if (p->time_slice > p->cap_left)
+ p->time_slice = p->cap_left;
+ } else {
+ if(p->time_slice > p->cap_limit)
+ p->time_slice = p->cap_limit;
+ }
+ }
if (!rq->expired_timestamp)
rq->expired_timestamp = jiffies;
- if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
+ if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)
+ || ((p->flags & PF_CPUCAP) && (p->cap_left <= 0))) {
enqueue_task(p, rq->expired);
if (p->static_prio < rq->best_expired_prio)
rq->best_expired_prio = p->static_prio;
@@ -2633,7 +2647,7 @@ asmlinkage void __sched schedule(void)
struct list_head *queue;
unsigned long long now;
unsigned long run_time;
- int cpu, idx;
+	int cpu, idx, rq_switch=0;

 	/*
* Test if we are atomic. Since do_exit() needs to call into
@@ -2692,7 +2706,7 @@ need_resched:
else
deactivate_task(prev, rq);
}
-
+ cpucap_sw_rq:
cpu = smp_processor_id();
if (unlikely(!rq->nr_running)) {
go_idle:
@@ -2735,9 +2749,10 @@ go_idle:
array = rq->active;
rq->expired_timestamp = 0;
rq->best_expired_prio = MAX_PRIO;
+ rq_switch=1;
} else
schedstat_inc(rq, sched_noswitch);
-
+ cpucap_sw_tsk:
idx = sched_find_first_bit(array->bitmap);
queue = array->queue + idx;
next = list_entry(queue->next, task_t, run_list);
@@ -2754,6 +2769,26 @@ go_idle:
enqueue_task(next, array);
}
next->activated = 0;
+ if (unlikely(next->flags & PF_CPUCAP)) {
+ if( next->cap_timer <= jiffies ) {
+ next->cap_timer=jiffies+HZ;
+ next->cap_left = next->cap_limit;
+ }
+ if(unlikely(!next->cap_left)) {
+ dequeue_task(next, array);
+ enqueue_task(next, rq->expired);
+ if(!unlikely(array->nr_active)){
+ if (likely(!rq_switch))
+ goto cpucap_sw_rq;
+ next = rq->idle;
+ rq->expired_timestamp=0;
+ goto switch_tasks;
+ }
+ goto cpucap_sw_tsk;
+ }
+ }
+
+
switch_tasks:
prefetch(next);
clear_tsk_need_resched(prev);
--- linux-2.6.9/kernel/sys.c 2004-10-18 16:53:13.000000000 -0500
+++ linux-2.6.9x/kernel/sys.c 2005-01-15 17:27:40.000000000 -0600
@@ -424,6 +424,49 @@ out_unlock:
 	return retval;
 }
+asmlinkage long sys_cpucap(int which, int who, int cap)
+{
+ struct task_struct *p;
+ int error;
+
+ if(which > 2 || which < 0)
+ return -EINVAL;
+ error = -ESRCH;
+ if (cap < 1)
+ cap = 1;
+ else if (cap > 100)
+ cap = 100;
+
+ cap = (cap * HZ) / 100;
+ if (cap < 1)
+ cap = 1;
+ else if (cap > HZ)
+ cap = HZ;
+
+ read_lock(&tasklist_lock);
+ for_each_process(p){
+ if (p->uid != current->euid &&
+ p->uid != current->uid && !capable(CAP_SYS_NICE)) {
+ error = -EPERM;
+ continue;
+ }
+ if (error == -ESRCH)
+ error = 0;
+ if (cap > p->cap_limit && !capable(CAP_SYS_NICE))
+ error = -EACCES;
+ else {
+ p->cap_limit = cap;
+ if (cap == HZ) {
+ p->flags &= ~PF_CPUCAP;
+ p->cap_timer = 0;
+ } else
+ p->flags |= PF_CPUCAP;
+ }
+ }
+ read_unlock(&tasklist_lock);
+ return error;
+}
+
/*
Jason

The place where you made your stand never mattered. Only that you were there... And still on your feet.
------------------------------------------------------- The SF.Net email is sponsored by: Beat the post-holiday blues Get a FREE limited edition SourceForge.net t-shirt from ThinkGeek. It's fun and FREE -- well, almost....http://www.thinkgeek.com/sfshirt _______________________________________________ User-mode-linux-user mailing list [email protected] https://lists.sourceforge.net/lists/listinfo/user-mode-linux-user
