struct timer is only 48 bytes and repl_timer has a 1-to-1 correspondence with
struct rt_private, so having it referenced by pointer is wasteful.
This avoids one memory allocation in rt_init(), and the resulting diffstat is:
add/remove: 0/0 grow/shrink: 0/7 up/down: 0/-156 (-156)
function old new delta
rt_switch_sched 134 133 -1
rt_context_saved 278 271 -7
rt_vcpu_remove 253 245 -8
rt_vcpu_sleep 234 218 -16
repl_timer_handler 761 744 -17
rt_deinit 44 20 -24
rt_init 219 136 -83
As an extra bit of cleanup noticed while making this change, there is no need
to call cpumask_clear() on a zeroed memory allocation.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Dario Faggioli <dfaggioli@suse.com>
CC: Meng Xu <mengxu@cis.upenn.edu>
Also noticed by chance while inspecting the disassembly delta for "x86/bitops:
Introduce variable/constant pairs for __{set,clear,change}_bit()"
---
xen/common/sched_rt.c | 45 +
1 file changed, 17 insertions(+), 28 deletions(-)
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index b770287..a202802 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -186,7 +186,7 @@ struct rt_private {
struct list_head runq; /* ordered list of runnable vcpus */
struct list_head depletedq; /* unordered list of depleted vcpus */
-struct timer *repl_timer; /* replenishment timer */
+struct timer repl_timer;/* replenishment timer */
struct list_head replq; /* ordered list of vcpus that need
replenishment */
cpumask_t tickled; /* cpus been tickled */
@@ -554,10 +554,10 @@ replq_remove(const struct scheduler *ops, struct rt_vcpu
*svc)
if ( !list_empty(replq) )
{
struct rt_vcpu *svc_next = replq_elem(replq->next);
-set_timer(prv->repl_timer, svc_next->cur_deadline);
+set_timer(&prv->repl_timer, svc_next->cur_deadline);
}
else
-stop_timer(prv->repl_timer);
+stop_timer(&prv->repl_timer);
}
}
@@ -597,7 +597,7 @@ replq_insert(const struct scheduler *ops, struct rt_vcpu
*svc)
* at the front of the event list.
*/
if ( deadline_replq_insert(svc, &svc->replq_elem, replq) )
-set_timer(prv->repl_timer, svc->cur_deadline);
+set_timer(&prv->repl_timer, svc->cur_deadline);
}
/*
@@ -634,7 +634,7 @@ replq_reinsert(const struct scheduler *ops, struct rt_vcpu
*svc)
rearm = deadline_replq_insert(svc, &svc->replq_elem, replq);
if ( rearm )
-set_timer(rt_priv(ops)->repl_timer, rearm_svc->cur_deadline);
+set_timer(&rt_priv(ops)->repl_timer, rearm_svc->cur_deadline);
}
/*
@@ -676,27 +676,18 @@ rt_init(struct scheduler *ops)
if ( prv == NULL )
goto err;
-prv->repl_timer = xzalloc(struct timer);
-if ( prv->repl_timer == NULL )
-goto err;
-
spin_lock_init(&prv->lock);
INIT_LIST_HEAD(&prv->sdom);
INIT_LIST_HEAD(&prv->runq);
INIT_LIST_HEAD(&prv->depletedq);
INIT_LIST_HEAD(&prv->replq);
-cpumask_clear(&prv->tickled);
-
ops->sched_data = prv;
rc = 0;
err:
-if ( rc && prv )
-{
-xfree(prv->repl_timer);
+if ( rc )
xfree(prv);
-}
return rc;
}
@@ -706,9 +697,8 @@ rt_deinit(struct scheduler *ops)
{
struct rt_private *prv = rt_priv(ops);
-ASSERT(prv->repl_timer->status == TIMER_STATUS_invalid ||
- prv->repl_timer->status == TIMER_STATUS_killed);
-xfree(prv->repl_timer);
+ASSERT(prv->repl_timer.status == TIMER_STATUS_invalid ||
+ prv->repl_timer.status == TIMER_STATUS_killed);
ops->sched_data = NULL;
xfree(prv);
@@ -731,9 +721,9 @@ rt_init_pdata(const struct scheduler *ops, void *pdata, int
cpu)
* TIMER_STATUS_invalid means we are the first cpu that sees the timer
* allocated but not initialized, and so it's up to us to initialize it.
*/
-if ( prv->repl_timer->status == TIMER_STATUS_invalid )
+if ( prv->repl_timer.status == TIMER_STATUS_invalid )
{
-init_timer(prv->repl_timer, repl_timer_handler, (void*) ops, cpu);
+init_timer(&prv->repl_timer, repl_timer_handler, (void *)ops, cpu);
dprintk(XENLOG_DEBUG, "RTDS: timer initialized on cpu %u\n", cpu);
}
@@ -769,10 +759,10 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int
cpu,
* removed (in which case we'll see TIMER_STATUS_killed), it's our
* job to (re)initialize the timer.
*/
-if ( prv->repl_timer->status == TIMER_STATUS_invalid ||
- prv->repl_timer->status == TIMER_STATUS_killed )
+if ( prv->repl_timer.status ==