embeddedxen中xen的时间中断分析
需要注意的一点是,这里的timer_softirq和DOMU里面的并不一样。本文仅浅析xen里面的timer
执行顺序:
irq来了以后->time_irq_handler->调用其他函数->raise_softirq(TIMER_SOFTIRQ)->timer_softirq_action->(定时器timer->fn())->raise_softirq(SCHEDULE_SOFTIRQ)->schedule()
timer_init();------open_softirq(TIMER_SOFTIRQ, timer_softirq_action);
|
init_xen_time()//这个函数里面有很多故事
|
init_platform_timer();
|
xen_pxa_timer_init();
|
setup_irq(IRQ_OST0, &pxa_ost0_irq);//IRQ_OST0 是26
也就是安装了IRQ,那么,看一下pxa_ost0_irq
/*
 * IRQ action for PXA OS Timer 0 (IRQ_OST0).  Installed via setup_irq();
 * every OSMR0 compare match invokes pxa_ost0_interrupt below.
 */
static struct irqaction pxa_ost0_irq = {
.name = "ost0",
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
.handler = pxa_ost0_interrupt,
.dev_id = &ckevt_pxa_osmr0,
};
看到它的handler是pxa_ost0_interrupt
/*
 * Hardware IRQ handler for PXA OS Timer 0 (OSMR0 match).
 * Acknowledges the match event in the timer hardware, then hands off to
 * Xen's generic timer_interrupt(), which raises TIMER_SOFTIRQ.
 * Register write order matters: disable the channel before clearing the
 * status bit.
 */
static irqreturn_t
pxa_ost0_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
//struct clock_event_device *c = dev_id;
//static int cnt = 0;
/* Disarm the compare/match, signal the event. */
OIER &= ~OIER_E0;
OSSR = OSSR_M0;
timer_interrupt(irq, dev_id, regs);
return IRQ_HANDLED;
}
当发生时间中断的时候,会调用timer_interrupt();
/*
 * Xen's common timer ISR, called from pxa_ost0_interrupt() on every tick.
 * Bumps the jiffies counter, defers actual timer-expiry processing to
 * softirq context, and periodically services platform-timer overflow.
 */
void timer_interrupt(int irq, void *dev_id, struct xen_cpu_user_regs *regs) {
ASSERT(local_irq_is_enabled());
/* Update jiffies counter. */
(*(volatile unsigned long *) &jiffies_64)++;
/* Expired timers run later in timer_softirq_action(), not here. */
raise_softirq(TIMER_SOFTIRQ);
/* Countdown to platform-timer wrap handling.
 * NOTE(review): plt_overflow() presumably re-arms plt_overflow_jiffies;
 * not visible in this excerpt — confirm in plt_overflow(). */
if (--plt_overflow_jiffies == 0)
plt_overflow();
}
可以看到,每来一次中断,就raise_softirq();也就是响应 timer_softirq_action
我们来看一下timer_softirq_action都在做什么事情:
/*
 * TIMER_SOFTIRQ handler: pop every timer on this CPU's heap whose expiry
 * falls within TIMER_SLOP of now, run its callback, then reprogram the
 * hardware for the next pending expiry.
 */
static void timer_softirq_action(void)
{
struct timer *t, **heap;
struct timers *ts;
s_time_t now;
void (*fn)(void *);
void *data;
ts = &this_cpu(timers);
spin_lock_irq(&ts->lock);
do {
heap = ts->heap;
now = NOW();
/* heap[1] is the root (earliest expiry) of the 1-based binary heap. */
while ( (GET_HEAP_SIZE(heap) != 0) &&
((t = heap[1])->expires < (now + TIMER_SLOP)) )
{
remove_entry(heap, t);
ts->running = t;
/* Snapshot fn/data before dropping the lock: the callback may
 * re-arm or free the timer. */
fn = t->function;
data = t->data;
/* Run the callback without holding ts->lock so it may itself
 * manipulate timers. */
spin_unlock_irq(&ts->lock);
(*fn)(data);
spin_lock_irq(&ts->lock);
/* Heap may have grown while the lock was released. */
heap = ts->heap;
}
ts->running = NULL;
}
/* Arm the hardware for the next expiry (0 when the heap is empty);
 * loop again if reprogram_timer() reports failure — presumably the
 * deadline already passed. */
while ( !reprogram_timer(GET_HEAP_SIZE(heap) ? heap[1]->expires : 0) );
spin_unlock_irq(&ts->lock);
}
在一个时间中断来了以后那个ISR做的事情是(* fn)(data),即是t->function.这个function到底是哪个函数呢?---s_timer_fn().为什么呢?让我们从长计议:
在启动xen的时候,调用了scheduler_init();在scheduler_init()里面做的事情是init_timer
/* Excerpt from scheduler_init(): per-CPU setup.  Each CPU gets a
 * schedule lock and an s_timer whose callback is s_timer_fn. */
for_each_cpu ( i )
{
spin_lock_init(&per_cpu(schedule_data, i).schedule_lock);
init_timer(&per_cpu(schedule_data, i).s_timer, s_timer_fn, NULL, i);
}
也就是将定时器触发时调用的函数给确定了,是s_timer_fn.有问题么?我们看一下init_timer的定义就可以啦:
/*
 * Prepare a timer for use: clear it to a known all-zero state, then
 * record its callback, callback argument, and owning CPU.  The timer is
 * not armed here — that happens later (e.g. via set_timer()).
 */
static inline void init_timer(
    struct timer *timer,
    void (*function)(void *),
    void *data,
    unsigned int cpu)
{
    /* Zero everything first so all unset fields are well defined. */
    memset(timer, 0, sizeof(*timer));

    timer->cpu      = cpu;
    timer->data     = data;
    timer->function = function;
}
那么s_timer_fn做什么事情了?
/* The scheduler timer: force a run through the scheduler */
static void s_timer_fn(void *unused)
{
/* schedule() is the SCHEDULE_SOFTIRQ handler (registered in
 * scheduler_init()), so this defers the actual reschedule to
 * softirq context. */
raise_softirq(SCHEDULE_SOFTIRQ);
/* Bump the sched_irq perf counter. */
perfc_incr(sched_irq);
}
晕啊,又将SCHEDULE_SOFTIRQ给raise了
又一个软中断,让我们继续追踪一下SCHEDULE_SOFTIRQ吧:
记得我们上面说过在xen启动的时候,调用了scheduler_init吧,在scheduler_init中,我们做的工作是初始化了定时器,其实在初始化定时器之前,这个函数还做了另外一件事情:
open_softirq(SCHEDULE_SOFTIRQ, schedule);
是不是有点小晕,好吧,我们把scheduler_init()全部拿出来晒一晒
/* Initialise the data structures. */
void __init scheduler_init(void)
{
int i;
/* From now on, SCHEDULE_SOFTIRQ is serviced by schedule(). */
open_softirq(SCHEDULE_SOFTIRQ, schedule);
/* Per-CPU state: a schedule lock and the slice-expiry timer whose
 * callback is s_timer_fn. */
for_each_cpu ( i )
{
spin_lock_init(&per_cpu(schedule_data, i).schedule_lock);
init_timer(&per_cpu(schedule_data, i).s_timer, s_timer_fn, NULL, i);
}
/* Pick the scheduler named by opt_sched from the compiled-in table. */
for ( i = 0; schedulers[i] != NULL; i++ )
{
ops = *schedulers[i];
if ( strcmp(ops.opt_name, opt_sched) == 0 )
break;
}
/* No match is only a warning: ops keeps the last table entry copied
 * above, i.e. we fall back rather than fail. */
if ( schedulers[i] == NULL )
printk("Could not find scheduler: %s\n", opt_sched);
printk("Using scheduler: %s (%s)\n", ops.name, ops.opt_name);
SCHED_OP(init);
}
好,现在看一下schedule都做的是啥事情:
/*
 * The main function
 * - deschedule the current domain (scheduler independent).
 * - pick a new domain (scheduler dependent).
 *
 * Runs as the SCHEDULE_SOFTIRQ handler.  Despite the wording above, the
 * units actually switched here are vcpus (struct vcpu); the policy hook
 * ops.do_schedule() decides which vcpu runs next and for how long.
 */
static void schedule(void)
{
struct vcpu *prev = current, *next = NULL;
s_time_t now = NOW();
struct schedule_data *sd;
struct task_slice next_slice;
s32 r_time; /* time for new dom to run */
/* We are in softirq context, never in a hard IRQ handler. */
ASSERT(!in_irq());
ASSERT(this_cpu(mc_state).flags == 0);
perfc_incr(sched_run);
sd = &this_cpu(schedule_data);
spin_lock_irq(&sd->schedule_lock);
/* Quiesce the slice timer while deciding; re-armed below. */
stop_timer(&sd->s_timer);
/* get policy-specific decision on scheduling... */
next_slice = ops.do_schedule(now);
r_time = next_slice.time;
next = next_slice.task;
sd->curr = next;
/* Arm s_timer so s_timer_fn preempts `next` when its slice ends. */
set_timer(&sd->s_timer, now + r_time);
/* Fast path: same vcpu keeps the CPU, no context switch needed. */
if ( unlikely(prev == next) )
{
spin_unlock_irq(&sd->schedule_lock);
return continue_running(prev);
}
TRACE_2D(TRC_SCHED_SWITCH_INFPREV,
prev->domain->domain_id,
now - prev->runstate.state_entry_time);
TRACE_3D(TRC_SCHED_SWITCH_INFNEXT,
next->domain->domain_id,
(next->runstate.state == RUNSTATE_runnable) ?
(now - next->runstate.state_entry_time) : 0,
r_time);
/* Account runstate transitions for the outgoing and incoming vcpu. */
ASSERT(prev->runstate.state == RUNSTATE_running);
vcpu_runstate_change(
prev,
(test_bit(_VPF_blocked, &prev->pause_flags) ? RUNSTATE_blocked :
(vcpu_runnable(prev) ? RUNSTATE_runnable : RUNSTATE_offline)),
now);
ASSERT(next->runstate.state != RUNSTATE_running);
vcpu_runstate_change(next, RUNSTATE_running, now);
ASSERT(!next->is_running);
next->is_running = 1;
spin_unlock_irq(&sd->schedule_lock);
perfc_incr(sched_ctx);
stop_timer(&prev->periodic_timer);
/* Ensure that the domain has an up-to-date time base. */
update_vcpu_system_time(next);
vcpu_periodic_timer_work(next);
TRACE_4D(TRC_SCHED_SWITCH,
prev->domain->domain_id, prev->vcpu_id,
next->domain->domain_id, next->vcpu_id);
/* Hand the CPU over to the chosen vcpu. */
context_switch(prev, next);
}
唉,这么多东西,哥也晕了。这个调度是在调度domain还是调度vcpu还是调度一个进程呢?
评论
发表评论