Interrupt Response: From EmbeddedXen to the DOM

The initial jump:
arch/arm/kernel/entry-armv.S

__irq_svc:
    svc_entry

#ifdef CONFIG_PREEMPT
    get_thread_info tsk
    ldr r8, [tsk, #TI_PREEMPT]  @ get preempt count
    add r7, r8, #1              @ increment it
    str r7, [tsk, #TI_PREEMPT]
#endif

    irq_handler

...

__irq_usr:
    usr_entry

    get_thread_info tsk
#ifdef CONFIG_PREEMPT
    ldr r8, [tsk, #TI_PREEMPT]  @ get preempt count
    add r7, r8, #1              @ increment it
    str r7, [tsk, #TI_PREEMPT]
#endif

    irq_handler

.macro irq_handler
1:  get_irqnr_and_base r0, r6, r5, lr
    movne r1, sp
    @
    @ routine called with r0 = irq number, r1 = struct pt_regs *
    @
    adrne lr, 1b
    bne asm_do_IRQ
    .endm

Now look at asm_do_IRQ:

xen/arch/arm/hypervisor/irq.c

asmlinkage void asm_do_IRQ(unsigned int irq, struct xen_cpu_user_regs *regs)
{
    struct irqdesc *desc;

    if (irq >= NR_IRQS) {
        printk("Bad IRQ = %d\n", irq);
        return; /* don't index past the descriptor table on a bogus IRQ */
    }

    /* (GCD) CONFIG_XEN_BENCHMARK
     * GPIO37 irq#69 is decoded from interrupt #10 in the pxa_gpio_demux_handler() function
     * See file xen/arch/arm/mach-pxa/irq.c
     */

    desc = get_irq_descriptor(irq);
    desc->handle(irq, desc, regs);
}

At initialization, desc->handle is assigned level_irq_handler; the assignment is made in xen_init_IRQ(void).
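xen_init_IRQ itself is not quoted in this post; here is a minimal sketch of that assignment, assuming it simply walks the descriptor table and installs level_irq_handler as the default flow handler:

/* Sketch only -- not the actual EmbeddedXen source. Assumes every
 * descriptor gets level_irq_handler as its default flow handler. */
void xen_init_IRQ(void)
{
    unsigned int irq;

    for (irq = 0; irq < NR_IRQS; irq++) {
        struct irqdesc *desc = get_irq_descriptor(irq);
        desc->handle = level_irq_handler; /* invoked from asm_do_IRQ() */
    }
}

level_irq_handler itself: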

void level_irq_handler(unsigned int irq, struct irqdesc *desc, struct xen_cpu_user_regs *regs)
{
    irqreturn_t ret;
    irqaction_t *action;

    spin_lock(&desc->lock);
    desc->chip->ack(irq);

    if (desc->flags & IRQF_GUEST_BOUND) {
        handle_guest_bound_irq(irq);
        goto out_unlock;
    }

    if (unlikely(desc->status & IRQ_IN_PROGRESS))
        goto out_unlock;

    desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

    action = desc->action;
    if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
        desc->status |= IRQ_PENDING;
        goto out_unlock;
    }

    desc->status |= IRQ_IN_PROGRESS;
    desc->status &= ~IRQ_PENDING;
    spin_unlock(&desc->lock);

    ret = handle_event(irq, action, regs);
    if (!ret) {
        printk("Action return = %d\n", ret);
    }

    spin_lock(&desc->lock);
    desc->status &= ~IRQ_IN_PROGRESS;
    if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
        desc->chip->unmask(irq);

out_unlock:
    spin_unlock(&desc->lock);
}
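The handle_event() called above is not listed in the post. A minimal sketch, assuming it mirrors Linux's old handle_IRQ_event() and a Linux-style handler signature:

/* Sketch only, assuming handle_event mirrors Linux's old
 * handle_IRQ_event(): walk the action chain and invoke every handler
 * registered on this IRQ. */
irqreturn_t handle_event(unsigned int irq, irqaction_t *action,
                         struct xen_cpu_user_regs *regs)
{
    irqreturn_t ret, retval = IRQ_NONE;

    do {
        ret = action->handler(irq, action->dev_id, regs);
        if (ret == IRQ_HANDLED)
            retval = ret;
        action = action->next;
    } while (action);

    return retval;
}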

When Xen reaches this point the path forks: if the descriptor's flags contain IRQF_GUEST_BOUND, the interrupt is forwarded to a DOM; otherwise Xen handles it by itself.
In fact Xen handles only two interrupts by itself: the timer and the console.
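For those two, registration presumably goes through a Linux-style setup_irq(); the following sketch is purely illustrative (the handler and IRQ names are hypothetical, not quoted source):

/* Hypothetical sketch: how one of Xen's own handlers (here the timer)
 * could be registered; every name below is illustrative only. */
static struct irqaction timer_irqaction = {
    .handler = timer_interrupt, /* hypothetical handler */
    .name    = "timer",
};

void __init time_init(void)
{
    setup_irq(TIMER_IRQ, &timer_irqaction); /* fills in desc->action */
}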

As sketched above, handle_event() follows essentially the same flow as in Linux, so we will not dwell on it. Assume instead that the interrupt is flagged IRQF_GUEST_BOUND, so handle_guest_bound_irq(irq) runs:
static void handle_guest_bound_irq(unsigned int irq)
{
    int i;
    struct domain      *d;
    struct irqdesc     *desc;
    irq_guest_action_t *action;

    desc = get_irq_descriptor(irq);
    action = (irq_guest_action_t *)desc->action;

    /* Deliver to every DOM that bound this interrupt -- several DOMs can
     * bind the same IRQ. Note that Xen sends to a DOM here, not to a
     * particular VCPU. */
    for ( i = 0; i < action->nr_guests; i++ ) {
        d = action->guest[i];

        /* send HID irqs to only the foreground domain. */
        if (desc->isHIDirq && d->domain_id != (domid_t)foreground_domain) {
            continue;
        }

        if (action->ack_type == ACK_TYPE_UNMASK) {
            if (!test_and_set_bit(irq, (volatile unsigned long *)&d->pirq_mask)) {
                action->in_flight++;
            }
        }

        send_guest_pirq(d, irq);
    }
}
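How does action->guest[] get populated in the first place? That code is not part of this post; here is a hedged sketch modeled on upstream Xen's pirq_guest_bind() (allocation of the action on first bind and the sharing checks are omitted; the EmbeddedXen version may differ):

/* Sketch modeled on upstream Xen's pirq_guest_bind(); not quoted from
 * the EmbeddedXen tree. Each binding guest is appended to
 * action->guest[], which is why handle_guest_bound_irq() loops over
 * nr_guests. */
int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
{
    struct irqdesc *desc = get_irq_descriptor(irq);
    irq_guest_action_t *action = (irq_guest_action_t *)desc->action;

    spin_lock_irq(&desc->lock);
    if (!(desc->flags & IRQF_GUEST_BOUND)) {
        /* first binder switches the descriptor to guest-bound mode */
        desc->flags |= IRQF_GUEST_BOUND;
        action->nr_guests = 0;
        action->in_flight = 0;
    }
    action->guest[action->nr_guests++] = v->domain;
    spin_unlock_irq(&desc->lock);

    return 0;
}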

In xen/common/event_channel.c:
void send_guest_pirq(struct domain *d, int pirq)
{
    int port = d->pirq_to_evtchn[pirq];
    struct evtchn *chn;

    ASSERT(port != 0);

    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}

In send_guest_pirq above, the pending bit is set for the VCPU named by the channel's notify_vcpu_id inside the DOM, not for some arbitrarily chosen VCPU.
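Both mappings used here -- d->pirq_to_evtchn[] and chn->notify_vcpu_id -- are established when the guest binds the physical IRQ to an event channel. A sketch modeled on upstream Xen's evtchn_bind_pirq() in xen/common/event_channel.c (error handling omitted; the EmbeddedXen version may differ):

/* Sketch modeled on upstream Xen's evtchn_bind_pirq(); error handling
 * omitted. EVTCHNOP_bind_pirq gives the guest an event-channel port
 * for a physical IRQ. */
static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct domain *d = current->domain;
    int port = get_free_port(d);
    struct evtchn *chn = evtchn_from_port(d, port);

    chn->state  = ECS_PIRQ;
    chn->u.pirq = bind->pirq;
    /* notify_vcpu_id starts out as VCPU 0 and can be retargeted later
     * with EVTCHNOP_bind_vcpu; send_guest_pirq() honours it. */
    d->pirq_to_evtchn[bind->pirq] = port; /* reverse map used above */

    /* register this domain on the IRQ's guest list (see the
     * pirq_guest_bind() sketch earlier) */
    pirq_guest_bind(d->vcpu[0], bind->pirq, 1 /* will_share */);

    return port;
}

Back to the delivery path: evtchn_set_pending() does the actual marking.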
void evtchn_set_pending(struct vcpu *v, int port)
{
    struct domain *d = v->domain;
    shared_info_t *s = d->shared_info;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    if ( test_and_set_bit(port, __shared_info_addr(d, s, evtchn_pending)) )
        return;

    if ( !test_bit        (port, __shared_info_addr(d, s, evtchn_mask)) &&
         !test_and_set_bit(port / BITS_PER_GUEST_LONG(d),
                           vcpu_info_addr(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    /* Check if some VCPU might be polling for this event. */
    if ( unlikely(d->is_polling) )
    {
        d->is_polling = 0;
        smp_mb(); /* check vcpu poll-flags /after/ clearing domain poll-flag */
        for_each_vcpu ( d, v )
        {
            if ( !v->is_polling )
                continue;
            v->is_polling = 0;
            vcpu_unblock(v);
        }
    }
}

My understanding of this function still has a few open questions.

Next: once evtchn_set_pending() has run and the relevant bits are set for the VCPU, when does that VCPU actually get to run?

When a timer interrupt arrives, it raises SCHEDULE_SOFTIRQ, so schedule() gets executed and the VCPU in question is eventually run.
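Concretely, this wiring comes from Xen's common scheduler code; a sketch modeled on xen/common/schedule.c (the EmbeddedXen port presumably keeps this machinery):

/* Modeled on upstream Xen's xen/common/schedule.c; details of the
 * EmbeddedXen port may differ. */
static void s_timer_fn(void *unused)
{
    raise_softirq(SCHEDULE_SOFTIRQ); /* request a reschedule */
}

void __init scheduler_init(void)
{
    /* schedule() becomes the SCHEDULE_SOFTIRQ handler, so the next
     * do_softirq() pass picks the VCPU to run. */
    open_softirq(SCHEDULE_SOFTIRQ, schedule);
    /* per-CPU s_timer setup omitted */
}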


Once the VCPU has been scheduled, let us look at the interrupt path inside the DOM:


arch/arm/kernel/entry-armv-xen.S

__irq_usr:
    usr_entry

    get_thread_info tsk
#ifdef CONFIG_PREEMPT
    ldr r8, [tsk, #TI_PREEMPT]  @ get preempt count
    add r7, r8, #1              @ increment it
    str r7, [tsk, #TI_PREEMPT]
#endif

    irq_handler

...

    .align 5
__irq_svc:
    svc_entry

#ifdef CONFIG_PREEMPT
    get_thread_info tsk
    ldr r8, [tsk, #TI_PREEMPT]  @ get preempt count
    add r7, r8, #1              @ increment it
    str r7, [tsk, #TI_PREEMPT]
#endif

    irq_handler

.macro irq_handler
    mov r0, sp
    bl evtchn_do_upcall
    .endm

Now look at evtchn_do_upcall:

xen_guest/core/evtchn.c
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
    unsigned long  l1, l2;
    unsigned int   l1i, l2i, port;
    int            irq, cpu = smp_processor_id();
    shared_info_t *s = HYPERVISOR_shared_info;
    vcpu_info_t   *vcpu_info = &s->vcpu_info[cpu];

retry:
    xchg(&vcpu_info->evtchn_upcall_pending, 0);

    l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
    while (l1 != 0) {
        l1i = __ffs(l1);
        l1 &= ~(1UL << l1i);

        while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
            l2i = __ffs(l2);
            port = (l1i * BITS_PER_LONG) + l2i;

            if ((irq = evtchn_to_irq[port]) != -1) { /* translate evtchn number to irq number */
                irq_enter();
                do_IRQ(irq, regs); /* the guest's interrupt dispatch function */
                irq_exit();
            } else {
                evtchn_device_upcall(port);
            }
        }
    }

    if (vcpu_info->evtchn_upcall_pending) { /* more events arrived in the meantime */
        goto retry;
    }
}
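The only helper above that is not shown is active_evtchns(). A simplified sketch (in the real guest code it is a macro that also ANDs in the per-CPU binding mask, which is what makes the cpu argument meaningful):

/* Simplified sketch of active_evtchns(): one word of "pending and not
 * masked" bits from the shared info page. The real implementation also
 * filters by the per-CPU binding mask (cpu_evtchn_mask), omitted here,
 * so the cpu argument goes unused in this sketch. */
static inline unsigned long active_evtchns(unsigned int cpu,
                                           shared_info_t *sh,
                                           unsigned int idx)
{
    return sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx];
}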

At this point we have effectively rejoined the standard Linux path:
fastcall unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
    struct pt_regs *old_regs = set_irq_regs(regs);
    evtchn_desc_t *desc = evtchn_desc + irq;
    struct irqaction *action;
    unsigned int status = 0;

    kstat_this_cpu.irqs[irq]++;
    spin_lock(&desc->lock);

    if (desc->handler->ack)
        desc->handler->ack(irq); /* calls pirq_ack() -- xen_guest/core/evtchn.c */

    status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
    status |= IRQ_PENDING; /* we _want_ to handle it */

    action = NULL;
    if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
        action = desc->action;
        status &= ~IRQ_PENDING; /* we commit to handling */
        status |= IRQ_INPROGRESS; /* we are handling it */
    }
    desc->status = status;

    if (unlikely(!action))
        goto out;

    /*
     * Edge triggered interrupts need to remember pending events.
     * This applies to any hw interrupts that allow a second
     * instance of the same irq to arrive while we are in do_IRQ
     * or in the handler. But the code here only handles the _second_
     * instance of the irq, not the third or fourth. So it is mostly
     * useful for irq hardware that does not mask cleanly in an
     * SMP environment.
     */
    for (;;) {
        irqreturn_t ret;

        spin_unlock(&desc->lock);

        if (!(action->flags & SA_INTERRUPT))
            local_irq_enable();

        do {
            ret = action->handler(irq, action->dev_id, regs);
            if (ret == IRQ_HANDLED)
                status |= action->flags;
            action = action->next;
        } while (action);

        if (status & SA_SAMPLE_RANDOM)
            add_interrupt_randomness(irq);
        local_irq_disable();

        spin_lock(&desc->lock);
        if (likely(!(desc->status & IRQ_PENDING)))
            break;
        desc->status &= ~IRQ_PENDING;
    }
    desc->status &= ~IRQ_INPROGRESS;

out:
    /*
     * The ->end() handler has to deal with interrupts which got
     * disabled while the handler was running.
     */
    desc->handler->end(irq);
    spin_unlock(&desc->lock);

    set_irq_regs(old_regs);
    return 1;
}
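The pirq_ack() called at the top and the ->end() hook at the bottom are what close the loop with the hypervisor's ACK_TYPE_UNMASK / in_flight accounting seen earlier. A sketch modeled on mainline linux-2.6-xen-sparse drivers/xen/core/evtchn.c (the EmbeddedXen guest's xen_guest/core/evtchn.c is presumably similar):

/* Sketch modeled on mainline linux-2.6-xen-sparse
 * drivers/xen/core/evtchn.c; assumed close to the EmbeddedXen guest. */
static void ack_pirq(unsigned int irq)
{
    int evtchn = evtchn_from_irq(irq);

    if (VALID_EVTCHN(evtchn)) {
        mask_evtchn(evtchn);  /* no re-delivery while we handle it */
        clear_evtchn(evtchn); /* consume the pending bit */
    }
}

static void end_pirq(unsigned int irq)
{
    int evtchn = evtchn_from_irq(irq);

    if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED)) {
        unmask_evtchn(evtchn);
        /* tell Xen we are done, so it can clear d->pirq_mask /
         * action->in_flight and unmask the physical line */
        pirq_unmask_notify(irq_to_pirq(irq));
    }
}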

 


[Figure: diagram of the interrupt delivery path, attached in the original post]

