xenarm Interrupt Mechanism
The xenarm project consists of two parts: Xen itself (the hypervisor) and xenolinux (the paravirtualized guest kernel).
This article covers three topics:
1. Interrupt initialization
2. Interrupt request
3. Interrupt response
--------------------------------------
Interrupt Initialization
Xen's IRQ initialization:
arch/arm/arch-imx21/start.S
    b start_xen
      |
    start_xen()                  // arch/arm/xen/xensetup.c
      |
    platform_setup()             // DECLARE_PLATFORM_OP(platform_setup, imx21ads_platform_setup);
      |                          // arch/arm/arch-imx21/platform.c
    imx21ads_platform_setup()
      |
    imx21_irq_init()
void imx21_irq_init(void)
{
    unsigned int irq;

    /* Mask all interrupts initially */
    IMR(0) = 0;
    IMR(1) = 0;
    IMR(2) = 0;
    IMR(3) = 0;
    IMR(4) = 0;
    IMR(5) = 0;

    /* On-chip interrupt sources are level-triggered */
    for (irq = 0; irq < IMX_IRQS; irq++) {
        set_irq_chip(irq, &imx21_internal_chip);
        set_irq_handler(irq, level_irq_handler);
        set_irq_flags(irq, IRQF_VALID);
    }

    /* GPIO interrupt sources are edge-triggered */
    for (irq = IRQ_GPIOA(0); irq < IRQ_GPIOF(32); irq++) {
        set_irq_chip(irq, &imx21_gpio_chip);
        set_irq_handler(irq, edge_irq_handler);
        set_irq_flags(irq, IRQF_VALID | IRQF_TRIGGER_PROBE);
    }

    /* All GPIO lines funnel into one parent interrupt */
    set_irq_chained_handler(INT_GPIO, imx21_gpio_handler);

    /* Disable all interrupts initially. */
    /* In IMX21 this is done in the bootloader. */
}
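The imx21_internal_chip and imx21_gpio_chip descriptors are not reproduced in this excerpt. As a rough sketch of what the internal chip plausibly does (an assumption based only on the IMR(0..5) usage above, not the actual xenarm source), each of the six IMR words would cover 32 lines:

/* Hypothetical sketch: masking toggles bit (irq % 32) in IMR(irq / 32). */
static void imx21_mask_irq(unsigned int irq)
{
    IMR(irq / 32) &= ~(1 << (irq % 32));
}

static void imx21_unmask_irq(unsigned int irq)
{
    IMR(irq / 32) |= (1 << (irq % 32));
}

static struct irqchip imx21_internal_chip = {
    .ack    = imx21_mask_irq,   /* level-triggered: ack by masking */
    .mask   = imx21_mask_irq,
    .unmask = imx21_unmask_irq,
};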
DomU initialization
arch/arm/kernel/head-xen.S
    b start_kernel
      |
    start_kernel()
      |
    init_IRQ()                   // drivers/xen/core/evtchn.c
void __init init_IRQ(void)
{
    int i;
    int cpu;

    spin_lock_init(&irq_mapping_update_lock);

    init_evtchn_cpu_bindings();

    /* No VIRQ or IPI bindings. */
    for (cpu = 0; cpu < NR_CPUS; cpu++) {
        for (i = 0; i < NR_VIRQS; i++)
            per_cpu(virq_to_irq, cpu)[i] = -1;
        for (i = 0; i < NR_IPIS; i++)
            per_cpu(ipi_to_irq, cpu)[i] = -1;
    }

    /* No event-channel -> IRQ mappings. */
    for (i = 0; i < NR_EVENT_CHANNELS; i++) {
        evtchn_to_irq[i] = -1;
        mask_evtchn(i); /* No event channels are 'live' right now. */
    }

    /* No IRQ -> event-channel mappings. */
    for (i = 0; i < NR_IRQS; i++)
        irq_info[i] = IRQ_UNBOUND;

    /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
    for (i = 0; i < NR_DYNIRQS; i++) {
        irq_bindcount[dynirq_to_irq(i)] = 0;
        evtchn_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
        evtchn_desc[dynirq_to_irq(i)].action  = NULL;
        evtchn_desc[dynirq_to_irq(i)].depth   = 1;
        evtchn_desc[dynirq_to_irq(i)].handler = &dynirq_type;
        evtchn_desc[dynirq_to_irq(i)].lock    = __SPIN_LOCK_UNLOCKED(evtchn_desc->lock);
    }

    /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
    for (i = 0; i < NR_PIRQS; i++) {
        irq_bindcount[pirq_to_irq(i)] = 1;
        evtchn_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
        evtchn_desc[pirq_to_irq(i)].action  = NULL;
        evtchn_desc[pirq_to_irq(i)].depth   = 1;
        evtchn_desc[pirq_to_irq(i)].handler = &pirq_type;
        evtchn_desc[pirq_to_irq(i)].lock    = __SPIN_LOCK_UNLOCKED(evtchn_desc->lock);
    }
}
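The dynirq_to_irq() and pirq_to_irq() helpers split the flat IRQ number space into a statically bound physical region and a dynamically bound region. In classic Xen Linux these are plain offset macros, and the xenarm port presumably follows the same layout:

/* Assumed layout, following classic Xen Linux evtchn.c:
 *   [PIRQ_BASE,   PIRQ_BASE   + NR_PIRQS)   physical IRQs, bound 1:1
 *   [DYNIRQ_BASE, DYNIRQ_BASE + NR_DYNIRQS) dynamically bound event channels */
#define pirq_to_irq(_x)   ((_x) + PIRQ_BASE)
#define irq_to_pirq(_x)   ((_x) - PIRQ_BASE)
#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)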
---------------------------------------------
DomU's interrupt request:
drivers/xen/core/evtchn.c
int request_irq(unsigned int irq,
                irq_handler_t handler,
                unsigned long irqflags, const char *devname, void *dev_id)
{
    struct irqaction *action;
    int retval;

    /*
     * Sanity-check: shared interrupts must pass in a real dev-ID,
     * otherwise we'll have trouble later trying to figure out
     * which interrupt is which (messes up the interrupt freeing
     * logic etc).
     */
    if ((irqflags & SA_SHIRQ) && !dev_id)
        return -EINVAL;
    if (irq >= NR_IRQS)
        return -EINVAL;
    if (!handler)
        return -EINVAL;

    action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
    if (!action)
        return -ENOMEM;

    action->handler = handler;
    action->flags = irqflags;
    cpus_clear(action->mask);
    action->name = devname;
    action->next = NULL;
    action->dev_id = dev_id;

    retval = setup_irq(irq, action);
    if (retval)
        kfree(action);

    return retval;
}
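From a DomU driver's point of view nothing changes relative to native Linux. A minimal, purely illustrative caller (MYDEV_IRQ, mydev_interrupt, and dev are hypothetical names):

static irqreturn_t mydev_interrupt(int irq, void *dev_id)
{
    /* acknowledge the device and do the minimal urgent work */
    return IRQ_HANDLED;
}

/* in the driver's probe/open path: */
if (request_irq(MYDEV_IRQ, mydev_interrupt, SA_SHIRQ, "mydev", dev))
    printk(KERN_ERR "mydev: cannot claim IRQ %d\n", MYDEV_IRQ);

For a physical IRQ this call chain ends in startup_pirq() below, which is where the event-channel binding actually happens.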
int setup_irq(unsigned int irq, struct irqaction *new)
{
    struct evtchn_desc *desc = evtchn_desc + irq;
    struct irqaction *old, **p;
    unsigned long flags;
    int shared = 0;

    if (irq >= NR_IRQS)
        return -EINVAL;
    if (desc->handler == NULL)
        return -ENOSYS;

    /*
     * Some drivers like serial.c use request_irq() heavily,
     * so we have to be careful not to interfere with a
     * running system.
     */
    if (new->flags & SA_SAMPLE_RANDOM) {
        /*
         * This function might sleep, we want to call it first,
         * outside of the atomic block.
         * Yes, this might clear the entropy pool if the wrong
         * driver is attempted to be loaded, without actually
         * installing a new handler, but is this really a problem,
         * only the sysadmin is able to do this.
         */
        rand_initialize_irq(irq);
    }

    /*
     * The following block of code has to be executed atomically
     */
    spin_lock_irqsave(&desc->lock, flags);
    p = &desc->action;
    if ((old = *p) != NULL) {
        /* Can't share interrupts unless both agree to */
        if (!(old->flags & new->flags & SA_SHIRQ)) {
            spin_unlock_irqrestore(&desc->lock, flags);
            return -EBUSY;
        }
        /* add new interrupt at end of irq queue */
        do {
            p = &old->next;
            old = *p;
        } while (old);
        shared = 1;
    }

    *p = new; /* link the new action into the descriptor's list */

    if (!shared) {
        desc->depth = 0;
        desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT |
                          IRQ_WAITING | IRQ_INPROGRESS);
        if (desc->handler->startup)
            desc->handler->startup(irq); /* startup_pirq() for physical IRQs */
        else
            desc->handler->enable(irq);
    }
    spin_unlock_irqrestore(&desc->lock, flags);

    new->irq = irq;
    new->dir = NULL;

    return 0;
}
static unsigned int startup_pirq(unsigned int irq)
{
    evtchn_op_t op = { .cmd = EVTCHNOP_bind_pirq };
    int evtchn = evtchn_from_irq(irq);

    if (VALID_EVTCHN(evtchn))
        goto out;

    op.u.bind_pirq.pirq = irq;
    /* NB. We are happy to share unless we are probing. */
    op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
    if (HYPERVISOR_event_channel_op(&op) != 0) {
        if (!probing_irq(irq))
            printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
        return 0;
    }
    evtchn = op.u.bind_pirq.port;

    pirq_query_unmask(irq_to_pirq(irq));

    bind_evtchn_to_cpu(evtchn, 0);
    evtchn_to_irq[evtchn] = irq;
    irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);

 out:
    unmask_evtchn(evtchn);
    pirq_unmask_notify(irq_to_pirq(irq));

    return 0;
}
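irq_info[] packs the binding type, a type-specific index, and the event-channel port into one word via mk_irq_info(). The classic Xen Linux encoding is shown below; the xenarm tree presumably uses the same scheme:

/* Assumed encoding (classic Xen Linux evtchn.c):
 *   bits 31..24  binding type (IRQT_PIRQ, IRQT_VIRQ, ...)
 *   bits 23..16  type-specific index (here: the pirq number)
 *   bits 15..0   event-channel port */
#define mk_irq_info(type, index, evtchn) \
    (((u32)(type) << 24) | ((u32)(index) << 16) | (u32)(evtchn))
#define evtchn_from_irq(irq) ((u16)(irq_info[irq]))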
On the Xen side, the hypercall lands in common/event_channel.c:
long do_event_channel_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    long rc;

    switch ( cmd )
    {
    case EVTCHNOP_alloc_unbound: {
        struct evtchn_alloc_unbound alloc_unbound;
        if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_alloc_unbound(&alloc_unbound);
        if ( (rc == 0) && (copy_to_guest(arg, &alloc_unbound, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_interdomain: {
        struct evtchn_bind_interdomain bind_interdomain;
        if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_interdomain(&bind_interdomain);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_interdomain, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_virq: {
        struct evtchn_bind_virq bind_virq;
        if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_virq(&bind_virq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_virq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_ipi: {
        struct evtchn_bind_ipi bind_ipi;
        if ( copy_from_guest(&bind_ipi, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_ipi(&bind_ipi);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_ipi, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_pirq: {
        struct evtchn_bind_pirq bind_pirq;
        if ( copy_from_guest(&bind_pirq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_pirq(&bind_pirq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_pirq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_close: {
        struct evtchn_close close;
        if ( copy_from_guest(&close, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_close(&close);
        break;
    }

    case EVTCHNOP_send: {
        struct evtchn_send send;
        if ( copy_from_guest(&send, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_send(send.port);
        break;
    }

    case EVTCHNOP_status: {
        struct evtchn_status status;
        if ( copy_from_guest(&status, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_status(&status);
        if ( (rc == 0) && (copy_to_guest(arg, &status, 1) != 0) )
            rc = -EFAULT;
        break;
    }

    case EVTCHNOP_bind_vcpu: {
        struct evtchn_bind_vcpu bind_vcpu;
        if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_vcpu(bind_vcpu.port, bind_vcpu.vcpu);
        break;
    }

    case EVTCHNOP_unmask: {
        struct evtchn_unmask unmask;
        if ( copy_from_guest(&unmask, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_unmask(&unmask);
        break;
    }

    case EVTCHNOP_reset: {
        struct evtchn_reset reset;
        if ( copy_from_guest(&reset, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_reset(&reset);
        break;
    }

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}
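Every case above is reached through the same single-argument hypercall the guest already used in startup_pirq(). For example, notifying the remote end of a bound port is just the following (a sketch; notify_remote is an illustrative wrapper, not a function from the source):

static inline void notify_remote(int port)
{
    evtchn_op_t op = { .cmd = EVTCHNOP_send };

    op.u.send.port = port;
    (void)HYPERVISOR_event_channel_op(&op); /* traps into do_event_channel_op() */
}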
static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind) /* common/event_channel.c */
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int port, pirq = bind->pirq;
    long rc;

    if ( (pirq < 0) || (pirq >= ARRAY_SIZE(d->pirq_to_evtchn)) )
        return -EINVAL;

    if ( !irq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->evtchn_lock);

    if ( d->pirq_to_evtchn[pirq] != 0 )
        ERROR_EXIT(-EEXIST);
    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    d->pirq_to_evtchn[pirq] = port;
    rc = pirq_guest_bind(d->vcpu[0], pirq,
                         !!(bind->flags & BIND_PIRQ__WILL_SHARE));
    if ( rc != 0 )
    {
        d->pirq_to_evtchn[pirq] = 0;
        goto out;
    }

    chn->state  = ECS_PIRQ;
    chn->u.pirq = pirq;

    bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);
    return rc;
}
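ERROR_EXIT() is a small convenience macro used throughout event_channel.c; in the Xen source it is essentially:

#define ERROR_EXIT(_errno)  \
    do {                    \
        rc = (_errno);      \
        goto out;           \
    } while ( 0 )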
int pirq_guest_bind(struct vcpu *v, int irq, int will_share) /* arch/arm/xen/irq.c */
{
    struct irqdesc *desc;
    irq_guest_action_t *action;
    unsigned long flags;
    int rc = 0;

    if ( (irq < 0) || (irq >= NR_IRQS) )
        return -EINVAL;

    desc = get_irq_descriptor(irq);

    spin_lock_irqsave(&desc->lock, flags);

    action = (irq_guest_action_t *)desc->action;

    if ( !(desc->flags & IRQF_GUEST_BOUND) ) {
        if ( desc->action != NULL ) {
            DPRINTK(3, "Cannot bind IRQ %d to guest. In use by %s.\n",
                    (int)irq, desc->action->name);
            rc = -EBUSY;
            goto out;
        }

        action = xmalloc(irq_guest_action_t);
        if ( (desc->action = (struct irqaction *)action) == NULL ) {
            DPRINTK(3, "Cannot bind IRQ %d to guest. Out of memory.\n", irq);
            rc = -ENOMEM;
            goto out;
        }

        action->shareable = 1;
        action->nr_guests = 0;
        action->in_flight = 0;
        action->ack_type  = pirq_ack_type(irq);

        desc->disable_depth = 0;
        desc->flags |= IRQF_GUEST_BOUND;
        if ( will_share )
            desc->flags |= IRQF_SHARABLE;

        desc->chip->unmask(irq);
    } else if ( !will_share || !action->shareable ) {
        DPRINTK(3, "Cannot bind IRQ %d to guest. Will not share with others.\n",
                irq);
        rc = -EBUSY;
        goto out;
    }

    if ( action->nr_guests == IRQ_MAX_GUESTS ) {
        DPRINTK(3, "Cannot bind IRQ %d to guest. Already at max share.\n", irq);
        rc = -EBUSY;
        goto out;
    }

    action->guest[action->nr_guests++] = v->domain;

 out:
    spin_unlock_irqrestore(&desc->lock, flags);
    return rc;
}
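pirq_ack_type() classifies how the line must be acknowledged once guests are involved. The constants below follow Xen's x86 irq.c; the xenarm port is assumed to use the same convention, and ACK_TYPE_UNMASK is exactly the case tracked via in_flight/pirq_mask in handle_guest_bound_irq() later:

/* Assumed ack types, following Xen's x86 irq.c:
 *   ACK_TYPE_NONE   - edge lines: no final ack needed
 *   ACK_TYPE_UNMASK - line stays masked until every notified guest
 *                     unmasks it (tracked via in_flight / pirq_mask)
 *   ACK_TYPE_EOI    - the hypervisor issues the EOI itself */
#define ACK_TYPE_NONE   0
#define ACK_TYPE_UNMASK 1
#define ACK_TYPE_EOI    2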
This completes the interrupt request path.
------------------------------------------------------
Interrupt Response
Hypervisor interrupt response:
arch/arm/xen/entry.S

    .align 5
__irq_svc:
    save_svc_context
#ifdef CONFIG_MACHINE_VERSATILE
    get_irqnr_preamble r5, lr           @ minsung
#endif
1:  get_irqnr_and_base r0, r6, r5, lr
    movne r1, sp
    @
    @ routine called with r0 = irq number, r1 = struct pt_regs *
    @
    adrne lr, 1b
    bne asm_do_IRQ                      @ in DomU the equivalent entry is irq_handler
------------------------------------------------------------------------
__irq_usr:
    save_usr_context
    vcpu r0
    add r0, r0, #(OFFSET_ARCH_VCPU + OFFSET_GUEST_CONTEXT)
    ldr r1, [r0, #(OFFSET_SYS_REGS + OFFSET_VPSR)]
    ldr r2, [sp, #S_SP]
    cmp r1, #PSR_MODE_USR
    streq r2, [r0, #(OFFSET_SYS_REGS + OFFSET_VUSP)]
    strne r2, [r0, #(OFFSET_SYS_REGS + OFFSET_VKSP)]
    ldr r3, [sp, #S_PSR]
    bic r3, r3, #PSR_MODE_MASK
    orr r3, r3, r1
    str r3, [sp, #S_PSR]
#ifdef CONFIG_MACHINE_VERSATILE
    get_irqnr_preamble r5, lr           @ minsung
#endif
1:  get_irqnr_and_base r0, r6, r5, lr
    movne r1, sp
    adrne lr, 1b
    bne asm_do_IRQ

Both paths branch to asm_do_IRQ:
asmlinkage void asm_do_IRQ(unsigned int irq, struct cpu_user_regs *regs)
{
    struct irqdesc *desc;

    if (irq >= NR_IRQS) {
        printk("Bad IRQ = %d\n", irq);
        return; /* out-of-range IRQ: do not index the descriptor table */
    }

    desc = get_irq_descriptor(irq);
    desc->handle(irq, desc, regs);
}
void level_irq_handler(unsigned int irq, struct irqdesc *desc,
                       struct cpu_user_regs *regs)
{
    irqreturn_t ret;
    irqaction_t *action;

    spin_lock(&desc->lock);
    desc->chip->ack(irq);

    if (desc->flags & IRQF_GUEST_BOUND) {
        /* Line is owned by guest domains: forward it as an event. */
        handle_guest_bound_irq(irq);
        goto out_unlock;
    }

    if (unlikely(desc->status & IRQ_IN_PROGRESS))
        goto out_unlock;

    desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

    action = desc->action;
    if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
        desc->status |= IRQ_PENDING;
        goto out_unlock;
    }

    desc->status |= IRQ_IN_PROGRESS;
    desc->status &= ~IRQ_PENDING;
    spin_unlock(&desc->lock);

    ret = handle_event(irq, action, regs);
    if (!ret)
        printk("Action return = %d\n", ret);

    spin_lock(&desc->lock);
    desc->status &= ~IRQ_IN_PROGRESS;
    if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
        desc->chip->unmask(irq);

out_unlock:
    spin_unlock(&desc->lock);
}
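handle_event() is not included in this excerpt. For a Xen-owned (not guest-bound) line it presumably just walks the irqaction chain built by setup_irq(); a sketch under that assumption:

/* Assumed shape of handle_event(): run every action on the chain. */
static irqreturn_t handle_event(unsigned int irq, irqaction_t *action,
                                struct cpu_user_regs *regs)
{
    irqreturn_t ret = IRQ_NONE;

    for ( ; action != NULL; action = action->next )
        ret |= action->handler(irq, action->dev_id, regs);

    return ret;
}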
static void handle_guest_bound_irq(unsigned int irq)
{
    int i;
    struct domain *d;
    struct irqdesc *desc;
    irq_guest_action_t *action;

    desc = get_irq_descriptor(irq);
    action = (irq_guest_action_t *)desc->action;

    for ( i = 0; i < action->nr_guests; i++ ) {
        d = action->guest[i];

        /* Send HID IRQs only to the foreground domain. */
        if (desc->isHIDirq && d->domain_id != (domid_t)foreground_domain)
            continue;

        if (action->ack_type == ACK_TYPE_UNMASK) {
            /* Test whether this pirq is already masked for the domain;
             * if not, the line now has one more in-flight event to unmask. */
            if (!test_and_set_bit(irq, (volatile unsigned long *)&d->pirq_mask))
                action->in_flight++;
        }

        send_guest_pirq(d, irq);
    }
}
void send_guest_pirq(struct domain *d, int pirq)
{
    int port = d->pirq_to_evtchn[pirq];
    struct evtchn *chn;

    ASSERT(port != 0);

    if (!acm_send_guest_pirq(d, pirq))
        return;

    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}
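evtchn_set_pending() is where the event actually becomes visible to the guest: it sets the port's bit in the shared-info page, propagates it up the two-level hierarchy, and kicks the vCPU. A sketch mirroring Xen's common implementation (field names are assumed to match the shared_info layout consumed by evtchn_do_upcall() below):

static void evtchn_set_pending(struct vcpu *v, int port)
{
    shared_info_t *s = v->domain->shared_info;

    /* Second level: one bit per port. */
    if ( test_and_set_bit(port, s->evtchn_pending) )
        return; /* already pending: nothing new to deliver */

    /* First level: one selector bit per word of evtchn_pending[]. */
    if ( !test_bit(port, s->evtchn_mask) &&
         !test_and_set_bit(port / BITS_PER_LONG,
                           &v->vcpu_info->evtchn_pending_sel) )
    {
        v->vcpu_info->evtchn_upcall_pending = 1;
        vcpu_kick(v); /* deliver the virtual interrupt to the guest */
    }
}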
DomU's interrupt response:
arch/arm/kernel/entry-armv-xen.S

__irq_svc:
    svc_entry
#ifndef CONFIG_XENOLINUX
#ifdef CONFIG_TRACE_IRQFLAGS
    bl trace_hardirqs_off
#endif
#ifdef CONFIG_PREEMPT
    get_thread_info tsk
    ldr r8, [tsk, #TI_PREEMPT]          @ get preempt count
    add r7, r8, #1                      @ increment it
    str r7, [tsk, #TI_PREEMPT]
#endif
#endif
    irq_handler

The irq_handler macro simply hands the saved register frame to the event-channel upcall:

    .macro irq_handler
    mov r0, sp
    bl evtchn_do_upcall
    .endm
/* NB. Interrupts are disabled on entry. */
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
    unsigned long l1, l2;
    unsigned int l1i, l2i, port;
    int irq, cpu = smp_processor_id();
    shared_info_t *s = HYPERVISOR_shared_info;
    vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

retry:
    xchg(&vcpu_info->evtchn_upcall_pending, 0);

    /* First level: one selector bit per word of evtchn_pending[]. */
    l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
    while (l1 != 0) {
        l1i = __ffs(l1);
        l1 &= ~(1UL << l1i);

        /* Second level: scan the pending-and-unmasked word. */
        while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
            l2i = __ffs(l2);
            port = (l1i * BITS_PER_LONG) + l2i;

            if ((irq = evtchn_to_irq[port]) != -1) {
                irq_enter();
                do_IRQ(irq, regs);
                irq_exit();
            } else {
                evtchn_device_upcall(port);
            }
        }
    }

    if (vcpu_info->evtchn_upcall_pending)
        goto retry;
}

fastcall unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
    struct pt_regs *old_regs = set_irq_regs(regs);
    evtchn_desc_t *desc = evtchn_desc + irq;
    struct irqaction *action;
    unsigned int status = 0;

    kstat_this_cpu.irqs[irq]++;
    spin_lock(&desc->lock);
    if (desc->handler->ack)
        desc->handler->ack(irq);

    status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
    status |= IRQ_PENDING;        /* we _want_ to handle it */

    action = NULL;
    if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
        action = desc->action;
        status &= ~IRQ_PENDING;   /* we commit to handling */
        status |= IRQ_INPROGRESS; /* we are handling it */
    }
    desc->status = status;

    if (unlikely(!action))
        goto out;

    /*
     * Edge triggered interrupts need to remember
     * pending events.
     * This applies to any hw interrupts that allow a second
     * instance of the same irq to arrive while we are in do_IRQ
     * or in the handler. But the code here only handles the _second_
     * instance of the irq, not the third or fourth. So it is mostly
     * useful for irq hardware that does not mask cleanly in an
     * SMP environment.
     */
    for (;;) {
        irqreturn_t ret;

        spin_unlock(&desc->lock);
        if (!(action->flags & SA_INTERRUPT))
            local_irq_enable();

        do {
            ret = action->handler(irq, action->dev_id);
            if (ret == IRQ_HANDLED)
                status |= action->flags;
            action = action->next;
        } while (action);

        if (status & SA_SAMPLE_RANDOM)
            add_interrupt_randomness(irq);
        local_irq_disable();

        spin_lock(&desc->lock);
        if (likely(!(desc->status & IRQ_PENDING)))
            break;
        desc->status &= ~IRQ_PENDING;
    }
    desc->status &= ~IRQ_INPROGRESS;

out:
    /*
     * The ->end() handler has to deal with interrupts which got
     * disabled while the handler was running.
     */
    desc->handler->end(irq);
    spin_unlock(&desc->lock);

    set_irq_regs(old_regs);
    return 1;
}
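active_evtchns() is what makes the second-level scan in evtchn_do_upcall() skip masked ports. In classic Xen Linux it is the macro below (the per-cpu factor restricts each CPU to the channels bound to it); the xenarm guest presumably defines it the same way:

/* Pending, bound to this CPU, and not masked. */
#define active_evtchns(cpu, sh, idx)      \
    ((sh)->evtchn_pending[idx] &          \
     cpu_evtchn_mask[cpu][idx] &          \
     ~(sh)->evtchn_mask[idx])

With that, the complete path is: device line -> Xen's __irq_svc/__irq_usr -> asm_do_IRQ -> level_irq_handler -> handle_guest_bound_irq -> send_guest_pirq -> evtchn_set_pending -> guest upcall evtchn_do_upcall -> do_IRQ -> the driver handler registered via request_irq.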