debuggers.hg

changeset 20841:6512068aa0db

x86: fix unmaskable msi assignment issue.

Currently, an unmaskable MSI irq's EOI write is deferred until the guest
writes EOI, so eoi_vector needs to stay unchanged before the guest writes
EOI. However, irq migration breaks this assumption and changes
eoi_vector when interrupts are generated through the new vector.

The patch removes the dependency on eoi_vector and directly records
the irq info in the EOI stack; when the guest writes EOI, it just does the
physical EOI for the specific irq (recorded in the EOI stack) on the CPUs
according to the cpu_eoi_map.

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jan 13 08:18:38 2010 +0000 (2010-01-13)
parents 3f8fd65732cc
children 13d4e78ede97
files xen/arch/x86/irq.c
line diff
     1.1 --- a/xen/arch/x86/irq.c	Wed Jan 13 08:17:00 2010 +0000
     1.2 +++ b/xen/arch/x86/irq.c	Wed Jan 13 08:18:38 2010 +0000
     1.3 @@ -740,7 +740,6 @@ typedef struct {
     1.4  #define ACKTYPE_UNMASK 1     /* Unmask PIC hardware (from any CPU)   */
     1.5  #define ACKTYPE_EOI    2     /* EOI on the CPU that was interrupted  */
     1.6      cpumask_t cpu_eoi_map;   /* CPUs that need to EOI this interrupt */
     1.7 -    u8 eoi_vector;           /* vector awaiting the EOI*/
     1.8      struct domain *guest[IRQ_MAX_GUESTS];
     1.9  } irq_guest_action_t;
    1.10  
    1.11 @@ -749,8 +748,9 @@ typedef struct {
    1.12   * order, as only the current highest-priority pending irq can be EOIed.
    1.13   */
    1.14  struct pending_eoi {
    1.15 -    u8 vector; /* vector awaiting EOI */
    1.16 -    u8 ready;  /* Ready for EOI now?  */
    1.17 +    u32 ready:1;  /* Ready for EOI now?  */
    1.18 +    u32 irq:23;   /* irq of the vector */
    1.19 +    u32 vector:8; /* vector awaiting EOI */
    1.20  };
    1.21  
    1.22  static DEFINE_PER_CPU(struct pending_eoi, pending_eoi[NR_VECTORS]);
    1.23 @@ -817,11 +817,11 @@ static void __do_IRQ_guest(int irq)
    1.24          sp = pending_eoi_sp(peoi);
    1.25          ASSERT((sp == 0) || (peoi[sp-1].vector < vector));
    1.26          ASSERT(sp < (NR_VECTORS-1));
    1.27 +        peoi[sp].irq = irq;
    1.28          peoi[sp].vector = vector;
    1.29          peoi[sp].ready = 0;
    1.30          pending_eoi_sp(peoi) = sp+1;
    1.31          cpu_set(smp_processor_id(), action->cpu_eoi_map);
    1.32 -        action->eoi_vector = vector;
    1.33      }
    1.34  
    1.35      for ( i = 0; i < action->nr_guests; i++ )
    1.36 @@ -913,7 +913,7 @@ static void flush_ready_eoi(void)
    1.37  
    1.38      while ( (--sp >= 0) && peoi[sp].ready )
    1.39      {
    1.40 -        irq = __get_cpu_var(vector_irq[peoi[sp].vector]);
    1.41 +        irq = peoi[sp].irq;
    1.42          ASSERT(irq > 0);
    1.43          desc = irq_to_desc(irq);
    1.44          spin_lock(&desc->lock);
    1.45 @@ -941,7 +941,7 @@ static void __set_eoi_ready(struct irq_d
    1.46  
    1.47      do {
    1.48          ASSERT(sp > 0);
    1.49 -    } while ( peoi[--sp].vector != action->eoi_vector );
    1.50 +    } while ( peoi[--sp].irq != irq );
    1.51      ASSERT(!peoi[sp].ready);
    1.52      peoi[sp].ready = 1;
    1.53  }