view xen/arch/x86/irq.c @ 3658:0ef6e8e6e85d

bitkeeper revision 1.1159.212.71 (4200f0afX_JumfbEHQex6TdFENULMQ)

Merge labyrinth.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into labyrinth.cl.cam.ac.uk:/auto/groups/xeno/users/iap10/xeno-clone/xen-unstable.bk

author    iap10@labyrinth.cl.cam.ac.uk
date      Wed Feb 02 15:24:31 2005 +0000
parents   75f82adfcc90 beb0887c54bc
children  bf2c38625b39

/******************************************************************************
 * arch/x86/irq.c
 *
 * Portions of this file are:
 *  Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/event.h>
#include <xen/irq.h>
#include <xen/perfc.h>
#include <xen/sched.h>
#include <asm/smpboot.h>

irq_desc_t irq_desc[NR_IRQS];
static void __do_IRQ_guest(int irq);

void no_action(int cpl, void *dev_id, struct xen_regs *regs) { }

static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
static void ack_none(unsigned int irq)
{
    printk("Unexpected IRQ trap at vector %02x.\n", irq);
    ack_APIC_irq();
}

#define shutdown_none disable_none
#define end_none      enable_none

/* Fallback interrupt-controller type for vectors with no real handler. */
struct hw_interrupt_type no_irq_type = {
    "none",
    startup_none,
    shutdown_none,
    enable_none,
    disable_none,
    ack_none,
    end_none
};

atomic_t irq_err_count;
atomic_t irq_mis_count;
/*
 * IRQ lines are masked with a nesting depth count: only the first disable
 * actually masks the line, and only the matching final enable unmasks it.
 */
inline void disable_irq_nosync(unsigned int irq)
{
    irq_desc_t *desc = &irq_desc[irq];
    unsigned long flags;

    spin_lock_irqsave(&desc->lock, flags);

    if ( desc->depth++ == 0 )
    {
        desc->status |= IRQ_DISABLED;
        desc->handler->disable(irq);
    }

    spin_unlock_irqrestore(&desc->lock, flags);
}

void disable_irq(unsigned int irq)
{
    disable_irq_nosync(irq);
    /* Wait for any handler already running on another CPU to finish. */
    do { smp_mb(); } while ( irq_desc[irq].status & IRQ_INPROGRESS );
}
void enable_irq(unsigned int irq)
{
    irq_desc_t *desc = &irq_desc[irq];
    unsigned long flags;

    spin_lock_irqsave(&desc->lock, flags);

    if ( --desc->depth == 0 )
    {
        desc->status &= ~IRQ_DISABLED;
        if ( (desc->status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING )
        {
            /* An interrupt arrived while the line was masked: resend it. */
            desc->status |= IRQ_REPLAY;
            hw_resend_irq(desc->handler, irq);
        }
        desc->handler->enable(irq);
    }

    spin_unlock_irqrestore(&desc->lock, flags);
}
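
/*
 * Illustrative sketch: the depth counter makes disable/enable calls nest,
 * so the line stays masked until the last balancing enable_irq().  The
 * helper 'example_irq_nesting' is hypothetical and nothing else calls it.
 */
static void example_irq_nesting(unsigned int irq)
{
    disable_irq(irq);   /* depth 0 -> 1: line masked                         */
    disable_irq(irq);   /* depth 1 -> 2: still masked                        */
    enable_irq(irq);    /* depth 2 -> 1: still masked                        */
    enable_irq(irq);    /* depth 1 -> 0: unmasked; any pending IRQ is resent */
}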
asmlinkage void do_IRQ(struct xen_regs regs)
{
#if defined(__i386__)
    unsigned int irq = regs.entry_vector;
#else
    unsigned int irq = 0; /* XXX */
#endif
    irq_desc_t *desc = &irq_desc[irq];
    struct irqaction *action;

    perfc_incrc(irqs);

    spin_lock(&desc->lock);
    desc->handler->ack(irq);

    if ( likely(desc->status & IRQ_GUEST) )
    {
        __do_IRQ_guest(irq);
        spin_unlock(&desc->lock);
        return;
    }

    desc->status &= ~IRQ_REPLAY;
    desc->status |= IRQ_PENDING;

    /*
     * Since we set PENDING, if another processor is handling a different
     * instance of this same irq, the other processor will take care of it.
     */
    if ( desc->status & (IRQ_DISABLED | IRQ_INPROGRESS) )
        goto out;

    desc->status |= IRQ_INPROGRESS;

    action = desc->action;
    while ( desc->status & IRQ_PENDING )
    {
        desc->status &= ~IRQ_PENDING;
        irq_enter(smp_processor_id(), irq);
        spin_unlock_irq(&desc->lock);
        action->handler(irq, action->dev_id, &regs);
        spin_lock_irq(&desc->lock);
        irq_exit(smp_processor_id(), irq);
    }

    desc->status &= ~IRQ_INPROGRESS;

 out:
    desc->handler->end(irq);
    spin_unlock(&desc->lock);
}
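
/*
 * Worked example of the PENDING/INPROGRESS protocol above, for two CPUs
 * taking the same non-guest IRQ (the CPU labels are illustrative):
 *
 *   CPU0: do_IRQ(n) sets PENDING, sees neither DISABLED nor INPROGRESS,
 *         sets INPROGRESS, clears PENDING and runs the handler with
 *         desc->lock dropped.
 *   CPU1: do_IRQ(n) sets PENDING, sees INPROGRESS and jumps to 'out'.
 *         CPU0's while loop notices the fresh PENDING and runs the handler
 *         again, so the action never runs on two CPUs at once.
 */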
void free_irq(unsigned int irq)
{
    irq_desc_t *desc = &irq_desc[irq];
    unsigned long flags;

    spin_lock_irqsave(&desc->lock, flags);
    desc->action  = NULL;
    desc->depth   = 1;
    desc->status |= IRQ_DISABLED;
    desc->handler->shutdown(irq);
    spin_unlock_irqrestore(&desc->lock, flags);

    /* Wait to make sure it's not being used on another CPU */
    do { smp_mb(); } while ( irq_desc[irq].status & IRQ_INPROGRESS );
}
int setup_irq(unsigned int irq, struct irqaction *new)
{
    irq_desc_t *desc = &irq_desc[irq];
    unsigned long flags;

    spin_lock_irqsave(&desc->lock, flags);

    if ( desc->action != NULL )
    {
        spin_unlock_irqrestore(&desc->lock, flags);
        return -EBUSY;
    }

    desc->action  = new;
    desc->depth   = 0;
    desc->status &= ~IRQ_DISABLED;
    desc->handler->startup(irq);

    spin_unlock_irqrestore(&desc->lock, flags);

    return 0;
}
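
/*
 * Minimal usage sketch for setup_irq().  'my_device_interrupt', 'my_action',
 * 'my_device_init' and 'MY_DEVICE_IRQ' are hypothetical names; the irqaction
 * fields are assigned by name (handler/name/dev_id, as referenced elsewhere
 * in this file) so nothing is assumed about their declaration order.
 */
static void my_device_interrupt(int irq, void *dev_id, struct xen_regs *regs)
{
    /* Service the (hypothetical) device here. */
}

static struct irqaction my_action;

void my_device_init(void)
{
    my_action.handler = my_device_interrupt;
    my_action.name    = "my_device";
    my_action.dev_id  = NULL;

    if ( setup_irq(MY_DEVICE_IRQ, &my_action) != 0 )
        printk("IRQ %d already in use\n", MY_DEVICE_IRQ);
}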
/*
 * HANDLING OF GUEST-BOUND PHYSICAL IRQS
 */

#define IRQ_MAX_GUESTS 7
typedef struct {
    u8 nr_guests;
    u8 in_flight;
    u8 shareable;
    struct exec_domain *guest[IRQ_MAX_GUESTS];
} irq_guest_action_t;
static void __do_IRQ_guest(int irq)
{
    irq_desc_t *desc = &irq_desc[irq];
    irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
    struct exec_domain *ed;
    int i;

    for ( i = 0; i < action->nr_guests; i++ )
    {
        ed = action->guest[i];
        /* Count each guest that has yet to acknowledge this line. */
        if ( !test_and_set_bit(irq, &ed->domain->pirq_mask) )
            action->in_flight++;
        send_guest_pirq(ed, irq);
    }
}
int pirq_guest_unmask(struct domain *d)
{
    irq_desc_t *desc;
    unsigned int i, j, pirq;
    u32 m;
    shared_info_t *s = d->shared_info;

    for ( i = 0; i < ARRAY_SIZE(d->pirq_mask); i++ )
    {
        m = d->pirq_mask[i];
        while ( m != 0 )
        {
            j = find_first_set_bit(m);
            m &= ~(1 << j);
            pirq = (i << 5) + j;   /* 32 pirqs per mask word */
            desc = &irq_desc[pirq];
            spin_lock_irq(&desc->lock);
            /*
             * End (EOI) the line only once the guest has unmasked its event
             * channel and the last in-flight notification is acknowledged.
             */
            if ( !test_bit(d->pirq_to_evtchn[pirq], &s->evtchn_mask[0]) &&
                 test_and_clear_bit(pirq, &d->pirq_mask) &&
                 (--((irq_guest_action_t *)desc->action)->in_flight == 0) )
                desc->handler->end(pirq);
            spin_unlock_irq(&desc->lock);
        }
    }

    return 0;
}
int pirq_guest_bind(struct exec_domain *ed, int irq, int will_share)
{
    struct domain *d = ed->domain;
    irq_desc_t *desc = &irq_desc[irq];
    irq_guest_action_t *action;
    unsigned long flags;
    int rc = 0;

    if ( !IS_CAPABLE_PHYSDEV(d) )
        return -EPERM;

    spin_lock_irqsave(&desc->lock, flags);

    action = (irq_guest_action_t *)desc->action;

    if ( !(desc->status & IRQ_GUEST) )
    {
        if ( desc->action != NULL )
        {
            DPRINTK("Cannot bind IRQ %d to guest. In use by '%s'.\n",
                    irq, desc->action->name);
            rc = -EBUSY;
            goto out;
        }

        action = xmalloc(irq_guest_action_t);
        if ( (desc->action = (struct irqaction *)action) == NULL )
        {
            DPRINTK("Cannot bind IRQ %d to guest. Out of memory.\n", irq);
            rc = -ENOMEM;
            goto out;
        }

        action->nr_guests = 0;
        action->in_flight = 0;
        action->shareable = will_share;

        desc->depth = 0;
        desc->status |= IRQ_GUEST;
        desc->status &= ~IRQ_DISABLED;
        desc->handler->startup(irq);

        /* Attempt to bind the interrupt target to the correct CPU. */
        if ( desc->handler->set_affinity != NULL )
            desc->handler->set_affinity(
                irq, apicid_to_phys_cpu_present(ed->processor));
    }
    else if ( !will_share || !action->shareable )
    {
        DPRINTK("Cannot bind IRQ %d to guest. Will not share with others.\n",
                irq);
        rc = -EBUSY;
        goto out;
    }

    if ( action->nr_guests == IRQ_MAX_GUESTS )
    {
        DPRINTK("Cannot bind IRQ %d to guest. Already at max share.\n", irq);
        rc = -EBUSY;
        goto out;
    }

    action->guest[action->nr_guests++] = ed;

 out:
    spin_unlock_irqrestore(&desc->lock, flags);
    return rc;
}
int pirq_guest_unbind(struct domain *d, int irq)
{
    irq_desc_t *desc = &irq_desc[irq];
    irq_guest_action_t *action;
    unsigned long flags;
    int i;

    spin_lock_irqsave(&desc->lock, flags);

    action = (irq_guest_action_t *)desc->action;

    if ( test_and_clear_bit(irq, &d->pirq_mask) &&
         (--action->in_flight == 0) )
        desc->handler->end(irq);

    if ( action->nr_guests == 1 )
    {
        desc->action = NULL;
        xfree(action);
        desc->depth = 1;
        desc->status |= IRQ_DISABLED;
        desc->status &= ~IRQ_GUEST;
        desc->handler->shutdown(irq);
    }
    else
    {
        i = 0;
        while ( action->guest[i] && action->guest[i]->domain != d )
            i++;
        /* Close the gap left by the departing guest (elements, not bytes). */
        memmove(&action->guest[i], &action->guest[i+1],
                (IRQ_MAX_GUESTS-i-1) * sizeof(action->guest[0]));
        action->nr_guests--;
    }

    spin_unlock_irqrestore(&desc->lock, flags);
    return 0;
}
int pirq_guest_bindable(int irq, int will_share)
{
    irq_desc_t *desc = &irq_desc[irq];
    irq_guest_action_t *action;
    unsigned long flags;
    int okay;

    spin_lock_irqsave(&desc->lock, flags);

    action = (irq_guest_action_t *)desc->action;

    /*
     * To be bindable the IRQ must either be not currently bound (1), or it
     * must be guest-bound and shareable (2) and not at its share limit (3).
     */
    okay = ((!(desc->status & IRQ_GUEST) && (action == NULL)) ||     /* 1 */
            ((desc->status & IRQ_GUEST) &&
             action->shareable && will_share &&                      /* 2 */
             (action->nr_guests != IRQ_MAX_GUESTS)));                /* 3 */

    spin_unlock_irqrestore(&desc->lock, flags);
    return okay;
}
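
/*
 * Sketch of the call sequence the guest-IRQ routines above imply, as seen
 * from a physdev-op style caller.  'example_guest_irq_lifecycle' is a
 * hypothetical helper; 'ed' is the binding vcpu and 'd' its domain.
 */
static void example_guest_irq_lifecycle(struct exec_domain *ed,
                                        struct domain *d, int irq)
{
    /* Bind a shareable physical IRQ to the guest, if permitted. */
    if ( pirq_guest_bindable(irq, 1) )
        (void)pirq_guest_bind(ed, irq, 1);

    /* Hardware raises irq: do_IRQ() -> __do_IRQ_guest() sends the event. */

    /* Guest re-enables its event channel: replay/EOI any masked lines. */
    (void)pirq_guest_unmask(d);

    /* Tear the binding down when the guest releases the IRQ. */
    (void)pirq_guest_unbind(d, irq);
}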