debuggers.hg

view linux-2.6.10-rc2-xen-sparse/arch/xen/kernel/ctrl_if.c @ 3289:a169836882cb

bitkeeper revision 1.1159.170.59 (41b4c2fdJ2gj_BWy27Vj3ptayZp_yg)

sync w/ head.
author cl349@arcadians.cl.cam.ac.uk
date Mon Dec 06 20:37:17 2004 +0000 (2004-12-06)
parents 13728122c78d
children
line source
1 /******************************************************************************
2 * ctrl_if.c
3 *
4 * Management functions for special interface to the domain controller.
5 *
6 * Copyright (c) 2004, K A Fraser
7 *
8 * This file may be distributed separately from the Linux kernel, or
9 * incorporated into other software packages, subject to the following license:
10 *
11 * Permission is hereby granted, free of charge, to any person obtaining a copy
12 * of this source file (the "Software"), to deal in the Software without
13 * restriction, including without limitation the rights to use, copy, modify,
14 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
15 * and to permit persons to whom the Software is furnished to do so, subject to
16 * the following conditions:
17 *
18 * The above copyright notice and this permission notice shall be included in
19 * all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
24 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
26 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 * IN THE SOFTWARE.
28 */
30 #include <linux/config.h>
31 #include <linux/kernel.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
34 #include <linux/string.h>
35 #include <linux/errno.h>
36 #include <linux/irq.h>
37 #include <linux/interrupt.h>
38 #include <linux/module.h>
39 #include <asm-xen/ctrl_if.h>
40 #include <asm-xen/evtchn.h>
42 #if 0
43 #define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
44 __FILE__ , __LINE__ , ## _a )
45 #else
/* Debug tracing is compiled out by default; flip the '#if 0' to enable. */
46 #define DPRINTK(_f, _a...) ((void)0)
47 #endif
49 /*
50 * Only used by initial domain which must create its own control-interface
51 * event channel. This value is picked up by the user-space domain controller
52 * via an ioctl.
53 */
54 int initdom_ctrlif_domcontroller_port = -1;
/* Event channel, and the IRQ it is bound to, linking us to the controller. */
56 static int ctrl_if_evtchn;
57 static int ctrl_if_irq;
/* Protects TX-side state: the TX ring producer and the id-mapping table. */
58 static spinlock_t ctrl_if_lock;
60 static struct irqaction ctrl_if_irq_action;
/* Private consumer indexes into the shared TX-response and RX-request rings. */
62 static CONTROL_RING_IDX ctrl_if_tx_resp_cons;
63 static CONTROL_RING_IDX ctrl_if_rx_req_cons;
65 /* Incoming message requests. */
66 /* Primary message type -> message handler. */
67 static ctrl_msg_handler_t ctrl_if_rxmsg_handler[256];
68 /* Primary message type -> callback in process context? */
/* NOTE(review): a 256-bit map needs 256/BITS_PER_LONG words; dividing by
 * sizeof(unsigned long) over-allocates (harmless, but worth confirming). */
69 static unsigned long ctrl_if_rxmsg_blocking_context[256/sizeof(unsigned long)];
70 /* Is it late enough during bootstrap to use schedule_task()? */
71 static int safe_to_schedule_task;
72 /* Queue up messages to be handled in process context. */
73 static ctrl_msg_t ctrl_if_rxmsg_deferred[CONTROL_RING_SIZE];
74 static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_prod;
75 static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_cons;
77 /* Incoming message responses: message identifier -> message handler/id. */
78 static struct {
79 ctrl_msg_handler_t fn;
80 unsigned long id;
81 } ctrl_if_txmsg_id_mapping[CONTROL_RING_SIZE];
83 /* For received messages that must be deferred to process context. */
84 static void __ctrl_if_rxmsg_deferred(void *unused);
85 static DECLARE_WORK(ctrl_if_rxmsg_deferred_work,
86 __ctrl_if_rxmsg_deferred,
87 NULL);
89 /* Deferred callbacks for people waiting for space in the transmit ring. */
90 static DECLARE_TASK_QUEUE(ctrl_if_tx_tq);
92 static DECLARE_WAIT_QUEUE_HEAD(ctrl_if_tx_wait);
93 static void __ctrl_if_tx_tasklet(unsigned long data);
94 static DECLARE_TASKLET(ctrl_if_tx_tasklet, __ctrl_if_tx_tasklet, 0);
96 static void __ctrl_if_rx_tasklet(unsigned long data);
97 static DECLARE_TASKLET(ctrl_if_rx_tasklet, __ctrl_if_rx_tasklet, 0);
/* The control interface lives at a fixed 2KB offset within shared_info. */
99 #define get_ctrl_if() ((control_if_t *)((char *)HYPERVISOR_shared_info + 2048))
/* TX ring is full when a whole ring's worth of requests await responses. */
100 #define TX_FULL(_c) \
101 (((_c)->tx_req_prod - ctrl_if_tx_resp_cons) == CONTROL_RING_SIZE)
103 static void ctrl_if_notify_controller(void)
104 {
105 notify_via_evtchn(ctrl_if_evtchn);
106 }
108 static void ctrl_if_rxmsg_default_handler(ctrl_msg_t *msg, unsigned long id)
109 {
110 msg->length = 0;
111 ctrl_if_send_response(msg);
112 }
/*
 * Tasklet: consume newly produced responses from the TX ring. For each
 * response carrying a registered completion handler (msg->id != 0xFF), run
 * the handler and then release its id-mapping slot. If the ring was full on
 * entry and is no longer, wake blocked senders and run queued callbacks.
 */
114 static void __ctrl_if_tx_tasklet(unsigned long data)
115 {
116 control_if_t *ctrl_if = get_ctrl_if();
117 ctrl_msg_t *msg;
118 int was_full = TX_FULL(ctrl_if);
119 CONTROL_RING_IDX rp;
121 rp = ctrl_if->tx_resp_prod;
122 rmb(); /* Ensure we see all requests up to 'rp'. */
124 while ( ctrl_if_tx_resp_cons != rp )
125 {
126 msg = &ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if_tx_resp_cons)];
128 DPRINTK("Rx-Rsp %u/%u :: %d/%d\n",
129 ctrl_if_tx_resp_cons,
130 ctrl_if->tx_resp_prod,
131 msg->type, msg->subtype);
133 /* Execute the callback handler, if one was specified. */
134 if ( msg->id != 0xFF )
135 {
136 (*ctrl_if_txmsg_id_mapping[msg->id].fn)(
137 msg, ctrl_if_txmsg_id_mapping[msg->id].id);
138 smp_mb(); /* Execute, /then/ free. */
/* Clearing .fn frees the slot for reuse by ctrl_if_send_message_noblock(). */
139 ctrl_if_txmsg_id_mapping[msg->id].fn = NULL;
140 }
142 /*
143 * Step over the message in the ring /after/ finishing reading it. As
144 * soon as the index is updated then the message may get blown away.
145 */
146 smp_mb();
147 ctrl_if_tx_resp_cons++;
148 }
150 if ( was_full && !TX_FULL(ctrl_if) )
151 {
/* Ring fell below the full mark: release blocked senders and callbacks. */
152 wake_up(&ctrl_if_tx_wait);
153 run_task_queue(&ctrl_if_tx_tq);
154 }
155 }
157 static void __ctrl_if_rxmsg_deferred(void *unused)
158 {
159 ctrl_msg_t *msg;
160 CONTROL_RING_IDX dp;
162 dp = ctrl_if_rxmsg_deferred_prod;
163 rmb(); /* Ensure we see all deferred requests up to 'dp'. */
165 while ( ctrl_if_rxmsg_deferred_cons != dp )
166 {
167 msg = &ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(
168 ctrl_if_rxmsg_deferred_cons++)];
169 (*ctrl_if_rxmsg_handler[msg->type])(msg, 0);
170 }
171 }
/*
 * Tasklet: consume request messages from the RX ring. Each header (and any
 * payload) is copied into a local buffer so the shared ring slot can be
 * reused immediately. Types flagged CALLBACK_IN_BLOCKING_CONTEXT are copied
 * to the deferred ring for later process-context dispatch via
 * schedule_work(); everything else is handled right here.
 */
173 static void __ctrl_if_rx_tasklet(unsigned long data)
174 {
175 control_if_t *ctrl_if = get_ctrl_if();
176 ctrl_msg_t msg, *pmsg;
177 CONTROL_RING_IDX rp, dp;
179 dp = ctrl_if_rxmsg_deferred_prod;
180 rp = ctrl_if->rx_req_prod;
181 rmb(); /* Ensure we see all requests up to 'rp'. */
183 while ( ctrl_if_rx_req_cons != rp )
184 {
185 pmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if_rx_req_cons++)];
/* Copy just the fixed-size header first; payload follows only if present. */
186 memcpy(&msg, pmsg, offsetof(ctrl_msg_t, msg));
188 DPRINTK("Rx-Req %u/%u :: %d/%d\n",
189 ctrl_if_rx_req_cons-1,
190 ctrl_if->rx_req_prod,
191 msg.type, msg.subtype);
193 if ( msg.length != 0 )
194 memcpy(msg.msg, pmsg->msg, msg.length);
196 if ( test_bit(msg.type,
197 (unsigned long *)&ctrl_if_rxmsg_blocking_context) )
198 memcpy(&ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(dp++)],
199 &msg, offsetof(ctrl_msg_t, msg) + msg.length);
200 else
201 (*ctrl_if_rxmsg_handler[msg.type])(&msg, 0);
202 }
204 if ( dp != ctrl_if_rxmsg_deferred_prod )
205 {
/* Publish the deferred messages before advancing the producer index. */
206 wmb();
207 ctrl_if_rxmsg_deferred_prod = dp;
208 schedule_work(&ctrl_if_rxmsg_deferred_work);
209 }
210 }
212 static irqreturn_t ctrl_if_interrupt(int irq, void *dev_id,
213 struct pt_regs *regs)
214 {
215 control_if_t *ctrl_if = get_ctrl_if();
217 if ( ctrl_if_tx_resp_cons != ctrl_if->tx_resp_prod )
218 tasklet_schedule(&ctrl_if_tx_tasklet);
220 if ( ctrl_if_rx_req_cons != ctrl_if->rx_req_prod )
221 tasklet_schedule(&ctrl_if_rx_tasklet);
223 return IRQ_HANDLED;
224 }
/*
 * Queue @msg on the TX ring without blocking.
 *
 * If @hnd is non-NULL it is recorded, with @id, in a free slot of
 * ctrl_if_txmsg_id_mapping[] and the slot index is stamped into msg->id so
 * the TX tasklet can route the eventual response back to @hnd. A msg->id of
 * 0xFF means "no callback".
 *
 * Returns 0 on success, or -EAGAIN if the ring is currently full.
 */
226 int
227 ctrl_if_send_message_noblock(
228 ctrl_msg_t *msg,
229 ctrl_msg_handler_t hnd,
230 unsigned long id)
231 {
232 control_if_t *ctrl_if = get_ctrl_if();
233 unsigned long flags;
234 int i;
236 spin_lock_irqsave(&ctrl_if_lock, flags);
238 if ( TX_FULL(ctrl_if) )
239 {
240 spin_unlock_irqrestore(&ctrl_if_lock, flags);
241 return -EAGAIN;
242 }
244 msg->id = 0xFF;
245 if ( hnd != NULL )
246 {
/*
 * Linear scan for a free slot. Since the ring was not full above, fewer
 * than CONTROL_RING_SIZE messages are outstanding, so a free slot must
 * exist within the CONTROL_RING_SIZE-entry table.
 */
247 for ( i = 0; ctrl_if_txmsg_id_mapping[i].fn != NULL; i++ )
248 continue;
249 ctrl_if_txmsg_id_mapping[i].fn = hnd;
250 ctrl_if_txmsg_id_mapping[i].id = id;
251 msg->id = i;
252 }
254 DPRINTK("Tx-Req %u/%u :: %d/%d\n",
255 ctrl_if->tx_req_prod,
256 ctrl_if_tx_resp_cons,
257 msg->type, msg->subtype);
259 memcpy(&ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if->tx_req_prod)],
260 msg, sizeof(*msg));
261 wmb(); /* Write the message before letting the controller peek at it. */
262 ctrl_if->tx_req_prod++;
264 spin_unlock_irqrestore(&ctrl_if_lock, flags);
266 ctrl_if_notify_controller();
268 return 0;
269 }
/*
 * As ctrl_if_send_message_noblock(), but sleeps in @wait_state until ring
 * space becomes available. Returns 0 on success, or -ERESTARTSYS if a
 * signal arrives while waiting in TASK_INTERRUPTIBLE state.
 */
271 int
272 ctrl_if_send_message_block(
273 ctrl_msg_t *msg,
274 ctrl_msg_handler_t hnd,
275 unsigned long id,
276 long wait_state)
277 {
278 DECLARE_WAITQUEUE(wait, current);
279 int rc;
281 /* Fast path. */
282 if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != -EAGAIN )
283 return rc;
285 add_wait_queue(&ctrl_if_tx_wait, &wait);
287 for ( ; ; )
288 {
/* Set task state before re-testing, so a concurrent wake_up() isn't lost. */
289 set_current_state(wait_state);
291 if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != -EAGAIN )
292 break;
294 rc = -ERESTARTSYS;
295 if ( signal_pending(current) && (wait_state == TASK_INTERRUPTIBLE) )
296 break;
298 schedule();
299 }
301 set_current_state(TASK_RUNNING);
302 remove_wait_queue(&ctrl_if_tx_wait, &wait);
304 return rc;
305 }
307 /* Allow a response-callback handler to find context of a blocked requester. */
308 struct rsp_wait {
309 ctrl_msg_t *msg; /* Buffer for the response message. */
310 struct task_struct *task; /* The task that is blocked on the response. */
311 int done; /* Indicate to 'task' that response is rcv'ed. */
312 };
314 static void __ctrl_if_get_response(ctrl_msg_t *msg, unsigned long id)
315 {
316 struct rsp_wait *wait = (struct rsp_wait *)id;
317 struct task_struct *task = wait->task;
319 memcpy(wait->msg, msg, sizeof(*msg));
320 wmb();
321 wait->done = 1;
323 wake_up_process(task);
324 }
/*
 * Send @msg and sleep until the controller's response has been copied into
 * @rmsg. @wait_state governs only the initial blocking send; the wait for
 * the response itself is always TASK_UNINTERRUPTIBLE. Returns 0 on success,
 * or the error from ctrl_if_send_message_block().
 */
326 int
327 ctrl_if_send_message_and_get_response(
328 ctrl_msg_t *msg,
329 ctrl_msg_t *rmsg,
330 long wait_state)
331 {
332 struct rsp_wait wait;
333 int rc;
335 wait.msg = rmsg;
336 wait.done = 0;
337 wait.task = current;
/* __ctrl_if_get_response() fills 'wait' and wakes us when the reply lands. */
339 if ( (rc = ctrl_if_send_message_block(msg, __ctrl_if_get_response,
340 (unsigned long)&wait,
341 wait_state)) != 0 )
342 return rc;
344 for ( ; ; )
345 {
346 /* NB. Can't easily support TASK_INTERRUPTIBLE here. */
347 set_current_state(TASK_UNINTERRUPTIBLE);
348 if ( wait.done )
349 break;
350 schedule();
351 }
353 set_current_state(TASK_RUNNING);
354 return 0;
355 }
/*
 * Register @task to be run (via the TX task queue) when space appears in
 * the transmit ring. Returns 0 if the ring already has space (task is not
 * queued); otherwise nonzero, and the TX tasklet runs @task on drain.
 */
357 int
358 ctrl_if_enqueue_space_callback(
359 struct tq_struct *task)
360 {
361 control_if_t *ctrl_if = get_ctrl_if();
363 /* Fast path. */
364 if ( !TX_FULL(ctrl_if) )
365 return 0;
367 (void)queue_task(task, &ctrl_if_tx_tq);
369 /*
370 * We may race execution of the task queue, so return re-checked status. If
371 * the task is not executed despite the ring being non-full then we will
372 * certainly return 'not full'.
373 */
374 smp_mb();
375 return TX_FULL(ctrl_if);
376 }
/*
 * Place @msg on the RX response ring and notify the domain controller.
 * Callable from any context: protected by ctrl_if_lock with IRQs disabled.
 */
378 void
379 ctrl_if_send_response(
380 ctrl_msg_t *msg)
381 {
382 control_if_t *ctrl_if = get_ctrl_if();
383 unsigned long flags;
384 ctrl_msg_t *dmsg;
386 /*
387 * NB. The response may be the original request message, modified in-place.
388 * In this situation we may have src==dst, so no copying is required.
389 */
390 spin_lock_irqsave(&ctrl_if_lock, flags);
392 DPRINTK("Tx-Rsp %u :: %d/%d\n",
393 ctrl_if->rx_resp_prod,
394 msg->type, msg->subtype);
396 dmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if->rx_resp_prod)];
397 if ( dmsg != msg )
398 memcpy(dmsg, msg, sizeof(*msg));
400 wmb(); /* Write the message before letting the controller peek at it. */
401 ctrl_if->rx_resp_prod++;
403 spin_unlock_irqrestore(&ctrl_if_lock, flags);
405 ctrl_if_notify_controller();
406 }
/*
 * Install @hnd as the receiver for control messages of primary type @type.
 * With CALLBACK_IN_BLOCKING_CONTEXT in @flags the handler is deferred to
 * process context, which requires schedule_task() to be usable -- hence
 * the BUG() on early-boot registration.
 *
 * Returns nonzero on success, 0 if another receiver already owns @type.
 */
408 int
409 ctrl_if_register_receiver(
410 u8 type,
411 ctrl_msg_handler_t hnd,
412 unsigned int flags)
413 {
414 unsigned long _flags;
415 int inuse;
417 spin_lock_irqsave(&ctrl_if_lock, _flags);
/* Only the default handler marks a type as free for registration. */
419 inuse = (ctrl_if_rxmsg_handler[type] != ctrl_if_rxmsg_default_handler);
421 if ( inuse )
422 {
423 printk(KERN_INFO "Receiver %p already established for control "
424 "messages of type %d.\n", ctrl_if_rxmsg_handler[type], type);
425 }
426 else
427 {
428 ctrl_if_rxmsg_handler[type] = hnd;
429 clear_bit(type, (unsigned long *)&ctrl_if_rxmsg_blocking_context);
430 if ( flags == CALLBACK_IN_BLOCKING_CONTEXT )
431 {
432 set_bit(type, (unsigned long *)&ctrl_if_rxmsg_blocking_context);
433 if ( !safe_to_schedule_task )
434 BUG();
435 }
436 }
438 spin_unlock_irqrestore(&ctrl_if_lock, _flags);
440 return !inuse;
441 }
443 void
444 ctrl_if_unregister_receiver(
445 u8 type,
446 ctrl_msg_handler_t hnd)
447 {
448 unsigned long flags;
450 spin_lock_irqsave(&ctrl_if_lock, flags);
452 if ( ctrl_if_rxmsg_handler[type] != hnd )
453 printk(KERN_INFO "Receiver %p is not registered for control "
454 "messages of type %d.\n", hnd, type);
455 else
456 ctrl_if_rxmsg_handler[type] = ctrl_if_rxmsg_default_handler;
458 spin_unlock_irqrestore(&ctrl_if_lock, flags);
460 /* Ensure that @hnd will not be executed after this function returns. */
461 tasklet_unlock_wait(&ctrl_if_rx_tasklet);
462 }
/*
 * Tear down the control-interface binding ahead of a suspend: remove our
 * IRQ handler first, then unbind the underlying event channel.
 */
464 void ctrl_if_suspend(void)
465 {
466 teardown_irq(ctrl_if_irq, &ctrl_if_irq_action);
467 unbind_evtchn_from_irq(ctrl_if_evtchn);
468 }
/*
 * (Re)establish the link to the domain controller: the initial domain first
 * creates its own control event channel; then private ring indexes are
 * resynchronised with the shared ones and the IRQ handler is (re)bound.
 * Also called once at boot from ctrl_if_init().
 */
470 void ctrl_if_resume(void)
471 {
472 control_if_t *ctrl_if = get_ctrl_if();
474 if ( xen_start_info.flags & SIF_INITDOMAIN )
475 {
476 /*
477 * The initial domain must create its own domain-controller link.
478 * The controller is probably not running at this point, but will
479 * pick up its end of the event channel from
 * 'initdom_ctrlif_domcontroller_port' (exposed to user space via an
 * ioctl -- see the comment at the top of this file).
480 */
481 evtchn_op_t op;
482 op.cmd = EVTCHNOP_bind_interdomain;
/* Loopback channel: both ends belong to this (the initial) domain. */
483 op.u.bind_interdomain.dom1 = DOMID_SELF;
484 op.u.bind_interdomain.dom2 = DOMID_SELF;
485 op.u.bind_interdomain.port1 = 0;
486 op.u.bind_interdomain.port2 = 0;
487 if ( HYPERVISOR_event_channel_op(&op) != 0 )
488 BUG();
489 xen_start_info.domain_controller_evtchn = op.u.bind_interdomain.port1;
490 initdom_ctrlif_domcontroller_port = op.u.bind_interdomain.port2;
491 }
493 /* Sync up with shared indexes. */
494 ctrl_if_tx_resp_cons = ctrl_if->tx_resp_prod;
/* NOTE(review): rx_req_cons is synced from rx_resp_prod (not rx_req_prod);
 * presumably intentional (requests already consumed have been responded
 * to), but worth confirming against the controller-side protocol. */
495 ctrl_if_rx_req_cons = ctrl_if->rx_resp_prod;
497 ctrl_if_evtchn = xen_start_info.domain_controller_evtchn;
498 ctrl_if_irq = bind_evtchn_to_irq(ctrl_if_evtchn);
500 memset(&ctrl_if_irq_action, 0, sizeof(ctrl_if_irq_action));
501 ctrl_if_irq_action.handler = ctrl_if_interrupt;
502 ctrl_if_irq_action.name = "ctrl-if";
503 (void)setup_irq(ctrl_if_irq, &ctrl_if_irq_action);
504 }
506 void __init ctrl_if_init(void)
507 {
508 int i;
510 for ( i = 0; i < 256; i++ )
511 ctrl_if_rxmsg_handler[i] = ctrl_if_rxmsg_default_handler;
513 spin_lock_init(&ctrl_if_lock);
515 ctrl_if_resume();
516 }
519 /* This is called after it is safe to call schedule_task(). */
/* Enables CALLBACK_IN_BLOCKING_CONTEXT registrations -- see the
 * safe_to_schedule_task check in ctrl_if_register_receiver(). */
520 static int __init ctrl_if_late_setup(void)
521 {
522 safe_to_schedule_task = 1;
523 return 0;
524 }
525 __initcall(ctrl_if_late_setup);
528 /*
529 * !! The following are DANGEROUS FUNCTIONS !!
530 * Use with care [for example, see xencons_force_flush()].
531 */
533 int ctrl_if_transmitter_empty(void)
534 {
535 return (get_ctrl_if()->tx_req_prod == ctrl_if_tx_resp_cons);
536 }
538 void ctrl_if_discard_responses(void)
539 {
540 ctrl_if_tx_resp_cons = get_ctrl_if()->tx_resp_prod;
541 }
/* Public control-interface API, made available to loadable kernel modules. */
543 EXPORT_SYMBOL(ctrl_if_send_message_noblock);
544 EXPORT_SYMBOL(ctrl_if_send_message_block);
545 EXPORT_SYMBOL(ctrl_if_send_message_and_get_response);
546 EXPORT_SYMBOL(ctrl_if_enqueue_space_callback);
547 EXPORT_SYMBOL(ctrl_if_send_response);
548 EXPORT_SYMBOL(ctrl_if_register_receiver);
549 EXPORT_SYMBOL(ctrl_if_unregister_receiver);