debuggers.hg

view netbsd-2.0-xen-sparse/sys/arch/xen/xen/ctrl_if.c @ 2630:654b2df93458

bitkeeper revision 1.1159.99.2 (41614eedRuLOjlI5-39Ib0z3OZYFgA)

g/c kthread code.
author cl349@freefall.cl.cam.ac.uk
date Mon Oct 04 13:23:57 2004 +0000 (2004-10-04)
parents aa75f00efa54
children a4fbb98f00cb
line source
/******************************************************************************
 * ctrl_if.c
 *
 * Management functions for special interface to the domain controller.
 *
 * Copyright (c) 2004, K A Fraser
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <machine/xen.h>
#include <machine/hypervisor.h>
#include <machine/ctrl_if.h>
#include <machine/evtchn.h>

void printk(char *, ...);
#if 0
#define DPRINTK(_f, _a...) printk("(file=%s, line=%d) " _f, \
                                  __FILE__ , __LINE__ , ## _a )
#else
#define DPRINTK(_f, _a...) ((void)0)
#endif
/*
 * Only used by initial domain which must create its own control-interface
 * event channel. This value is picked up by the user-space domain controller
 * via an ioctl.
 */
int initdom_ctrlif_domcontroller_port = -1;

/* static */ int ctrl_if_evtchn = -1;
static int ctrl_if_irq;
static struct simplelock ctrl_if_lock;

static CONTROL_RING_IDX ctrl_if_tx_resp_cons;
static CONTROL_RING_IDX ctrl_if_rx_req_cons;

/* Incoming message requests. */
/* Primary message type -> message handler. */
static ctrl_msg_handler_t ctrl_if_rxmsg_handler[256];
/* Primary message type -> callback in process context? */
static unsigned long ctrl_if_rxmsg_blocking_context[256/sizeof(unsigned long)];
#if 0
/* Is it late enough during bootstrap to use schedule_task()? */
static int safe_to_schedule_task;
#endif
/* Queue up messages to be handled in process context. */
static ctrl_msg_t ctrl_if_rxmsg_deferred[CONTROL_RING_SIZE];
static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_prod;
static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_cons;

/* Incoming message responses: message identifier -> message handler/id. */
static struct {
    ctrl_msg_handler_t fn;
    unsigned long      id;
} ctrl_if_txmsg_id_mapping[CONTROL_RING_SIZE];

/* For received messages that must be deferred to process context. */
static void __ctrl_if_rxmsg_deferred(void *unused);

#ifdef notyet
/* Deferred callbacks for people waiting for space in the transmit ring. */
static int DECLARE_TASK_QUEUE(ctrl_if_tx_tq);
#endif

static void *ctrl_if_softintr = NULL;

static int ctrl_if_tx_wait;
static void __ctrl_if_tx_tasklet(unsigned long data);

static void __ctrl_if_rx_tasklet(unsigned long data);

#define get_ctrl_if() ((control_if_t *)((char *)HYPERVISOR_shared_info + 2048))
#define TX_FULL(_c)   \
    (((_c)->tx_req_prod - ctrl_if_tx_resp_cons) == CONTROL_RING_SIZE)
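/*
 * Illustrative note (not part of the original file): the ring indices are
 * free-running counters, masked down to a slot only on access, so the
 * producer/consumer difference in TX_FULL() stays correct across unsigned
 * wrap-around. Assumes CONTROL_RING_SIZE is a power of two and
 * MASK_CONTROL_IDX() masks with (CONTROL_RING_SIZE - 1), as in Xen 2.0.
 */
#if 0 /* example only */
static int example_ring_full(CONTROL_RING_IDX prod, CONTROL_RING_IDX cons)
{
    /*
     * e.g. with CONTROL_RING_SIZE == 8: prod == 260, cons == 252 gives
     * 260 - 252 == 8 (full), and the next slot is MASK_CONTROL_IDX(252) == 4.
     */
    return (prod - cons) == CONTROL_RING_SIZE; /* mirrors TX_FULL() */
}
#endif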
static void ctrl_if_notify_controller(void)
{
    hypervisor_notify_via_evtchn(ctrl_if_evtchn);
}

static void ctrl_if_rxmsg_default_handler(ctrl_msg_t *msg, unsigned long id)
{
    msg->length = 0;
    ctrl_if_send_response(msg);
}

static void __ctrl_if_tx_tasklet(unsigned long data)
{
    control_if_t *ctrl_if = get_ctrl_if();
    ctrl_msg_t *msg;
    int was_full = TX_FULL(ctrl_if);
    CONTROL_RING_IDX rp;

    rp = ctrl_if->tx_resp_prod;
    __insn_barrier(); /* Ensure we see all requests up to 'rp'. */

    while ( ctrl_if_tx_resp_cons != rp )
    {
        msg = &ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if_tx_resp_cons)];

        DPRINTK("Rx-Rsp %u/%u :: %d/%d\n",
                ctrl_if_tx_resp_cons,
                ctrl_if->tx_resp_prod,
                msg->type, msg->subtype);

        /* Execute the callback handler, if one was specified. */
        if ( msg->id != 0xFF )
        {
            (*ctrl_if_txmsg_id_mapping[msg->id].fn)(
                msg, ctrl_if_txmsg_id_mapping[msg->id].id);
            __insn_barrier(); /* Execute, /then/ free. */
            ctrl_if_txmsg_id_mapping[msg->id].fn = NULL;
        }

        /*
         * Step over the message in the ring /after/ finishing reading it. As
         * soon as the index is updated then the message may get blown away.
         */
        __insn_barrier();
        ctrl_if_tx_resp_cons++;
    }

    if ( was_full && !TX_FULL(ctrl_if) )
    {
        wakeup(&ctrl_if_tx_wait);
#ifdef notyet
        run_task_queue(&ctrl_if_tx_tq);
#endif
    }
}

static void __ctrl_if_rxmsg_deferred(void *unused)
{
    ctrl_msg_t *msg;
    CONTROL_RING_IDX dp;

    dp = ctrl_if_rxmsg_deferred_prod;
    __insn_barrier(); /* Ensure we see all deferred requests up to 'dp'. */

    while ( ctrl_if_rxmsg_deferred_cons != dp )
    {
        msg = &ctrl_if_rxmsg_deferred[
            MASK_CONTROL_IDX(ctrl_if_rxmsg_deferred_cons)];
        (*ctrl_if_rxmsg_handler[msg->type])(msg, 0);
        ctrl_if_rxmsg_deferred_cons++;
    }
}
static void __ctrl_if_rx_tasklet(unsigned long data)
{
    control_if_t *ctrl_if = get_ctrl_if();
    ctrl_msg_t msg, *pmsg;
    CONTROL_RING_IDX rp, dp;

    dp = ctrl_if_rxmsg_deferred_prod;
    rp = ctrl_if->rx_req_prod;
    __insn_barrier(); /* Ensure we see all requests up to 'rp'. */

    while ( ctrl_if_rx_req_cons != rp )
    {
        pmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if_rx_req_cons)];
        memcpy(&msg, pmsg, offsetof(ctrl_msg_t, msg));

        /* The consumer index is not advanced until the end of the loop
         * body, so print it as-is rather than 'cons - 1'. */
        DPRINTK("Rx-Req %u/%u :: %d/%d\n",
                ctrl_if_rx_req_cons,
                ctrl_if->rx_req_prod,
                msg.type, msg.subtype);

        if ( msg.length != 0 )
            memcpy(msg.msg, pmsg->msg, msg.length);

        if ( x86_atomic_test_bit(
                 (unsigned long *)&ctrl_if_rxmsg_blocking_context,
                 msg.type) )
            memcpy(&ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(dp++)],
                   &msg, offsetof(ctrl_msg_t, msg) + msg.length);
        else
            (*ctrl_if_rxmsg_handler[msg.type])(&msg, 0);

        ctrl_if_rx_req_cons++;
    }

    if ( dp != ctrl_if_rxmsg_deferred_prod )
    {
        __insn_barrier();
        ctrl_if_rxmsg_deferred_prod = dp;
        if (ctrl_if_softintr)
            softintr_schedule(ctrl_if_softintr);
    }
}
static int ctrl_if_interrupt(void *arg)
{
    control_if_t *ctrl_if = get_ctrl_if();

    if ( ctrl_if_tx_resp_cons != ctrl_if->tx_resp_prod )
        __ctrl_if_tx_tasklet(0);

    if ( ctrl_if_rx_req_cons != ctrl_if->rx_req_prod )
        __ctrl_if_rx_tasklet(0);

    return 0;
}

int
ctrl_if_send_message_noblock(
    ctrl_msg_t *msg,
    ctrl_msg_handler_t hnd,
    unsigned long id)
{
    control_if_t *ctrl_if = get_ctrl_if();
    unsigned long flags;
    int i;
    int s;

    save_and_cli(flags);
    simple_lock(&ctrl_if_lock);

    if ( TX_FULL(ctrl_if) )
    {
        simple_unlock(&ctrl_if_lock);
        restore_flags(flags);
        s = splhigh();
        if ( ctrl_if_tx_resp_cons != ctrl_if->tx_resp_prod )
            __ctrl_if_tx_tasklet(0);
        splx(s);
        return EAGAIN;
    }

    msg->id = 0xFF;
    if ( hnd != NULL )
    {
        for ( i = 0; ctrl_if_txmsg_id_mapping[i].fn != NULL; i++ )
            continue;
        ctrl_if_txmsg_id_mapping[i].fn = hnd;
        ctrl_if_txmsg_id_mapping[i].id = id;
        msg->id = i;
    }

    DPRINTK("Tx-Req %u/%u :: %d/%d\n",
            ctrl_if->tx_req_prod,
            ctrl_if_tx_resp_cons,
            msg->type, msg->subtype);

    memcpy(&ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if->tx_req_prod)],
           msg, sizeof(*msg));
    __insn_barrier(); /* Write the message before letting the controller peek at it. */
    ctrl_if->tx_req_prod++;

    simple_unlock(&ctrl_if_lock);
    restore_flags(flags);

    ctrl_if_notify_controller();

    return 0;
}
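/*
 * Illustrative usage sketch (not part of the original file): a
 * fire-and-forget send that tolerates a full ring. EXAMPLE_MSG_TYPE and
 * EXAMPLE_MSG_SUBTYPE are hypothetical stand-ins for the CMSG_* constants
 * from machine/ctrl_if.h.
 */
#if 0 /* example only */
static int example_send_noblock(void)
{
    ctrl_msg_t m;
    int rc;

    memset(&m, 0, sizeof(m));
    m.type    = EXAMPLE_MSG_TYPE;    /* hypothetical message type */
    m.subtype = EXAMPLE_MSG_SUBTYPE; /* hypothetical subtype */
    m.length  = 0;                   /* no payload bytes */

    /* NULL handler: no response-callback slot is allocated. */
    rc = ctrl_if_send_message_noblock(&m, NULL, 0);
    if (rc == EAGAIN) {
        /* TX ring full: retry later, or use ctrl_if_send_message_block(). */
    }
    return rc;
}
#endif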
int
ctrl_if_send_message_block(
    ctrl_msg_t *msg,
    ctrl_msg_handler_t hnd,
    unsigned long id,
    long wait_state)
{
    int rc;

    while ((rc = ctrl_if_send_message_noblock(msg, hnd, id)) == EAGAIN) {
        /* XXXcl possible race -> add a lock and ltsleep */
#if 1
        HYPERVISOR_yield();
#else
        rc = tsleep((caddr_t) &ctrl_if_tx_wait, PUSER | PCATCH,
                    "ctrl_if", 0);
        if (rc)
            break;
#endif
    }

    return rc;
}
/* Allow a response-callback handler to find context of a blocked requester. */
struct rsp_wait {
    ctrl_msg_t         *msg;  /* Buffer for the response message. */
    struct task_struct *task; /* The task that is blocked on the response. */
    int                 done; /* Indicate to 'task' that response is rcv'ed. */
};

static void __ctrl_if_get_response(ctrl_msg_t *msg, unsigned long id)
{
    struct rsp_wait *wait = (struct rsp_wait *)id;

    memcpy(wait->msg, msg, sizeof(*msg));
    __insn_barrier();
    wait->done = 1;

    wakeup(wait);
}
int
ctrl_if_send_message_and_get_response(
    ctrl_msg_t *msg,
    ctrl_msg_t *rmsg,
    long wait_state)
{
    struct rsp_wait wait;
    int rc;

    wait.msg  = rmsg;
    wait.done = 0;

    if ( (rc = ctrl_if_send_message_block(msg, __ctrl_if_get_response,
                                          (unsigned long)&wait,
                                          wait_state)) != 0 )
        return rc;

    for ( ; ; )
    {
        if ( wait.done )
            break;
        tsleep((caddr_t)&wait, PUSER | PCATCH, "ctrl_if", 0);
    }

    return 0;
}
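/*
 * Illustrative usage sketch (not part of the original file): a synchronous
 * round trip. The request is queued, __ctrl_if_get_response() copies the
 * controller's answer into 'rsp', and the caller sleeps until wait.done is
 * set. EXAMPLE_MSG_TYPE is a hypothetical stand-in for a CMSG_* constant;
 * note that wait_state is not consulted in this port.
 */
#if 0 /* example only */
static int example_query(void)
{
    ctrl_msg_t req, rsp;
    int rc;

    memset(&req, 0, sizeof(req));
    req.type   = EXAMPLE_MSG_TYPE; /* hypothetical message type */
    req.length = 0;

    rc = ctrl_if_send_message_and_get_response(&req, &rsp, 0);
    if (rc != 0)
        return rc;                 /* propagate any error from the send */

    /* rsp.msg[0 .. rsp.length-1] now holds the controller's payload. */
    return 0;
}
#endif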
#ifdef notyet
int
ctrl_if_enqueue_space_callback(
    struct tq_struct *task)
{
    control_if_t *ctrl_if = get_ctrl_if();

    /* Fast path. */
    if ( !TX_FULL(ctrl_if) )
        return 0;

    (void)queue_task(task, &ctrl_if_tx_tq);

    /*
     * We may race execution of the task queue, so return re-checked status. If
     * the task is not executed despite the ring being non-full then we will
     * certainly return 'not full'.
     */
    __insn_barrier();
    return TX_FULL(ctrl_if);
}
#endif
void
ctrl_if_send_response(
    ctrl_msg_t *msg)
{
    control_if_t *ctrl_if = get_ctrl_if();
    unsigned long flags;
    ctrl_msg_t *dmsg;

    /*
     * NB. The response may be the original request message, modified in-place.
     * In this situation we may have src==dst, so no copying is required.
     */
    save_and_cli(flags);
    simple_lock(&ctrl_if_lock);

    DPRINTK("Tx-Rsp %u :: %d/%d\n",
            ctrl_if->rx_resp_prod,
            msg->type, msg->subtype);

    dmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if->rx_resp_prod)];
    if ( dmsg != msg )
        memcpy(dmsg, msg, sizeof(*msg));

    __insn_barrier(); /* Write the message before letting the controller peek at it. */
    ctrl_if->rx_resp_prod++;

    simple_unlock(&ctrl_if_lock);
    restore_flags(flags);

    ctrl_if_notify_controller();
}
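/*
 * Illustrative sketch (not part of the original file) of a receiver that
 * relies on the in-place note above: it rewrites the message it was handed
 * and passes the same pointer back, and ctrl_if_send_response() copies only
 * when the destination ring slot is a different buffer. example_rx_handler
 * is hypothetical.
 */
#if 0 /* example only */
static void example_rx_handler(ctrl_msg_t *msg, unsigned long id)
{
    /* Consume msg->msg[0 .. msg->length-1], then overwrite it in place. */
    msg->msg[0] = 0;             /* e.g. a one-byte status code */
    msg->length = 1;
    ctrl_if_send_response(msg);  /* src may equal dst: copy is skipped */
}
#endif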
int
ctrl_if_register_receiver(
    uint8_t type,
    ctrl_msg_handler_t hnd,
    unsigned int flags)
{
    unsigned long _flags;
    int inuse;

    save_and_cli(_flags);
    simple_lock(&ctrl_if_lock);

    inuse = (ctrl_if_rxmsg_handler[type] != ctrl_if_rxmsg_default_handler);

    if ( inuse )
    {
        printf("Receiver %p already established for control "
               "messages of type %d.\n", ctrl_if_rxmsg_handler[type], type);
    }
    else
    {
        ctrl_if_rxmsg_handler[type] = hnd;
        x86_atomic_clear_bit((unsigned long *)&ctrl_if_rxmsg_blocking_context,
            type);
        if ( flags == CALLBACK_IN_BLOCKING_CONTEXT )
        {
            x86_atomic_set_bit((unsigned long *)&ctrl_if_rxmsg_blocking_context,
                type);
#if 0
            if ( !safe_to_schedule_task )
                BUG();
#endif
        }
    }

    simple_unlock(&ctrl_if_lock);
    restore_flags(_flags);

    return !inuse;
}
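/*
 * Illustrative usage sketch (not part of the original file): claiming a
 * message type for the hypothetical handler above. Passing
 * CALLBACK_IN_BLOCKING_CONTEXT makes the RX tasklet queue matching messages
 * on ctrl_if_rxmsg_deferred and deliver them from the soft interrupt rather
 * than hard-interrupt context. The return value is 1 on success, 0 if the
 * type already has a receiver.
 */
#if 0 /* example only */
static void example_attach(void)
{
    if ( !ctrl_if_register_receiver(EXAMPLE_MSG_TYPE, example_rx_handler,
                                    CALLBACK_IN_BLOCKING_CONTEXT) )
        printf("example: control message type already has a receiver\n");
}
#endif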
void
ctrl_if_unregister_receiver(
    uint8_t type,
    ctrl_msg_handler_t hnd)
{
    unsigned long flags;

    save_and_cli(flags);
    simple_lock(&ctrl_if_lock);

    if ( ctrl_if_rxmsg_handler[type] != hnd )
        printf("Receiver %p is not registered for control "
               "messages of type %d.\n", hnd, type);
    else
        ctrl_if_rxmsg_handler[type] = ctrl_if_rxmsg_default_handler;

    simple_unlock(&ctrl_if_lock);
    restore_flags(flags);

    /* Ensure that @hnd will not be executed after this function returns. */
    if (ctrl_if_softintr)
        softintr_schedule(ctrl_if_softintr);
}

static void
ctrl_if_softintr_handler(void *arg)
{

    if ( ctrl_if_rxmsg_deferred_cons != ctrl_if_rxmsg_deferred_prod )
        __ctrl_if_rxmsg_deferred(NULL);
}
#ifdef notyet
void ctrl_if_suspend(void)
{
    free_irq(ctrl_if_irq, NULL);
    unbind_evtchn_from_irq(ctrl_if_evtchn);
}
#endif

void ctrl_if_resume(void)
{
    control_if_t *ctrl_if = get_ctrl_if();

    if ( xen_start_info.flags & SIF_INITDOMAIN )
    {
        /*
         * The initial domain must create its own domain-controller link.
         * The controller is probably not running at this point, but will
         * pick up its end of the event channel from
         * 'initdom_ctrlif_domcontroller_port' via an ioctl.
         */
        evtchn_op_t op;
        op.cmd = EVTCHNOP_bind_interdomain;
        op.u.bind_interdomain.dom1 = DOMID_SELF;
        op.u.bind_interdomain.dom2 = DOMID_SELF;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("EVTCHNOP_bind_interdomain");
        xen_start_info.domain_controller_evtchn = op.u.bind_interdomain.port1;
        initdom_ctrlif_domcontroller_port = op.u.bind_interdomain.port2;
    }

    /* Sync up with shared indexes. */
    ctrl_if_tx_resp_cons = ctrl_if->tx_resp_prod;
    ctrl_if_rx_req_cons  = ctrl_if->rx_resp_prod;

    ctrl_if_evtchn = xen_start_info.domain_controller_evtchn;
    ctrl_if_irq = bind_evtchn_to_irq(ctrl_if_evtchn);

    event_set_handler(ctrl_if_irq, &ctrl_if_interrupt, NULL, IPL_HIGH);
    hypervisor_enable_irq(ctrl_if_irq);
}
void ctrl_if_early_init(void)
{

    simple_lock_init(&ctrl_if_lock);

    ctrl_if_evtchn = xen_start_info.domain_controller_evtchn;
}

void ctrl_if_init(void)
{
    int i;

    for ( i = 0; i < 256; i++ )
        ctrl_if_rxmsg_handler[i] = ctrl_if_rxmsg_default_handler;

    if (ctrl_if_evtchn == -1)
        ctrl_if_early_init();

    ctrl_if_softintr = softintr_establish(IPL_SOFTNET,
        ctrl_if_softintr_handler, NULL);

    ctrl_if_resume();
}


#if 0
/* This is called after it is safe to call schedule_task(). */
static int __init ctrl_if_late_setup(void)
{
    safe_to_schedule_task = 1;
    return 0;
}
__initcall(ctrl_if_late_setup);
#endif
/*
 * !! The following are DANGEROUS FUNCTIONS !!
 * Use with care [for example, see xencons_force_flush()].
 */

int ctrl_if_transmitter_empty(void)
{
    return (get_ctrl_if()->tx_req_prod == ctrl_if_tx_resp_cons);
}

void ctrl_if_discard_responses(void)
{
    ctrl_if_tx_resp_cons = get_ctrl_if()->tx_resp_prod;
}
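/*
 * Illustrative sketch (not part of the original file) of the pattern
 * xencons_force_flush() is cited for above: spin until the transmit ring
 * drains, throwing away responses, for situations where normal event
 * delivery cannot be trusted (e.g. emitting console output while crashing).
 * example_force_drain is hypothetical.
 */
#if 0 /* example only */
static void example_force_drain(void)
{
    while ( !ctrl_if_transmitter_empty() )
    {
        ctrl_if_discard_responses(); /* pretend all responses were handled */
        HYPERVISOR_yield();          /* give the controller time to consume */
    }
}
#endif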