xen/common/sched_arinc653.c @ 22855:1d1eec7e1fb4

/******************************************************************************
 * sched_arinc653.c
 *
 * An ARINC653-compatible scheduling algorithm for use in Xen.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Copyright (c) 2010, DornerWorks, Ltd. <DornerWorks.com>
 */

#include <xen/config.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/sched-if.h>
#include <xen/timer.h>
#include <xen/softirq.h>
#include <xen/time.h>
#include <xen/errno.h>
#include <xen/list.h>
#include <xen/guest_access.h>
#include <public/sysctl.h>

/**************************************************************************
 * Private Macros                                                         *
 **************************************************************************/

/**
 * Retrieve the idle VCPU for a given physical CPU
 */
#define IDLETASK(cpu) (idle_vcpu[cpu])

/**
 * Return a pointer to the ARINC 653-specific scheduler data information
 * associated with the given VCPU (vc)
 */
#define AVCPU(vc) ((arinc653_vcpu_t *)(vc)->sched_priv)

/**
 * Return the global scheduler private data given the scheduler ops pointer
 */
#define SCHED_PRIV(s) ((a653sched_priv_t *)((s)->sched_data))

/**************************************************************************
 * Private Type Definitions                                               *
 **************************************************************************/

/**
 * The arinc653_vcpu_t structure holds ARINC 653-scheduler-specific
 * information for all non-idle VCPUs
 */
typedef struct arinc653_vcpu_s
{
    /* vc points to Xen's struct vcpu so we can get to it from an
     * arinc653_vcpu_t pointer. */
    struct vcpu *    vc;
    /* awake holds whether the VCPU has been woken with vcpu_wake() */
    bool_t           awake;
    /* list holds the linked list information for the list this VCPU
     * is stored in */
    struct list_head list;
} arinc653_vcpu_t;

/**
 * The sched_entry_t structure holds a single entry of the
 * ARINC 653 schedule.
 */
typedef struct sched_entry_s
{
    /* dom_handle holds the handle ("UUID") for the domain that this
     * schedule entry refers to. */
    xen_domain_handle_t dom_handle;
    /* vcpu_id holds the VCPU number for the VCPU that this schedule
     * entry refers to. */
    int                 vcpu_id;
    /* runtime holds the number of nanoseconds that the VCPU for this
     * schedule entry should be allowed to run per major frame. */
    s_time_t            runtime;
    /* vc holds a pointer to the Xen VCPU structure */
    struct vcpu *       vc;
} sched_entry_t;

/**
 * This structure defines data that is global to an instance of the scheduler
 */
typedef struct a653sched_priv_s
{
    /**
     * This array holds the active ARINC 653 schedule.
     *
     * When the system tries to start a new VCPU, this schedule is scanned
     * to look for a matching (handle, VCPU #) pair. If both the handle (UUID)
     * and VCPU number match, then the VCPU is allowed to run. Its run time
     * (per major frame) is given in the third entry of the schedule.
     */
    sched_entry_t schedule[ARINC653_MAX_DOMAINS_PER_SCHEDULE];

    /**
     * This variable holds the number of entries that are valid in
     * the arinc653_schedule table.
     *
     * This is not necessarily the same as the number of domains in the
     * schedule. A domain could be listed multiple times within the schedule,
     * or a domain with multiple VCPUs could have a different
     * schedule entry for each VCPU.
     */
    int num_schedule_entries;

    /**
     * the major frame time for the ARINC 653 schedule.
     */
    s_time_t major_frame;

    /**
     * the time that the next major frame starts
     */
    s_time_t next_major_frame;

    /**
     * pointers to all Xen VCPU structures for iterating through
     */
    struct list_head vcpu_list;
} a653sched_priv_t;
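
/*
 * Illustrative sketch (not part of the original file): one possible shape
 * of this private data after a schedule has been installed.  Assume a
 * 10 ms major frame split between dom0's VCPU0 (6 ms) and one guest VCPU
 * (4 ms); the guest's dom_handle is left unset here purely for brevity.
 */
#if 0
static const a653sched_priv_t example_priv = {
    .schedule = {
        /* Entry 0: dom0 (all-zero handle), VCPU 0, 6 ms per major frame. */
        { .vcpu_id = 0, .runtime = MILLISECS(6) },
        /* Entry 1: a guest VCPU; its UUID would populate dom_handle. */
        { .vcpu_id = 0, .runtime = MILLISECS(4) },
    },
    .num_schedule_entries = 2,
    .major_frame = MILLISECS(10),    /* 6 ms + 4 ms fits within 10 ms */
};
#endif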
/**************************************************************************
 * Helper functions                                                       *
 **************************************************************************/

/**
 * This function compares two domain handles.
 *
 * @param h1        Pointer to handle 1
 * @param h2        Pointer to handle 2
 *
 * @return          <ul>
 *                  <li> <0:  handle 1 is less than handle 2
 *                  <li>  0:  handle 1 is equal to handle 2
 *                  <li> >0:  handle 1 is greater than handle 2
 *                  </ul>
 */
static int dom_handle_cmp(const xen_domain_handle_t h1,
                          const xen_domain_handle_t h2)
{
    return memcmp(h1, h2, sizeof(xen_domain_handle_t));
}

/**
 * This function searches the vcpu list to find a VCPU that matches
 * the domain handle and VCPU ID specified.
 *
 * @param ops       Pointer to this instance of the scheduler structure
 * @param handle    Pointer to the domain handle to match
 * @param vcpu_id   VCPU ID
 *
 * @return          <ul>
 *                  <li> Pointer to the matching VCPU if one is found
 *                  <li> NULL otherwise
 *                  </ul>
 */
static struct vcpu *find_vcpu(
    const struct scheduler *ops,
    xen_domain_handle_t handle,
    int vcpu_id)
{
    arinc653_vcpu_t *avcpu;

    /* loop through the vcpu_list looking for the specified VCPU */
    list_for_each_entry ( avcpu, &SCHED_PRIV(ops)->vcpu_list, list )
        if ( (dom_handle_cmp(avcpu->vc->domain->handle, handle) == 0)
             && (vcpu_id == avcpu->vc->vcpu_id) )
            return avcpu->vc;

    return NULL;
}

/**
 * This function updates the pointer to the Xen VCPU structure for each entry
 * in the ARINC 653 schedule.
 *
 * @param ops       Pointer to this instance of the scheduler structure
 * @return          <None>
 */
static void update_schedule_vcpus(const struct scheduler *ops)
{
    unsigned int i, n_entries = SCHED_PRIV(ops)->num_schedule_entries;

    for ( i = 0; i < n_entries; i++ )
        SCHED_PRIV(ops)->schedule[i].vc =
            find_vcpu(ops,
                      SCHED_PRIV(ops)->schedule[i].dom_handle,
                      SCHED_PRIV(ops)->schedule[i].vcpu_id);
}

/**
 * This function is called by the adjust_global scheduler hook to put
 * in place a new ARINC653 schedule.
 *
 * @param ops       Pointer to this instance of the scheduler structure
 *
 * @return          <ul>
 *                  <li> 0 = success
 *                  <li> !0 = error
 *                  </ul>
 */
static int
arinc653_sched_set(
    const struct scheduler *ops,
    struct xen_sysctl_arinc653_schedule *schedule)
{
    a653sched_priv_t *sched_priv = SCHED_PRIV(ops);
    s_time_t total_runtime = 0;
    bool_t found_dom0 = 0;
    const static xen_domain_handle_t dom0_handle = {0};
    unsigned int i;

    /* Check for valid major frame and number of schedule entries. */
    if ( (schedule->major_frame <= 0)
         || (schedule->num_sched_entries < 1)
         || (schedule->num_sched_entries > ARINC653_MAX_DOMAINS_PER_SCHEDULE) )
        goto fail;

    for ( i = 0; i < schedule->num_sched_entries; i++ )
    {
        if ( dom_handle_cmp(schedule->sched_entries[i].dom_handle,
                            dom0_handle) == 0 )
            found_dom0 = 1;

        /* Check for a valid VCPU ID and run time. */
        if ( (schedule->sched_entries[i].vcpu_id >= MAX_VIRT_CPUS)
             || (schedule->sched_entries[i].runtime <= 0) )
            goto fail;

        /* Add this entry's run time to total run time. */
        total_runtime += schedule->sched_entries[i].runtime;
    }

    /* Error if the schedule doesn't contain a slot for domain 0. */
    if ( !found_dom0 )
        goto fail;

    /*
     * Error if the major frame is not large enough to run all entries as
     * indicated by comparing the total run time to the major frame length.
     */
    if ( total_runtime > schedule->major_frame )
        goto fail;

    /* Copy the new schedule into place. */
    sched_priv->num_schedule_entries = schedule->num_sched_entries;
    sched_priv->major_frame = schedule->major_frame;
    for ( i = 0; i < schedule->num_sched_entries; i++ )
    {
        memcpy(sched_priv->schedule[i].dom_handle,
               schedule->sched_entries[i].dom_handle,
               sizeof(sched_priv->schedule[i].dom_handle));
        sched_priv->schedule[i].vcpu_id =
            schedule->sched_entries[i].vcpu_id;
        sched_priv->schedule[i].runtime =
            schedule->sched_entries[i].runtime;
    }
    update_schedule_vcpus(ops);

    /*
     * The newly-installed schedule takes effect immediately. We do not even
     * wait for the current major frame to expire.
     *
     * Signal a new major frame to begin. The next major frame is set up by
     * the do_schedule callback function when it is next invoked.
     */
    sched_priv->next_major_frame = NOW();

    return 0;

 fail:
    return -EINVAL;
}
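
/*
 * Illustrative sketch (not part of the original file): a caller-side view
 * of a schedule that passes the checks in arinc653_sched_set() above: it
 * contains a dom0 slot, valid VCPU IDs, positive run times, and a total
 * run time no larger than the major frame.  Field names are taken from
 * their uses above; the helper function itself is hypothetical.
 */
#if 0
static void example_build_schedule(struct xen_sysctl_arinc653_schedule *s)
{
    memset(s, 0, sizeof(*s));
    s->major_frame = MILLISECS(10);             /* 10 ms major frame */
    s->num_sched_entries = 2;

    /* Entry 0: dom0 (all-zero handle), VCPU 0, 6 ms per major frame. */
    s->sched_entries[0].vcpu_id = 0;
    s->sched_entries[0].runtime = MILLISECS(6);

    /* Entry 1: a guest domain; its real UUID would go in dom_handle. */
    s->sched_entries[1].vcpu_id = 0;
    s->sched_entries[1].runtime = MILLISECS(4); /* 6 + 4 <= 10, so valid */
}
#endif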
/**
 * This function is called by the adjust_global scheduler hook to read the
 * current ARINC 653 schedule
 *
 * @param ops       Pointer to this instance of the scheduler structure
 * @return          <ul>
 *                  <li> 0 = success
 *                  <li> !0 = error
 *                  </ul>
 */
static int
arinc653_sched_get(
    const struct scheduler *ops,
    struct xen_sysctl_arinc653_schedule *schedule)
{
    a653sched_priv_t *sched_priv = SCHED_PRIV(ops);
    unsigned int i;

    schedule->num_sched_entries = sched_priv->num_schedule_entries;
    schedule->major_frame = sched_priv->major_frame;
    for ( i = 0; i < sched_priv->num_schedule_entries; i++ )
    {
        memcpy(schedule->sched_entries[i].dom_handle,
               sched_priv->schedule[i].dom_handle,
               sizeof(sched_priv->schedule[i].dom_handle));
        schedule->sched_entries[i].vcpu_id = sched_priv->schedule[i].vcpu_id;
        schedule->sched_entries[i].runtime = sched_priv->schedule[i].runtime;
    }

    return 0;
}

/**************************************************************************
 * Scheduler callback functions                                           *
 **************************************************************************/

/**
 * This function performs initialization for an instance of the scheduler.
 *
 * @param ops       Pointer to this instance of the scheduler structure
 *
 * @return          <ul>
 *                  <li> 0 = success
 *                  <li> !0 = error
 *                  </ul>
 */
static int
a653sched_init(struct scheduler *ops)
{
    a653sched_priv_t *prv;

    prv = xmalloc(a653sched_priv_t);
    if ( prv == NULL )
        return -ENOMEM;

    memset(prv, 0, sizeof(*prv));
    ops->sched_data = prv;

    prv->schedule[0].dom_handle[0] = '\0';
    prv->schedule[0].vcpu_id = 0;
    prv->schedule[0].runtime = MILLISECS(10);
    prv->schedule[0].vc = NULL;
    prv->num_schedule_entries = 1;
    prv->major_frame = MILLISECS(10);
    prv->next_major_frame = 0;
    INIT_LIST_HEAD(&prv->vcpu_list);

    return 0;
}
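
/*
 * Note added for clarity (not in the original file): because prv is
 * zeroed, the single default entry carries an all-zero dom_handle, which
 * is the handle arinc653_sched_set() treats as domain 0.  Until a real
 * schedule is installed through the adjust_global hook, the default
 * schedule therefore gives dom0's VCPU0 the entire 10 ms major frame.
 */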
/**
 * This function performs deinitialization for an instance of the scheduler
 *
 * @param ops       Pointer to this instance of the scheduler structure
 */
static void
a653sched_deinit(const struct scheduler *ops)
{
    xfree(SCHED_PRIV(ops));
}

/**
 * This function allocates scheduler-specific data for a VCPU
 *
 * @param ops       Pointer to this instance of the scheduler structure
 *
 * @return          Pointer to the allocated data
 */
static void *
a653sched_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
{
    /*
     * Allocate memory for the ARINC 653-specific scheduler data information
     * associated with the given VCPU (vc).
     */
    if ( (vc->sched_priv = xmalloc(arinc653_vcpu_t)) == NULL )
        return NULL;

    /*
     * Initialize our ARINC 653 scheduler-specific information for the VCPU.
     * The VCPU starts "asleep." When Xen is ready for the VCPU to run, it
     * will call the vcpu_wake scheduler callback function and our scheduler
     * will mark the VCPU awake.
     */
    AVCPU(vc)->vc = vc;
    AVCPU(vc)->awake = 0;
    if ( !is_idle_vcpu(vc) )
        list_add(&AVCPU(vc)->list, &SCHED_PRIV(ops)->vcpu_list);
    update_schedule_vcpus(ops);

    return AVCPU(vc);
}

/**
 * This function frees scheduler-specific VCPU data
 *
 * @param ops       Pointer to this instance of the scheduler structure
 */
static void
a653sched_free_vdata(const struct scheduler *ops, void *priv)
{
    arinc653_vcpu_t *av = priv;

    if (av == NULL)
        return;

    list_del(&av->list);
    xfree(av);
    update_schedule_vcpus(ops);
}

/**
 * This function allocates scheduler-specific data for a physical CPU
 *
 * We do not actually make use of any per-CPU data but the hypervisor expects
 * a non-NULL return value
 *
 * @param ops       Pointer to this instance of the scheduler structure
 *
 * @return          Pointer to the allocated data
 */
static void *
a653sched_alloc_pdata(const struct scheduler *ops, int cpu)
{
    /* return a non-NULL value to keep schedule.c happy */
    return SCHED_PRIV(ops);
}

/**
 * This function frees scheduler-specific data for a physical CPU
 *
 * @param ops       Pointer to this instance of the scheduler structure
 */
static void
a653sched_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
{
    /* nop */
}

/**
 * This function allocates scheduler-specific data for a domain
 *
 * We do not actually make use of any per-domain data but the hypervisor
 * expects a non-NULL return value
 *
 * @param ops       Pointer to this instance of the scheduler structure
 *
 * @return          Pointer to the allocated data
 */
static void *
a653sched_alloc_domdata(const struct scheduler *ops, struct domain *dom)
{
    /* return a non-NULL value to keep schedule.c happy */
    return SCHED_PRIV(ops);
}

/**
 * This function frees scheduler-specific data for a domain
 *
 * @param ops       Pointer to this instance of the scheduler structure
 */
static void
a653sched_free_domdata(const struct scheduler *ops, void *data)
{
    /* nop */
}

/**
 * Xen scheduler callback function to sleep a VCPU
 *
 * @param ops       Pointer to this instance of the scheduler structure
 * @param vc        Pointer to the VCPU structure for the current domain
 */
static void
a653sched_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
{
    if ( AVCPU(vc) != NULL )
        AVCPU(vc)->awake = 0;

    /*
     * If the VCPU being put to sleep is the same one that is currently
     * running, raise a softirq to invoke the scheduler to switch domains.
     */
    if ( per_cpu(schedule_data, vc->processor).curr == vc )
        cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
}

/**
 * Xen scheduler callback function to wake up a VCPU
 *
 * @param ops       Pointer to this instance of the scheduler structure
 * @param vc        Pointer to the VCPU structure for the current domain
 */
static void
a653sched_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
{
    if ( AVCPU(vc) != NULL )
        AVCPU(vc)->awake = 1;

    cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
}

/**
 * Xen scheduler callback function to select a VCPU to run.
 * This is the main scheduler routine.
 *
 * @param ops       Pointer to this instance of the scheduler structure
 * @param now       Current time
 *
 * @return          Address of the VCPU structure scheduled to be run next
 *                  Amount of time to execute the returned VCPU
 *                  Flag for whether the VCPU was migrated
 */
static struct task_slice
a653sched_do_schedule(
    const struct scheduler *ops,
    s_time_t now,
    bool_t tasklet_work_scheduled)
{
    struct task_slice ret;                      /* hold the chosen domain */
    struct vcpu * new_task = NULL;
    static int sched_index = 0;
    static s_time_t next_switch_time;
    a653sched_priv_t *sched_priv = SCHED_PRIV(ops);

    if ( now >= sched_priv->next_major_frame )
    {
        /* time to enter a new major frame
         * the first time this function is called, this will be true */
        /* start with the first domain in the schedule */
        sched_index = 0;
        sched_priv->next_major_frame = now + sched_priv->major_frame;
        next_switch_time = now + sched_priv->schedule[0].runtime;
    }
    else
    {
        while ( (now >= next_switch_time)
                && (sched_index < sched_priv->num_schedule_entries) )
        {
            /* time to switch to the next domain in this major frame */
            sched_index++;
            next_switch_time += sched_priv->schedule[sched_index].runtime;
        }
    }

    /*
     * If we exhausted the domains in the schedule and still have time left
     * in the major frame then switch next at the next major frame.
     */
    if ( sched_index >= sched_priv->num_schedule_entries )
        next_switch_time = sched_priv->next_major_frame;

    /*
     * If there are more domains to run in the current major frame, set
     * new_task equal to the address of next domain's VCPU structure.
     * Otherwise, set new_task equal to the address of the idle task's VCPU
     * structure.
     */
    new_task = (sched_index < sched_priv->num_schedule_entries)
        ? sched_priv->schedule[sched_index].vc
        : IDLETASK(0);

    /* Check to see if the new task can be run (awake & runnable). */
    if ( !((new_task != NULL)
           && (AVCPU(new_task) != NULL)
           && AVCPU(new_task)->awake
           && vcpu_runnable(new_task)) )
        new_task = IDLETASK(0);
    BUG_ON(new_task == NULL);

    /*
     * Check to make sure we did not miss a major frame.
     * This is a good test for robust partitioning.
     */
    BUG_ON(now >= sched_priv->next_major_frame);

    /* Tasklet work (which runs in idle VCPU context) overrides all else. */
    if ( tasklet_work_scheduled )
        new_task = IDLETASK(0);

    /*
     * Return the amount of time the next domain has to run and the address
     * of the selected task's VCPU structure.
     */
    ret.time = next_switch_time - now;
    ret.task = new_task;
    ret.migrated = 0;                           /* we do not support migration */

    BUG_ON(ret.time <= 0);

    return ret;
}
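
/*
 * Worked example added for clarity (not in the original file), using the
 * default schedule from a653sched_init() (one 10 ms entry in a 10 ms
 * major frame).  Suppose a major frame begins at t = 0 and this function
 * runs again at t = 3 ms because the scheduled VCPU blocked: sched_index
 * stays 0, next_switch_time remains 10 ms, the awake/runnable check fails,
 * and the idle VCPU receives the remaining 7 ms slice.  At t = 10 ms the
 * next invocation sees now >= next_major_frame and starts a new frame.
 */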
/**
 * Xen scheduler callback function to select a CPU for the VCPU to run on
 *
 * @param ops       Pointer to this instance of the scheduler structure
 * @param vc        Pointer to the VCPU structure for the current domain
 *
 * @return          Number of selected physical CPU
 */
static int
a653sched_pick_cpu(const struct scheduler *ops, struct vcpu *vc)
{
    /* this implementation only supports one physical CPU */
    return 0;
}

/**
 * Xen scheduler callback function to perform a global (not domain-specific)
 * adjustment. It is used by the ARINC 653 scheduler to put in place a new
 * ARINC 653 schedule or to retrieve the schedule currently in place.
 *
 * @param ops       Pointer to this instance of the scheduler structure
 * @param sc        Pointer to the scheduler operation specified by Domain 0
 */
static int
a653sched_adjust_global(const struct scheduler *ops,
                        struct xen_sysctl_scheduler_op *sc)
{
    xen_sysctl_arinc653_schedule_t local_sched;
    int rc = -EINVAL;

    switch ( sc->cmd )
    {
    case XEN_SYSCTL_SCHEDOP_putinfo:
        copy_from_guest(&local_sched, sc->u.sched_arinc653.schedule, 1);
        rc = arinc653_sched_set(ops, &local_sched);
        break;
    case XEN_SYSCTL_SCHEDOP_getinfo:
        rc = arinc653_sched_get(ops, &local_sched);
        copy_to_guest(sc->u.sched_arinc653.schedule, &local_sched, 1);
        break;
    }

    return rc;
}
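
/*
 * Flow sketch added for clarity (not in the original file).  Only sc->cmd
 * and sc->u.sched_arinc653.schedule are confirmed by the code above; the
 * toolstack-side steps are assumptions about how a dom0 caller would
 * typically drive this hook:
 *
 *   1. Fill in a struct xen_sysctl_arinc653_schedule (for instance along
 *      the lines of the example_build_schedule() sketch above).
 *   2. Build a scheduler-op sysctl with cmd set to
 *      XEN_SYSCTL_SCHEDOP_putinfo and u.sched_arinc653.schedule pointing,
 *      as a guest handle, at the structure from step 1.
 *   3. Issue the sysctl; Xen routes it to a653sched_adjust_global(), which
 *      copies the schedule in and applies it via arinc653_sched_set().
 */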
/**
 * This structure defines our scheduler for Xen.
 * The entries tell Xen where to find our scheduler-specific
 * callback functions.
 * The symbol must be visible to the rest of Xen at link time.
 */
struct scheduler sched_arinc653_def = {
    .name           = "ARINC 653 Scheduler",
    .opt_name       = "arinc653",
    .sched_id       = XEN_SCHEDULER_ARINC653,
    .sched_data     = NULL,

    .init           = a653sched_init,
    .deinit         = a653sched_deinit,

    .free_vdata     = a653sched_free_vdata,
    .alloc_vdata    = a653sched_alloc_vdata,

    .free_pdata     = a653sched_free_pdata,
    .alloc_pdata    = a653sched_alloc_pdata,

    .free_domdata   = a653sched_free_domdata,
    .alloc_domdata  = a653sched_alloc_domdata,

    .init_domain    = NULL,
    .destroy_domain = NULL,

    .insert_vcpu    = NULL,
    .remove_vcpu    = NULL,

    .sleep          = a653sched_vcpu_sleep,
    .wake           = a653sched_vcpu_wake,
    .yield          = NULL,
    .context_saved  = NULL,

    .do_schedule    = a653sched_do_schedule,

    .pick_cpu       = a653sched_pick_cpu,

    .adjust         = NULL,
    .adjust_global  = a653sched_adjust_global,

    .dump_settings  = NULL,
    .dump_cpu_state = NULL,

    .tick_suspend   = NULL,
    .tick_resume    = NULL,
};