debuggers.hg

annotate xen/common/sched_arinc653.c @ 22855:1d1eec7e1fb4

xl: Perform minimal validation of virtual disk file while parsing config file

This patch performs some very basic validation on the virtual disk
file passed through the config file. This validation ensures that we
don't go too far with the initialization -- such as spawning qemu --
only to fail later on a fundamental problem with the disk configuration.

[ Patch fixed up to work with PHYSTYPE_EMPTY 22808:6ec61438713a -iwj ]

Signed-off-by: Kamala Narasimhan <kamala.narasimhan@citrix.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
Committed-by: Ian Jackson <ian.jackson@eu.citrix.com>
author Kamala Narasimhan <kamala.narasimhan@gmail.com>
date Tue Jan 25 18:09:49 2011 +0000 (2011-01-25)
parents 45b705c5fe49
children
rev   line source
keir@22501 1 /******************************************************************************
keir@22501 2 * sched_arinc653.c
keir@22501 3 *
keir@22501 4 * An ARINC653-compatible scheduling algorithm for use in Xen.
keir@22501 5 *
keir@22501 6 * Permission is hereby granted, free of charge, to any person obtaining a copy
keir@22501 7 * of this software and associated documentation files (the "Software"), to
keir@22501 8 * deal in the Software without restriction, including without limitation the
keir@22501 9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
keir@22501 10 * sell copies of the Software, and to permit persons to whom the Software is
keir@22501 11 * furnished to do so, subject to the following conditions:
keir@22501 12 *
keir@22501 13 * The above copyright notice and this permission notice shall be included in
keir@22501 14 * all copies or substantial portions of the Software.
keir@22501 15 *
keir@22501 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
keir@22501 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
keir@22501 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
keir@22501 19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
keir@22501 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
keir@22501 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
keir@22501 22 * DEALINGS IN THE SOFTWARE.
keir@22501 23 *
keir@22501 24 * Copyright (c) 2010, DornerWorks, Ltd. <DornerWorks.com>
keir@22501 25 */
keir@22501 26
keir@22501 27 #include <xen/config.h>
keir@22501 28 #include <xen/lib.h>
keir@22501 29 #include <xen/sched.h>
keir@22501 30 #include <xen/sched-if.h>
keir@22501 31 #include <xen/timer.h>
keir@22501 32 #include <xen/softirq.h>
keir@22501 33 #include <xen/time.h>
keir@22501 34 #include <xen/errno.h>
keir@22501 35 #include <xen/list.h>
keir@22501 36 #include <xen/guest_access.h>
keir@22501 37 #include <public/sysctl.h>
keir@22501 38
/**************************************************************************
 * Private Macros                                                         *
 **************************************************************************/

/**
 * Fetch the idle VCPU belonging to a given physical CPU.
 */
#define IDLETASK(cpu) (idle_vcpu[cpu])

/**
 * Map a Xen VCPU (vc) to its ARINC 653 scheduler-private data.
 */
#define AVCPU(vc) ((arinc653_vcpu_t *)(vc)->sched_priv)

/**
 * Map a scheduler ops pointer to the instance-global private data.
 */
#define SCHED_PRIV(s) ((a653sched_priv_t *)((s)->sched_data))
keir@22501 58
keir@22501 59 /**************************************************************************
keir@22501 60 * Private Type Definitions *
keir@22501 61 **************************************************************************/
keir@22501 62
keir@22501 63 /**
keir@22501 64 * The arinc653_vcpu_t structure holds ARINC 653-scheduler-specific
keir@22501 65 * information for all non-idle VCPUs
keir@22501 66 */
keir@22501 67 typedef struct arinc653_vcpu_s
keir@22501 68 {
keir@22501 69 /* vc points to Xen's struct vcpu so we can get to it from an
keir@22501 70 * arinc653_vcpu_t pointer. */
keir@22501 71 struct vcpu * vc;
keir@22501 72 /* awake holds whether the VCPU has been woken with vcpu_wake() */
keir@22501 73 bool_t awake;
keir@22501 74 /* list holds the linked list information for the list this VCPU
keir@22501 75 * is stored in */
keir@22501 76 struct list_head list;
keir@22501 77 } arinc653_vcpu_t;
keir@22501 78
keir@22501 79 /**
keir@22501 80 * The sched_entry_t structure holds a single entry of the
keir@22501 81 * ARINC 653 schedule.
keir@22501 82 */
keir@22501 83 typedef struct sched_entry_s
keir@22501 84 {
keir@22501 85 /* dom_handle holds the handle ("UUID") for the domain that this
keir@22501 86 * schedule entry refers to. */
keir@22501 87 xen_domain_handle_t dom_handle;
keir@22501 88 /* vcpu_id holds the VCPU number for the VCPU that this schedule
keir@22501 89 * entry refers to. */
keir@22501 90 int vcpu_id;
keir@22501 91 /* runtime holds the number of nanoseconds that the VCPU for this
keir@22501 92 * schedule entry should be allowed to run per major frame. */
keir@22501 93 s_time_t runtime;
keir@22501 94 /* vc holds a pointer to the Xen VCPU structure */
keir@22501 95 struct vcpu * vc;
keir@22501 96 } sched_entry_t;
keir@22501 97
keir@22501 98 /**
keir@22501 99 * This structure defines data that is global to an instance of the scheduler
keir@22501 100 */
keir@22501 101 typedef struct a653sched_priv_s
keir@22501 102 {
keir@22501 103 /**
keir@22501 104 * This array holds the active ARINC 653 schedule.
keir@22501 105 *
keir@22501 106 * When the system tries to start a new VCPU, this schedule is scanned
keir@22501 107 * to look for a matching (handle, VCPU #) pair. If both the handle (UUID)
keir@22501 108 * and VCPU number match, then the VCPU is allowed to run. Its run time
keir@22501 109 * (per major frame) is given in the third entry of the schedule.
keir@22501 110 */
keir@22501 111 sched_entry_t schedule[ARINC653_MAX_DOMAINS_PER_SCHEDULE];
keir@22501 112
keir@22501 113 /**
keir@22501 114 * This variable holds the number of entries that are valid in
keir@22501 115 * the arinc653_schedule table.
keir@22501 116 *
keir@22501 117 * This is not necessarily the same as the number of domains in the
keir@22501 118 * schedule. A domain could be listed multiple times within the schedule,
keir@22501 119 * or a domain with multiple VCPUs could have a different
keir@22501 120 * schedule entry for each VCPU.
keir@22501 121 */
keir@22501 122 int num_schedule_entries;
keir@22501 123
keir@22501 124 /**
keir@22501 125 * the major frame time for the ARINC 653 schedule.
keir@22501 126 */
keir@22501 127 s_time_t major_frame;
keir@22501 128
keir@22501 129 /**
keir@22501 130 * the time that the next major frame starts
keir@22501 131 */
keir@22501 132 s_time_t next_major_frame;
keir@22501 133
keir@22501 134 /**
keir@22501 135 * pointers to all Xen VCPU structures for iterating through
keir@22501 136 */
keir@22501 137 struct list_head vcpu_list;
keir@22501 138 } a653sched_priv_t;
keir@22501 139
keir@22501 140 /**************************************************************************
keir@22501 141 * Helper functions *
keir@22501 142 **************************************************************************/
keir@22501 143
keir@22501 144 /**
keir@22501 145 * This function compares two domain handles.
keir@22501 146 *
keir@22501 147 * @param h1 Pointer to handle 1
keir@22501 148 * @param h2 Pointer to handle 2
keir@22501 149 *
keir@22501 150 * @return <ul>
keir@22501 151 * <li> <0: handle 1 is less than handle 2
keir@22501 152 * <li> 0: handle 1 is equal to handle 2
keir@22501 153 * <li> >0: handle 1 is greater than handle 2
keir@22501 154 * </ul>
keir@22501 155 */
keir@22501 156 static int dom_handle_cmp(const xen_domain_handle_t h1,
keir@22501 157 const xen_domain_handle_t h2)
keir@22501 158 {
keir@22501 159 return memcmp(h1, h2, sizeof(xen_domain_handle_t));
keir@22501 160 }
keir@22501 161
keir@22501 162 /**
keir@22501 163 * This function searches the vcpu list to find a VCPU that matches
keir@22501 164 * the domain handle and VCPU ID specified.
keir@22501 165 *
keir@22501 166 * @param ops Pointer to this instance of the scheduler structure
keir@22501 167 * @param handle Pointer to handler
keir@22501 168 * @param vcpu_id VCPU ID
keir@22501 169 *
keir@22501 170 * @return <ul>
keir@22501 171 * <li> Pointer to the matching VCPU if one is found
keir@22501 172 * <li> NULL otherwise
keir@22501 173 * </ul>
keir@22501 174 */
keir@22501 175 static struct vcpu *find_vcpu(
keir@22501 176 const struct scheduler *ops,
keir@22501 177 xen_domain_handle_t handle,
keir@22501 178 int vcpu_id)
keir@22501 179 {
keir@22501 180 arinc653_vcpu_t *avcpu;
keir@22501 181
keir@22501 182 /* loop through the vcpu_list looking for the specified VCPU */
keir@22501 183 list_for_each_entry ( avcpu, &SCHED_PRIV(ops)->vcpu_list, list )
keir@22501 184 if ( (dom_handle_cmp(avcpu->vc->domain->handle, handle) == 0)
keir@22501 185 && (vcpu_id == avcpu->vc->vcpu_id) )
keir@22501 186 return avcpu->vc;
keir@22501 187
keir@22501 188 return NULL;
keir@22501 189 }
keir@22501 190
keir@22501 191 /**
keir@22501 192 * This function updates the pointer to the Xen VCPU structure for each entry
keir@22501 193 * in the ARINC 653 schedule.
keir@22501 194 *
keir@22501 195 * @param ops Pointer to this instance of the scheduler structure
keir@22501 196 * @return <None>
keir@22501 197 */
keir@22501 198 static void update_schedule_vcpus(const struct scheduler *ops)
keir@22501 199 {
keir@22501 200 unsigned int i, n_entries = SCHED_PRIV(ops)->num_schedule_entries;
keir@22501 201
keir@22501 202 for ( i = 0; i < n_entries; i++ )
keir@22501 203 SCHED_PRIV(ops)->schedule[i].vc =
keir@22501 204 find_vcpu(ops,
keir@22501 205 SCHED_PRIV(ops)->schedule[i].dom_handle,
keir@22501 206 SCHED_PRIV(ops)->schedule[i].vcpu_id);
keir@22501 207 }
keir@22501 208
keir@22501 209 /**
keir@22501 210 * This function is called by the adjust_global scheduler hook to put
keir@22501 211 * in place a new ARINC653 schedule.
keir@22501 212 *
keir@22501 213 * @param ops Pointer to this instance of the scheduler structure
keir@22501 214 *
keir@22501 215 * @return <ul>
keir@22501 216 * <li> 0 = success
keir@22501 217 * <li> !0 = error
keir@22501 218 * </ul>
keir@22501 219 */
keir@22501 220 static int
keir@22501 221 arinc653_sched_set(
keir@22501 222 const struct scheduler *ops,
keir@22501 223 struct xen_sysctl_arinc653_schedule *schedule)
keir@22501 224 {
keir@22501 225 a653sched_priv_t *sched_priv = SCHED_PRIV(ops);
keir@22501 226 s_time_t total_runtime = 0;
keir@22501 227 bool_t found_dom0 = 0;
keir@22501 228 const static xen_domain_handle_t dom0_handle = {0};
keir@22501 229 unsigned int i;
keir@22501 230
keir@22501 231 /* Check for valid major frame and number of schedule entries. */
keir@22501 232 if ( (schedule->major_frame <= 0)
keir@22501 233 || (schedule->num_sched_entries < 1)
keir@22501 234 || (schedule->num_sched_entries > ARINC653_MAX_DOMAINS_PER_SCHEDULE) )
keir@22501 235 goto fail;
keir@22501 236
keir@22501 237 for ( i = 0; i < schedule->num_sched_entries; i++ )
keir@22501 238 {
keir@22501 239 if ( dom_handle_cmp(schedule->sched_entries[i].dom_handle,
keir@22501 240 dom0_handle) == 0 )
keir@22501 241 found_dom0 = 1;
keir@22501 242
keir@22501 243 /* Check for a valid VCPU ID and run time. */
keir@22514 244 if ( (schedule->sched_entries[i].vcpu_id >= MAX_VIRT_CPUS)
keir@22501 245 || (schedule->sched_entries[i].runtime <= 0) )
keir@22501 246 goto fail;
keir@22501 247
keir@22501 248 /* Add this entry's run time to total run time. */
keir@22501 249 total_runtime += schedule->sched_entries[i].runtime;
keir@22501 250 }
keir@22501 251
keir@22501 252 /* Error if the schedule doesn't contain a slot for domain 0. */
keir@22501 253 if ( !found_dom0 )
keir@22501 254 goto fail;
keir@22501 255
keir@22501 256 /*
keir@22501 257 * Error if the major frame is not large enough to run all entries as
keir@22501 258 * indicated by comparing the total run time to the major frame length.
keir@22501 259 */
keir@22501 260 if ( total_runtime > schedule->major_frame )
keir@22501 261 goto fail;
keir@22501 262
keir@22501 263 /* Copy the new schedule into place. */
keir@22501 264 sched_priv->num_schedule_entries = schedule->num_sched_entries;
keir@22501 265 sched_priv->major_frame = schedule->major_frame;
keir@22501 266 for ( i = 0; i < schedule->num_sched_entries; i++ )
keir@22501 267 {
keir@22501 268 memcpy(sched_priv->schedule[i].dom_handle,
keir@22501 269 schedule->sched_entries[i].dom_handle,
keir@22501 270 sizeof(sched_priv->schedule[i].dom_handle));
keir@22501 271 sched_priv->schedule[i].vcpu_id =
keir@22501 272 schedule->sched_entries[i].vcpu_id;
keir@22501 273 sched_priv->schedule[i].runtime =
keir@22501 274 schedule->sched_entries[i].runtime;
keir@22501 275 }
keir@22501 276 update_schedule_vcpus(ops);
keir@22501 277
keir@22501 278 /*
keir@22501 279 * The newly-installed schedule takes effect immediately. We do not even
keir@22501 280 * wait for the current major frame to expire.
keir@22501 281 *
keir@22501 282 * Signal a new major frame to begin. The next major frame is set up by
keir@22501 283 * the do_schedule callback function when it is next invoked.
keir@22501 284 */
keir@22501 285 sched_priv->next_major_frame = NOW();
keir@22501 286
keir@22501 287 return 0;
keir@22501 288
keir@22501 289 fail:
keir@22501 290 return -EINVAL;
keir@22501 291 }
keir@22501 292
keir@22501 293 /**
keir@22501 294 * This function is called by the adjust_global scheduler hook to read the
keir@22501 295 * current ARINC 653 schedule
keir@22501 296 *
keir@22501 297 * @param ops Pointer to this instance of the scheduler structure
keir@22501 298 * @return <ul>
keir@22501 299 * <li> 0 = success
keir@22501 300 * <li> !0 = error
keir@22501 301 * </ul>
keir@22501 302 */
keir@22501 303 static int
keir@22501 304 arinc653_sched_get(
keir@22501 305 const struct scheduler *ops,
keir@22501 306 struct xen_sysctl_arinc653_schedule *schedule)
keir@22501 307 {
keir@22501 308 a653sched_priv_t *sched_priv = SCHED_PRIV(ops);
keir@22501 309 unsigned int i;
keir@22501 310
keir@22501 311 schedule->num_sched_entries = sched_priv->num_schedule_entries;
keir@22501 312 schedule->major_frame = sched_priv->major_frame;
keir@22501 313 for ( i = 0; i < sched_priv->num_schedule_entries; i++ )
keir@22501 314 {
keir@22501 315 memcpy(schedule->sched_entries[i].dom_handle,
keir@22501 316 sched_priv->schedule[i].dom_handle,
keir@22501 317 sizeof(sched_priv->schedule[i].dom_handle));
keir@22501 318 schedule->sched_entries[i].vcpu_id = sched_priv->schedule[i].vcpu_id;
keir@22501 319 schedule->sched_entries[i].runtime = sched_priv->schedule[i].runtime;
keir@22501 320 }
keir@22501 321
keir@22501 322 return 0;
keir@22501 323 }
keir@22501 324
keir@22501 325 /**************************************************************************
keir@22501 326 * Scheduler callback functions *
keir@22501 327 **************************************************************************/
keir@22501 328
keir@22501 329 /**
keir@22501 330 * This function performs initialization for an instance of the scheduler.
keir@22501 331 *
keir@22501 332 * @param ops Pointer to this instance of the scheduler structure
keir@22501 333 *
keir@22501 334 * @return <ul>
keir@22501 335 * <li> 0 = success
keir@22501 336 * <li> !0 = error
keir@22501 337 * </ul>
keir@22501 338 */
keir@22501 339 static int
keir@22501 340 a653sched_init(struct scheduler *ops)
keir@22501 341 {
keir@22501 342 a653sched_priv_t *prv;
keir@22501 343
keir@22501 344 prv = xmalloc(a653sched_priv_t);
keir@22501 345 if ( prv == NULL )
keir@22501 346 return -ENOMEM;
keir@22501 347
keir@22501 348 memset(prv, 0, sizeof(*prv));
keir@22501 349 ops->sched_data = prv;
keir@22501 350
keir@22501 351 prv->schedule[0].dom_handle[0] = '\0';
keir@22501 352 prv->schedule[0].vcpu_id = 0;
keir@22501 353 prv->schedule[0].runtime = MILLISECS(10);
keir@22501 354 prv->schedule[0].vc = NULL;
keir@22501 355 prv->num_schedule_entries = 1;
keir@22501 356 prv->major_frame = MILLISECS(10);
keir@22501 357 prv->next_major_frame = 0;
keir@22501 358 INIT_LIST_HEAD(&prv->vcpu_list);
keir@22501 359
keir@22501 360 return 0;
keir@22501 361 }
keir@22501 362
/**
 * Tear down an instance of the scheduler by releasing its private data.
 *
 * @param ops Pointer to this instance of the scheduler structure
 */
static void
a653sched_deinit(const struct scheduler *ops)
{
    xfree(SCHED_PRIV(ops));
}
keir@22501 373
keir@22501 374 /**
keir@22501 375 * This function allocates scheduler-specific data for a VCPU
keir@22501 376 *
keir@22501 377 * @param ops Pointer to this instance of the scheduler structure
keir@22501 378 *
keir@22501 379 * @return Pointer to the allocated data
keir@22501 380 */
keir@22501 381 static void *
keir@22501 382 a653sched_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
keir@22501 383 {
keir@22501 384 /*
keir@22501 385 * Allocate memory for the ARINC 653-specific scheduler data information
keir@22501 386 * associated with the given VCPU (vc).
keir@22501 387 */
keir@22501 388 if ( (vc->sched_priv = xmalloc(arinc653_vcpu_t)) == NULL )
keir@22501 389 return NULL;
keir@22501 390
keir@22501 391 /*
keir@22501 392 * Initialize our ARINC 653 scheduler-specific information for the VCPU.
keir@22501 393 * The VCPU starts "asleep." When Xen is ready for the VCPU to run, it
keir@22501 394 * will call the vcpu_wake scheduler callback function and our scheduler
keir@22501 395 * will mark the VCPU awake.
keir@22501 396 */
keir@22501 397 AVCPU(vc)->vc = vc;
keir@22501 398 AVCPU(vc)->awake = 0;
keir@22501 399 if ( !is_idle_vcpu(vc) )
keir@22501 400 list_add(&AVCPU(vc)->list, &SCHED_PRIV(ops)->vcpu_list);
keir@22501 401 update_schedule_vcpus(ops);
keir@22501 402
keir@22501 403 return AVCPU(vc);
keir@22501 404 }
keir@22501 405
keir@22501 406 /**
keir@22501 407 * This function frees scheduler-specific VCPU data
keir@22501 408 *
keir@22501 409 * @param ops Pointer to this instance of the scheduler structure
keir@22501 410 */
keir@22501 411 static void
keir@22501 412 a653sched_free_vdata(const struct scheduler *ops, void *priv)
keir@22501 413 {
keir@22501 414 arinc653_vcpu_t *av = priv;
keir@22501 415
keir@22501 416 if (av == NULL)
keir@22501 417 return;
keir@22501 418
keir@22501 419 list_del(&av->list);
keir@22501 420 xfree(av);
keir@22501 421 update_schedule_vcpus(ops);
keir@22501 422 }
keir@22501 423
/**
 * Allocate per-physical-CPU scheduler data.
 *
 * No per-CPU state is actually needed, but schedule.c treats a NULL
 * return as failure, so hand back the (non-NULL) global private data.
 *
 * @param ops Pointer to this instance of the scheduler structure
 * @param cpu Physical CPU number (unused)
 *
 * @return Non-NULL placeholder pointer
 */
static void *
a653sched_alloc_pdata(const struct scheduler *ops, int cpu)
{
    return SCHED_PRIV(ops);
}
keir@22501 440
/**
 * Free per-physical-CPU scheduler data.
 *
 * @param ops Pointer to this instance of the scheduler structure
 */
static void
a653sched_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
{
    /* Nothing to do: alloc_pdata did not actually allocate anything. */
}
keir@22501 451
/**
 * Allocate per-domain scheduler data.
 *
 * No per-domain state is actually needed, but the hypervisor treats a
 * NULL return as failure, so hand back the (non-NULL) global private
 * data as a placeholder.
 *
 * @param ops Pointer to this instance of the scheduler structure
 * @param dom Domain the data is being allocated for (unused)
 *
 * @return Non-NULL placeholder pointer
 */
static void *
a653sched_alloc_domdata(const struct scheduler *ops, struct domain *dom)
{
    return SCHED_PRIV(ops);
}
keir@22501 468
/**
 * Free per-domain scheduler data.
 *
 * @param ops Pointer to this instance of the scheduler structure
 */
static void
a653sched_free_domdata(const struct scheduler *ops, void *data)
{
    /* Nothing to do: alloc_domdata did not actually allocate anything. */
}
keir@22501 479
keir@22501 480 /**
keir@22501 481 * Xen scheduler callback function to sleep a VCPU
keir@22501 482 *
keir@22501 483 * @param ops Pointer to this instance of the scheduler structure
keir@22501 484 * @param vc Pointer to the VCPU structure for the current domain
keir@22501 485 */
keir@22501 486 static void
keir@22501 487 a653sched_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
keir@22501 488 {
keir@22501 489 if ( AVCPU(vc) != NULL )
keir@22501 490 AVCPU(vc)->awake = 0;
keir@22501 491
keir@22501 492 /*
keir@22501 493 * If the VCPU being put to sleep is the same one that is currently
keir@22501 494 * running, raise a softirq to invoke the scheduler to switch domains.
keir@22501 495 */
keir@22501 496 if ( per_cpu(schedule_data, vc->processor).curr == vc )
keir@22501 497 cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
keir@22501 498 }
keir@22501 499
keir@22501 500 /**
keir@22501 501 * Xen scheduler callback function to wake up a VCPU
keir@22501 502 *
keir@22501 503 * @param ops Pointer to this instance of the scheduler structure
keir@22501 504 * @param vc Pointer to the VCPU structure for the current domain
keir@22501 505 */
keir@22501 506 static void
keir@22501 507 a653sched_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
keir@22501 508 {
keir@22501 509 if ( AVCPU(vc) != NULL )
keir@22501 510 AVCPU(vc)->awake = 1;
keir@22501 511
keir@22501 512 cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
keir@22501 513 }
keir@22501 514
keir@22501 515 /**
keir@22501 516 * Xen scheduler callback function to select a VCPU to run.
keir@22501 517 * This is the main scheduler routine.
keir@22501 518 *
keir@22501 519 * @param ops Pointer to this instance of the scheduler structure
keir@22501 520 * @param now Current time
keir@22501 521 *
keir@22501 522 * @return Address of the VCPU structure scheduled to be run next
keir@22501 523 * Amount of time to execute the returned VCPU
keir@22501 524 * Flag for whether the VCPU was migrated
keir@22501 525 */
keir@22501 526 static struct task_slice
keir@22501 527 a653sched_do_schedule(
keir@22501 528 const struct scheduler *ops,
keir@22501 529 s_time_t now,
keir@22501 530 bool_t tasklet_work_scheduled)
keir@22501 531 {
keir@22501 532 struct task_slice ret; /* hold the chosen domain */
keir@22501 533 struct vcpu * new_task = NULL;
keir@22501 534 static int sched_index = 0;
keir@22501 535 static s_time_t next_switch_time;
keir@22501 536 a653sched_priv_t *sched_priv = SCHED_PRIV(ops);
keir@22501 537
keir@22501 538 if ( now >= sched_priv->next_major_frame )
keir@22501 539 {
keir@22501 540 /* time to enter a new major frame
keir@22501 541 * the first time this function is called, this will be true */
keir@22501 542 /* start with the first domain in the schedule */
keir@22501 543 sched_index = 0;
keir@22501 544 sched_priv->next_major_frame = now + sched_priv->major_frame;
keir@22501 545 next_switch_time = now + sched_priv->schedule[0].runtime;
keir@22501 546 }
keir@22501 547 else
keir@22501 548 {
keir@22501 549 while ( (now >= next_switch_time)
keir@22501 550 && (sched_index < sched_priv->num_schedule_entries) )
keir@22501 551 {
keir@22501 552 /* time to switch to the next domain in this major frame */
keir@22501 553 sched_index++;
keir@22501 554 next_switch_time += sched_priv->schedule[sched_index].runtime;
keir@22501 555 }
keir@22501 556 }
keir@22501 557
keir@22501 558 /*
keir@22501 559 * If we exhausted the domains in the schedule and still have time left
keir@22501 560 * in the major frame then switch next at the next major frame.
keir@22501 561 */
keir@22501 562 if ( sched_index >= sched_priv->num_schedule_entries )
keir@22501 563 next_switch_time = sched_priv->next_major_frame;
keir@22501 564
keir@22501 565 /*
keir@22501 566 * If there are more domains to run in the current major frame, set
keir@22501 567 * new_task equal to the address of next domain's VCPU structure.
keir@22501 568 * Otherwise, set new_task equal to the address of the idle task's VCPU
keir@22501 569 * structure.
keir@22501 570 */
keir@22501 571 new_task = (sched_index < sched_priv->num_schedule_entries)
keir@22501 572 ? sched_priv->schedule[sched_index].vc
keir@22501 573 : IDLETASK(0);
keir@22501 574
keir@22501 575 /* Check to see if the new task can be run (awake & runnable). */
keir@22501 576 if ( !((new_task != NULL)
keir@22501 577 && (AVCPU(new_task) != NULL)
keir@22501 578 && AVCPU(new_task)->awake
keir@22501 579 && vcpu_runnable(new_task)) )
keir@22501 580 new_task = IDLETASK(0);
keir@22501 581 BUG_ON(new_task == NULL);
keir@22501 582
keir@22501 583 /*
keir@22501 584 * Check to make sure we did not miss a major frame.
keir@22501 585 * This is a good test for robust partitioning.
keir@22501 586 */
keir@22501 587 BUG_ON(now >= sched_priv->next_major_frame);
keir@22501 588
keir@22501 589 /* Tasklet work (which runs in idle VCPU context) overrides all else. */
keir@22501 590 if ( tasklet_work_scheduled )
keir@22501 591 new_task = IDLETASK(0);
keir@22501 592
keir@22501 593 /*
keir@22501 594 * Return the amount of time the next domain has to run and the address
keir@22501 595 * of the selected task's VCPU structure.
keir@22501 596 */
keir@22501 597 ret.time = next_switch_time - now;
keir@22501 598 ret.task = new_task;
keir@22501 599 ret.migrated = 0; /* we do not support migration */
keir@22501 600
keir@22501 601 BUG_ON(ret.time <= 0);
keir@22501 602
keir@22501 603 return ret;
keir@22501 604 }
keir@22501 605
/**
 * Scheduler callback: choose a physical CPU for a VCPU to run on.
 *
 * @param ops Pointer to this instance of the scheduler structure
 * @param vc  VCPU to place
 *
 * @return Selected physical CPU number (always 0: this implementation
 *         supports a single physical CPU)
 */
static int
a653sched_pick_cpu(const struct scheduler *ops, struct vcpu *vc)
{
    return 0;
}
keir@22501 620
keir@22501 621 /**
keir@22501 622 * Xen scheduler callback function to perform a global (not domain-specific)
keir@22501 623 * adjustment. It is used by the ARINC 653 scheduler to put in place a new
keir@22501 624 * ARINC 653 schedule or to retrieve the schedule currently in place.
keir@22501 625 *
keir@22501 626 * @param ops Pointer to this instance of the scheduler structure
keir@22501 627 * @param sc Pointer to the scheduler operation specified by Domain 0
keir@22501 628 */
keir@22501 629 static int
keir@22501 630 a653sched_adjust_global(const struct scheduler *ops,
keir@22501 631 struct xen_sysctl_scheduler_op *sc)
keir@22501 632 {
keir@22501 633 xen_sysctl_arinc653_schedule_t local_sched;
keir@22501 634 int rc = -EINVAL;
keir@22501 635
keir@22501 636 switch ( sc->cmd )
keir@22501 637 {
keir@22501 638 case XEN_SYSCTL_SCHEDOP_putinfo:
keir@22501 639 copy_from_guest(&local_sched, sc->u.sched_arinc653.schedule, 1);
keir@22501 640 rc = arinc653_sched_set(ops, &local_sched);
keir@22501 641 break;
keir@22501 642 case XEN_SYSCTL_SCHEDOP_getinfo:
keir@22501 643 rc = arinc653_sched_get(ops, &local_sched);
keir@22501 644 copy_to_guest(sc->u.sched_arinc653.schedule, &local_sched, 1);
keir@22501 645 break;
keir@22501 646 }
keir@22501 647
keir@22501 648 return rc;
keir@22501 649 }
keir@22501 650
keir@22501 651 /**
keir@22501 652 * This structure defines our scheduler for Xen.
keir@22501 653 * The entries tell Xen where to find our scheduler-specific
keir@22501 654 * callback functions.
keir@22501 655 * The symbol must be visible to the rest of Xen at link time.
keir@22501 656 */
keir@22501 657 struct scheduler sched_arinc653_def = {
keir@22501 658 .name = "ARINC 653 Scheduler",
keir@22501 659 .opt_name = "arinc653",
keir@22501 660 .sched_id = XEN_SCHEDULER_ARINC653,
keir@22501 661 .sched_data = NULL,
keir@22501 662
keir@22501 663 .init = a653sched_init,
keir@22501 664 .deinit = a653sched_deinit,
keir@22501 665
keir@22501 666 .free_vdata = a653sched_free_vdata,
keir@22501 667 .alloc_vdata = a653sched_alloc_vdata,
keir@22501 668
keir@22501 669 .free_pdata = a653sched_free_pdata,
keir@22501 670 .alloc_pdata = a653sched_alloc_pdata,
keir@22501 671
keir@22501 672 .free_domdata = a653sched_free_domdata,
keir@22501 673 .alloc_domdata = a653sched_alloc_domdata,
keir@22501 674
keir@22501 675 .init_domain = NULL,
keir@22501 676 .destroy_domain = NULL,
keir@22501 677
keir@22501 678 .insert_vcpu = NULL,
keir@22501 679 .remove_vcpu = NULL,
keir@22501 680
keir@22501 681 .sleep = a653sched_vcpu_sleep,
keir@22501 682 .wake = a653sched_vcpu_wake,
keir@22501 683 .yield = NULL,
keir@22501 684 .context_saved = NULL,
keir@22501 685
keir@22501 686 .do_schedule = a653sched_do_schedule,
keir@22501 687
keir@22501 688 .pick_cpu = a653sched_pick_cpu,
keir@22501 689
keir@22501 690 .adjust = NULL,
keir@22501 691 .adjust_global = a653sched_adjust_global,
keir@22501 692
keir@22501 693 .dump_settings = NULL,
keir@22501 694 .dump_cpu_state = NULL,
keir@22501 695
keir@22501 696 .tick_suspend = NULL,
keir@22501 697 .tick_resume = NULL,
keir@22501 698 };