Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/common/sched_arinc653.c
Line
Count
Source (jump to first uncovered line)
1
/******************************************************************************
2
 * sched_arinc653.c
3
 *
4
 * An ARINC653-compatible scheduling algorithm for use in Xen.
5
 *
6
 * Permission is hereby granted, free of charge, to any person obtaining a copy
7
 * of this software and associated documentation files (the "Software"), to
8
 * deal in the Software without restriction, including without limitation the
9
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10
 * sell copies of the Software, and to permit persons to whom the Software is
11
 * furnished to do so, subject to the following conditions:
12
 *
13
 * The above copyright notice and this permission notice shall be included in
14
 * all copies or substantial portions of the Software.
15
 *
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22
 * DEALINGS IN THE SOFTWARE.
23
 *
24
 * Copyright (c) 2010, DornerWorks, Ltd. <DornerWorks.com>
25
 */
26
27
#include <xen/lib.h>
28
#include <xen/sched.h>
29
#include <xen/sched-if.h>
30
#include <xen/timer.h>
31
#include <xen/softirq.h>
32
#include <xen/time.h>
33
#include <xen/errno.h>
34
#include <xen/list.h>
35
#include <xen/guest_access.h>
36
#include <public/sysctl.h>
37
38
/**************************************************************************
 * Private Macros                                                         *
 **************************************************************************/

/**
 * Default timeslice handed to each dom0 VCPU that is auto-added to the
 * schedule (see a653sched_alloc_vdata()), and the idle period used by
 * a653sched_do_schedule() while the schedule is still empty.
 */
#define DEFAULT_TIMESLICE MILLISECS(10)

/**
 * Retrieve the idle VCPU for a given physical CPU
 */
#define IDLETASK(cpu)  (idle_vcpu[cpu])

/**
 * Return a pointer to the ARINC 653-specific scheduler data information
 * associated with the given VCPU (vc)
 */
#define AVCPU(vc) ((arinc653_vcpu_t *)(vc)->sched_priv)

/**
 * Return the global scheduler private data given the scheduler ops pointer
 */
#define SCHED_PRIV(s) ((a653sched_priv_t *)((s)->sched_data))
62
63
/**************************************************************************
64
 * Private Type Definitions                                               *
65
 **************************************************************************/
66
67
/**
 * The arinc653_vcpu_t structure holds ARINC 653-scheduler-specific
 * information for all non-idle VCPUs
 */
typedef struct arinc653_vcpu_s
{
    /* vc points to Xen's struct vcpu so we can get to it from an
     * arinc653_vcpu_t pointer. */
    struct vcpu *       vc;
    /* awake holds whether the VCPU has been woken with vcpu_wake();
     * do_schedule() refuses to run a VCPU whose awake flag is clear. */
    bool_t              awake;
    /* list holds the linked list information for the per-scheduler
     * vcpu_list this VCPU is stored in */
    struct list_head    list;
} arinc653_vcpu_t;
82
83
/**
 * The sched_entry_t structure holds a single entry of the
 * ARINC 653 schedule.
 */
typedef struct sched_entry_s
{
    /* dom_handle holds the handle ("UUID") for the domain that this
     * schedule entry refers to. */
    xen_domain_handle_t dom_handle;
    /* vcpu_id holds the VCPU number for the VCPU that this schedule
     * entry refers to. */
    int                 vcpu_id;
    /* runtime holds the number of nanoseconds that the VCPU for this
     * schedule entry should be allowed to run per major frame. */
    s_time_t            runtime;
    /* vc holds a pointer to the Xen VCPU structure; kept up to date by
     * update_schedule_vcpus() and NULL when no matching VCPU exists. */
    struct vcpu *       vc;
} sched_entry_t;
101
102
/**
 * This structure defines data that is global to an instance of the scheduler
 */
typedef struct a653sched_priv_s
{
    /* lock for the whole pluggable scheduler, nests inside cpupool_lock */
    spinlock_t lock;

    /**
     * This array holds the active ARINC 653 schedule.
     *
     * When the system tries to start a new VCPU, this schedule is scanned
     * to look for a matching (handle, VCPU #) pair. If both the handle (UUID)
     * and VCPU number match, then the VCPU is allowed to run. Its run time
     * (per major frame) is given in the third entry of the schedule.
     */
    sched_entry_t schedule[ARINC653_MAX_DOMAINS_PER_SCHEDULE];

    /**
     * This variable holds the number of entries that are valid in
     * the arinc653_schedule table.
     *
     * This is not necessarily the same as the number of domains in the
     * schedule. A domain could be listed multiple times within the schedule,
     * or a domain with multiple VCPUs could have a different
     * schedule entry for each VCPU.
     */
    unsigned int num_schedule_entries;

    /**
     * the major frame time for the ARINC 653 schedule.
     */
    s_time_t major_frame;

    /**
     * the time that the next major frame starts
     */
    s_time_t next_major_frame;

    /**
     * pointers to all Xen VCPU structures for iterating through
     */
    struct list_head vcpu_list;
} a653sched_priv_t;
146
147
/**************************************************************************
148
 * Helper functions                                                       *
149
 **************************************************************************/
150
151
/**
 * This function compares two domain handles ("UUIDs") byte-wise.
 *
 * @param h1        Pointer to handle 1
 * @param h2        Pointer to handle 2
 *
 * @return          memcmp() semantics:
 *                  <ul>
 *                  <li> <0:  handle 1 is less than handle 2
 *                  <li>  0:  handle 1 is equal to handle 2
 *                  <li> >0:  handle 1 is greater than handle 2
 *                  </ul>
 */
static int dom_handle_cmp(const xen_domain_handle_t h1,
                          const xen_domain_handle_t h2)
{
    return memcmp(h1, h2, sizeof(xen_domain_handle_t));
}
168
169
/**
170
 * This function searches the vcpu list to find a VCPU that matches
171
 * the domain handle and VCPU ID specified.
172
 *
173
 * @param ops       Pointer to this instance of the scheduler structure
174
 * @param handle    Pointer to handler
175
 * @param vcpu_id   VCPU ID
176
 *
177
 * @return          <ul>
178
 *                  <li> Pointer to the matching VCPU if one is found
179
 *                  <li> NULL otherwise
180
 *                  </ul>
181
 */
182
static struct vcpu *find_vcpu(
183
    const struct scheduler *ops,
184
    xen_domain_handle_t handle,
185
    int vcpu_id)
186
0
{
187
0
    arinc653_vcpu_t *avcpu;
188
0
189
0
    /* loop through the vcpu_list looking for the specified VCPU */
190
0
    list_for_each_entry ( avcpu, &SCHED_PRIV(ops)->vcpu_list, list )
191
0
        if ( (dom_handle_cmp(avcpu->vc->domain->handle, handle) == 0)
192
0
             && (vcpu_id == avcpu->vc->vcpu_id) )
193
0
            return avcpu->vc;
194
0
195
0
    return NULL;
196
0
}
197
198
/**
199
 * This function updates the pointer to the Xen VCPU structure for each entry
200
 * in the ARINC 653 schedule.
201
 *
202
 * @param ops       Pointer to this instance of the scheduler structure
203
 * @return          <None>
204
 */
205
static void update_schedule_vcpus(const struct scheduler *ops)
206
0
{
207
0
    unsigned int i, n_entries = SCHED_PRIV(ops)->num_schedule_entries;
208
0
209
0
    for ( i = 0; i < n_entries; i++ )
210
0
        SCHED_PRIV(ops)->schedule[i].vc =
211
0
            find_vcpu(ops,
212
0
                      SCHED_PRIV(ops)->schedule[i].dom_handle,
213
0
                      SCHED_PRIV(ops)->schedule[i].vcpu_id);
214
0
}
215
216
/**
217
 * This function is called by the adjust_global scheduler hook to put
218
 * in place a new ARINC653 schedule.
219
 *
220
 * @param ops       Pointer to this instance of the scheduler structure
221
 *
222
 * @return          <ul>
223
 *                  <li> 0 = success
224
 *                  <li> !0 = error
225
 *                  </ul>
226
 */
227
static int
228
arinc653_sched_set(
229
    const struct scheduler *ops,
230
    struct xen_sysctl_arinc653_schedule *schedule)
231
0
{
232
0
    a653sched_priv_t *sched_priv = SCHED_PRIV(ops);
233
0
    s_time_t total_runtime = 0;
234
0
    unsigned int i;
235
0
    unsigned long flags;
236
0
    int rc = -EINVAL;
237
0
238
0
    spin_lock_irqsave(&sched_priv->lock, flags);
239
0
240
0
    /* Check for valid major frame and number of schedule entries. */
241
0
    if ( (schedule->major_frame <= 0)
242
0
         || (schedule->num_sched_entries < 1)
243
0
         || (schedule->num_sched_entries > ARINC653_MAX_DOMAINS_PER_SCHEDULE) )
244
0
        goto fail;
245
0
246
0
    for ( i = 0; i < schedule->num_sched_entries; i++ )
247
0
    {
248
0
        /* Check for a valid run time. */
249
0
        if ( schedule->sched_entries[i].runtime <= 0 )
250
0
            goto fail;
251
0
252
0
        /* Add this entry's run time to total run time. */
253
0
        total_runtime += schedule->sched_entries[i].runtime;
254
0
    }
255
0
256
0
    /*
257
0
     * Error if the major frame is not large enough to run all entries as
258
0
     * indicated by comparing the total run time to the major frame length.
259
0
     */
260
0
    if ( total_runtime > schedule->major_frame )
261
0
        goto fail;
262
0
263
0
    /* Copy the new schedule into place. */
264
0
    sched_priv->num_schedule_entries = schedule->num_sched_entries;
265
0
    sched_priv->major_frame = schedule->major_frame;
266
0
    for ( i = 0; i < schedule->num_sched_entries; i++ )
267
0
    {
268
0
        memcpy(sched_priv->schedule[i].dom_handle,
269
0
               schedule->sched_entries[i].dom_handle,
270
0
               sizeof(sched_priv->schedule[i].dom_handle));
271
0
        sched_priv->schedule[i].vcpu_id =
272
0
            schedule->sched_entries[i].vcpu_id;
273
0
        sched_priv->schedule[i].runtime =
274
0
            schedule->sched_entries[i].runtime;
275
0
    }
276
0
    update_schedule_vcpus(ops);
277
0
278
0
    /*
279
0
     * The newly-installed schedule takes effect immediately. We do not even
280
0
     * wait for the current major frame to expire.
281
0
     *
282
0
     * Signal a new major frame to begin. The next major frame is set up by
283
0
     * the do_schedule callback function when it is next invoked.
284
0
     */
285
0
    sched_priv->next_major_frame = NOW();
286
0
287
0
    rc = 0;
288
0
289
0
 fail:
290
0
    spin_unlock_irqrestore(&sched_priv->lock, flags);
291
0
    return rc;
292
0
}
293
294
/**
295
 * This function is called by the adjust_global scheduler hook to read the
296
 * current ARINC 653 schedule
297
 *
298
 * @param ops       Pointer to this instance of the scheduler structure
299
 * @return          <ul>
300
 *                  <li> 0 = success
301
 *                  <li> !0 = error
302
 *                  </ul>
303
 */
304
static int
305
arinc653_sched_get(
306
    const struct scheduler *ops,
307
    struct xen_sysctl_arinc653_schedule *schedule)
308
0
{
309
0
    a653sched_priv_t *sched_priv = SCHED_PRIV(ops);
310
0
    unsigned int i;
311
0
    unsigned long flags;
312
0
313
0
    spin_lock_irqsave(&sched_priv->lock, flags);
314
0
315
0
    schedule->num_sched_entries = sched_priv->num_schedule_entries;
316
0
    schedule->major_frame = sched_priv->major_frame;
317
0
    for ( i = 0; i < sched_priv->num_schedule_entries; i++ )
318
0
    {
319
0
        memcpy(schedule->sched_entries[i].dom_handle,
320
0
               sched_priv->schedule[i].dom_handle,
321
0
               sizeof(sched_priv->schedule[i].dom_handle));
322
0
        schedule->sched_entries[i].vcpu_id = sched_priv->schedule[i].vcpu_id;
323
0
        schedule->sched_entries[i].runtime = sched_priv->schedule[i].runtime;
324
0
    }
325
0
326
0
    spin_unlock_irqrestore(&sched_priv->lock, flags);
327
0
328
0
    return 0;
329
0
}
330
331
/**************************************************************************
332
 * Scheduler callback functions                                           *
333
 **************************************************************************/
334
335
/**
336
 * This function performs initialization for an instance of the scheduler.
337
 *
338
 * @param ops       Pointer to this instance of the scheduler structure
339
 *
340
 * @return          <ul>
341
 *                  <li> 0 = success
342
 *                  <li> !0 = error
343
 *                  </ul>
344
 */
345
static int
346
a653sched_init(struct scheduler *ops)
347
0
{
348
0
    a653sched_priv_t *prv;
349
0
350
0
    prv = xzalloc(a653sched_priv_t);
351
0
    if ( prv == NULL )
352
0
        return -ENOMEM;
353
0
354
0
    ops->sched_data = prv;
355
0
356
0
    prv->next_major_frame = 0;
357
0
    spin_lock_init(&prv->lock);
358
0
    INIT_LIST_HEAD(&prv->vcpu_list);
359
0
360
0
    return 0;
361
0
}
362
363
/**
364
 * This function performs deinitialization for an instance of the scheduler
365
 *
366
 * @param ops       Pointer to this instance of the scheduler structure
367
 */
368
static void
369
a653sched_deinit(struct scheduler *ops)
370
0
{
371
0
    xfree(SCHED_PRIV(ops));
372
0
    ops->sched_data = NULL;
373
0
}
374
375
/**
376
 * This function allocates scheduler-specific data for a VCPU
377
 *
378
 * @param ops       Pointer to this instance of the scheduler structure
379
 *
380
 * @return          Pointer to the allocated data
381
 */
382
static void *
383
a653sched_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
384
0
{
385
0
    a653sched_priv_t *sched_priv = SCHED_PRIV(ops);
386
0
    arinc653_vcpu_t *svc;
387
0
    unsigned int entry;
388
0
    unsigned long flags;
389
0
390
0
    /*
391
0
     * Allocate memory for the ARINC 653-specific scheduler data information
392
0
     * associated with the given VCPU (vc).
393
0
     */
394
0
    svc = xmalloc(arinc653_vcpu_t);
395
0
    if ( svc == NULL )
396
0
        return NULL;
397
0
398
0
    spin_lock_irqsave(&sched_priv->lock, flags);
399
0
400
0
    /* 
401
0
     * Add every one of dom0's vcpus to the schedule, as long as there are
402
0
     * slots available.
403
0
     */
404
0
    if ( vc->domain->domain_id == 0 )
405
0
    {
406
0
        entry = sched_priv->num_schedule_entries;
407
0
408
0
        if ( entry < ARINC653_MAX_DOMAINS_PER_SCHEDULE )
409
0
        {
410
0
            sched_priv->schedule[entry].dom_handle[0] = '\0';
411
0
            sched_priv->schedule[entry].vcpu_id = vc->vcpu_id;
412
0
            sched_priv->schedule[entry].runtime = DEFAULT_TIMESLICE;
413
0
            sched_priv->schedule[entry].vc = vc;
414
0
415
0
            sched_priv->major_frame += DEFAULT_TIMESLICE;
416
0
            ++sched_priv->num_schedule_entries;
417
0
        }
418
0
    }
419
0
420
0
    /*
421
0
     * Initialize our ARINC 653 scheduler-specific information for the VCPU.
422
0
     * The VCPU starts "asleep." When Xen is ready for the VCPU to run, it
423
0
     * will call the vcpu_wake scheduler callback function and our scheduler
424
0
     * will mark the VCPU awake.
425
0
     */
426
0
    svc->vc = vc;
427
0
    svc->awake = 0;
428
0
    if ( !is_idle_vcpu(vc) )
429
0
        list_add(&svc->list, &SCHED_PRIV(ops)->vcpu_list);
430
0
    update_schedule_vcpus(ops);
431
0
432
0
    spin_unlock_irqrestore(&sched_priv->lock, flags);
433
0
434
0
    return svc;
435
0
}
436
437
/**
438
 * This function frees scheduler-specific VCPU data
439
 *
440
 * @param ops       Pointer to this instance of the scheduler structure
441
 */
442
static void
443
a653sched_free_vdata(const struct scheduler *ops, void *priv)
444
0
{
445
0
    arinc653_vcpu_t *av = priv;
446
0
447
0
    if (av == NULL)
448
0
        return;
449
0
450
0
    if ( !is_idle_vcpu(av->vc) )
451
0
        list_del(&av->list);
452
0
453
0
    xfree(av);
454
0
    update_schedule_vcpus(ops);
455
0
}
456
457
/**
 * This function allocates scheduler-specific data for a domain
 *
 * We do not actually make use of any per-domain data but the hypervisor
 * expects a non-NULL return value
 *
 * @param ops       Pointer to this instance of the scheduler structure
 * @param dom       Domain the data is nominally for (unused)
 *
 * @return          Pointer to the allocated data
 */
static void *
a653sched_alloc_domdata(const struct scheduler *ops, struct domain *dom)
{
    /* return a non-NULL value to keep schedule.c happy; nothing is
     * allocated, so free_domdata is correspondingly a no-op */
    return SCHED_PRIV(ops);
}
473
474
/**
 * This function frees scheduler-specific data for a domain
 *
 * Intentionally a no-op: alloc_domdata never allocates anything and just
 * returns the shared scheduler private data pointer.
 *
 * @param ops       Pointer to this instance of the scheduler structure
 * @param data      Value previously returned by alloc_domdata (not owned)
 */
static void
a653sched_free_domdata(const struct scheduler *ops, void *data)
{
    /* nop */
}
484
485
/**
486
 * Xen scheduler callback function to sleep a VCPU
487
 *
488
 * @param ops       Pointer to this instance of the scheduler structure
489
 * @param vc        Pointer to the VCPU structure for the current domain
490
 */
491
static void
492
a653sched_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
493
0
{
494
0
    if ( AVCPU(vc) != NULL )
495
0
        AVCPU(vc)->awake = 0;
496
0
497
0
    /*
498
0
     * If the VCPU being put to sleep is the same one that is currently
499
0
     * running, raise a softirq to invoke the scheduler to switch domains.
500
0
     */
501
0
    if ( per_cpu(schedule_data, vc->processor).curr == vc )
502
0
        cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
503
0
}
504
505
/**
506
 * Xen scheduler callback function to wake up a VCPU
507
 *
508
 * @param ops       Pointer to this instance of the scheduler structure
509
 * @param vc        Pointer to the VCPU structure for the current domain
510
 */
511
static void
512
a653sched_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
513
0
{
514
0
    if ( AVCPU(vc) != NULL )
515
0
        AVCPU(vc)->awake = 1;
516
0
517
0
    cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
518
0
}
519
520
/**
521
 * Xen scheduler callback function to select a VCPU to run.
522
 * This is the main scheduler routine.
523
 *
524
 * @param ops       Pointer to this instance of the scheduler structure
525
 * @param now       Current time
526
 *
527
 * @return          Address of the VCPU structure scheduled to be run next
528
 *                  Amount of time to execute the returned VCPU
529
 *                  Flag for whether the VCPU was migrated
530
 */
531
static struct task_slice
532
a653sched_do_schedule(
533
    const struct scheduler *ops,
534
    s_time_t now,
535
    bool_t tasklet_work_scheduled)
536
0
{
537
0
    struct task_slice ret;                      /* hold the chosen domain */
538
0
    struct vcpu * new_task = NULL;
539
0
    static unsigned int sched_index = 0;
540
0
    static s_time_t next_switch_time;
541
0
    a653sched_priv_t *sched_priv = SCHED_PRIV(ops);
542
0
    const unsigned int cpu = smp_processor_id();
543
0
    unsigned long flags;
544
0
545
0
    spin_lock_irqsave(&sched_priv->lock, flags);
546
0
547
0
    if ( sched_priv->num_schedule_entries < 1 )
548
0
        sched_priv->next_major_frame = now + DEFAULT_TIMESLICE;
549
0
    else if ( now >= sched_priv->next_major_frame )
550
0
    {
551
0
        /* time to enter a new major frame
552
0
         * the first time this function is called, this will be true */
553
0
        /* start with the first domain in the schedule */
554
0
        sched_index = 0;
555
0
        sched_priv->next_major_frame = now + sched_priv->major_frame;
556
0
        next_switch_time = now + sched_priv->schedule[0].runtime;
557
0
    }
558
0
    else
559
0
    {
560
0
        while ( (now >= next_switch_time)
561
0
                && (sched_index < sched_priv->num_schedule_entries) )
562
0
        {
563
0
            /* time to switch to the next domain in this major frame */
564
0
            sched_index++;
565
0
            next_switch_time += sched_priv->schedule[sched_index].runtime;
566
0
        }
567
0
    }
568
0
569
0
    /*
570
0
     * If we exhausted the domains in the schedule and still have time left
571
0
     * in the major frame then switch next at the next major frame.
572
0
     */
573
0
    if ( sched_index >= sched_priv->num_schedule_entries )
574
0
        next_switch_time = sched_priv->next_major_frame;
575
0
576
0
    /*
577
0
     * If there are more domains to run in the current major frame, set
578
0
     * new_task equal to the address of next domain's VCPU structure.
579
0
     * Otherwise, set new_task equal to the address of the idle task's VCPU
580
0
     * structure.
581
0
     */
582
0
    new_task = (sched_index < sched_priv->num_schedule_entries)
583
0
        ? sched_priv->schedule[sched_index].vc
584
0
        : IDLETASK(cpu);
585
0
586
0
    /* Check to see if the new task can be run (awake & runnable). */
587
0
    if ( !((new_task != NULL)
588
0
           && (AVCPU(new_task) != NULL)
589
0
           && AVCPU(new_task)->awake
590
0
           && vcpu_runnable(new_task)) )
591
0
        new_task = IDLETASK(cpu);
592
0
    BUG_ON(new_task == NULL);
593
0
594
0
    /*
595
0
     * Check to make sure we did not miss a major frame.
596
0
     * This is a good test for robust partitioning.
597
0
     */
598
0
    BUG_ON(now >= sched_priv->next_major_frame);
599
0
600
0
    spin_unlock_irqrestore(&sched_priv->lock, flags);
601
0
602
0
    /* Tasklet work (which runs in idle VCPU context) overrides all else. */
603
0
    if ( tasklet_work_scheduled )
604
0
        new_task = IDLETASK(cpu);
605
0
606
0
    /* Running this task would result in a migration */
607
0
    if ( !is_idle_vcpu(new_task)
608
0
         && (new_task->processor != cpu) )
609
0
        new_task = IDLETASK(cpu);
610
0
611
0
    /*
612
0
     * Return the amount of time the next domain has to run and the address
613
0
     * of the selected task's VCPU structure.
614
0
     */
615
0
    ret.time = next_switch_time - now;
616
0
    ret.task = new_task;
617
0
    ret.migrated = 0;
618
0
619
0
    BUG_ON(ret.time <= 0);
620
0
621
0
    return ret;
622
0
}
623
624
/**
625
 * Xen scheduler callback function to select a CPU for the VCPU to run on
626
 *
627
 * @param ops       Pointer to this instance of the scheduler structure
628
 * @param v         Pointer to the VCPU structure for the current domain
629
 *
630
 * @return          Number of selected physical CPU
631
 */
632
static int
633
a653sched_pick_cpu(const struct scheduler *ops, struct vcpu *vc)
634
0
{
635
0
    cpumask_t *online;
636
0
    unsigned int cpu;
637
0
638
0
    /* 
639
0
     * If present, prefer vc's current processor, else
640
0
     * just find the first valid vcpu .
641
0
     */
642
0
    online = cpupool_domain_cpumask(vc->domain);
643
0
644
0
    cpu = cpumask_first(online);
645
0
646
0
    if ( cpumask_test_cpu(vc->processor, online)
647
0
         || (cpu >= nr_cpu_ids) )
648
0
        cpu = vc->processor;
649
0
650
0
    return cpu;
651
0
}
652
653
/**
 * Xen scheduler callback to change the scheduler of a cpu
 *
 * @param new_ops   Pointer to this instance of the scheduler structure
 * @param cpu       The cpu that is changing scheduler
 * @param pdata     scheduler specific PCPU data (we don't have any)
 * @param vdata     scheduler specific VCPU data of the idle vcpu
 */
static void
a653_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                  void *pdata, void *vdata)
{
    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
    arinc653_vcpu_t *svc = vdata;

    /* This scheduler keeps no PCPU data, and vdata must belong to the
     * idle VCPU being installed on this CPU. */
    ASSERT(!pdata && svc && is_idle_vcpu(svc->vc));

    idle_vcpu[cpu]->sched_priv = vdata;

    per_cpu(scheduler, cpu) = new_ops;
    per_cpu(schedule_data, cpu).sched_priv = NULL; /* no pdata */

    /*
     * (Re?)route the lock to its default location. We actually do not use
     * it, but if we leave it pointing to where it does now (i.e., the
     * runqueue lock for this PCPU in the default scheduler), we'd be
     * causing unnecessary contention on that lock (in cases where it is
     * shared among multiple PCPUs, like in Credit2 and RTDS).
     */
    sd->schedule_lock = &sd->_lock;
}
684
685
/**
686
 * Xen scheduler callback function to perform a global (not domain-specific)
687
 * adjustment. It is used by the ARINC 653 scheduler to put in place a new
688
 * ARINC 653 schedule or to retrieve the schedule currently in place.
689
 *
690
 * @param ops       Pointer to this instance of the scheduler structure
691
 * @param sc        Pointer to the scheduler operation specified by Domain 0
692
 */
693
static int
694
a653sched_adjust_global(const struct scheduler *ops,
695
                        struct xen_sysctl_scheduler_op *sc)
696
0
{
697
0
    struct xen_sysctl_arinc653_schedule local_sched;
698
0
    int rc = -EINVAL;
699
0
700
0
    switch ( sc->cmd )
701
0
    {
702
0
    case XEN_SYSCTL_SCHEDOP_putinfo:
703
0
        if ( copy_from_guest(&local_sched, sc->u.sched_arinc653.schedule, 1) )
704
0
        {
705
0
            rc = -EFAULT;
706
0
            break;
707
0
        }
708
0
709
0
        rc = arinc653_sched_set(ops, &local_sched);
710
0
        break;
711
0
    case XEN_SYSCTL_SCHEDOP_getinfo:
712
0
        memset(&local_sched, -1, sizeof(local_sched));
713
0
        rc = arinc653_sched_get(ops, &local_sched);
714
0
        if ( rc )
715
0
            break;
716
0
717
0
        if ( copy_to_guest(sc->u.sched_arinc653.schedule, &local_sched, 1) )
718
0
            rc = -EFAULT;
719
0
        break;
720
0
    }
721
0
722
0
    return rc;
723
0
}
724
725
/**
 * This structure defines our scheduler for Xen.
 * The entries tell Xen where to find our scheduler-specific
 * callback functions.
 * The symbol must be visible to the rest of Xen at link time.
 * Hooks left NULL are simply not implemented by this scheduler.
 */
static const struct scheduler sched_arinc653_def = {
    .name           = "ARINC 653 Scheduler",
    .opt_name       = "arinc653",
    .sched_id       = XEN_SCHEDULER_ARINC653,
    .sched_data     = NULL,

    .init           = a653sched_init,
    .deinit         = a653sched_deinit,

    .free_vdata     = a653sched_free_vdata,
    .alloc_vdata    = a653sched_alloc_vdata,

    .free_domdata   = a653sched_free_domdata,
    .alloc_domdata  = a653sched_alloc_domdata,

    .init_domain    = NULL,
    .destroy_domain = NULL,

    .insert_vcpu    = NULL,
    .remove_vcpu    = NULL,

    .sleep          = a653sched_vcpu_sleep,
    .wake           = a653sched_vcpu_wake,
    .yield          = NULL,
    .context_saved  = NULL,

    .do_schedule    = a653sched_do_schedule,

    .pick_cpu       = a653sched_pick_cpu,

    .switch_sched   = a653_switch_sched,

    .adjust         = NULL,
    .adjust_global  = a653sched_adjust_global,

    .dump_settings  = NULL,
    .dump_cpu_state = NULL,

    .tick_suspend   = NULL,
    .tick_resume    = NULL,
};

REGISTER_SCHEDULER(sched_arinc653_def);
774
775
/*
776
 * Local variables:
777
 * mode: C
778
 * c-file-style: "BSD"
779
 * c-basic-offset: 4
780
 * tab-width: 4
781
 * indent-tabs-mode: nil
782
 * End:
783
 */