Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/drivers/passthrough/pci.c
Line
Count
Source (jump to first uncovered line)
1
/*
2
 * Copyright (C) 2008,  Netronome Systems, Inc.
3
 *                
4
 * This program is free software; you can redistribute it and/or modify it
5
 * under the terms and conditions of the GNU General Public License,
6
 * version 2, as published by the Free Software Foundation.
7
 *
8
 * This program is distributed in the hope it will be useful, but WITHOUT
9
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11
 * more details.
12
 *
13
 * You should have received a copy of the GNU General Public License along with
14
 * this program; If not, see <http://www.gnu.org/licenses/>.
15
 */
16
17
#include <xen/sched.h>
18
#include <xen/pci.h>
19
#include <xen/pci_regs.h>
20
#include <xen/list.h>
21
#include <xen/prefetch.h>
22
#include <xen/iommu.h>
23
#include <xen/irq.h>
24
#include <xen/vm_event.h>
25
#include <asm/hvm/irq.h>
26
#include <xen/delay.h>
27
#include <xen/keyhandler.h>
28
#include <xen/event.h>
29
#include <xen/guest_access.h>
30
#include <xen/paging.h>
31
#include <xen/radix-tree.h>
32
#include <xen/softirq.h>
33
#include <xen/tasklet.h>
34
#include <xen/vpci.h>
35
#include <xsm/xsm.h>
36
#include <asm/msi.h>
37
#include "ats.h"
38
39
struct pci_seg {
40
    struct list_head alldevs_list;
41
    u16 nr;
42
    unsigned long *ro_map;
43
    /* bus2bridge_lock protects bus2bridge array */
44
    spinlock_t bus2bridge_lock;
45
0
#define MAX_BUSES 256
46
    struct {
47
        u8 map;
48
        u8 bus;
49
        u8 devfn;
50
    } bus2bridge[MAX_BUSES];
51
};
52
53
static spinlock_t _pcidevs_lock = SPIN_LOCK_UNLOCKED;
54
55
void pcidevs_lock(void)
56
161
{
57
161
    spin_lock_recursive(&_pcidevs_lock);
58
161
}
59
60
void pcidevs_unlock(void)
61
161
{
62
161
    spin_unlock_recursive(&_pcidevs_lock);
63
161
}
64
65
bool_t pcidevs_locked(void)
66
65.8k
{
67
65.8k
    return !!spin_is_locked(&_pcidevs_lock);
68
65.8k
}
69
70
bool_t pcidevs_trylock(void)
71
0
{
72
0
    return !!spin_trylock_recursive(&_pcidevs_lock);
73
0
}
74
75
static struct radix_tree_root pci_segments;
76
77
static inline struct pci_seg *get_pseg(u16 seg)
78
132k
{
79
132k
    return radix_tree_lookup(&pci_segments, seg);
80
132k
}
81
82
bool_t pci_known_segment(u16 seg)
83
1
{
84
1
    return get_pseg(seg) != NULL;
85
1
}
86
87
static struct pci_seg *alloc_pseg(u16 seg)
88
2
{
89
2
    struct pci_seg *pseg = get_pseg(seg);
90
2
91
2
    if ( pseg )
92
1
        return pseg;
93
2
94
1
    pseg = xzalloc(struct pci_seg);
95
1
    if ( !pseg )
96
0
        return NULL;
97
1
98
1
    pseg->nr = seg;
99
1
    INIT_LIST_HEAD(&pseg->alldevs_list);
100
1
    spin_lock_init(&pseg->bus2bridge_lock);
101
1
102
1
    if ( radix_tree_insert(&pci_segments, seg, pseg) )
103
0
    {
104
0
        xfree(pseg);
105
0
        pseg = NULL;
106
0
    }
107
1
108
1
    return pseg;
109
1
}
110
111
static int pci_segments_iterate(
112
    int (*handler)(struct pci_seg *, void *), void *arg)
113
2
{
114
2
    u16 seg = 0;
115
2
    int rc = 0;
116
2
117
4
    do {
118
4
        struct pci_seg *pseg;
119
4
120
4
        if ( !radix_tree_gang_lookup(&pci_segments, (void **)&pseg, seg, 1) )
121
2
            break;
122
2
        rc = handler(pseg, arg);
123
2
        seg = pseg->nr + 1;
124
2
    } while (!rc && seg);
125
2
126
2
    return rc;
127
2
}
128
129
void __init pt_pci_init(void)
130
1
{
131
1
    radix_tree_init(&pci_segments);
132
1
    if ( !alloc_pseg(0) )
133
0
        panic("Could not initialize PCI segment 0");
134
1
}
135
136
int __init pci_add_segment(u16 seg)
137
1
{
138
1
    return alloc_pseg(seg) ? 0 : -ENOMEM;
139
1
}
140
141
const unsigned long *pci_get_ro_map(u16 seg)
142
0
{
143
0
    struct pci_seg *pseg = get_pseg(seg);
144
0
145
0
    return pseg ? pseg->ro_map : NULL;
146
0
}
147
148
static struct phantom_dev {
149
    u16 seg;
150
    u8 bus, slot, stride;
151
} phantom_devs[8];
152
static unsigned int nr_phantom_devs;
153
154
static int __init parse_phantom_dev(const char *str)
155
0
{
156
0
    const char *s;
157
0
    unsigned int seg, bus, slot;
158
0
    struct phantom_dev phantom;
159
0
160
0
    if ( !*str )
161
0
        return -EINVAL;
162
0
    if ( nr_phantom_devs >= ARRAY_SIZE(phantom_devs) )
163
0
        return -E2BIG;
164
0
165
0
    s = parse_pci(str, &seg, &bus, &slot, NULL);
166
0
    if ( !s || *s != ',' )
167
0
        return -EINVAL;
168
0
169
0
    phantom.seg = seg;
170
0
    phantom.bus = bus;
171
0
    phantom.slot = slot;
172
0
173
0
    switch ( phantom.stride = simple_strtol(s + 1, &s, 0) )
174
0
    {
175
0
    case 1: case 2: case 4:
176
0
        if ( *s )
177
0
    default:
178
0
            return -EINVAL;
179
0
    }
180
0
181
0
    phantom_devs[nr_phantom_devs++] = phantom;
182
0
183
0
    return 0;
184
0
}
185
custom_param("pci-phantom", parse_phantom_dev);
186
187
static u16 __read_mostly command_mask;
188
static u16 __read_mostly bridge_ctl_mask;
189
190
/*
191
 * The 'pci' parameter controls certain PCI device aspects.
192
 * Optional comma separated value may contain:
193
 *
194
 *   serr                       don't suppress system errors (default)
195
 *   no-serr                    suppress system errors
196
 *   perr                       don't suppress parity errors (default)
197
 *   no-perr                    suppress parity errors
198
 */
199
static int __init parse_pci_param(const char *s)
200
0
{
201
0
    const char *ss;
202
0
    int rc = 0;
203
0
204
0
    do {
205
0
        bool_t on = !!strncmp(s, "no-", 3);
206
0
        u16 cmd_mask = 0, brctl_mask = 0;
207
0
208
0
        if ( !on )
209
0
            s += 3;
210
0
211
0
        ss = strchr(s, ',');
212
0
        if ( !ss )
213
0
            ss = strchr(s, '\0');
214
0
215
0
        if ( !strncmp(s, "serr", ss - s) )
216
0
        {
217
0
            cmd_mask = PCI_COMMAND_SERR;
218
0
            brctl_mask = PCI_BRIDGE_CTL_SERR | PCI_BRIDGE_CTL_DTMR_SERR;
219
0
        }
220
0
        else if ( !strncmp(s, "perr", ss - s) )
221
0
        {
222
0
            cmd_mask = PCI_COMMAND_PARITY;
223
0
            brctl_mask = PCI_BRIDGE_CTL_PARITY;
224
0
        }
225
0
        else
226
0
            rc = -EINVAL;
227
0
228
0
        if ( on )
229
0
        {
230
0
            command_mask &= ~cmd_mask;
231
0
            bridge_ctl_mask &= ~brctl_mask;
232
0
        }
233
0
        else
234
0
        {
235
0
            command_mask |= cmd_mask;
236
0
            bridge_ctl_mask |= brctl_mask;
237
0
        }
238
0
239
0
        s = ss + 1;
240
0
    } while ( *ss );
241
0
242
0
    return rc;
243
0
}
244
custom_param("pci", parse_pci_param);
245
246
static void check_pdev(const struct pci_dev *pdev)
247
68
{
248
68
#define PCI_STATUS_CHECK \
249
0
    (PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT | \
250
0
     PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT | \
251
0
     PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY)
252
68
    u16 seg = pdev->seg;
253
68
    u8 bus = pdev->bus;
254
68
    u8 dev = PCI_SLOT(pdev->devfn);
255
68
    u8 func = PCI_FUNC(pdev->devfn);
256
68
    u16 val;
257
68
258
68
    if ( command_mask )
259
0
    {
260
0
        val = pci_conf_read16(seg, bus, dev, func, PCI_COMMAND);
261
0
        if ( val & command_mask )
262
0
            pci_conf_write16(seg, bus, dev, func, PCI_COMMAND,
263
0
                             val & ~command_mask);
264
0
        val = pci_conf_read16(seg, bus, dev, func, PCI_STATUS);
265
0
        if ( val & PCI_STATUS_CHECK )
266
0
        {
267
0
            printk(XENLOG_INFO "%04x:%02x:%02x.%u status %04x -> %04x\n",
268
0
                   seg, bus, dev, func, val, val & ~PCI_STATUS_CHECK);
269
0
            pci_conf_write16(seg, bus, dev, func, PCI_STATUS,
270
0
                             val & PCI_STATUS_CHECK);
271
0
        }
272
0
    }
273
68
274
68
    switch ( pci_conf_read8(seg, bus, dev, func, PCI_HEADER_TYPE) & 0x7f )
275
68
    {
276
10
    case PCI_HEADER_TYPE_BRIDGE:
277
10
        if ( !bridge_ctl_mask )
278
10
            break;
279
0
        val = pci_conf_read16(seg, bus, dev, func, PCI_BRIDGE_CONTROL);
280
0
        if ( val & bridge_ctl_mask )
281
0
            pci_conf_write16(seg, bus, dev, func, PCI_BRIDGE_CONTROL,
282
0
                             val & ~bridge_ctl_mask);
283
0
        val = pci_conf_read16(seg, bus, dev, func, PCI_SEC_STATUS);
284
0
        if ( val & PCI_STATUS_CHECK )
285
0
        {
286
0
            printk(XENLOG_INFO
287
0
                   "%04x:%02x:%02x.%u secondary status %04x -> %04x\n",
288
0
                   seg, bus, dev, func, val, val & ~PCI_STATUS_CHECK);
289
0
            pci_conf_write16(seg, bus, dev, func, PCI_SEC_STATUS,
290
0
                             val & PCI_STATUS_CHECK);
291
0
        }
292
0
        break;
293
10
294
0
    case PCI_HEADER_TYPE_CARDBUS:
295
0
        /* TODO */
296
0
        break;
297
68
    }
298
68
#undef PCI_STATUS_CHECK
299
68
}
300
301
static struct pci_dev *alloc_pdev(struct pci_seg *pseg, u8 bus, u8 devfn)
302
68
{
303
68
    struct pci_dev *pdev;
304
68
305
68
    list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
306
2.27k
        if ( pdev->bus == bus && pdev->devfn == devfn )
307
0
            return pdev;
308
68
309
68
    pdev = xzalloc(struct pci_dev);
310
68
    if ( !pdev )
311
0
        return NULL;
312
68
313
68
    *(u16*) &pdev->seg = pseg->nr;
314
68
    *((u8*) &pdev->bus) = bus;
315
68
    *((u8*) &pdev->devfn) = devfn;
316
68
    pdev->domain = NULL;
317
68
    INIT_LIST_HEAD(&pdev->msi_list);
318
68
319
68
    if ( pci_find_cap_offset(pseg->nr, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
320
68
                             PCI_CAP_ID_MSIX) )
321
5
    {
322
5
        struct arch_msix *msix = xzalloc(struct arch_msix);
323
5
324
5
        if ( !msix )
325
0
        {
326
0
            xfree(pdev);
327
0
            return NULL;
328
0
        }
329
5
        spin_lock_init(&msix->table_lock);
330
5
        pdev->msix = msix;
331
5
    }
332
68
333
68
    list_add(&pdev->alldevs_list, &pseg->alldevs_list);
334
68
335
68
    /* update bus2bridge */
336
68
    switch ( pdev->type = pdev_type(pseg->nr, bus, devfn) )
337
68
    {
338
0
        int pos;
339
0
        u16 cap;
340
0
        u8 sec_bus, sub_bus;
341
0
342
1
        case DEV_TYPE_PCIe2PCI_BRIDGE:
343
1
        case DEV_TYPE_LEGACY_PCI_BRIDGE:
344
1
            sec_bus = pci_conf_read8(pseg->nr, bus, PCI_SLOT(devfn),
345
1
                                     PCI_FUNC(devfn), PCI_SECONDARY_BUS);
346
1
            sub_bus = pci_conf_read8(pseg->nr, bus, PCI_SLOT(devfn),
347
1
                                     PCI_FUNC(devfn), PCI_SUBORDINATE_BUS);
348
1
349
1
            spin_lock(&pseg->bus2bridge_lock);
350
2
            for ( ; sec_bus <= sub_bus; sec_bus++ )
351
1
            {
352
1
                pseg->bus2bridge[sec_bus].map = 1;
353
1
                pseg->bus2bridge[sec_bus].bus = bus;
354
1
                pseg->bus2bridge[sec_bus].devfn = devfn;
355
1
            }
356
1
            spin_unlock(&pseg->bus2bridge_lock);
357
1
            break;
358
1
359
25
        case DEV_TYPE_PCIe_ENDPOINT:
360
25
            pos = pci_find_cap_offset(pseg->nr, bus, PCI_SLOT(devfn),
361
25
                                      PCI_FUNC(devfn), PCI_CAP_ID_EXP);
362
25
            BUG_ON(!pos);
363
25
            cap = pci_conf_read16(pseg->nr, bus, PCI_SLOT(devfn),
364
25
                                  PCI_FUNC(devfn), pos + PCI_EXP_DEVCAP);
365
25
            if ( cap & PCI_EXP_DEVCAP_PHANTOM )
366
0
            {
367
0
                pdev->phantom_stride = 8 >> MASK_EXTR(cap,
368
0
                                                      PCI_EXP_DEVCAP_PHANTOM);
369
0
                if ( PCI_FUNC(devfn) >= pdev->phantom_stride )
370
0
                    pdev->phantom_stride = 0;
371
0
            }
372
25
            else
373
25
            {
374
25
                unsigned int i;
375
25
376
25
                for ( i = 0; i < nr_phantom_devs; ++i )
377
0
                    if ( phantom_devs[i].seg == pseg->nr &&
378
0
                         phantom_devs[i].bus == bus &&
379
0
                         phantom_devs[i].slot == PCI_SLOT(devfn) &&
380
0
                         phantom_devs[i].stride > PCI_FUNC(devfn) )
381
0
                    {
382
0
                        pdev->phantom_stride = phantom_devs[i].stride;
383
0
                        break;
384
0
                    }
385
25
            }
386
25
            break;
387
1
388
42
        case DEV_TYPE_PCI:
389
42
        case DEV_TYPE_PCIe_BRIDGE:
390
42
        case DEV_TYPE_PCI_HOST_BRIDGE:
391
42
            break;
392
42
393
0
        default:
394
0
            printk(XENLOG_WARNING "%04x:%02x:%02x.%u: unknown type %d\n",
395
0
                   pseg->nr, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), pdev->type);
396
0
            break;
397
68
    }
398
68
399
68
    check_pdev(pdev);
400
68
401
68
    return pdev;
402
68
}
403
404
static void free_pdev(struct pci_seg *pseg, struct pci_dev *pdev)
405
0
{
406
0
    /* update bus2bridge */
407
0
    switch ( pdev->type )
408
0
    {
409
0
        u8 dev, func, sec_bus, sub_bus;
410
0
411
0
        case DEV_TYPE_PCIe2PCI_BRIDGE:
412
0
        case DEV_TYPE_LEGACY_PCI_BRIDGE:
413
0
            dev = PCI_SLOT(pdev->devfn);
414
0
            func = PCI_FUNC(pdev->devfn);
415
0
            sec_bus = pci_conf_read8(pseg->nr, pdev->bus, dev, func,
416
0
                                     PCI_SECONDARY_BUS);
417
0
            sub_bus = pci_conf_read8(pseg->nr, pdev->bus, dev, func,
418
0
                                     PCI_SUBORDINATE_BUS);
419
0
420
0
            spin_lock(&pseg->bus2bridge_lock);
421
0
            for ( ; sec_bus <= sub_bus; sec_bus++ )
422
0
                pseg->bus2bridge[sec_bus] = pseg->bus2bridge[pdev->bus];
423
0
            spin_unlock(&pseg->bus2bridge_lock);
424
0
            break;
425
0
426
0
        default:
427
0
            break;
428
0
    }
429
0
430
0
    list_del(&pdev->alldevs_list);
431
0
    xfree(pdev->msix);
432
0
    xfree(pdev);
433
0
}
434
435
static void _pci_hide_device(struct pci_dev *pdev)
436
0
{
437
0
    if ( pdev->domain )
438
0
        return;
439
0
    pdev->domain = dom_xen;
440
0
    list_add(&pdev->domain_list, &dom_xen->arch.pdev_list);
441
0
}
442
443
int __init pci_hide_device(int bus, int devfn)
444
0
{
445
0
    struct pci_dev *pdev;
446
0
    int rc = -ENOMEM;
447
0
448
0
    pcidevs_lock();
449
0
    pdev = alloc_pdev(get_pseg(0), bus, devfn);
450
0
    if ( pdev )
451
0
    {
452
0
        _pci_hide_device(pdev);
453
0
        rc = 0;
454
0
    }
455
0
    pcidevs_unlock();
456
0
457
0
    return rc;
458
0
}
459
460
int __init pci_ro_device(int seg, int bus, int devfn)
461
0
{
462
0
    struct pci_seg *pseg = alloc_pseg(seg);
463
0
    struct pci_dev *pdev;
464
0
465
0
    if ( !pseg )
466
0
        return -ENOMEM;
467
0
    pdev = alloc_pdev(pseg, bus, devfn);
468
0
    if ( !pdev )
469
0
        return -ENOMEM;
470
0
471
0
    if ( !pseg->ro_map )
472
0
    {
473
0
        size_t sz = BITS_TO_LONGS(PCI_BDF(-1, -1, -1) + 1) * sizeof(long);
474
0
475
0
        pseg->ro_map = alloc_xenheap_pages(get_order_from_bytes(sz), 0);
476
0
        if ( !pseg->ro_map )
477
0
            return -ENOMEM;
478
0
        memset(pseg->ro_map, 0, sz);
479
0
    }
480
0
481
0
    __set_bit(PCI_BDF2(bus, devfn), pseg->ro_map);
482
0
    _pci_hide_device(pdev);
483
0
484
0
    return 0;
485
0
}
486
487
struct pci_dev *pci_get_pdev(int seg, int bus, int devfn)
488
65.5k
{
489
65.5k
    struct pci_seg *pseg = get_pseg(seg);
490
65.5k
    struct pci_dev *pdev = NULL;
491
65.5k
492
65.5k
    ASSERT(pcidevs_locked());
493
65.5k
    ASSERT(seg != -1 || bus == -1);
494
65.5k
    ASSERT(bus != -1 || devfn == -1);
495
65.5k
496
65.5k
    if ( !pseg )
497
0
    {
498
0
        if ( seg == -1 )
499
0
            radix_tree_gang_lookup(&pci_segments, (void **)&pseg, 0, 1);
500
0
        if ( !pseg )
501
0
            return NULL;
502
0
    }
503
65.5k
504
65.5k
    do {
505
65.5k
        list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
506
4.45M
            if ( (pdev->bus == bus || bus == -1) &&
507
16.6k
                 (pdev->devfn == devfn || devfn == -1) )
508
110
                return pdev;
509
65.4k
    } while ( radix_tree_gang_lookup(&pci_segments, (void **)&pseg,
510
65.4k
                                     pseg->nr + 1, 1) );
511
65.5k
512
65.4k
    return NULL;
513
65.5k
}
514
515
struct pci_dev *pci_get_real_pdev(int seg, int bus, int devfn)
516
0
{
517
0
    struct pci_dev *pdev;
518
0
    int stride;
519
0
520
0
    if ( seg < 0 || bus < 0 || devfn < 0 )
521
0
        return NULL;
522
0
523
0
    for ( pdev = pci_get_pdev(seg, bus, devfn), stride = 4;
524
0
          !pdev && stride; stride >>= 1 )
525
0
    {
526
0
        if ( !(devfn & (8 - stride)) )
527
0
            continue;
528
0
        pdev = pci_get_pdev(seg, bus, devfn & ~(8 - stride));
529
0
        if ( pdev && stride != pdev->phantom_stride )
530
0
            pdev = NULL;
531
0
    }
532
0
533
0
    return pdev;
534
0
}
535
536
struct pci_dev *pci_get_pdev_by_domain(const struct domain *d, int seg,
537
                                       int bus, int devfn)
538
66.4k
{
539
66.4k
    struct pci_seg *pseg = get_pseg(seg);
540
66.4k
    struct pci_dev *pdev = NULL;
541
66.4k
542
66.4k
    ASSERT(seg != -1 || bus == -1);
543
66.4k
    ASSERT(bus != -1 || devfn == -1);
544
66.4k
545
66.4k
    if ( !pseg )
546
8.08k
    {
547
8.08k
        if ( seg == -1 )
548
8.08k
            radix_tree_gang_lookup(&pci_segments, (void **)&pseg, 0, 1);
549
8.08k
        if ( !pseg )
550
0
            return NULL;
551
8.08k
    }
552
66.4k
553
66.4k
    do {
554
66.4k
        list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
555
3.70M
            if ( (pdev->bus == bus || bus == -1) &&
556
140k
                 (pdev->devfn == devfn || devfn == -1) &&
557
23.6k
                 (pdev->domain == d) )
558
17.0k
                return pdev;
559
49.4k
    } while ( radix_tree_gang_lookup(&pci_segments, (void **)&pseg,
560
49.4k
                                     pseg->nr + 1, 1) );
561
66.4k
562
49.4k
    return NULL;
563
66.4k
}
564
565
/**
566
 * pci_enable_acs - enable ACS if hardware support it
567
 * @dev: the PCI device
568
 */
569
static void pci_enable_acs(struct pci_dev *pdev)
570
0
{
571
0
    int pos;
572
0
    u16 cap, ctrl, seg = pdev->seg;
573
0
    u8 bus = pdev->bus;
574
0
    u8 dev = PCI_SLOT(pdev->devfn);
575
0
    u8 func = PCI_FUNC(pdev->devfn);
576
0
577
0
    if ( !iommu_enabled )
578
0
        return;
579
0
580
0
    pos = pci_find_ext_capability(seg, bus, pdev->devfn, PCI_EXT_CAP_ID_ACS);
581
0
    if (!pos)
582
0
        return;
583
0
584
0
    cap = pci_conf_read16(seg, bus, dev, func, pos + PCI_ACS_CAP);
585
0
    ctrl = pci_conf_read16(seg, bus, dev, func, pos + PCI_ACS_CTRL);
586
0
587
0
    /* Source Validation */
588
0
    ctrl |= (cap & PCI_ACS_SV);
589
0
590
0
    /* P2P Request Redirect */
591
0
    ctrl |= (cap & PCI_ACS_RR);
592
0
593
0
    /* P2P Completion Redirect */
594
0
    ctrl |= (cap & PCI_ACS_CR);
595
0
596
0
    /* Upstream Forwarding */
597
0
    ctrl |= (cap & PCI_ACS_UF);
598
0
599
0
    pci_conf_write16(seg, bus, dev, func, pos + PCI_ACS_CTRL, ctrl);
600
0
}
601
602
static int iommu_add_device(struct pci_dev *pdev);
603
static int iommu_enable_device(struct pci_dev *pdev);
604
static int iommu_remove_device(struct pci_dev *pdev);
605
606
int pci_size_mem_bar(pci_sbdf_t sbdf, unsigned int pos, uint64_t *paddr,
607
                     uint64_t *psize, unsigned int flags)
608
411
{
609
411
    uint32_t hi = 0, bar = pci_conf_read32(sbdf.seg, sbdf.bus, sbdf.dev,
610
411
                                           sbdf.func, pos);
611
411
    uint64_t size;
612
411
    bool is64bits = !(flags & PCI_BAR_ROM) &&
613
343
        (bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64;
614
411
    uint32_t mask = (flags & PCI_BAR_ROM) ? (uint32_t)PCI_ROM_ADDRESS_MASK
615
343
                                          : (uint32_t)PCI_BASE_ADDRESS_MEM_MASK;
616
411
617
411
    ASSERT(!((flags & PCI_BAR_VF) && (flags & PCI_BAR_ROM)));
618
411
    ASSERT((flags & PCI_BAR_ROM) ||
619
411
           (bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_MEMORY);
620
411
    pci_conf_write32(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, pos, ~0);
621
411
    if ( is64bits )
622
7
    {
623
7
        if ( flags & PCI_BAR_LAST )
624
0
        {
625
0
            printk(XENLOG_WARNING
626
0
                   "%sdevice %04x:%02x:%02x.%u with 64-bit %sBAR in last slot\n",
627
0
                   (flags & PCI_BAR_VF) ? "SR-IOV " : "", sbdf.seg, sbdf.bus,
628
0
                   sbdf.dev, sbdf.func, (flags & PCI_BAR_VF) ? "vf " : "");
629
0
            return -EINVAL;
630
0
        }
631
7
        hi = pci_conf_read32(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, pos + 4);
632
7
        pci_conf_write32(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, pos + 4, ~0);
633
7
    }
634
411
    size = pci_conf_read32(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func,
635
411
                           pos) & mask;
636
411
    if ( is64bits )
637
7
    {
638
7
        size |= (uint64_t)pci_conf_read32(sbdf.seg, sbdf.bus, sbdf.dev,
639
7
                                          sbdf.func, pos + 4) << 32;
640
7
        pci_conf_write32(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, pos + 4, hi);
641
7
    }
642
404
    else if ( size )
643
20
        size |= (uint64_t)~0 << 32;
644
411
    pci_conf_write32(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, pos, bar);
645
411
    size = -size;
646
411
647
411
    if ( paddr )
648
411
        *paddr = (bar & mask) | ((uint64_t)hi << 32);
649
411
    *psize = size;
650
411
651
404
    return is64bits ? 2 : 1;
652
411
}
653
654
int pci_add_device(u16 seg, u8 bus, u8 devfn,
655
                   const struct pci_dev_info *info, nodeid_t node)
656
0
{
657
0
    struct pci_seg *pseg;
658
0
    struct pci_dev *pdev;
659
0
    unsigned int slot = PCI_SLOT(devfn), func = PCI_FUNC(devfn);
660
0
    const char *pdev_type;
661
0
    int ret;
662
0
    bool pf_is_extfn = false;
663
0
664
0
    if ( !info )
665
0
        pdev_type = "device";
666
0
    else if ( info->is_virtfn )
667
0
    {
668
0
        pcidevs_lock();
669
0
        pdev = pci_get_pdev(seg, info->physfn.bus, info->physfn.devfn);
670
0
        if ( pdev )
671
0
            pf_is_extfn = pdev->info.is_extfn;
672
0
        pcidevs_unlock();
673
0
        if ( !pdev )
674
0
            pci_add_device(seg, info->physfn.bus, info->physfn.devfn,
675
0
                           NULL, node);
676
0
        pdev_type = "virtual function";
677
0
    }
678
0
    else if ( info->is_extfn )
679
0
        pdev_type = "extended function";
680
0
    else
681
0
    {
682
0
        info = NULL;
683
0
        pdev_type = "device";
684
0
    }
685
0
686
0
    ret = xsm_resource_plug_pci(XSM_PRIV, (seg << 16) | (bus << 8) | devfn);
687
0
    if ( ret )
688
0
        return ret;
689
0
690
0
    ret = -ENOMEM;
691
0
692
0
    pcidevs_lock();
693
0
    pseg = alloc_pseg(seg);
694
0
    if ( !pseg )
695
0
        goto out;
696
0
    pdev = alloc_pdev(pseg, bus, devfn);
697
0
    if ( !pdev )
698
0
        goto out;
699
0
700
0
    pdev->node = node;
701
0
702
0
    if ( info )
703
0
    {
704
0
        pdev->info = *info;
705
0
        /*
706
0
         * VF's 'is_extfn' field is used to indicate whether its PF is an
707
0
         * extended function.
708
0
         */
709
0
        if ( pdev->info.is_virtfn )
710
0
            pdev->info.is_extfn = pf_is_extfn;
711
0
    }
712
0
    else if ( !pdev->vf_rlen[0] )
713
0
    {
714
0
        unsigned int pos = pci_find_ext_capability(seg, bus, devfn,
715
0
                                                   PCI_EXT_CAP_ID_SRIOV);
716
0
        u16 ctrl = pci_conf_read16(seg, bus, slot, func, pos + PCI_SRIOV_CTRL);
717
0
718
0
        if ( !pos )
719
0
            /* Nothing */;
720
0
        else if ( !(ctrl & (PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE)) )
721
0
        {
722
0
            unsigned int i;
723
0
724
0
            BUILD_BUG_ON(ARRAY_SIZE(pdev->vf_rlen) != PCI_SRIOV_NUM_BARS);
725
0
            for ( i = 0; i < PCI_SRIOV_NUM_BARS; )
726
0
            {
727
0
                unsigned int idx = pos + PCI_SRIOV_BAR + i * 4;
728
0
                u32 bar = pci_conf_read32(seg, bus, slot, func, idx);
729
0
                pci_sbdf_t sbdf = {
730
0
                    .seg = seg,
731
0
                    .bus = bus,
732
0
                    .dev = slot,
733
0
                    .func = func,
734
0
                };
735
0
736
0
                if ( (bar & PCI_BASE_ADDRESS_SPACE) ==
737
0
                     PCI_BASE_ADDRESS_SPACE_IO )
738
0
                {
739
0
                    printk(XENLOG_WARNING
740
0
                           "SR-IOV device %04x:%02x:%02x.%u with vf BAR%u"
741
0
                           " in IO space\n",
742
0
                           seg, bus, slot, func, i);
743
0
                    continue;
744
0
                }
745
0
                ret = pci_size_mem_bar(sbdf, idx, NULL, &pdev->vf_rlen[i],
746
0
                                       PCI_BAR_VF |
747
0
                                       (i == PCI_SRIOV_NUM_BARS - 1) ?
748
0
                                       PCI_BAR_LAST : 0);
749
0
                if ( ret < 0 )
750
0
                    break;
751
0
752
0
                ASSERT(ret);
753
0
                i += ret;
754
0
            }
755
0
        }
756
0
        else
757
0
            printk(XENLOG_WARNING
758
0
                   "SR-IOV device %04x:%02x:%02x.%u has its virtual"
759
0
                   " functions already enabled (%04x)\n",
760
0
                   seg, bus, slot, func, ctrl);
761
0
    }
762
0
763
0
    check_pdev(pdev);
764
0
765
0
    ret = 0;
766
0
    if ( !pdev->domain )
767
0
    {
768
0
        pdev->domain = hardware_domain;
769
0
        ret = iommu_add_device(pdev);
770
0
        if ( ret )
771
0
        {
772
0
            pdev->domain = NULL;
773
0
            goto out;
774
0
        }
775
0
776
0
        list_add(&pdev->domain_list, &hardware_domain->arch.pdev_list);
777
0
    }
778
0
    else
779
0
        iommu_enable_device(pdev);
780
0
781
0
    pci_enable_acs(pdev);
782
0
783
0
out:
784
0
    pcidevs_unlock();
785
0
    if ( !ret )
786
0
    {
787
0
        printk(XENLOG_DEBUG "PCI add %s %04x:%02x:%02x.%u\n", pdev_type,
788
0
               seg, bus, slot, func);
789
0
        while ( pdev->phantom_stride )
790
0
        {
791
0
            func += pdev->phantom_stride;
792
0
            if ( PCI_SLOT(func) )
793
0
                break;
794
0
            printk(XENLOG_DEBUG "PCI phantom %04x:%02x:%02x.%u\n",
795
0
                   seg, bus, slot, func);
796
0
        }
797
0
    }
798
0
    return ret;
799
0
}
800
801
int pci_remove_device(u16 seg, u8 bus, u8 devfn)
802
0
{
803
0
    struct pci_seg *pseg = get_pseg(seg);
804
0
    struct pci_dev *pdev;
805
0
    int ret;
806
0
807
0
    ret = xsm_resource_unplug_pci(XSM_PRIV, (seg << 16) | (bus << 8) | devfn);
808
0
    if ( ret )
809
0
        return ret;
810
0
811
0
    ret = -ENODEV;
812
0
813
0
    if ( !pseg )
814
0
        return -ENODEV;
815
0
816
0
    pcidevs_lock();
817
0
    list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
818
0
        if ( pdev->bus == bus && pdev->devfn == devfn )
819
0
        {
820
0
            ret = iommu_remove_device(pdev);
821
0
            if ( pdev->domain )
822
0
                list_del(&pdev->domain_list);
823
0
            pci_cleanup_msi(pdev);
824
0
            free_pdev(pseg, pdev);
825
0
            printk(XENLOG_DEBUG "PCI remove device %04x:%02x:%02x.%u\n",
826
0
                   seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
827
0
            break;
828
0
        }
829
0
830
0
    pcidevs_unlock();
831
0
    return ret;
832
0
}
833
834
static int pci_clean_dpci_irq(struct domain *d,
835
                              struct hvm_pirq_dpci *pirq_dpci, void *arg)
836
0
{
837
0
    struct dev_intx_gsi_link *digl, *tmp;
838
0
839
0
    pirq_guest_unbind(d, dpci_pirq(pirq_dpci));
840
0
841
0
    if ( pt_irq_need_timer(pirq_dpci->flags) )
842
0
        kill_timer(&pirq_dpci->timer);
843
0
844
0
    list_for_each_entry_safe ( digl, tmp, &pirq_dpci->digl_list, list )
845
0
    {
846
0
        list_del(&digl->list);
847
0
        xfree(digl);
848
0
    }
849
0
850
0
    return pt_pirq_softirq_active(pirq_dpci) ? -ERESTART : 0;
851
0
}
852
853
static int pci_clean_dpci_irqs(struct domain *d)
854
0
{
855
0
    struct hvm_irq_dpci *hvm_irq_dpci = NULL;
856
0
857
0
    if ( !iommu_enabled )
858
0
        return 0;
859
0
860
0
    if ( !is_hvm_domain(d) )
861
0
        return 0;
862
0
863
0
    spin_lock(&d->event_lock);
864
0
    hvm_irq_dpci = domain_get_irq_dpci(d);
865
0
    if ( hvm_irq_dpci != NULL )
866
0
    {
867
0
        int ret = pt_pirq_iterate(d, pci_clean_dpci_irq, NULL);
868
0
869
0
        if ( ret )
870
0
        {
871
0
            spin_unlock(&d->event_lock);
872
0
            return ret;
873
0
        }
874
0
875
0
        hvm_domain_irq(d)->dpci = NULL;
876
0
        free_hvm_irq_dpci(hvm_irq_dpci);
877
0
    }
878
0
    spin_unlock(&d->event_lock);
879
0
    return 0;
880
0
}
881
882
int pci_release_devices(struct domain *d)
883
0
{
884
0
    struct pci_dev *pdev;
885
0
    u8 bus, devfn;
886
0
    int ret;
887
0
888
0
    pcidevs_lock();
889
0
    ret = pci_clean_dpci_irqs(d);
890
0
    if ( ret )
891
0
    {
892
0
        pcidevs_unlock();
893
0
        return ret;
894
0
    }
895
0
    while ( (pdev = pci_get_pdev_by_domain(d, -1, -1, -1)) )
896
0
    {
897
0
        bus = pdev->bus;
898
0
        devfn = pdev->devfn;
899
0
        if ( deassign_device(d, pdev->seg, bus, devfn) )
900
0
            printk("domain %d: deassign device (%04x:%02x:%02x.%u) failed!\n",
901
0
                   d->domain_id, pdev->seg, bus,
902
0
                   PCI_SLOT(devfn), PCI_FUNC(devfn));
903
0
    }
904
0
    pcidevs_unlock();
905
0
906
0
    return 0;
907
0
}
908
909
1
#define PCI_CLASS_BRIDGE_HOST    0x0600
910
10
#define PCI_CLASS_BRIDGE_PCI     0x0604
911
912
enum pdev_type pdev_type(u16 seg, u8 bus, u8 devfn)
913
68
{
914
68
    u16 class_device, creg;
915
68
    u8 d = PCI_SLOT(devfn), f = PCI_FUNC(devfn);
916
68
    int pos = pci_find_cap_offset(seg, bus, d, f, PCI_CAP_ID_EXP);
917
68
918
68
    class_device = pci_conf_read16(seg, bus, d, f, PCI_CLASS_DEVICE);
919
68
    switch ( class_device )
920
68
    {
921
10
    case PCI_CLASS_BRIDGE_PCI:
922
10
        if ( !pos )
923
1
            return DEV_TYPE_LEGACY_PCI_BRIDGE;
924
9
        creg = pci_conf_read16(seg, bus, d, f, pos + PCI_EXP_FLAGS);
925
9
        switch ( (creg & PCI_EXP_FLAGS_TYPE) >> 4 )
926
9
        {
927
0
        case PCI_EXP_TYPE_PCI_BRIDGE:
928
0
            return DEV_TYPE_PCIe2PCI_BRIDGE;
929
0
        case PCI_EXP_TYPE_PCIE_BRIDGE:
930
0
            return DEV_TYPE_PCI2PCIe_BRIDGE;
931
9
        }
932
9
        return DEV_TYPE_PCIe_BRIDGE;
933
1
    case PCI_CLASS_BRIDGE_HOST:
934
1
        return DEV_TYPE_PCI_HOST_BRIDGE;
935
9
936
0
    case 0x0000: case 0xffff:
937
0
        return DEV_TYPE_PCI_UNKNOWN;
938
68
    }
939
68
940
57
    return pos ? DEV_TYPE_PCIe_ENDPOINT : DEV_TYPE_PCI;
941
68
}
942
943
/*
 * find the upstream PCIe-to-PCI/PCIX bridge or PCI legacy bridge
 * return 0: the device is integrated PCI device or PCIe
 * return 1: find PCIe-to-PCI/PCIX bridge or PCI legacy bridge
 * return -1: fail
 *
 * On a "1" return, *bus/*devfn are rewritten in place to identify the
 * topmost bridge found, and *secbus holds the bus number immediately
 * below it.
 */
int find_upstream_bridge(u16 seg, u8 *bus, u8 *devfn, u8 *secbus)
{
    struct pci_seg *pseg = get_pseg(seg);
    int ret = 0;
    int cnt = 0;                  /* guards against cycles in bus2bridge */

    /* Devices on bus 0 have no upstream bridge by definition. */
    if ( *bus == 0 )
        return 0;

    if ( !pseg )
        return -1;

    /* No bridge recorded for this bus: integrated device or pure PCIe. */
    if ( !pseg->bus2bridge[*bus].map )
        return 0;

    ret = 1;
    spin_lock(&pseg->bus2bridge_lock);
    /* Walk up the bridge chain until a bus with no recorded parent. */
    while ( pseg->bus2bridge[*bus].map )
    {
        *secbus = *bus;
        *devfn = pseg->bus2bridge[*bus].devfn;
        *bus = pseg->bus2bridge[*bus].bus;
        /* More hops than buses exist implies a corrupt/cyclic map. */
        if ( cnt++ >= MAX_BUSES )
        {
            ret = -1;
            goto out;
        }
    }

out:
    spin_unlock(&pseg->bus2bridge_lock);
    return ret;
}
982
983
/*
 * Probe whether a PCI function is present by reading its vendor/device
 * ID dword.  Empty slots read as all-ones; some broken boards instead
 * return 0 or a half-set pattern, so all four are treated as absent.
 */
bool_t __init pci_device_detect(u16 seg, u8 bus, u8 dev, u8 func)
{
    u32 id = pci_conf_read32(seg, bus, dev, func, PCI_VENDOR_ID);

    switch ( id )
    {
    case 0x00000000:
    case 0x0000ffff:
    case 0xffff0000:
    case 0xffffffff:
        return 0;
    }

    return 1;
}
994
995
/*
 * Called on an IOMMU fault from the device at seg:bus:devfn.  Tracks a
 * decaying per-device fault count and, once it crosses
 * PT_FAULT_THRESHOLD, disables bus mastering on the device to stop
 * further DMA.
 */
void pci_check_disable_device(u16 seg, u8 bus, u8 devfn)
{
    struct pci_dev *pdev;
    s_time_t now = NOW();
    u16 cword;

    pcidevs_lock();
    pdev = pci_get_real_pdev(seg, bus, devfn);
    if ( pdev )
    {
        /* Halve the count if the last fault was >10ms ago (or time
         * went backwards), so sporadic faults decay away. */
        if ( now < pdev->fault.time ||
             now - pdev->fault.time > MILLISECS(10) )
            pdev->fault.count >>= 1;
        pdev->fault.time = now;
        /* Below threshold: clear pdev so we take no action below. */
        if ( ++pdev->fault.count < PT_FAULT_THRESHOLD )
            pdev = NULL;
    }
    pcidevs_unlock();

    if ( !pdev )
        return;

    /* Tell the device to stop DMAing; we can't rely on the guest to
     * control it for us. */
    devfn = pdev->devfn;
    cword = pci_conf_read16(seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                            PCI_COMMAND);
    pci_conf_write16(seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                     PCI_COMMAND, cword & ~PCI_COMMAND_MASTER);
}
1025
1026
/*
 * scan pci devices to add all existed PCI devices to alldevs_list,
 * and setup pci hierarchy in array bus2bridge.
 *
 * Iteration callback for pci_segments_iterate(); returns 0 on success
 * or -ENOMEM if a device structure cannot be allocated.
 */
static int __init _scan_pci_devices(struct pci_seg *pseg, void *arg)
{
    struct pci_dev *pdev;
    int bus, dev, func;

    for ( bus = 0; bus < 256; bus++ )
    {
        for ( dev = 0; dev < 32; dev++ )
        {
            for ( func = 0; func < 8; func++ )
            {
                if ( !pci_device_detect(pseg->nr, bus, dev, func) )
                {
                    /* If function 0 is absent, the whole slot is empty. */
                    if ( !func )
                        break;
                    continue;
                }

                pdev = alloc_pdev(pseg, bus, PCI_DEVFN(dev, func));
                if ( !pdev )
                {
                    printk(XENLOG_WARNING "%04x:%02x:%02x.%u: alloc_pdev failed\n",
                           pseg->nr, bus, dev, func);
                    return -ENOMEM;
                }

                /* Header type bit 7 clear on function 0 means the
                 * device is single-function: skip functions 1-7. */
                if ( !func && !(pci_conf_read8(pseg->nr, bus, dev, func,
                                               PCI_HEADER_TYPE) & 0x80) )
                    break;
            }
        }
    }

    return 0;
}
1065
1066
/*
 * Enumerate all PCI devices on every segment at boot, holding the
 * PCI device list lock for the duration of the scan.
 */
int __init scan_pci_devices(void)
{
    int rc;

    pcidevs_lock();
    rc = pci_segments_iterate(_scan_pci_devices, NULL);
    pcidevs_unlock();

    return rc;
}
1076
1077
/*
 * Context handed through pci_segments_iterate() while assigning all
 * discovered PCI devices to the hardware domain at boot.
 */
struct setup_hwdom {
    struct domain *d;                          /* hardware domain */
    int (*handler)(u8 devfn, struct pci_dev *); /* per-device setup hook */
};
1081
1082
/*
 * Run ctxt->handler for a device and each of its phantom functions,
 * then install vPCI handlers for it.  Errors are logged; a failure on
 * the base function aborts before vPCI setup.
 */
static void __hwdom_init setup_one_hwdom_device(const struct setup_hwdom *ctxt,
                                                struct pci_dev *pdev)
{
    u8 devfn = pdev->devfn;
    int err;

    do {
        err = ctxt->handler(devfn, pdev);
        if ( err )
        {
            printk(XENLOG_ERR "setup %04x:%02x:%02x.%u for d%d failed (%d)\n",
                   pdev->seg, pdev->bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                   ctxt->d->domain_id, err);
            /* Failure on the base function (first iteration): give up. */
            if ( devfn == pdev->devfn )
                return;
        }
        /* Advance to the next phantom function; a stride of 0 leaves
         * devfn unchanged and terminates the loop after one pass. */
        devfn += pdev->phantom_stride;
    } while ( devfn != pdev->devfn &&
              PCI_SLOT(devfn) == PCI_SLOT(pdev->devfn) );

    err = vpci_add_handlers(pdev);
    if ( err )
        printk(XENLOG_ERR "setup of vPCI for d%d failed: %d\n",
               ctxt->d->domain_id, err);
}
1107
1108
/*
 * Iteration callback: walk every bus/devfn of a segment and hand each
 * known device to the hardware domain (or, for dom_xen-owned devices,
 * run setup while temporarily re-labelling them).  Called with the
 * pcidevs lock held; it is dropped periodically to service softirqs.
 */
static int __hwdom_init _setup_hwdom_pci_devices(struct pci_seg *pseg, void *arg)
{
    struct setup_hwdom *ctxt = arg;
    int bus, devfn;

    for ( bus = 0; bus < 256; bus++ )
    {
        for ( devfn = 0; devfn < 256; devfn++ )
        {
            struct pci_dev *pdev = pci_get_pdev(pseg->nr, bus, devfn);

            if ( !pdev )
                continue;

            if ( !pdev->domain )
            {
                /* Unowned device: give it to the hardware domain. */
                pdev->domain = ctxt->d;
                list_add(&pdev->domain_list, &ctxt->d->arch.pdev_list);
                setup_one_hwdom_device(ctxt, pdev);
            }
            else if ( pdev->domain == dom_xen )
            {
                /* Hidden device: run setup as the hardware domain,
                 * then restore dom_xen ownership. */
                pdev->domain = ctxt->d;
                setup_one_hwdom_device(ctxt, pdev);
                pdev->domain = dom_xen;
            }
            else if ( pdev->domain != ctxt->d )
                printk(XENLOG_WARNING "Dom%d owning %04x:%02x:%02x.%u?\n",
                       pdev->domain->domain_id, pseg->nr, bus,
                       PCI_SLOT(devfn), PCI_FUNC(devfn));

            /* Verbose IOMMU logging is slow: yield after every device. */
            if ( iommu_verbose )
            {
                pcidevs_unlock();
                process_pending_softirqs();
                pcidevs_lock();
            }
        }

        /* Otherwise yield once per bus. */
        if ( !iommu_verbose )
        {
            pcidevs_unlock();
            process_pending_softirqs();
            pcidevs_lock();
        }
    }

    return 0;
}
1157
1158
/*
 * Assign every discovered PCI device to the hardware domain, invoking
 * the supplied per-device handler under the pcidevs lock.
 */
void __hwdom_init setup_hwdom_pci_devices(
    struct domain *d, int (*handler)(u8 devfn, struct pci_dev *))
{
    struct setup_hwdom ctxt;

    ctxt.d = d;
    ctxt.handler = handler;

    pcidevs_lock();
    pci_segments_iterate(_setup_hwdom_pci_devices, &ctxt);
    pcidevs_unlock();
}
1167
1168
#ifdef CONFIG_ACPI
1169
#include <acpi/acpi.h>
1170
#include <acpi/apei.h>
1171
1172
static int hest_match_pci(const struct acpi_hest_aer_common *p,
1173
                          const struct pci_dev *pdev)
1174
0
{
1175
0
    return ACPI_HEST_SEGMENT(p->bus) == pdev->seg &&
1176
0
           ACPI_HEST_BUS(p->bus)     == pdev->bus &&
1177
0
           p->device                 == PCI_SLOT(pdev->devfn) &&
1178
0
           p->function               == PCI_FUNC(pdev->devfn);
1179
0
}
1180
1181
/*
 * Check whether a global HEST AER entry's type (root port, endpoint,
 * or bridge) matches the given device's PCIe port type / class code.
 *
 * NOTE(review): pos is used unchecked below; if the device had no PCIe
 * capability, pos would be 0 and the read would hit offset
 * PCI_EXP_FLAGS of the header instead.  Presumably callers only reach
 * this for PCIe devices (pcie_aer_get_firmware_first checks the
 * capability first) — confirm before reusing elsewhere.
 */
static bool_t hest_match_type(const struct acpi_hest_header *hest_hdr,
                              const struct pci_dev *pdev)
{
    unsigned int pos = pci_find_cap_offset(pdev->seg, pdev->bus,
                                           PCI_SLOT(pdev->devfn),
                                           PCI_FUNC(pdev->devfn),
                                           PCI_CAP_ID_EXP);
    /* Device/Port Type field of the PCIe capability flags register. */
    u8 pcie = MASK_EXTR(pci_conf_read16(pdev->seg, pdev->bus,
                                        PCI_SLOT(pdev->devfn),
                                        PCI_FUNC(pdev->devfn),
                                        pos + PCI_EXP_FLAGS),
                        PCI_EXP_FLAGS_TYPE);

    switch ( hest_hdr->type )
    {
    case ACPI_HEST_TYPE_AER_ROOT_PORT:
        return pcie == PCI_EXP_TYPE_ROOT_PORT;
    case ACPI_HEST_TYPE_AER_ENDPOINT:
        return pcie == PCI_EXP_TYPE_ENDPOINT;
    case ACPI_HEST_TYPE_AER_BRIDGE:
        return pci_conf_read16(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
                               PCI_FUNC(pdev->devfn), PCI_CLASS_DEVICE) ==
               PCI_CLASS_BRIDGE_PCI;
    }

    return 0;
}
1208
1209
/* State threaded through apei_hest_parse() by aer_hest_parse(). */
struct aer_hest_parse_info {
    const struct pci_dev *pdev;  /* NULL => match any PCIe device */
    bool_t firmware_first;       /* result: FIRMWARE_FIRST flag seen */
};
1213
1214
static bool_t hest_source_is_pcie_aer(const struct acpi_hest_header *hest_hdr)
1215
0
{
1216
0
    if ( hest_hdr->type == ACPI_HEST_TYPE_AER_ROOT_PORT ||
1217
0
         hest_hdr->type == ACPI_HEST_TYPE_AER_ENDPOINT ||
1218
0
         hest_hdr->type == ACPI_HEST_TYPE_AER_BRIDGE )
1219
0
        return 1;
1220
0
    return 0;
1221
0
}
1222
1223
/*
 * apei_hest_parse() callback: inspect one HEST entry and record whether
 * firmware-first AER handling applies.  Returns 1 to stop iteration
 * once a matching per-device entry is found, 0 to continue.
 */
static int aer_hest_parse(const struct acpi_hest_header *hest_hdr, void *data)
{
    struct aer_hest_parse_info *info = data;
    const struct acpi_hest_aer_common *p;
    bool_t ff;

    if ( !hest_source_is_pcie_aer(hest_hdr) )
        return 0;

    /* The AER-common payload immediately follows the generic header. */
    p = (const struct acpi_hest_aer_common *)(hest_hdr + 1);
    ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);

    /*
     * If no specific device is supplied, determine whether
     * FIRMWARE_FIRST is set for *any* PCIe device.
     */
    if ( !info->pdev )
    {
        info->firmware_first |= ff;
        return 0;
    }

    /* Otherwise, check the specific device */
    if ( p->flags & ACPI_HEST_GLOBAL ?
         hest_match_type(hest_hdr, info->pdev) :
         hest_match_pci(p, info->pdev) )
    {
        info->firmware_first = ff;
        return 1;
    }

    return 0;
}
1256
1257
bool_t pcie_aer_get_firmware_first(const struct pci_dev *pdev)
1258
0
{
1259
0
    struct aer_hest_parse_info info = { .pdev = pdev };
1260
0
1261
0
    return pci_find_cap_offset(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
1262
0
                               PCI_FUNC(pdev->devfn), PCI_CAP_ID_EXP) &&
1263
0
           apei_hest_parse(aer_hest_parse, &info) >= 0 &&
1264
0
           info.firmware_first;
1265
0
}
1266
#endif
1267
1268
/*
 * Iteration callback for the 'Q' debug key: print every device of a
 * segment with its owning domain, NUMA node and bound MSI IRQs.
 */
static int _dump_pci_devices(struct pci_seg *pseg, void *arg)
{
    struct pci_dev *pdev;
    struct msi_desc *msi;

    printk("==== segment %04x ====\n", pseg->nr);

    list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
    {
        printk("%04x:%02x:%02x.%u - dom %-3d - node %-3d - MSIs < ",
               pseg->nr, pdev->bus,
               PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
               pdev->domain ? pdev->domain->domain_id : -1,
               (pdev->node != NUMA_NO_NODE) ? pdev->node : -1);
        list_for_each_entry ( msi, &pdev->msi_list, list )
               printk("%d ", msi->irq);
        printk(">\n");
    }

    return 0;
}
1289
1290
/* Keyhandler for 'Q': dump all PCI devices across all segments. */
static void dump_pci_devices(unsigned char ch)
{
    printk("==== PCI devices ====\n");
    pcidevs_lock();
    pci_segments_iterate(_dump_pci_devices, NULL);
    pcidevs_unlock();
}
1297
1298
/* Register the 'Q' debug key at boot. */
static int __init setup_dump_pcidevs(void)
{
    register_keyhandler('Q', dump_pci_devices, "dump PCI devices", 1);
    return 0;
}
__initcall(setup_dump_pcidevs);
1304
1305
int iommu_update_ire_from_msi(
1306
    struct msi_desc *msi_desc, struct msi_msg *msg)
1307
84
{
1308
84
    return iommu_intremap
1309
84
           ? iommu_get_ops()->update_ire_from_msi(msi_desc, msg) : 0;
1310
84
}
1311
1312
void iommu_read_msi_from_ire(
1313
    struct msi_desc *msi_desc, struct msi_msg *msg)
1314
42
{
1315
42
    if ( iommu_intremap )
1316
42
        iommu_get_ops()->read_msi_from_ire(msi_desc, msg);
1317
42
}
1318
1319
/*
 * Register a device (and its phantom functions) with the IOMMU of its
 * owning domain.  Must be called with the pcidevs lock held.  Phantom
 * function failures are logged but do not fail the call.
 */
static int iommu_add_device(struct pci_dev *pdev)
{
    const struct domain_iommu *hd;
    int rc;
    u8 devfn;

    if ( !pdev->domain )
        return -EINVAL;

    ASSERT(pcidevs_locked());

    hd = dom_iommu(pdev->domain);
    if ( !iommu_enabled || !hd->platform_ops )
        return 0;

    rc = hd->platform_ops->add_device(pdev->devfn, pci_to_dev(pdev));
    if ( rc || !pdev->phantom_stride )
        return rc;

    /* Register phantom functions until we step past the device's slot. */
    for ( devfn = pdev->devfn ; ; )
    {
        devfn += pdev->phantom_stride;
        if ( PCI_SLOT(devfn) != PCI_SLOT(pdev->devfn) )
            return 0;
        rc = hd->platform_ops->add_device(devfn, pci_to_dev(pdev));
        if ( rc )
            printk(XENLOG_WARNING "IOMMU: add %04x:%02x:%02x.%u failed (%d)\n",
                   pdev->seg, pdev->bus, PCI_SLOT(devfn), PCI_FUNC(devfn), rc);
    }
}
1349
1350
static int iommu_enable_device(struct pci_dev *pdev)
1351
0
{
1352
0
    const struct domain_iommu *hd;
1353
0
1354
0
    if ( !pdev->domain )
1355
0
        return -EINVAL;
1356
0
1357
0
    ASSERT(pcidevs_locked());
1358
0
1359
0
    hd = dom_iommu(pdev->domain);
1360
0
    if ( !iommu_enabled || !hd->platform_ops ||
1361
0
         !hd->platform_ops->enable_device )
1362
0
        return 0;
1363
0
1364
0
    return hd->platform_ops->enable_device(pci_to_dev(pdev));
1365
0
}
1366
1367
/*
 * Unregister a device from the IOMMU of its owning domain.  Phantom
 * functions are removed first; a phantom removal failure aborts and is
 * returned, otherwise the base function is removed last.
 */
static int iommu_remove_device(struct pci_dev *pdev)
{
    const struct domain_iommu *hd;
    u8 devfn;

    if ( !pdev->domain )
        return -EINVAL;

    hd = dom_iommu(pdev->domain);
    if ( !iommu_enabled || !hd->platform_ops )
        return 0;

    /* Loop body only entered for devices with phantom functions. */
    for ( devfn = pdev->devfn ; pdev->phantom_stride; )
    {
        int rc;

        devfn += pdev->phantom_stride;
        if ( PCI_SLOT(devfn) != PCI_SLOT(pdev->devfn) )
            break;
        rc = hd->platform_ops->remove_device(devfn, pci_to_dev(pdev));
        if ( !rc )
            continue;

        printk(XENLOG_ERR "IOMMU: remove %04x:%02x:%02x.%u failed (%d)\n",
               pdev->seg, pdev->bus, PCI_SLOT(devfn), PCI_FUNC(devfn), rc);
        return rc;
    }

    /* Finally remove the base function itself. */
    return hd->platform_ops->remove_device(pdev->devfn, pci_to_dev(pdev));
}
1397
1398
/*
1399
 * If the device isn't owned by the hardware domain, it means it already
1400
 * has been assigned to other domain, or it doesn't exist.
1401
 */
1402
static int device_assigned(u16 seg, u8 bus, u8 devfn)
1403
0
{
1404
0
    struct pci_dev *pdev;
1405
0
1406
0
    pcidevs_lock();
1407
0
    pdev = pci_get_pdev_by_domain(hardware_domain, seg, bus, devfn);
1408
0
    pcidevs_unlock();
1409
0
1410
0
    return pdev ? 0 : -EBUSY;
1411
0
}
1412
1413
/*
 * Assign the device at seg:bus:devfn (plus phantom functions) to
 * domain d.  Returns -ERESTART when the pcidevs lock cannot be taken
 * without blocking, so the hypercall can be continued.
 */
static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
{
    const struct domain_iommu *hd = dom_iommu(d);
    struct pci_dev *pdev;
    int rc = 0;

    if ( !iommu_enabled || !hd->platform_ops )
        return 0;

    /* Prevent device assign if mem paging or mem sharing have been
     * enabled for this domain */
    if ( unlikely(!need_iommu(d) &&
            (d->arch.hvm_domain.mem_sharing_enabled ||
             vm_event_check_ring(d->vm_event_paging) ||
             p2m_get_hostp2m(d)->global_logdirty)) )
        return -EXDEV;

    /* Trylock so the domctl path can be restarted instead of blocking. */
    if ( !pcidevs_trylock() )
        return -ERESTART;

    rc = iommu_construct(d);
    if ( rc )
    {
        pcidevs_unlock();
        return rc;
    }

    /* Only devices still owned by the hardware domain can be assigned. */
    pdev = pci_get_pdev_by_domain(hardware_domain, seg, bus, devfn);
    if ( !pdev )
    {
        rc = pci_get_pdev(seg, bus, devfn) ? -EBUSY : -ENODEV;
        goto done;
    }

    if ( pdev->msix )
        msixtbl_init(d);

    /* Reset the fault-rate tracking for the new owner. */
    pdev->fault.count = 0;

    if ( (rc = hd->platform_ops->assign_device(d, devfn, pci_to_dev(pdev), flag)) )
        goto done;

    /* Also assign any phantom functions; their failures are logged
     * but do not fail the overall assignment (rc reset each pass). */
    for ( ; pdev->phantom_stride; rc = 0 )
    {
        devfn += pdev->phantom_stride;
        if ( PCI_SLOT(devfn) != PCI_SLOT(pdev->devfn) )
            break;
        rc = hd->platform_ops->assign_device(d, devfn, pci_to_dev(pdev), flag);
        if ( rc )
            printk(XENLOG_G_WARNING "d%d: assign %04x:%02x:%02x.%u failed (%d)\n",
                   d->domain_id, seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                   rc);
    }

 done:
    /* Tear the IOMMU context down again if the domain ended up with
     * no devices and doesn't otherwise need one. */
    if ( !has_arch_pdevs(d) && need_iommu(d) )
        iommu_teardown(d);
    pcidevs_unlock();

    return rc;
}
1474
1475
/* caller should hold the pcidevs_lock */
/*
 * Return the device at seg:bus:devfn (and its phantom functions) from
 * domain d back to the hardware domain.
 */
int deassign_device(struct domain *d, u16 seg, u8 bus, u8 devfn)
{
    const struct domain_iommu *hd = dom_iommu(d);
    struct pci_dev *pdev = NULL;
    int ret = 0;

    if ( !iommu_enabled || !hd->platform_ops )
        return -EINVAL;

    ASSERT(pcidevs_locked());
    pdev = pci_get_pdev_by_domain(d, seg, bus, devfn);
    if ( !pdev )
        return -ENODEV;

    /* Reassign phantom functions first; any failure aborts. */
    while ( pdev->phantom_stride )
    {
        devfn += pdev->phantom_stride;
        if ( PCI_SLOT(devfn) != PCI_SLOT(pdev->devfn) )
            break;
        ret = hd->platform_ops->reassign_device(d, hardware_domain, devfn,
                                                pci_to_dev(pdev));
        if ( !ret )
            continue;

        printk(XENLOG_G_ERR "d%d: deassign %04x:%02x:%02x.%u failed (%d)\n",
               d->domain_id, seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), ret);
        return ret;
    }

    /* Then reassign the base function itself. */
    devfn = pdev->devfn;
    ret = hd->platform_ops->reassign_device(d, hardware_domain, devfn,
                                            pci_to_dev(pdev));
    if ( ret )
    {
        dprintk(XENLOG_G_ERR,
                "d%d: deassign device (%04x:%02x:%02x.%u) failed\n",
                d->domain_id, seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        return ret;
    }

    pdev->fault.count = 0;

    /* Drop the IOMMU context if the domain has no devices left. */
    if ( !has_arch_pdevs(d) && need_iommu(d) )
        iommu_teardown(d);

    return ret;
}
1523
1524
/*
 * Fill the guest buffer buf with up to max_sdevs bus/devfn identifiers
 * of d's devices sharing an IOMMU group with seg:bus:devfn.  Returns
 * the number of entries written, 0 if grouping is unsupported, or -1
 * on a guest copy fault.
 */
static int iommu_get_device_group(
    struct domain *d, u16 seg, u8 bus, u8 devfn,
    XEN_GUEST_HANDLE_64(uint32) buf, int max_sdevs)
{
    const struct domain_iommu *hd = dom_iommu(d);
    struct pci_dev *pdev;
    int group_id, sdev_id;
    u32 bdf;
    int i = 0;
    const struct iommu_ops *ops = hd->platform_ops;

    if ( !iommu_enabled || !ops || !ops->get_device_group_id )
        return 0;

    group_id = ops->get_device_group_id(seg, bus, devfn);

    pcidevs_lock();
    for_each_pdev( d, pdev )
    {
        /* Skip other segments and the reference device itself. */
        if ( (pdev->seg != seg) ||
             ((pdev->bus == bus) && (pdev->devfn == devfn)) )
            continue;

        if ( xsm_get_device_group(XSM_HOOK, (seg << 16) | (pdev->bus << 8) | pdev->devfn) )
            continue;

        sdev_id = ops->get_device_group_id(seg, pdev->bus, pdev->devfn);
        if ( (sdev_id == group_id) && (i < max_sdevs) )
        {
            /* Encode bus in bits 16-23 and devfn in bits 8-15. */
            bdf = 0;
            bdf |= (pdev->bus & 0xff) << 16;
            bdf |= (pdev->devfn & 0xff) << 8;

            if ( unlikely(copy_to_guest_offset(buf, i, &bdf, 1)) )
            {
                pcidevs_unlock();
                return -1;
            }
            i++;
        }
    }

    pcidevs_unlock();

    return i;
}
1570
1571
/*
 * Handle an ATS IOTLB flush timeout for pdev: disable ATS, strip the
 * device from its domain and hide it from further use, then crash the
 * owning domain (unless it is the hardware domain).
 */
void iommu_dev_iotlb_flush_timeout(struct domain *d, struct pci_dev *pdev)
{
    pcidevs_lock();

    disable_ats_device(pdev);

    ASSERT(pdev->domain);
    /* Ownership may have changed while the flush was pending. */
    if ( d != pdev->domain )
    {
        pcidevs_unlock();
        return;
    }

    list_del(&pdev->domain_list);
    pdev->domain = NULL;
    _pci_hide_device(pdev);

    if ( !d->is_shutting_down && printk_ratelimit() )
        printk(XENLOG_ERR
               "dom%d: ATS device %04x:%02x:%02x.%u flush failed\n",
               d->domain_id, pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
               PCI_FUNC(pdev->devfn));
    if ( !is_hardware_domain(d) )
        domain_crash(d);

    pcidevs_unlock();
}
1598
1599
/*
 * Dispatch the PCI-passthrough related domctls: device group query,
 * (test-)assignment and deassignment.  Returns 0 on success, a
 * negative errno, or a continuation encoding for -ERESTART.
 */
int iommu_do_pci_domctl(
    struct xen_domctl *domctl, struct domain *d,
    XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
{
    u16 seg;
    u8 bus, devfn;
    int ret = 0;
    uint32_t machine_sbdf;

    switch ( domctl->cmd )
    {
        /* Declared inside the switch: shared by the assign cases only. */
        unsigned int flags;

    case XEN_DOMCTL_get_device_group:
    {
        u32 max_sdevs;
        XEN_GUEST_HANDLE_64(uint32) sdevs;

        ret = xsm_get_device_group(XSM_HOOK, domctl->u.get_device_group.machine_sbdf);
        if ( ret )
            break;

        seg = domctl->u.get_device_group.machine_sbdf >> 16;
        bus = PCI_BUS(domctl->u.get_device_group.machine_sbdf);
        devfn = PCI_DEVFN2(domctl->u.get_device_group.machine_sbdf);
        max_sdevs = domctl->u.get_device_group.max_sdevs;
        sdevs = domctl->u.get_device_group.sdev_array;

        ret = iommu_get_device_group(d, seg, bus, devfn, sdevs, max_sdevs);
        if ( ret < 0 )
        {
            dprintk(XENLOG_ERR, "iommu_get_device_group() failed!\n");
            ret = -EFAULT;
            domctl->u.get_device_group.num_sdevs = 0;
        }
        else
        {
            domctl->u.get_device_group.num_sdevs = ret;
            ret = 0;
        }
        /* Copy the count (and any other outputs) back to the caller. */
        if ( __copy_field_to_guest(u_domctl, domctl, u.get_device_group) )
            ret = -EFAULT;
    }
    break;

    case XEN_DOMCTL_assign_device:
        ASSERT(d);
        /* fall through */
    case XEN_DOMCTL_test_assign_device:
        /* Don't support self-assignment of devices. */
        if ( d == current->domain )
        {
            ret = -EINVAL;
            break;
        }

        ret = -ENODEV;
        if ( domctl->u.assign_device.dev != XEN_DOMCTL_DEV_PCI )
            break;

        /* test_assign takes no flags; assign only RDM_RELAXED. */
        ret = -EINVAL;
        flags = domctl->u.assign_device.flags;
        if ( domctl->cmd == XEN_DOMCTL_assign_device
             ? d->is_dying || (flags & ~XEN_DOMCTL_DEV_RDM_RELAXED)
             : flags )
            break;

        machine_sbdf = domctl->u.assign_device.u.pci.machine_sbdf;

        ret = xsm_assign_device(XSM_HOOK, d, machine_sbdf);
        if ( ret )
            break;

        seg = machine_sbdf >> 16;
        bus = PCI_BUS(machine_sbdf);
        devfn = PCI_DEVFN2(machine_sbdf);

        ret = device_assigned(seg, bus, devfn);
        if ( domctl->cmd == XEN_DOMCTL_test_assign_device )
        {
            if ( ret )
            {
                printk(XENLOG_G_INFO
                       "%04x:%02x:%02x.%u already assigned, or non-existent\n",
                       seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
                ret = -EINVAL;
            }
            break;
        }
        if ( !ret )
            ret = assign_device(d, seg, bus, devfn, flags);
        /* -ERESTART from assign_device: retry via hypercall continuation. */
        if ( ret == -ERESTART )
            ret = hypercall_create_continuation(__HYPERVISOR_domctl,
                                                "h", u_domctl);
        else if ( ret )
            printk(XENLOG_G_ERR "XEN_DOMCTL_assign_device: "
                   "assign %04x:%02x:%02x.%u to dom%d failed (%d)\n",
                   seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                   d->domain_id, ret);

        break;

    case XEN_DOMCTL_deassign_device:
        /* Don't support self-deassignment of devices. */
        if ( d == current->domain )
        {
            ret = -EINVAL;
            break;
        }

        ret = -ENODEV;
        if ( domctl->u.assign_device.dev != XEN_DOMCTL_DEV_PCI )
            break;

        ret = -EINVAL;
        if ( domctl->u.assign_device.flags )
            break;

        machine_sbdf = domctl->u.assign_device.u.pci.machine_sbdf;

        ret = xsm_deassign_device(XSM_HOOK, d, machine_sbdf);
        if ( ret )
            break;

        seg = machine_sbdf >> 16;
        bus = PCI_BUS(machine_sbdf);
        devfn = PCI_DEVFN2(machine_sbdf);

        pcidevs_lock();
        ret = deassign_device(d, seg, bus, devfn);
        pcidevs_unlock();
        if ( ret )
            printk(XENLOG_G_ERR
                   "deassign %04x:%02x:%02x.%u from dom%d failed (%d)\n",
                   seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                   d->domain_id, ret);

        break;

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}
1745
1746
/*
1747
 * Local variables:
1748
 * mode: C
1749
 * c-file-style: "BSD"
1750
 * c-basic-offset: 4
1751
 * indent-tabs-mode: nil
1752
 * End:
1753
 */