Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/drivers/passthrough/amd/pci_amd_iommu.c
Source (every executable line in this file has an execution count of 0)
/*
 * Copyright (C) 2007 Advanced Micro Devices, Inc.
 * Author: Leo Duran <leo.duran@amd.com>
 * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/sched.h>
#include <xen/iocap.h>
#include <xen/pci.h>
#include <xen/pci_regs.h>
#include <xen/paging.h>
#include <xen/softirq.h>
#include <asm/amd-iommu.h>
#include <asm/hvm/svm/amd-iommu-proto.h>
#include "../ats.h"

static bool_t __read_mostly init_done;

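/*
 * Look up the IOMMU that serves the given device (segment + BDF) via the
 * IVRS mappings.  After initialization, a function missing from the ACPI
 * tables inherits the mapping of function 0 of the same device.
 */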
struct amd_iommu *find_iommu_for_device(int seg, int bdf)
{
    struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);

    if ( !ivrs_mappings || bdf >= ivrs_bdf_entries )
        return NULL;

    if ( unlikely(!ivrs_mappings[bdf].iommu) && likely(init_done) )
    {
        unsigned int bd0 = bdf & ~PCI_FUNC(~0);

        if ( ivrs_mappings[bd0].iommu )
        {
            struct ivrs_mappings tmp = ivrs_mappings[bd0];

            tmp.iommu = NULL;
            if ( tmp.dte_requestor_id == bd0 )
                tmp.dte_requestor_id = bdf;
            ivrs_mappings[bdf] = tmp;

            printk(XENLOG_WARNING "%04x:%02x:%02x.%u not found in ACPI tables;"
                   " using same IOMMU as function 0\n",
                   seg, PCI_BUS(bdf), PCI_SLOT(bdf), PCI_FUNC(bdf));

            /* write iommu field last */
            ivrs_mappings[bdf].iommu = ivrs_mappings[bd0].iommu;
        }
    }

    return ivrs_mappings[bdf].iommu;
}

/*
 * Some devices use an alias id and the original device id to index the
 * interrupt table and the I/O page table respectively. Such devices have
 * both an alias entry and a select entry in the IVRS structure.
 *
 * Return the original device id if the device has a valid interrupt
 * remapping table set up for both the select entry and the alias entry.
 */
int get_dma_requestor_id(u16 seg, u16 bdf)
{
    struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
    int req_id;

    BUG_ON ( bdf >= ivrs_bdf_entries );
    req_id = ivrs_mappings[bdf].dte_requestor_id;
    if ( (ivrs_mappings[bdf].intremap_table != NULL) &&
         (ivrs_mappings[req_id].intremap_table != NULL) )
        req_id = bdf;

    return req_id;
}

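/* Check whether a device table entry has both its valid and translation-valid fields set. */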
static int is_translation_valid(u32 *entry)
{
    return (get_field_from_reg_u32(entry[0],
                                   IOMMU_DEV_TABLE_VALID_MASK,
                                   IOMMU_DEV_TABLE_VALID_SHIFT) &&
            get_field_from_reg_u32(entry[0],
                                   IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK,
                                   IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT));
}

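/* Clear the valid and translation-valid fields of a device table entry. */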
static void disable_translation(u32 *dte)
{
    u32 entry;

    entry = dte[0];
    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK,
                         IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_DEV_TABLE_VALID_MASK,
                         IOMMU_DEV_TABLE_VALID_SHIFT, &entry);
    dte[0] = entry;
}

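/*
 * Point the device table entry for this device at the domain's I/O page
 * tables, flush the entry, and enable ATS on the device where supported.
 */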
static void amd_iommu_setup_domain_device(
    struct domain *domain, struct amd_iommu *iommu,
    u8 devfn, struct pci_dev *pdev)
{
    void *dte;
    unsigned long flags;
    int req_id, valid = 1;
    int dte_i = 0;
    u8 bus = pdev->bus;
    const struct domain_iommu *hd = dom_iommu(domain);

    BUG_ON( !hd->arch.root_table || !hd->arch.paging_mode ||
            !iommu->dev_table.buffer );

    if ( iommu_passthrough && is_hardware_domain(domain) )
        valid = 0;

    if ( ats_enabled )
        dte_i = 1;

    /* get device-table entry */
    req_id = get_dma_requestor_id(iommu->seg, PCI_BDF2(bus, devfn));
    dte = iommu->dev_table.buffer + (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);

    spin_lock_irqsave(&iommu->lock, flags);

    if ( !is_translation_valid((u32 *)dte) )
    {
        /* bind DTE to domain page-tables */
        amd_iommu_set_root_page_table(
            (u32 *)dte, page_to_maddr(hd->arch.root_table), domain->domain_id,
            hd->arch.paging_mode, valid);

        if ( pci_ats_device(iommu->seg, bus, pdev->devfn) &&
             iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
            iommu_dte_set_iotlb((u32 *)dte, dte_i);

        amd_iommu_flush_device(iommu, req_id);

        AMD_IOMMU_DEBUG("Setup I/O page table: device id = %#x, type = %#x, "
                        "root table = %#"PRIx64", "
                        "domain = %d, paging mode = %d\n",
                        req_id, pdev->type,
                        page_to_maddr(hd->arch.root_table),
                        domain->domain_id, hd->arch.paging_mode);
    }

    spin_unlock_irqrestore(&iommu->lock, flags);

    ASSERT(pcidevs_locked());

    if ( pci_ats_device(iommu->seg, bus, pdev->devfn) &&
         !pci_ats_enabled(iommu->seg, bus, pdev->devfn) )
    {
        if ( devfn == pdev->devfn )
            enable_ats_device(pdev, &iommu->ats_devices);

        amd_iommu_flush_iotlb(devfn, pdev, INV_IOMMU_ALL_PAGES_ADDRESS, 0);
    }
}

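/*
 * Boot-time detection/initialization entry point: parse the ACPI IVRS
 * tables, initialize the IOMMUs found, and scan the PCI bus.
 */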
int __init amd_iov_detect(void)
{
    INIT_LIST_HEAD(&amd_iommu_head);

    if ( !iommu_enable && !iommu_intremap )
        return 0;

    if ( (amd_iommu_detect_acpi() != 0) || (iommu_found() == 0) )
    {
        printk("AMD-Vi: IOMMU not found!\n");
        iommu_intremap = 0;
        return -ENODEV;
    }

    if ( amd_iommu_init() != 0 )
    {
        printk("AMD-Vi: initialization failed\n");
        return -ENODEV;
    }

    init_done = 1;

    if ( !amd_iommu_perdev_intremap )
        printk(XENLOG_WARNING "AMD-Vi: Using global interrupt remap table is not recommended (see XSA-36)!\n");
    return scan_pci_devices();
}

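/* Allocate a domain's root I/O page table if it has not been set up yet. */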
int amd_iommu_alloc_root(struct domain_iommu *hd)
{
    if ( unlikely(!hd->arch.root_table) )
    {
        hd->arch.root_table = alloc_amd_iommu_pgtable();
        if ( !hd->arch.root_table )
            return -ENOMEM;
    }

    return 0;
}

static int __must_check allocate_domain_resources(struct domain_iommu *hd)
{
    int rc;

    spin_lock(&hd->arch.mapping_lock);
    rc = amd_iommu_alloc_root(hd);
    spin_unlock(&hd->arch.mapping_lock);

    return rc;
}

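/*
 * Compute how many page-table levels are needed to cover the given number
 * of page-frame entries; each additional level multiplies the reach by
 * PTE_PER_TABLE_SIZE, up to a maximum of six levels.
 */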
static int get_paging_mode(unsigned long entries)
{
    int level = 1;

    BUG_ON( !entries );

    while ( entries > PTE_PER_TABLE_SIZE )
    {
        entries = PTE_PER_TABLE_ALIGN(entries) >> PTE_PER_TABLE_SHIFT;
        if ( ++level > 6 )
            return -ENOMEM;
    }

    return level;
}

static int amd_iommu_domain_init(struct domain *d)
{
    struct domain_iommu *hd = dom_iommu(d);

    /* For pv and dom0, stick with get_paging_mode(max_page)
     * For HVM dom0, use 2 level page table at first */
    hd->arch.paging_mode = is_hvm_domain(d) ?
                      IOMMU_PAGING_MODE_LEVEL_2 :
                      get_paging_mode(max_page);
    return 0;
}

static int amd_iommu_add_device(u8 devfn, struct pci_dev *pdev);

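/*
 * Hardware domain setup: build a 1:1 IOMMU mapping of all valid MFNs when
 * neither iommu_passthrough nor need_iommu(d) is in effect, deny access to
 * the IOMMU MMIO regions, and set up the hardware domain's PCI devices.
 */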
static void __hwdom_init amd_iommu_hwdom_init(struct domain *d)
{
    unsigned long i;
    const struct amd_iommu *iommu;

    if ( allocate_domain_resources(dom_iommu(d)) )
        BUG();

    if ( !iommu_passthrough && !need_iommu(d) )
    {
        int rc = 0;

        /* Set up 1:1 page table for dom0 */
        for ( i = 0; i < max_pdx; i++ )
        {
            unsigned long pfn = pdx_to_pfn(i);

            /*
             * XXX Should we really map all non-RAM (above 4G)? Minimally
             * a pfn_valid() check would seem desirable here.
             */
            if ( mfn_valid(_mfn(pfn)) )
            {
                int ret = amd_iommu_map_page(d, pfn, pfn,
                                             IOMMUF_readable|IOMMUF_writable);

                if ( !rc )
                    rc = ret;
            }

            if ( !(i & 0xfffff) )
                process_pending_softirqs();
        }

        if ( rc )
            AMD_IOMMU_DEBUG("d%d: IOMMU mapping failed: %d\n",
                            d->domain_id, rc);
    }

    for_each_amd_iommu ( iommu )
        if ( iomem_deny_access(d, PFN_DOWN(iommu->mmio_base_phys),
                               PFN_DOWN(iommu->mmio_base_phys +
                                        IOMMU_MMIO_REGION_LENGTH - 1)) )
            BUG();

    setup_hwdom_pci_devices(d, amd_iommu_add_device);
}

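/*
 * Detach a device from a domain: clear its device table entry, flush it,
 * and disable ATS if it was enabled for this device.
 */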
void amd_iommu_disable_domain_device(struct domain *domain,
                                     struct amd_iommu *iommu,
                                     u8 devfn, struct pci_dev *pdev)
{
    void *dte;
    unsigned long flags;
    int req_id;
    u8 bus = pdev->bus;

    BUG_ON ( iommu->dev_table.buffer == NULL );
    req_id = get_dma_requestor_id(iommu->seg, PCI_BDF2(bus, devfn));
    dte = iommu->dev_table.buffer + (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);

    spin_lock_irqsave(&iommu->lock, flags);
    if ( is_translation_valid((u32 *)dte) )
    {
        disable_translation((u32 *)dte);

        if ( pci_ats_device(iommu->seg, bus, pdev->devfn) &&
             iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
            iommu_dte_set_iotlb((u32 *)dte, 0);

        amd_iommu_flush_device(iommu, req_id);

        AMD_IOMMU_DEBUG("Disable: device id = %#x, "
                        "domain = %d, paging mode = %d\n",
                        req_id, domain->domain_id,
                        dom_iommu(domain)->arch.paging_mode);
    }
    spin_unlock_irqrestore(&iommu->lock, flags);

    ASSERT(pcidevs_locked());

    if ( devfn == pdev->devfn &&
         pci_ats_device(iommu->seg, bus, devfn) &&
         pci_ats_enabled(iommu->seg, bus, devfn) )
        disable_ats_device(pdev);
}

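/*
 * Move a device from the source domain to the target domain: tear down the
 * old device table entry, update pdev ownership, and bind the entry to the
 * target domain's page tables.
 */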
static int reassign_device(struct domain *source, struct domain *target,
                           u8 devfn, struct pci_dev *pdev)
{
    struct amd_iommu *iommu;
    int bdf, rc;
    struct domain_iommu *t = dom_iommu(target);

    bdf = PCI_BDF2(pdev->bus, pdev->devfn);
    iommu = find_iommu_for_device(pdev->seg, bdf);
    if ( !iommu )
    {
        AMD_IOMMU_DEBUG("Fail to find iommu."
                        " %04x:%02x:%02x.%u cannot be assigned to dom%d\n",
                        pdev->seg, pdev->bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                        target->domain_id);
        return -ENODEV;
    }

    amd_iommu_disable_domain_device(source, iommu, devfn, pdev);

    if ( devfn == pdev->devfn )
    {
        list_move(&pdev->domain_list, &target->arch.pdev_list);
        pdev->domain = target;
    }

    rc = allocate_domain_resources(t);
    if ( rc )
        return rc;

    amd_iommu_setup_domain_device(target, iommu, devfn, pdev);
    AMD_IOMMU_DEBUG("Re-assign %04x:%02x:%02x.%u from dom%d to dom%d\n",
                    pdev->seg, pdev->bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                    source->domain_id, target->domain_id);

    return 0;
}

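/*
 * Assign a device to a domain, reserving any IVRS unity-mapped range for it
 * first, then reassigning it from the hardware domain.
 */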
static int amd_iommu_assign_device(struct domain *d, u8 devfn,
                                   struct pci_dev *pdev,
                                   u32 flag)
{
    struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(pdev->seg);
    int bdf = PCI_BDF2(pdev->bus, devfn);
    int req_id = get_dma_requestor_id(pdev->seg, bdf);

    if ( ivrs_mappings[req_id].unity_map_enable )
    {
        amd_iommu_reserve_domain_unity_map(
            d,
            ivrs_mappings[req_id].addr_range_start,
            ivrs_mappings[req_id].addr_range_length,
            ivrs_mappings[req_id].write_permission,
            ivrs_mappings[req_id].read_permission);
    }

    return reassign_device(hardware_domain, d, devfn, pdev);
}

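/*
 * Queue a page table for deferred freeing; its level is stashed in
 * PFN_ORDER(pg) for deallocate_page_table() to pick up.
 */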
static void deallocate_next_page_table(struct page_info *pg, int level)
{
    PFN_ORDER(pg) = level;
    spin_lock(&iommu_pt_cleanup_lock);
    page_list_add_tail(pg, &iommu_pt_cleanup_list);
    spin_unlock(&iommu_pt_cleanup_lock);
}

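/*
 * Free a queued page table, first queueing any present lower-level tables
 * it references.
 */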
static void deallocate_page_table(struct page_info *pg)
{
    void *table_vaddr, *pde;
    u64 next_table_maddr;
    unsigned int index, level = PFN_ORDER(pg), next_level;

    PFN_ORDER(pg) = 0;

    if ( level <= 1 )
    {
        free_amd_iommu_pgtable(pg);
        return;
    }

    table_vaddr = __map_domain_page(pg);

    for ( index = 0; index < PTE_PER_TABLE_SIZE; index++ )
    {
        pde = table_vaddr + (index * IOMMU_PAGE_TABLE_ENTRY_SIZE);
        next_table_maddr = amd_iommu_get_next_table_from_pte(pde);
        next_level = iommu_next_level((u32*)pde);

        if ( (next_table_maddr != 0) && (next_level != 0) &&
             iommu_is_pte_present((u32*)pde) )
        {
            /* We do not support skip levels yet */
            ASSERT(next_level == level - 1);
            deallocate_next_page_table(maddr_to_page(next_table_maddr),
                                       next_level);
        }
    }

    unmap_domain_page(table_vaddr);
    free_amd_iommu_pgtable(pg);
}

static void deallocate_iommu_page_tables(struct domain *d)
{
    struct domain_iommu *hd = dom_iommu(d);

    if ( iommu_use_hap_pt(d) )
        return;

    spin_lock(&hd->arch.mapping_lock);
    if ( hd->arch.root_table )
    {
        deallocate_next_page_table(hd->arch.root_table, hd->arch.paging_mode);
        hd->arch.root_table = NULL;
    }
    spin_unlock(&hd->arch.mapping_lock);
}

static void amd_iommu_domain_destroy(struct domain *d)
{
    deallocate_iommu_page_tables(d);
    amd_iommu_flush_all_pages(d);
}

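/*
 * Hook up a newly discovered device to its owning domain's IOMMU context;
 * host bridges owned by the hardware domain are skipped.
 */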
static int amd_iommu_add_device(u8 devfn, struct pci_dev *pdev)
{
    struct amd_iommu *iommu;
    u16 bdf;

    if ( !pdev->domain )
        return -EINVAL;

    bdf = PCI_BDF2(pdev->bus, pdev->devfn);
    iommu = find_iommu_for_device(pdev->seg, bdf);
    if ( unlikely(!iommu) )
    {
        /* Filter bridge devices. */
        if ( pdev->type == DEV_TYPE_PCI_HOST_BRIDGE &&
             is_hardware_domain(pdev->domain) )
        {
            AMD_IOMMU_DEBUG("Skipping host bridge %04x:%02x:%02x.%u\n",
                            pdev->seg, pdev->bus, PCI_SLOT(devfn),
                            PCI_FUNC(devfn));
            return 0;
        }

        AMD_IOMMU_DEBUG("No iommu for %04x:%02x:%02x.%u; cannot be handed to d%d\n",
                        pdev->seg, pdev->bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                        pdev->domain->domain_id);
        return -ENODEV;
    }

    amd_iommu_setup_domain_device(pdev->domain, iommu, devfn, pdev);
    return 0;
}

static int amd_iommu_remove_device(u8 devfn, struct pci_dev *pdev)
{
    struct amd_iommu *iommu;
    u16 bdf;

    if ( !pdev->domain )
        return -EINVAL;

    bdf = PCI_BDF2(pdev->bus, pdev->devfn);
    iommu = find_iommu_for_device(pdev->seg, bdf);
    if ( !iommu )
    {
        AMD_IOMMU_DEBUG("Fail to find iommu."
                        " %04x:%02x:%02x.%u cannot be removed from dom%d\n",
                        pdev->seg, pdev->bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                        pdev->domain->domain_id);
        return -ENODEV;
    }

    amd_iommu_disable_domain_device(pdev->domain, iommu, devfn, pdev);
    return 0;
}

static int amd_iommu_group_id(u16 seg, u8 bus, u8 devfn)
{
    int bdf = PCI_BDF2(bus, devfn);

    return (bdf < ivrs_bdf_entries) ? get_dma_requestor_id(seg, bdf) : bdf;
}

#include <asm/io_apic.h>

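/*
 * Recursively walk one level of the I/O page tables, printing gfn -> mfn
 * translations at the leaves.
 */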
static void amd_dump_p2m_table_level(struct page_info* pg, int level,
                                     paddr_t gpa, int indent)
{
    paddr_t address;
    void *table_vaddr, *pde;
    paddr_t next_table_maddr;
    int index, next_level, present;
    u32 *entry;

    if ( level < 1 )
        return;

    table_vaddr = __map_domain_page(pg);
    if ( table_vaddr == NULL )
    {
        printk("Failed to map IOMMU domain page %"PRIpaddr"\n",
                page_to_maddr(pg));
        return;
    }

    for ( index = 0; index < PTE_PER_TABLE_SIZE; index++ )
    {
        if ( !(index % 2) )
            process_pending_softirqs();

        pde = table_vaddr + (index * IOMMU_PAGE_TABLE_ENTRY_SIZE);
        next_table_maddr = amd_iommu_get_next_table_from_pte(pde);
        entry = (u32*)pde;

        present = get_field_from_reg_u32(entry[0],
                                         IOMMU_PDE_PRESENT_MASK,
                                         IOMMU_PDE_PRESENT_SHIFT);

        if ( !present )
            continue;

        next_level = get_field_from_reg_u32(entry[0],
                                            IOMMU_PDE_NEXT_LEVEL_MASK,
                                            IOMMU_PDE_NEXT_LEVEL_SHIFT);

        if ( next_level && (next_level != (level - 1)) )
        {
            printk("IOMMU p2m table error. next_level = %d, expected %d\n",
                   next_level, level - 1);

            continue;
        }

        address = gpa + amd_offset_level_address(index, level);
        if ( next_level >= 1 )
            amd_dump_p2m_table_level(
                maddr_to_page(next_table_maddr), next_level,
                address, indent + 1);
        else
            printk("%*sgfn: %08lx  mfn: %08lx\n",
                   indent, "",
                   (unsigned long)PFN_DOWN(address),
                   (unsigned long)PFN_DOWN(next_table_maddr));
    }

    unmap_domain_page(table_vaddr);
}

static void amd_dump_p2m_table(struct domain *d)
{
    const struct domain_iommu *hd = dom_iommu(d);

    if ( !hd->arch.root_table )
        return;

    printk("p2m table has %d levels\n", hd->arch.paging_mode);
    amd_dump_p2m_table_level(hd->arch.root_table, hd->arch.paging_mode, 0, 0);
}

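/* Hook table wiring the AMD-Vi implementation into the generic IOMMU layer. */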
const struct iommu_ops amd_iommu_ops = {
    .init = amd_iommu_domain_init,
    .hwdom_init = amd_iommu_hwdom_init,
    .add_device = amd_iommu_add_device,
    .remove_device = amd_iommu_remove_device,
    .assign_device  = amd_iommu_assign_device,
    .teardown = amd_iommu_domain_destroy,
    .map_page = amd_iommu_map_page,
    .unmap_page = amd_iommu_unmap_page,
    .free_page_table = deallocate_page_table,
    .reassign_device = reassign_device,
    .get_device_group_id = amd_iommu_group_id,
    .update_ire_from_apic = amd_iommu_ioapic_update_ire,
    .update_ire_from_msi = amd_iommu_msi_msg_update_ire,
    .read_apic_from_ire = amd_iommu_read_ioapic_from_ire,
    .read_msi_from_ire = amd_iommu_read_msi_from_ire,
    .setup_hpet_msi = amd_setup_hpet_msi,
    .suspend = amd_iommu_suspend,
    .resume = amd_iommu_resume,
    .share_p2m = amd_iommu_share_p2m,
    .crash_shutdown = amd_iommu_crash_shutdown,
    .dump_p2m_table = amd_dump_p2m_table,
};