Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/drivers/passthrough/x86/iommu.c
Line  Count  Source
   1         /*
   2          * This program is free software; you can redistribute it and/or modify it
   3          * under the terms and conditions of the GNU General Public License,
   4          * version 2, as published by the Free Software Foundation.
   5          *
   6          * This program is distributed in the hope it will be useful, but WITHOUT
   7          * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   8          * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   9          * more details.
  10          *
  11          * You should have received a copy of the GNU General Public License along with
  12          * this program; If not, see <http://www.gnu.org/licenses/>.
  13          */
  14
  15         #include <xen/sched.h>
  16         #include <xen/iommu.h>
  17         #include <xen/paging.h>
  18         #include <xen/guest_access.h>
  19         #include <xen/event.h>
  20         #include <xen/softirq.h>
  21         #include <xsm/xsm.h>
  22
  23         void iommu_update_ire_from_apic(
  24             unsigned int apic, unsigned int reg, unsigned int value)
  25     93  {
  26     93      const struct iommu_ops *ops = iommu_get_ops();
  27     93      ops->update_ire_from_apic(apic, reg, value);
  28     93  }
  29
  30         unsigned int iommu_read_apic_from_ire(unsigned int apic, unsigned int reg)
  31    222  {
  32    222      const struct iommu_ops *ops = iommu_get_ops();
  33    222      return ops->read_apic_from_ire(apic, reg);
  34    222  }
  35
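Both interrupt-remapping helpers above are exercised in this run (93 and 222 hits). Each fetches the active vendor driver via iommu_get_ops() and dispatches through its function-pointer table, so callers never care whether VT-d or AMD-Vi is behind it. Below is a minimal, standalone sketch of that dispatch pattern; the dummy_* backend and main() are invented for illustration and are not part of Xen.

    /* Sketch of the iommu_ops function-pointer dispatch used above.
     * Everything named dummy_* is a made-up backend for illustration. */
    #include <stdio.h>

    struct iommu_ops {
        void (*update_ire_from_apic)(unsigned int apic, unsigned int reg,
                                     unsigned int value);
        unsigned int (*read_apic_from_ire)(unsigned int apic, unsigned int reg);
    };

    static void dummy_update_ire(unsigned int apic, unsigned int reg,
                                 unsigned int value)
    {
        printf("apic %u reg %#x <- %#x\n", apic, reg, value);
    }

    static unsigned int dummy_read_apic(unsigned int apic, unsigned int reg)
    {
        (void)apic;
        return reg; /* a real driver would return the remapped RTE contents */
    }

    static const struct iommu_ops dummy_ops = {
        .update_ire_from_apic = dummy_update_ire,
        .read_apic_from_ire   = dummy_read_apic,
    };

    /* Stand-in for Xen's iommu_get_ops(): return the active backend. */
    static const struct iommu_ops *iommu_get_ops(void)
    {
        return &dummy_ops;
    }

    int main(void)
    {
        const struct iommu_ops *ops = iommu_get_ops();

        ops->update_ire_from_apic(0, 0x10, 0xfee0);
        return ops->read_apic_from_ire(0, 0x10) == 0x10 ? 0 : 1;
    }

The point of the indirection is that this file stays vendor-neutral; only the code behind iommu_get_ops() knows which driver was selected at boot.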
Line  Count  Source
  36         int __init iommu_setup_hpet_msi(struct msi_desc *msi)
  37      0  {
  38      0      const struct iommu_ops *ops = iommu_get_ops();
  39      0      return ops->setup_hpet_msi ? ops->setup_hpet_msi(msi) : -ENODEV;
  40      0  }
  41
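iommu_setup_hpet_msi was never reached in this run (all counts 0). The idiom on line 39 is worth noting: setup_hpet_msi is an optional hook, so a driver lacking HPET MSI support leaves the pointer NULL and callers get -ENODEV back. A hedged sketch of that null-check-or-errno pattern follows; msi_desc here is simplified and the wrapper name setup_hpet_msi_via() is hypothetical, not a Xen API.

    /* Sketch of the optional-hook idiom from line 39: call the hook if the
     * backend provides it, otherwise report -ENODEV. */
    #include <errno.h>
    #include <stdio.h>

    struct msi_desc { int irq; };

    struct iommu_ops {
        int (*setup_hpet_msi)(struct msi_desc *msi); /* may be NULL */
    };

    static int setup_hpet_msi_via(const struct iommu_ops *ops,
                                  struct msi_desc *msi)
    {
        return ops->setup_hpet_msi ? ops->setup_hpet_msi(msi) : -ENODEV;
    }

    int main(void)
    {
        struct iommu_ops no_hpet = { .setup_hpet_msi = NULL };
        struct msi_desc msi = { .irq = 8 };

        /* Backend without the hook: prints -ENODEV (-19 on Linux). */
        printf("%d\n", setup_hpet_msi_via(&no_hpet, &msi));
        return 0;
    }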
Line  Count  Source
  42         int arch_iommu_populate_page_table(struct domain *d)
  43      0  {
  44      0      const struct domain_iommu *hd = dom_iommu(d);
  45      0      struct page_info *page;
  46      0      int rc = 0, n = 0;
  47      0
  48      0      d->need_iommu = -1;
  49      0
  50      0      this_cpu(iommu_dont_flush_iotlb) = 1;
  51      0      spin_lock(&d->page_alloc_lock);
  52      0
  53      0      if ( unlikely(d->is_dying) )
  54      0          rc = -ESRCH;
  55      0
  56      0      while ( !rc && (page = page_list_remove_head(&d->page_list)) )
  57      0      {
  58      0          if ( is_hvm_domain(d) ||
  59      0               (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page )
  60      0          {
  61      0              unsigned long mfn = page_to_mfn(page);
  62      0              unsigned long gfn = mfn_to_gmfn(d, mfn);
  63      0
  64      0              if ( gfn != gfn_x(INVALID_GFN) )
  65      0              {
  66      0                  ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
  67      0                  BUG_ON(SHARED_M2P(gfn));
  68      0                  rc = hd->platform_ops->map_page(d, gfn, mfn,
  69      0                                                  IOMMUF_readable |
  70      0                                                  IOMMUF_writable);
  71      0              }
  72      0              if ( rc )
  73      0              {
  74      0                  page_list_add(page, &d->page_list);
  75      0                  break;
  76      0              }
  77      0          }
  78      0          page_list_add_tail(page, &d->arch.relmem_list);
  79      0          if ( !(++n & 0xff) && !page_list_empty(&d->page_list) &&
  80      0               hypercall_preempt_check() )
  81      0              rc = -ERESTART;
  82      0      }
  83      0
  84      0      if ( !rc )
  85      0      {
  86      0          /*
  87      0           * The expectation here is that generally there are many normal pages
  88      0           * on relmem_list (the ones we put there) and only few being in an
  89      0           * offline/broken state. The latter ones are always at the head of the
  90      0           * list. Hence we first move the whole list, and then move back the
  91      0           * first few entries.
  92      0           */
  93      0          page_list_move(&d->page_list, &d->arch.relmem_list);
  94      0          while ( !page_list_empty(&d->page_list) &&
  95      0                  (page = page_list_first(&d->page_list),
  96      0                   (page->count_info & (PGC_state|PGC_broken))) )
  97      0          {
  98      0              page_list_del(page, &d->page_list);
  99      0              page_list_add_tail(page, &d->arch.relmem_list);
 100      0          }
 101      0      }
 102      0
 103      0      spin_unlock(&d->page_alloc_lock);
 104      0      this_cpu(iommu_dont_flush_iotlb) = 0;
 105      0
 106      0      if ( !rc )
 107      0          rc = iommu_iotlb_flush_all(d);
 108      0
 109      0      if ( rc && rc != -ERESTART )
 110      0          iommu_teardown(d);
 111      0
 112      0      return rc;
 113      0  }
 114
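arch_iommu_populate_page_table is likewise unexecuted here, but its structure matters: it walks the entire domain page list under page_alloc_lock, so on every 256th iteration (the !(++n & 0xff) test on line 79) it polls hypercall_preempt_check() and bails out with -ERESTART, letting the hypercall be restarted instead of monopolizing the CPU; any other failure tears the IOMMU mapping down again (line 110). Below is a self-contained sketch of that batched-preemption loop; the queue type, process_one() and preempt_pending() are illustrative stand-ins, not Xen APIs.

    /* Sketch of the batched-preemption idiom from the loop above: do the
     * work, but only on every 256th iteration pay for a preemption check,
     * and return -ERESTART so the caller can resume later. */
    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    #ifndef ERESTART
    #define ERESTART 85 /* Linux value; ERESTART is not in standard errno.h */
    #endif

    #define BATCH_MASK 0xff /* check for preemption every 256 items */

    struct queue { int remaining; };

    static bool queue_empty(const struct queue *q) { return q->remaining == 0; }
    static int process_one(struct queue *q) { q->remaining--; return 0; }

    /* Stand-in for Xen's hypercall_preempt_check(). */
    static bool preempt_pending(void) { return false; }

    static int drain_queue(struct queue *q)
    {
        int rc = 0;
        unsigned int n = 0;

        while ( !rc && !queue_empty(q) )
        {
            rc = process_one(q);

            /* Mirrors "!(++n & 0xff) && !page_list_empty(...) &&
             * hypercall_preempt_check()" on lines 79-80. */
            if ( !rc && !(++n & BATCH_MASK) && !queue_empty(q) &&
                 preempt_pending() )
                rc = -ERESTART; /* caller re-invokes to continue */
        }

        return rc;
    }

    int main(void)
    {
        struct queue q = { .remaining = 1000 };

        printf("rc = %d\n", drain_queue(&q)); /* 0: ran to completion */
        return 0;
    }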
Line  Count  Source
 115         void __hwdom_init arch_iommu_check_autotranslated_hwdom(struct domain *d)
 116      1  {
 117      1      if ( !iommu_enabled )
 118      0          panic("Presently, iommu must be enabled for PVH hardware domain\n");
 119      1  }
 120
 121         int arch_iommu_domain_init(struct domain *d)
 122      1  {
 123      1      struct domain_iommu *hd = dom_iommu(d);
 124      1
 125      1      spin_lock_init(&hd->arch.mapping_lock);
 126      1      INIT_LIST_HEAD(&hd->arch.mapped_rmrrs);
 127      1
 128      1      return 0;
 129      1  }
 130
 131         void arch_iommu_domain_destroy(struct domain *d)
 132      0  {
 133      0  }
 134
 135         /*
 136          * Local variables:
 137          * mode: C
 138          * c-file-style: "BSD"
 139          * c-basic-offset: 4
 140          * indent-tabs-mode: nil
 141          * End:
 142          */