Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/arch/x86/hvm/asid.c
Line
Count
Source (jump to first uncovered line)
1
/*
2
 * asid.c: ASID management
3
 * Copyright (c) 2007, Advanced Micro Devices, Inc.
4
 * Copyright (c) 2009, Citrix Systems, Inc.
5
 *
6
 * This program is free software; you can redistribute it and/or modify it
7
 * under the terms and conditions of the GNU General Public License,
8
 * version 2, as published by the Free Software Foundation.
9
 *
10
 * This program is distributed in the hope it will be useful, but WITHOUT
11
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13
 * more details.
14
 *
15
 * You should have received a copy of the GNU General Public License along with
16
 * this program; If not, see <http://www.gnu.org/licenses/>.
17
 */
18
19
#include <xen/init.h>
20
#include <xen/lib.h>
21
#include <xen/sched.h>
22
#include <xen/smp.h>
23
#include <xen/percpu.h>
24
#include <asm/hvm/asid.h>
25
26
/* Xen command-line option to enable ASIDs */
27
static int opt_asid_enabled = 1;
28
boolean_param("asid", opt_asid_enabled);
29
30
/*
31
 * ASIDs partition the physical TLB.  In the current implementation ASIDs are
32
 * introduced to reduce the number of TLB flushes.  Each time the guest's
33
 * virtual address space changes (e.g. due to an INVLPG, MOV-TO-{CR3, CR4}
34
 * operation), instead of flushing the TLB, a new ASID is assigned.  This
35
 * reduces the number of TLB flushes to at most 1/#ASIDs.  The biggest
36
 * advantage is that hot parts of the hypervisor's code and data remain in
37
 * the TLB.
38
 *
39
 * Sketch of the Implementation:
40
 *
41
 * ASIDs are a CPU-local resource.  As preemption of ASIDs is not possible,
42
 * ASIDs are assigned in a round-robin scheme.  To minimize the overhead of
43
 * ASID invalidation, at the time of a TLB flush, ASIDs are tagged with a
44
 * 64-bit generation.  Only on a generation overflow the code needs to
45
 * invalidate all ASID information stored in the VCPUs which are run on the
46
 * specific physical processor.  This overflow appears after about 2^80
47
 * host processor cycles, so we do not optimize this case, but simply disable
48
 * ASID usage to retain correctness.
49
 */
50
51
/* Per-CPU ASID management. */
52
struct hvm_asid_data {
53
   uint64_t core_asid_generation;
54
   uint32_t next_asid;
55
   uint32_t max_asid;
56
   bool_t disabled;
57
};
58
59
static DEFINE_PER_CPU(struct hvm_asid_data, hvm_asid_data);
60
61
void hvm_asid_init(int nasids)
62
12
{
63
12
    static int8_t g_disabled = -1;
64
12
    struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
65
12
66
12
    data->max_asid = nasids - 1;
67
12
    data->disabled = !opt_asid_enabled || (nasids <= 1);
68
12
69
12
    if ( g_disabled != data->disabled )
70
1
    {
71
1
        printk("HVM: ASIDs %sabled.\n", data->disabled ? "dis" : "en");
72
1
        if ( g_disabled < 0 )
73
1
            g_disabled = data->disabled;
74
1
    }
75
12
76
12
    /* Zero indicates 'invalid generation', so we start the count at one. */
77
12
    data->core_asid_generation = 1;
78
12
79
12
    /* Zero indicates 'ASIDs disabled', so we start the count at one. */
80
12
    data->next_asid = 1;
81
12
}
82
83
void hvm_asid_flush_vcpu_asid(struct hvm_vcpu_asid *asid)
84
1.42k
{
85
1.42k
    asid->generation = 0;
86
1.42k
}
87
88
void hvm_asid_flush_vcpu(struct vcpu *v)
89
715
{
90
715
    hvm_asid_flush_vcpu_asid(&v->arch.hvm_vcpu.n1asid);
91
715
    hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid);
92
715
}
93
94
void hvm_asid_flush_core(void)
95
39.9k
{
96
39.9k
    struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
97
39.9k
98
39.9k
    if ( data->disabled )
99
0
        return;
100
39.9k
101
39.9k
    if ( likely(++data->core_asid_generation != 0) )
102
39.9k
        return;
103
39.9k
104
39.9k
    /*
105
39.9k
     * ASID generations are 64 bit.  Overflow of generations never happens.
106
39.9k
     * For safety, we simply disable ASIDs, so correctness is established; it
107
39.9k
     * only runs a bit slower.
108
39.9k
     */
109
18.4E
    printk("HVM: ASID generation overrun. Disabling ASIDs.\n");
110
18.4E
    data->disabled = 1;
111
18.4E
}
112
113
bool_t hvm_asid_handle_vmenter(struct hvm_vcpu_asid *asid)
114
5.07M
{
115
5.07M
    struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
116
5.07M
117
5.07M
    /* On erratum #170 systems we must flush the TLB. 
118
5.07M
     * Generation overruns are taken here, too. */
119
5.07M
    if ( data->disabled )
120
0
        goto disabled;
121
5.07M
122
5.07M
    /* Test if VCPU has valid ASID. */
123
5.07M
    if ( asid->generation == data->core_asid_generation )
124
5.03M
        return 0;
125
5.07M
126
5.07M
    /* If there are no free ASIDs, need to go to a new generation */
127
36.9k
    if ( unlikely(data->next_asid > data->max_asid) )
128
0
    {
129
0
        hvm_asid_flush_core();
130
0
        data->next_asid = 1;
131
0
        if ( data->disabled )
132
0
            goto disabled;
133
0
    }
134
36.9k
135
36.9k
    /* Now guaranteed to be a free ASID. */
136
36.9k
    asid->asid = data->next_asid++;
137
36.9k
    asid->generation = data->core_asid_generation;
138
36.9k
139
36.9k
    /*
140
36.9k
     * When we assign ASID 1, flush all TLB entries as we are starting a new
141
36.9k
     * generation, and all old ASID allocations are now stale. 
142
36.9k
     */
143
36.9k
    return (asid->asid == 1);
144
36.9k
145
0
 disabled:
146
0
    asid->asid = 0;
147
0
    return 0;
148
36.9k
}
149
150
/*
151
 * Local variables:
152
 * mode: C
153
 * c-file-style: "BSD"
154
 * c-basic-offset: 4
155
 * tab-width: 4
156
 * indent-tabs-mode: nil
157
 * End:
158
 */