Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/arch/x86/flushtlb.c
Line   Count  Source
   1          /******************************************************************************
   2           * flushtlb.c
   3           *
   4           * TLB flushes are timestamped using a global virtual 'clock' which ticks
   5           * on any TLB flush on any processor.
   6           *
   7           * Copyright (c) 2003-2006, K A Fraser
   8           */
   9
  10          #include <xen/sched.h>
  11          #include <xen/softirq.h>
  12          #include <asm/flushtlb.h>
  13          #include <asm/page.h>
  14
  15          /* Debug builds: Wrap frequently to stress-test the wrap logic. */
  16          #ifdef NDEBUG
  17          #define WRAP_MASK (0xFFFFFFFFU)
  18          #else
  19   39.6k  #define WRAP_MASK (0x000003FFU)
  20          #endif
  21
  22          u32 tlbflush_clock = 1U;
  23          DEFINE_PER_CPU(u32, tlbflush_time);
  24
  25          /*
  26           * pre_flush(): Increment the virtual TLB-flush clock. Returns new clock value.
  27           *
  28           * This must happen *before* we flush the TLB. If we do it after, we race other
  29           * CPUs invalidating PTEs. For example, a page invalidated after the flush
  30           * might get the old timestamp, but this CPU can speculatively fetch the
  31           * mapping into its TLB after the flush but before inc'ing the clock.
  32           */
  33          static u32 pre_flush(void)
  34   40.0k  {
  35   40.0k      u32 t, t1, t2;
  36   40.0k
  37   40.0k      t = tlbflush_clock;
  38   40.0k      do {
  39   40.0k          t1 = t2 = t;
  40   40.0k          /* Clock wrapped: someone else is leading a global TLB shootdown. */
  41   40.0k          if ( unlikely(t1 == 0) )
  42     347              goto skip_clocktick;
  43   39.6k          t2 = (t + 1) & WRAP_MASK;
  44   39.6k      }
  45   39.6k      while ( unlikely((t = cmpxchg(&tlbflush_clock, t1, t2)) != t1) );
  46   40.0k
  47   40.0k      /* Clock wrapped: we will lead a global TLB shootdown. */
  48   39.6k      if ( unlikely(t2 == 0) )
  49      38          raise_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ);
  50   39.6k
  51   40.0k   skip_clocktick:
  52   40.0k      return t2;
  53   39.6k  }
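
The file header and the comments in pre_flush() describe a global virtual clock: every flush ticks tlbflush_clock modulo WRAP_MASK + 1, and the value 0 is reserved as a "clock period is being restarted" sentinel, so callers that observe 0 skip the tick and let the CPU that caused the wrap lead the global shootdown. The standalone sketch below (plain C, single-threaded, no cmpxchg or softirq machinery; it models the arithmetic only and is not Xen code) shows the wrap behaviour with the debug-build mask:

/* Model of the pre_flush() clock tick, using the debug-build WRAP_MASK. */
#include <stdint.h>
#include <stdio.h>

#define WRAP_MASK 0x000003FFU            /* debug-build value from the listing */

static uint32_t clock_tick(uint32_t t)
{
    if (t == 0)                          /* wrapped: another CPU leads the shootdown */
        return 0;                        /* mirrors the "goto skip_clocktick" path   */
    return (t + 1) & WRAP_MASK;          /* normal tick, modulo WRAP_MASK + 1        */
}

int main(void)
{
    uint32_t t = 0x3FE;

    for (int i = 0; i < 4; i++)
    {
        uint32_t next = clock_tick(t);
        printf("0x%03x -> 0x%03x%s\n", t, next,
               (next == 0 && t != 0)
               ? "  (wrap: raise NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ)" : "");
        t = next;
    }
    return 0;
}

In the real code the CPU that caused the wrap raises the softirq, leads the global shootdown and restarts the clock (note the initial value 1U above); the model simply stays at 0 to mirror the skip_clocktick path.
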
  54
  55          /*
  56           * post_flush(): Update this CPU's timestamp with specified clock value.
  57           *
  58           * Note that this happens *after* flushing the TLB, as otherwise we can race a
  59           * NEED_FLUSH() test on another CPU. (e.g., other CPU sees the updated CPU
  60           * stamp and so does not force a synchronous TLB flush, but the flush in this
  61           * function hasn't yet occurred and so the TLB might be stale). The ordering
  62           * would only actually matter if this function were interruptible, and
  63           * something that abuses the stale mapping could exist in an interrupt
  64           * handler. In fact neither of these is the case, so really we are being ultra
  65           * paranoid.
  66           */
  67          static void post_flush(u32 t)
  68   40.0k  {
  69   40.0k      this_cpu(tlbflush_time) = t;
  70   40.0k  }
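
post_flush()'s comment refers to a NEED_FLUSH() test on another CPU: the per-CPU tlbflush_time stamp written here is compared against the clock value at which a mapping was last in use, to decide whether that CPU's TLB could still hold the mapping. The snippet below is a simplified paraphrase of that consumer-side comparison, for illustration only (the real predicate is NEED_FLUSH() in asm/flushtlb.h; the function and parameter names here are ours, not Xen's):

#include <stdbool.h>
#include <stdint.h>

/*
 * cpu_stamp:     the remote CPU's tlbflush_time, written by post_flush().
 * lastuse_stamp: tlbflush_clock value recorded when the mapping was last used.
 * curr_time:     current tlbflush_clock value.
 */
static bool may_need_flush(uint32_t cpu_stamp, uint32_t lastuse_stamp,
                           uint32_t curr_time)
{
    /* Clock reads 0: a wrap/global shootdown is in progress, so play safe. */
    if (curr_time == 0)
        return true;

    /* The CPU has not flushed since the mapping was last used within the
     * current clock period, so its TLB may still cache the old entry. */
    return cpu_stamp <= lastuse_stamp && lastuse_stamp <= curr_time;
}

This also shows why post_flush() must run after the hardware flush: if the stamp were published first, a remote observer could conclude "no flush needed" while stale entries are still present.
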
  71
  72          void write_cr3(unsigned long cr3)
  73   39.6k  {
  74   39.6k      unsigned long flags, cr4 = read_cr4();
  75   39.6k      u32 t;
  76   39.6k
  77   39.6k      /* This non-reentrant function is sometimes called in interrupt context. */
  78   39.6k      local_irq_save(flags);
  79   39.6k
  80   39.6k      t = pre_flush();
  81   39.6k
  82   39.6k      hvm_flush_guest_tlbs();
  83   39.6k
  84   39.6k      write_cr4(cr4 & ~X86_CR4_PGE);
  85   39.6k      asm volatile ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
  86   39.6k      write_cr4(cr4);
  87   39.6k
  88   39.6k      post_flush(t);
  89   39.6k
  90   39.6k      local_irq_restore(flags);
  91   39.6k  }
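
A note on the CR4 dance above: a plain CR3 load does not evict global (G-bit) TLB entries, so write_cr3() temporarily clears CR4.PGE around the load to force those out as well, with the whole sequence bracketed by pre_flush()/post_flush() and run with interrupts off. A minimal, hypothetical caller (the function name and argument are illustrative, not taken from the listing) would look like:

/* Hypothetical caller: switch this CPU onto a new set of page tables.
 * new_cr3_maddr is assumed to hold the machine address of the new
 * top-level page table; the name is illustrative only. */
static void switch_address_space(unsigned long new_cr3_maddr)
{
    /* Ticks the flush clock, loads CR3 with CR4.PGE cleared (so global
     * entries are flushed too), then stamps this CPU's tlbflush_time. */
    write_cr3(new_cr3_maddr);
}
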
  92
  93          /*
  94           * The return value of this function is the passed in "flags" argument with
  95           * bits cleared that have been fully (i.e. system-wide) taken care of, i.e.
  96           * namely not requiring any further action on remote CPUs.
  97           */
  98          unsigned int flush_area_local(const void *va, unsigned int flags)
  99     700  {
 100     700      unsigned int order = (flags - 1) & FLUSH_ORDER_MASK;
 101     700      unsigned long irqfl;
 102     700
 103     700      /* This non-reentrant function is sometimes called in interrupt context. */
 104     700      local_irq_save(irqfl);
 105     700
 106     700      if ( flags & (FLUSH_TLB|FLUSH_TLB_GLOBAL) )
 107     492      {
 108     492          if ( order == 0 )
 109     108          {
 110     108              /*
 111     108               * We don't INVLPG multi-page regions because the 2M/4M/1G
 112     108               * region may not have been mapped with a superpage. Also there
 113     108               * are various errata surrounding INVLPG usage on superpages, and
 114     108               * a full flush is in any case not *that* expensive.
 115     108               */
 116     108              asm volatile ( "invlpg %0"
 117     108                             : : "m" (*(const char *)(va)) : "memory" );
 118     108          }
 119     492          else
 120     384          {
 121     384              u32 t = pre_flush();
 122     384              unsigned long cr4 = read_cr4();
 123     384
 124     384              hvm_flush_guest_tlbs();
 125     384
 126     384              write_cr4(cr4 & ~X86_CR4_PGE);
 127     384              barrier();
 128     384              write_cr4(cr4);
 129     384
 130     384              post_flush(t);
 131     384          }
 132     492      }
 133     700
 134     700      if ( flags & FLUSH_CACHE )
 135     159      {
 136     159          const struct cpuinfo_x86 *c = &current_cpu_data;
 137     159          unsigned long i, sz = 0;
 138     159
 139     159          if ( order < (BITS_PER_LONG - PAGE_SHIFT) )
 140       0              sz = 1UL << (order + PAGE_SHIFT);
 141     159
 142     159          if ( (!(flags & (FLUSH_TLB|FLUSH_TLB_GLOBAL)) ||
 143       0                (flags & FLUSH_VA_VALID)) &&
 144     171               c->x86_clflush_size && c->x86_cache_size && sz &&
 145       0               ((sz >> 10) < c->x86_cache_size) )
 146       0          {
 147       0              alternative(ASM_NOP3, "sfence", X86_FEATURE_CLFLUSHOPT);
 148       0              for ( i = 0; i < sz; i += c->x86_clflush_size )
 149       0                  alternative_input(".byte " __stringify(NOP_DS_PREFIX) ";"
 150       0                                    " clflush %0",
 151       0                                    "data16 clflush %0",      /* clflushopt */
 152       0                                    X86_FEATURE_CLFLUSHOPT,
 153       0                                    "m" (((const char *)va)[i]));
 154       0              flags &= ~FLUSH_CACHE;
 155       0          }
 156     159          else
 157     159          {
 158     159              wbinvd();
 159     159          }
 160     159      }
 161     700
 162     700      local_irq_restore(irqfl);
 163     700
 164     700      return flags;
 165     700  }
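
The header comment defines flush_area_local()'s contract: bits that the local call satisfied system-wide come back cleared, so a caller only involves remote CPUs for whatever is still set (in the listing above that applies to FLUSH_CACHE when the region was small enough to CLFLUSH, since CLFLUSH works across the coherency domain while WBINVD and the TLB operations are CPU-local). A rough sketch of such a caller follows; in Xen this pattern lives in flush_area_mask() in smp.c, but the code below is a simplified outline and send_flush_ipi() is a hypothetical helper name, not a real Xen function:

/* Outline of a caller honouring the return-value contract. */
static void flush_area_everywhere(const cpumask_t *mask, const void *va,
                                  unsigned int flags)
{
    /* Local part first: bits fully taken care of here come back cleared. */
    if ( cpumask_test_cpu(smp_processor_id(), mask) )
        flags = flush_area_local(va, flags);

    /* Any action bit (beyond the order field decoded on line 100) still set
     * means remote CPUs must be asked to act on it. */
    if ( flags & ~FLUSH_ORDER_MASK )
        send_flush_ipi(mask, va, flags);   /* hypothetical remote-flush helper */
}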