/root/src/xen/xen/arch/x86/cpu/mcheck/barrier.h
#ifndef _MCHECK_BARRIER_H
#define _MCHECK_BARRIER_H

#include <asm/atomic.h>

/* MCE handling */
struct mce_softirq_barrier {
    atomic_t val;     /* number of CPUs currently inside the barrier */
    atomic_t ingen;   /* generation number, bumped on each barrier entry */
    atomic_t outgen;  /* generation number, bumped on each barrier exit */
};

#define DEFINE_MCE_BARRIER(name)        \
    struct mce_softirq_barrier name = { \
        .val = ATOMIC_INIT(0),          \
        .ingen = ATOMIC_INIT(0),        \
        .outgen = ATOMIC_INIT(0),       \
    }
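
/*
 * Typical usage of the initializer above (the barrier name here is
 * hypothetical, chosen for illustration):
 *
 *   static DEFINE_MCE_BARRIER(example_bar);
 *
 * which declares the barrier with all three counters starting at zero.
 */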

/*
 * Initialize a barrier. Just set it to 0.
 */
void mce_barrier_init(struct mce_softirq_barrier *);
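
/*
 * "Set it to 0" amounts to clearing all three counters.  A minimal
 * sketch, assuming the atomic_set() helper from <asm/atomic.h> (the
 * real definition lives in barrier.c and may differ):
 *
 *   void mce_barrier_init(struct mce_softirq_barrier *bar)
 *   {
 *       atomic_set(&bar->val, 0);
 *       atomic_set(&bar->ingen, 0);
 *       atomic_set(&bar->outgen, 0);
 *   }
 */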

/*
 * This function is needed when offlining a CPU as part of the
 * recovery actions.
 *
 * Decrement a barrier only.  Needed for cases where the CPU
 * in question can't do it itself (e.g. it is being offlined).
 */
void mce_barrier_dec(struct mce_softirq_barrier *);
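
/*
 * Illustrative sketch only (the real definition lives in barrier.c):
 * drop the count on behalf of the CPU that cannot do it itself, and
 * bump the exit generation so that CPUs spinning on this barrier
 * re-check their exit condition:
 *
 *   void mce_barrier_dec(struct mce_softirq_barrier *bar)
 *   {
 *       atomic_inc(&bar->outgen);
 *       atomic_dec(&bar->val);
 *   }
 */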

/*
 * If @wait is false, mce_barrier_enter/exit() will return immediately
 * without touching the barrier.  This is used when handling a
 * non-broadcasting MCE (e.g. an MCE on some old Intel CPUs, an MCE on
 * AMD CPUs, or an LMCE on Intel Skylake-server CPUs) which is received
 * on only one CPU and thus does not invoke mce_barrier_enter/exit()
 * calls on all CPUs.
 *
 * If @wait is true, mce_barrier_enter/exit() will handle the given
 * barrier as below.
 *
 * Increment the generation number and the value.  The generation number
 * is incremented when entering a barrier.  This way, it can be checked
 * on exit whether a CPU is trying to re-enter the barrier.  This can
 * happen if the first CPU to make it out immediately exits or re-enters,
 * while another CPU that is still in the loop becomes otherwise occupied
 * (e.g. it needs to service an interrupt), missing the value it's
 * waiting for.
 *
 * These barrier functions should always be paired, so that the
 * counter value will reach 0 again after all CPUs have exited.
 */
void mce_barrier_enter(struct mce_softirq_barrier *, bool wait);
void mce_barrier_exit(struct mce_softirq_barrier *, bool wait);
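
/*
 * A minimal sketch of the enter side under the scheme described above,
 * showing how the generation number breaks the wait loop when another
 * CPU has already left the barrier.  Illustrative only: the real
 * definitions (and their memory barriers) live in barrier.c, and
 * num_online_cpus()/cpu_relax() are not pulled in by this header.
 *
 *   void mce_barrier_enter(struct mce_softirq_barrier *bar, bool wait)
 *   {
 *       int gen;
 *
 *       if ( !wait )
 *           return;
 *       atomic_inc(&bar->ingen);
 *       gen = atomic_read(&bar->outgen);
 *       atomic_inc(&bar->val);
 *       // Spin until all online CPUs have arrived, or some CPU has
 *       // already exited (@outgen changed), i.e. we missed the
 *       // rendezvous and must not wait for a count that won't come.
 *       while ( atomic_read(&bar->val) != num_online_cpus() &&
 *               atomic_read(&bar->outgen) == gen )
 *           cpu_relax();
 *   }
 *
 * mce_barrier_exit() mirrors this: increment @outgen, decrement @val,
 * and spin until @val reaches 0 or @ingen changes.
 */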

void mce_barrier(struct mce_softirq_barrier *);
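
/*
 * Presumably a full rendezvous: an enter immediately followed by an
 * exit on the same barrier.  Sketch only; the real definition is in
 * barrier.c and picks @wait based on whether MCEs are broadcast on
 * this platform:
 *
 *   void mce_barrier(struct mce_softirq_barrier *bar)
 *   {
 *       mce_barrier_enter(bar, true);
 *       mce_barrier_exit(bar, true);
 *   }
 */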

#endif /* _MCHECK_BARRIER_H */