/root/src/xen/xen/arch/x86/percpu.c
Line | Count | Source (jump to first uncovered line) |
1 | | #include <xen/percpu.h> |
2 | | #include <xen/cpu.h> |
3 | | #include <xen/init.h> |
4 | | #include <xen/mm.h> |
5 | | #include <xen/rcupdate.h> |
6 | | |
/*
 * Per-CPU translation offsets: per_cpu(var, cpu) resolves to the address of
 * var in the linked template section, displaced by __per_cpu_offset[cpu].
 */
unsigned long __per_cpu_offset[NR_CPUS];

/*
 * Force uses of per_cpu() with an invalid area to attempt to access the
 * middle of the non-canonical address space resulting in a #GP, rather than a
 * possible #PF at (NULL + a little) which has security implications in the
 * context of PV guests.
 */
#define INVALID_PERCPU_AREA (0x8000000000000000L - (long)__per_cpu_start)
/* Allocation order covering the whole per-CPU data template section. */
#define PERCPU_ORDER get_order_from_bytes(__per_cpu_data_end - __per_cpu_start)
17 | | |
18 | | void __init percpu_init_areas(void) |
19 | 1 | { |
20 | 1 | unsigned int cpu; |
21 | 1 | |
22 | 256 | for ( cpu = 1; cpu < NR_CPUS; cpu++ ) |
23 | 255 | __per_cpu_offset[cpu] = INVALID_PERCPU_AREA; |
24 | 1 | } |
25 | | |
26 | | static int init_percpu_area(unsigned int cpu) |
27 | 11 | { |
28 | 11 | char *p; |
29 | 11 | |
30 | 11 | if ( __per_cpu_offset[cpu] != INVALID_PERCPU_AREA ) |
31 | 0 | return -EBUSY; |
32 | 11 | |
33 | 11 | if ( (p = alloc_xenheap_pages(PERCPU_ORDER, 0)) == NULL ) |
34 | 0 | return -ENOMEM; |
35 | 11 | |
36 | 11 | memset(p, 0, __per_cpu_data_end - __per_cpu_start); |
37 | 11 | __per_cpu_offset[cpu] = p - __per_cpu_start; |
38 | 11 | |
39 | 11 | return 0; |
40 | 11 | } |
41 | | |
/*
 * Book-keeping for deferred (RCU) freeing of a CPU's per-CPU area.  Each
 * instance lives inside the very per-CPU area being torn down; the RCU
 * callback reads it before handing that memory back to the heap.
 */
struct free_info {
    unsigned int cpu;      /* CPU whose area is pending release. */
    struct rcu_head rcu;   /* Linkage for call_rcu(). */
};
static DEFINE_PER_CPU(struct free_info, free_info);
47 | | |
48 | | static void _free_percpu_area(struct rcu_head *head) |
49 | 0 | { |
50 | 0 | struct free_info *info = container_of(head, struct free_info, rcu); |
51 | 0 | unsigned int cpu = info->cpu; |
52 | 0 | char *p = __per_cpu_start + __per_cpu_offset[cpu]; |
53 | 0 |
|
54 | 0 | free_xenheap_pages(p, PERCPU_ORDER); |
55 | 0 | __per_cpu_offset[cpu] = INVALID_PERCPU_AREA; |
56 | 0 | } |
57 | | |
58 | | static void free_percpu_area(unsigned int cpu) |
59 | 0 | { |
60 | 0 | struct free_info *info = &per_cpu(free_info, cpu); |
61 | 0 |
|
62 | 0 | info->cpu = cpu; |
63 | 0 | call_rcu(&info->rcu, _free_percpu_area); |
64 | 0 | } |
65 | | |
66 | | static int cpu_percpu_callback( |
67 | | struct notifier_block *nfb, unsigned long action, void *hcpu) |
68 | 33 | { |
69 | 33 | unsigned int cpu = (unsigned long)hcpu; |
70 | 33 | int rc = 0; |
71 | 33 | |
72 | 33 | switch ( action ) |
73 | 33 | { |
74 | 11 | case CPU_UP_PREPARE: |
75 | 11 | rc = init_percpu_area(cpu); |
76 | 11 | break; |
77 | 0 | case CPU_UP_CANCELED: |
78 | 0 | case CPU_DEAD: |
79 | 0 | free_percpu_area(cpu); |
80 | 0 | break; |
81 | 22 | default: |
82 | 22 | break; |
83 | 33 | } |
84 | 33 | |
85 | 33 | return !rc ? NOTIFY_DONE : notifier_from_errno(rc); |
86 | 33 | } |
87 | | |
/*
 * Highest-priority notifier: per-CPU areas must exist before any other
 * subsystem's CPU_UP_PREPARE handler touches its per-CPU state, and must
 * be torn down only after theirs on the way down.
 */
static struct notifier_block cpu_percpu_nfb = {
    .notifier_call = cpu_percpu_callback,
    .priority = 100 /* highest priority */
};
92 | | |
/*
 * Register the hotplug notifier before secondary CPUs are brought up, so
 * their per-CPU areas get allocated during CPU_UP_PREPARE.
 */
static int __init percpu_presmp_init(void)
{
    register_cpu_notifier(&cpu_percpu_nfb);

    return 0;
}
presmp_initcall(percpu_presmp_init);
100 | | |
101 | | /* |
102 | | * Local variables: |
103 | | * mode: C |
104 | | * c-file-style: "BSD" |
105 | | * c-basic-offset: 4 |
106 | | * indent-tabs-mode: nil |
107 | | * End: |
108 | | */ |