/root/src/xen/xen/common/virtual_region.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2016 Oracle and/or its affiliates. All rights reserved. |
3 | | */ |
4 | | |
5 | | #include <xen/init.h> |
6 | | #include <xen/kernel.h> |
7 | | #include <xen/rcupdate.h> |
8 | | #include <xen/spinlock.h> |
9 | | #include <xen/virtual_region.h> |
10 | | |
11 | | static struct virtual_region core = { |
12 | | .list = LIST_HEAD_INIT(core.list), |
13 | | .start = _stext, |
14 | | .end = _etext, |
15 | | }; |
16 | | |
17 | | /* Becomes irrelevant when __init sections are cleared. */ |
18 | | static struct virtual_region core_init __initdata = { |
19 | | .list = LIST_HEAD_INIT(core_init.list), |
20 | | .start = _sinittext, |
21 | | .end = _einittext, |
22 | | }; |
23 | | |
24 | | /* |
25 | | * RCU locking. Additions are done either at startup (when there is only |
26 | | * one CPU) or when all CPUs are running without IRQs. |
27 | | * |
28 | | * Deletions are a bit tricky. They happen either via Live Patch (all |
29 | | * CPUs running without IRQs) or during bootup (when clearing the init). |
30 | | * |
31 | | * Hence we use list_del_rcu (which includes a memory fence) and a spinlock |
32 | | * on deletion. |
33 | | * |
34 | | * All readers of virtual_region_list MUST use list_for_each_entry_rcu. |
35 | | */ |
36 | | static LIST_HEAD(virtual_region_list); |
37 | | static DEFINE_SPINLOCK(virtual_region_lock); |
38 | | static DEFINE_RCU_READ_LOCK(rcu_virtual_region_lock); |
39 | | |
40 | | const struct virtual_region *find_text_region(unsigned long addr) |
41 | 14 | { |
42 | 14 | const struct virtual_region *region; |
43 | 14 | |
44 | 14 | rcu_read_lock(&rcu_virtual_region_lock); |
45 | 14 | list_for_each_entry_rcu( region, &virtual_region_list, list ) |
46 | 24 | { |
47 | 24 | if ( (void *)addr >= region->start && (void *)addr < region->end ) |
48 | 6 | { |
49 | 6 | rcu_read_unlock(&rcu_virtual_region_lock); |
50 | 6 | return region; |
51 | 6 | } |
52 | 24 | } |
53 | 8 | rcu_read_unlock(&rcu_virtual_region_lock); |
54 | 8 | |
55 | 8 | return NULL; |
56 | 14 | } |
57 | | |
58 | | void register_virtual_region(struct virtual_region *r) |
59 | 2 | { |
60 | 2 | ASSERT(!local_irq_is_enabled()); |
61 | 2 | |
62 | 2 | list_add_tail_rcu(&r->list, &virtual_region_list); |
63 | 2 | } |
64 | | |
65 | | static void remove_virtual_region(struct virtual_region *r) |
66 | 1 | { |
67 | 1 | unsigned long flags; |
68 | 1 | |
69 | 1 | spin_lock_irqsave(&virtual_region_lock, flags); |
70 | 1 | list_del_rcu(&r->list); |
71 | 1 | spin_unlock_irqrestore(&virtual_region_lock, flags); |
72 | 1 | /* |
73 | 1 | * We do not need to invoke call_rcu. |
74 | 1 | * |
75 | 1 | * This is because on deletion we make sure to take the spinlock |
76 | 1 | * (to guard against somebody else calling |
77 | 1 | * unregister_virtual_region concurrently) and to use |
78 | 1 | * list_del_rcu, which includes a memory barrier. |
79 | 1 | * |
80 | 1 | * That protects us from corrupting the list, as the readers all |
81 | 1 | * use list_for_each_entry_rcu, which is safe against concurrent |
82 | 1 | * deletions. |
83 | 1 | */ |
84 | 1 | } |
85 | | |
86 | | void unregister_virtual_region(struct virtual_region *r) |
87 | 0 | { |
88 | 0 | /* Expected to be called from Live Patch, which has IRQs disabled. */ |
89 | 0 | ASSERT(!local_irq_is_enabled()); |
90 | 0 | |
91 | 0 | remove_virtual_region(r); |
92 | 0 | } |
93 | | |
94 | | void __init unregister_init_virtual_region(void) |
95 | 1 | { |
96 | 1 | BUG_ON(system_state != SYS_STATE_active); |
97 | 1 | |
98 | 1 | remove_virtual_region(&core_init); |
99 | 1 | } |
100 | | |
101 | | void __init setup_virtual_regions(const struct exception_table_entry *start, |
102 | | const struct exception_table_entry *end) |
103 | 1 | { |
104 | 1 | size_t sz; |
105 | 1 | unsigned int i; |
106 | 1 | static const struct bug_frame *const __initconstrel bug_frames[] = { |
107 | 1 | __start_bug_frames, |
108 | 1 | __stop_bug_frames_0, |
109 | 1 | __stop_bug_frames_1, |
110 | 1 | __stop_bug_frames_2, |
111 | 1 | #ifdef CONFIG_X86 |
112 | 1 | __stop_bug_frames_3, |
113 | 1 | #endif |
114 | 1 | NULL |
115 | 1 | }; |
116 | 1 | |
117 | 5 | for ( i = 1; bug_frames[i]; i++ ) |
118 | 4 | { |
119 | 4 | const struct bug_frame *s; |
120 | 4 | |
121 | 4 | s = bug_frames[i - 1]; |
122 | 4 | sz = bug_frames[i] - s; |
123 | 4 | |
124 | 4 | core.frame[i - 1].n_bugs = sz; |
125 | 4 | core.frame[i - 1].bugs = s; |
126 | 4 | |
127 | 4 | core_init.frame[i - 1].n_bugs = sz; |
128 | 4 | core_init.frame[i - 1].bugs = s; |
129 | 4 | } |
130 | 1 | |
131 | 1 | core_init.ex = core.ex = start; |
132 | 1 | core_init.ex_end = core.ex_end = end; |
133 | 1 | |
134 | 1 | register_virtual_region(&core_init); |
135 | 1 | register_virtual_region(&core); |
136 | 1 | } |
137 | | |
138 | | /* |
139 | | * Local variables: |
140 | | * mode: C |
141 | | * c-file-style: "BSD" |
142 | | * c-basic-offset: 4 |
143 | | * tab-width: 4 |
144 | | * indent-tabs-mode: nil |
145 | | * End: |
146 | | */ |