/root/src/xen/xen/arch/x86/mm/mm-locks.h
Line | Count | Source (jump to first uncovered line) |
1 | | /****************************************************************************** |
2 | | * arch/x86/mm/mm-locks.h |
3 | | * |
4 | | * Spinlocks used by the code in arch/x86/mm. |
5 | | * |
6 | | * Copyright (c) 2011 Citrix Systems, inc. |
7 | | * Copyright (c) 2007 Advanced Micro Devices (Wei Huang) |
8 | | * Copyright (c) 2006-2007 XenSource Inc. |
9 | | * Copyright (c) 2006 Michael A Fetterman |
10 | | * |
11 | | * This program is free software; you can redistribute it and/or modify |
12 | | * it under the terms of the GNU General Public License as published by |
13 | | * the Free Software Foundation; either version 2 of the License, or |
14 | | * (at your option) any later version. |
15 | | * |
16 | | * This program is distributed in the hope that it will be useful, |
17 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
18 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
19 | | * GNU General Public License for more details. |
20 | | * |
21 | | * You should have received a copy of the GNU General Public License |
22 | | * along with this program; If not, see <http://www.gnu.org/licenses/>. |
23 | | */ |
24 | | |
25 | | #ifndef _MM_LOCKS_H |
26 | | #define _MM_LOCKS_H |
27 | | |
28 | | #include <asm/mem_sharing.h> |
29 | | |
/* Per-CPU variable for enforcing the lock ordering */
DECLARE_PER_CPU(int, mm_lock_level);
/* Current highest MM_LOCK_ORDER_* level held on this CPU. */
#define __get_lock_level() (this_cpu(mm_lock_level))

/* Single global percpu rwlock instance backing every mm_rwlock_t below. */
DECLARE_PERCPU_RWLOCK_GLOBAL(p2m_percpu_rwlock);
35 | | |
36 | | static inline void mm_lock_init(mm_lock_t *l) |
37 | 24 | { |
38 | 24 | spin_lock_init(&l->lock); |
39 | 24 | l->locker = -1; |
40 | 24 | l->locker_function = "nobody"; |
41 | 24 | l->unlock_level = 0; |
42 | 24 | } Line | Count | Source | 37 | 1 | { | 38 | 1 | spin_lock_init(&l->lock); | 39 | 1 | l->locker = -1; | 40 | 1 | l->locker_function = "nobody"; | 41 | 1 | l->unlock_level = 0; | 42 | 1 | } |
Unexecuted instantiation: nested_ept.c:mm_lock_init Unexecuted instantiation: nested_hap.c:mm_lock_init Unexecuted instantiation: guest_walk.c:mm_lock_init Unexecuted instantiation: hap.c:mm_lock_init Unexecuted instantiation: multi.c:mm_lock_init Unexecuted instantiation: common.c:mm_lock_init Unexecuted instantiation: mem_access.c:mm_lock_init Unexecuted instantiation: mem_sharing.c:mm_lock_init Unexecuted instantiation: p2m-pod.c:mm_lock_init Unexecuted instantiation: p2m-ept.c:mm_lock_init Unexecuted instantiation: p2m-pt.c:mm_lock_init Line | Count | Source | 37 | 23 | { | 38 | 23 | spin_lock_init(&l->lock); | 39 | 23 | l->locker = -1; | 40 | 23 | l->locker_function = "nobody"; | 41 | 23 | l->unlock_level = 0; | 42 | 23 | } |
|
43 | | |
44 | | static inline int mm_locked_by_me(mm_lock_t *l) |
45 | 71.8k | { |
46 | 71.8k | return (l->lock.recurse_cpu == current->processor); |
47 | 71.8k | } Line | Count | Source | 45 | 34.5k | { | 46 | 34.5k | return (l->lock.recurse_cpu == current->processor); | 47 | 34.5k | } |
Unexecuted instantiation: p2m.c:mm_locked_by_me Unexecuted instantiation: p2m-pt.c:mm_locked_by_me Unexecuted instantiation: p2m-ept.c:mm_locked_by_me Unexecuted instantiation: p2m-pod.c:mm_locked_by_me Unexecuted instantiation: mem_sharing.c:mm_locked_by_me Unexecuted instantiation: mem_access.c:mm_locked_by_me Unexecuted instantiation: common.c:mm_locked_by_me Unexecuted instantiation: multi.c:mm_locked_by_me Line | Count | Source | 45 | 37.2k | { | 46 | 37.2k | return (l->lock.recurse_cpu == current->processor); | 47 | 37.2k | } |
Unexecuted instantiation: guest_walk.c:mm_locked_by_me Unexecuted instantiation: nested_hap.c:mm_locked_by_me Unexecuted instantiation: nested_ept.c:mm_locked_by_me |
48 | | |
/*
 * Enforce the locking order: a lock at level (l) may only be taken while
 * this CPU's recorded level is <= (l).
 *
 * If you see this crash, the numbers printed are order levels defined
 * in this file.
 *
 * NOTE: (l) may be evaluated more than once (on the failure path), so
 * callers should pass side-effect-free expressions.
 */
#define __check_lock_level(l)                           \
do {                                                    \
    if ( unlikely(__get_lock_level() > (l)) )           \
    {                                                   \
        printk("mm locking order violation: %i > %i\n", \
               __get_lock_level(), (l));                \
        BUG();                                          \
    }                                                   \
} while(0)
62 | | |
/* Record (l) as the lock level now held on this CPU. */
#define __set_lock_level(l)  \
do {                         \
    __get_lock_level() = (l);\
} while(0)
67 | | |
68 | | static inline void _mm_lock(mm_lock_t *l, const char *func, int level, int rec) |
69 | 35.9k | { |
70 | 35.9k | if ( !((mm_locked_by_me(l)) && rec) ) |
71 | 35.9k | __check_lock_level(level); |
72 | 35.9k | spin_lock_recursive(&l->lock); |
73 | 35.9k | if ( l->lock.recurse_cnt == 1 ) |
74 | 35.9k | { |
75 | 35.9k | l->locker_function = func; |
76 | 35.9k | l->unlock_level = __get_lock_level(); |
77 | 35.9k | } |
78 | 0 | else if ( (unlikely(!rec)) ) |
79 | 0 | panic("mm lock already held by %s", l->locker_function); |
80 | 35.9k | __set_lock_level(level); |
81 | 35.9k | } Unexecuted instantiation: mem_access.c:_mm_lock Unexecuted instantiation: nested_ept.c:_mm_lock Unexecuted instantiation: nested_hap.c:_mm_lock Unexecuted instantiation: guest_walk.c:_mm_lock Line | Count | Source | 69 | 1.38k | { | 70 | 1.38k | if ( !((mm_locked_by_me(l)) && rec) ) | 71 | 1.38k | __check_lock_level(level); | 72 | 1.38k | spin_lock_recursive(&l->lock); | 73 | 1.38k | if ( l->lock.recurse_cnt == 1 ) | 74 | 1.38k | { | 75 | 1.38k | l->locker_function = func; | 76 | 1.38k | l->unlock_level = __get_lock_level(); | 77 | 1.38k | } | 78 | 0 | else if ( (unlikely(!rec)) ) | 79 | 0 | panic("mm lock already held by %s", l->locker_function); | 80 | 1.38k | __set_lock_level(level); | 81 | 1.38k | } |
Unexecuted instantiation: multi.c:_mm_lock Unexecuted instantiation: common.c:_mm_lock Unexecuted instantiation: mem_sharing.c:_mm_lock Unexecuted instantiation: p2m-pod.c:_mm_lock Unexecuted instantiation: p2m-ept.c:_mm_lock Unexecuted instantiation: p2m-pt.c:_mm_lock Unexecuted instantiation: p2m.c:_mm_lock Line | Count | Source | 69 | 34.5k | { | 70 | 34.5k | if ( !((mm_locked_by_me(l)) && rec) ) | 71 | 34.5k | __check_lock_level(level); | 72 | 34.5k | spin_lock_recursive(&l->lock); | 73 | 34.5k | if ( l->lock.recurse_cnt == 1 ) | 74 | 34.5k | { | 75 | 34.5k | l->locker_function = func; | 76 | 34.5k | l->unlock_level = __get_lock_level(); | 77 | 34.5k | } | 78 | 0 | else if ( (unlikely(!rec)) ) | 79 | 0 | panic("mm lock already held by %s", l->locker_function); | 80 | 34.5k | __set_lock_level(level); | 81 | 34.5k | } |
|
82 | | |
/*
 * Ordering-only "pre" hook for external (non-mm_lock_t) locks: validate
 * that taking a lock at @level respects the ordering discipline.
 */
static inline void _mm_enforce_order_lock_pre(int level)
{
    __check_lock_level(level);
}
87 | | |
/*
 * Ordering-only "post" hook for external (non-mm_lock_t) locks.
 *
 * Saves the current per-CPU level into *unlock_level -- on every call when
 * @recurse_count is NULL, otherwise only on the outermost acquisition --
 * and then raises the per-CPU level to @level.
 */
static inline void _mm_enforce_order_lock_post(int level, int *unlock_level,
                                               unsigned short *recurse_count)
{
    if ( !recurse_count || (*recurse_count)++ == 0 )
        *unlock_level = __get_lock_level();
    __set_lock_level(level);
}
102 | | |
103 | | |
104 | | static inline void mm_rwlock_init(mm_rwlock_t *l) |
105 | 21 | { |
106 | 21 | percpu_rwlock_resource_init(&l->lock, p2m_percpu_rwlock); |
107 | 21 | l->locker = -1; |
108 | 21 | l->locker_function = "nobody"; |
109 | 21 | l->unlock_level = 0; |
110 | 21 | } Line | Count | Source | 105 | 21 | { | 106 | 21 | percpu_rwlock_resource_init(&l->lock, p2m_percpu_rwlock); | 107 | 21 | l->locker = -1; | 108 | 21 | l->locker_function = "nobody"; | 109 | 21 | l->unlock_level = 0; | 110 | 21 | } |
Unexecuted instantiation: p2m-pt.c:mm_rwlock_init Unexecuted instantiation: p2m-ept.c:mm_rwlock_init Unexecuted instantiation: p2m-pod.c:mm_rwlock_init Unexecuted instantiation: mem_sharing.c:mm_rwlock_init Unexecuted instantiation: paging.c:mm_rwlock_init Unexecuted instantiation: common.c:mm_rwlock_init Unexecuted instantiation: multi.c:mm_rwlock_init Unexecuted instantiation: hap.c:mm_rwlock_init Unexecuted instantiation: guest_walk.c:mm_rwlock_init Unexecuted instantiation: nested_hap.c:mm_rwlock_init Unexecuted instantiation: nested_ept.c:mm_rwlock_init Unexecuted instantiation: mem_access.c:mm_rwlock_init |
111 | | |
112 | | static inline int mm_write_locked_by_me(mm_rwlock_t *l) |
113 | 2.98M | { |
114 | 2.98M | return (l->locker == get_processor_id()); |
115 | 2.98M | } Unexecuted instantiation: nested_ept.c:mm_write_locked_by_me Unexecuted instantiation: nested_hap.c:mm_write_locked_by_me Unexecuted instantiation: guest_walk.c:mm_write_locked_by_me Unexecuted instantiation: hap.c:mm_write_locked_by_me Unexecuted instantiation: multi.c:mm_write_locked_by_me Unexecuted instantiation: common.c:mm_write_locked_by_me Unexecuted instantiation: mem_access.c:mm_write_locked_by_me Unexecuted instantiation: mem_sharing.c:mm_write_locked_by_me Unexecuted instantiation: p2m-pod.c:mm_write_locked_by_me p2m-ept.c:mm_write_locked_by_me Line | Count | Source | 113 | 3.15k | { | 114 | 3.15k | return (l->locker == get_processor_id()); | 115 | 3.15k | } |
Unexecuted instantiation: p2m-pt.c:mm_write_locked_by_me p2m.c:mm_write_locked_by_me Line | Count | Source | 113 | 2.97M | { | 114 | 2.97M | return (l->locker == get_processor_id()); | 115 | 2.97M | } |
Unexecuted instantiation: paging.c:mm_write_locked_by_me |
116 | | |
/*
 * Take @l for writing at order @level, recursively if this CPU already
 * holds it.  l->locker is only written while holding the write lock, and
 * recurse_count is only touched by the owning CPU, so neither needs
 * further protection.
 */
static inline void _mm_write_lock(mm_rwlock_t *l, const char *func, int level)
{
    if ( !mm_write_locked_by_me(l) )
    {
        __check_lock_level(level);
        percpu_write_lock(p2m_percpu_rwlock, &l->lock);
        l->locker = get_processor_id();
        l->locker_function = func;
        l->unlock_level = __get_lock_level();
        __set_lock_level(level);
    }
    l->recurse_count++;
}
130 | | |
/*
 * Drop one write recursion on @l; the rwlock itself (and the saved
 * per-CPU lock level) is only released on the outermost unlock.  The
 * bookkeeping is cleared before percpu_write_unlock() so a subsequent
 * writer's updates cannot be clobbered.
 */
static inline void mm_write_unlock(mm_rwlock_t *l)
{
    if ( --(l->recurse_count) != 0 )
        return;
    l->locker = -1;
    l->locker_function = "nobody";
    __set_lock_level(l->unlock_level);
    percpu_write_unlock(p2m_percpu_rwlock, &l->lock);
}
140 | | |
141 | | static inline void _mm_read_lock(mm_rwlock_t *l, int level) |
142 | 1.96M | { |
143 | 1.96M | __check_lock_level(level); |
144 | 1.96M | percpu_read_lock(p2m_percpu_rwlock, &l->lock); |
145 | 1.96M | /* There's nowhere to store the per-CPU unlock level so we can't |
146 | 1.96M | * set the lock level. */ |
147 | 1.96M | } Unexecuted instantiation: nested_ept.c:_mm_read_lock Line | Count | Source | 142 | 1.96M | { | 143 | 1.96M | __check_lock_level(level); | 144 | 1.96M | percpu_read_lock(p2m_percpu_rwlock, &l->lock); | 145 | 1.96M | /* There's nowhere to store the per-CPU unlock level so we can't | 146 | 1.96M | * set the lock level. */ | 147 | 1.96M | } |
Unexecuted instantiation: p2m-pt.c:_mm_read_lock Unexecuted instantiation: p2m-ept.c:_mm_read_lock Unexecuted instantiation: p2m-pod.c:_mm_read_lock Unexecuted instantiation: mem_sharing.c:_mm_read_lock Unexecuted instantiation: mem_access.c:_mm_read_lock Unexecuted instantiation: paging.c:_mm_read_lock Unexecuted instantiation: common.c:_mm_read_lock Unexecuted instantiation: multi.c:_mm_read_lock Unexecuted instantiation: hap.c:_mm_read_lock Unexecuted instantiation: guest_walk.c:_mm_read_lock Unexecuted instantiation: nested_hap.c:_mm_read_lock |
148 | | |
149 | | static inline void mm_read_unlock(mm_rwlock_t *l) |
150 | 1.98M | { |
151 | 1.98M | percpu_read_unlock(p2m_percpu_rwlock, &l->lock); |
152 | 1.98M | } Unexecuted instantiation: paging.c:mm_read_unlock Line | Count | Source | 150 | 1.98M | { | 151 | 1.98M | percpu_read_unlock(p2m_percpu_rwlock, &l->lock); | 152 | 1.98M | } |
Unexecuted instantiation: p2m-pt.c:mm_read_unlock Unexecuted instantiation: p2m-ept.c:mm_read_unlock Unexecuted instantiation: p2m-pod.c:mm_read_unlock Unexecuted instantiation: mem_sharing.c:mm_read_unlock Unexecuted instantiation: common.c:mm_read_unlock Unexecuted instantiation: multi.c:mm_read_unlock Unexecuted instantiation: hap.c:mm_read_unlock Unexecuted instantiation: guest_walk.c:mm_read_unlock Unexecuted instantiation: nested_hap.c:mm_read_unlock Unexecuted instantiation: nested_ept.c:mm_read_unlock Unexecuted instantiation: mem_access.c:mm_read_unlock |
153 | | |
/*
 * These wrappers bind a named lock to its MM_LOCK_ORDER_<name> constant
 * (defined below), which expresses the locking order.
 */
#define declare_mm_lock(name)                                                 \
    static inline void mm_lock_##name(mm_lock_t *l, const char *func, int rec)\
    { _mm_lock(l, func, MM_LOCK_ORDER_##name, rec); }
#define declare_mm_rwlock(name)                                               \
    static inline void mm_write_lock_##name(mm_rwlock_t *l, const char *func) \
    { _mm_write_lock(l, func, MM_LOCK_ORDER_##name); }                        \
    static inline void mm_read_lock_##name(mm_rwlock_t *l)                    \
    { _mm_read_lock(l, MM_LOCK_ORDER_##name); }
/* These capture the name of the calling function (via __func__) */
#define mm_lock(name, l) mm_lock_##name(l, __func__, 0)
#define mm_lock_recursive(name, l) mm_lock_##name(l, __func__, 1)
#define mm_write_lock(name, l) mm_write_lock_##name(l, __func__)
#define mm_read_lock(name, l) mm_read_lock_##name(l)
168 | | |
/* This wrapper is intended for "external" locks which do not use
 * the mm_lock_t types. Such locks inside the mm code are also subject
 * to ordering constraints. */
#define declare_mm_order_constraint(name)                                   \
    static inline void mm_enforce_order_lock_pre_##name(void)               \
    { _mm_enforce_order_lock_pre(MM_LOCK_ORDER_##name); }                   \
    static inline void mm_enforce_order_lock_post_##name(                   \
                        int *unlock_level, unsigned short *recurse_count)   \
    { _mm_enforce_order_lock_post(MM_LOCK_ORDER_##name, unlock_level, recurse_count); } \
178 | | |
/*
 * Release one recursion on @l.  On the outermost release, clear the
 * diagnostic bookkeeping and restore the per-CPU lock level saved by
 * _mm_lock() -- before dropping the spinlock itself.
 */
static inline void mm_unlock(mm_lock_t *l)
{
    if ( l->lock.recurse_cnt == 1 )
    {
        l->locker_function = "nobody";
        __set_lock_level(l->unlock_level);
    }
    spin_unlock_recursive(&l->lock);
}
188 | | |
/*
 * Undo a mm_enforce_order_lock_post_*() call: restore @unlock_level once
 * the (optional) recursion count drops back to zero.
 */
static inline void mm_enforce_order_unlock(int unlock_level,
                                           unsigned short *recurse_count)
{
    if ( recurse_count )
    {
        BUG_ON(*recurse_count == 0);
        if ( --(*recurse_count) == 0 )
            __set_lock_level(unlock_level);
    }
    else
        __set_lock_level(unlock_level);
}
203 | | |
204 | | /************************************************************************ |
205 | | * * |
206 | | * To avoid deadlocks, these locks _MUST_ be taken in the order listed * |
207 | | * below. The locking functions will enforce this. * |
208 | | * * |
209 | | ************************************************************************/ |
210 | | |
/* Nested P2M lock (per-domain)
 *
 * A per-domain lock that protects the mapping from nested-CR3 to
 * nested-p2m. In particular it covers:
 * - the array of nested-p2m tables, and all LRU activity therein; and
 * - setting the "cr3" field of any p2m table to a non-P2M_BASE_EADDR value.
 *   (i.e. assigning a p2m table to be the shadow of that cr3) */

#define MM_LOCK_ORDER_nestedp2m 8
declare_mm_lock(nestedp2m)
#define nestedp2m_lock(d)   mm_lock(nestedp2m, &(d)->arch.nested_p2m_lock)
#define nestedp2m_unlock(d) mm_unlock(&(d)->arch.nested_p2m_lock)
223 | | |
224 | | /* P2M lock (per-non-alt-p2m-table) |
225 | | * |
226 | | * This protects all queries and updates to the p2m table. |
227 | | * Queries may be made under the read lock but all modifications |
228 | | * need the main (write) lock. |
229 | | * |
230 | | * The write lock is recursive as it is common for a code path to look |
231 | | * up a gfn and later mutate it. |
232 | | * |
233 | | * Note that this lock shares its implementation with the altp2m |
234 | | * lock (not the altp2m list lock), so the implementation |
235 | | * is found there. |
236 | | * |
237 | | * Changes made to the host p2m when in altp2m mode are propagated to the |
238 | | * altp2ms synchronously in ept_set_entry(). At that point, we will hold |
239 | | * the host p2m lock; propagating this change involves grabbing the |
240 | | * altp2m_list lock, and the locks of the individual alternate p2ms. In |
241 | | * order to allow us to maintain locking order discipline, we split the p2m |
242 | | * lock into p2m (for host p2ms) and altp2m (for alternate p2ms), putting |
243 | | * the altp2mlist lock in the middle. |
244 | | */ |
245 | | |
/* Order 16: after nestedp2m (8), before per_page_sharing (24).  Only the
 * rwlock is declared here; the p2m_lock()/p2m_unlock() wrappers live further
 * down, next to the altp2m lock they share their implementation with. */
246 | 2.47M | #define MM_LOCK_ORDER_p2m 16
247 | | declare_mm_rwlock(p2m);
248 | | |
249 | | /* Sharing per page lock |
250 | | * |
251 | | * This is an external lock, not represented by an mm_lock_t. The memory |
252 | | * sharing lock uses it to protect addition and removal of (gfn,domain) |
253 | | * tuples to a shared page. We enforce order here against the p2m lock, |
254 | | * which is taken after the page_lock to change the gfn's p2m entry. |
255 | | * |
256 | | * The lock is recursive because during share we lock two pages. */ |
257 | | |
/* Order 24.  The sharing page lock itself is external (not an mm_lock_t),
 * so only an ordering constraint is declared: the pre/post hooks record the
 * lock level around the external acquire, and the unlock hook restores it. */
258 | 0 | #define MM_LOCK_ORDER_per_page_sharing 24
259 | | declare_mm_order_constraint(per_page_sharing)
260 | 0 | #define page_sharing_mm_pre_lock() mm_enforce_order_lock_pre_per_page_sharing()
261 | | #define page_sharing_mm_post_lock(l, r) \
262 | 0 | mm_enforce_order_lock_post_per_page_sharing((l), (r))
/* The (l), (r) pair carries the saved level and recursion count; recursion is
 * needed here because sharing locks two pages at once (see comment above). */
263 | 0 | #define page_sharing_mm_unlock(l, r) mm_enforce_order_unlock((l), (r))
264 | | |
265 | | /* Alternate P2M list lock (per-domain) |
266 | | * |
267 | | * A per-domain lock that protects the list of alternate p2m's. |
268 | | * Any operation that walks the list needs to acquire this lock. |
269 | | * Additionally, before destroying an alternate p2m all VCPU's |
270 | | * in the target domain must be paused. |
271 | | */ |
272 | | |
/* Order 32: deliberately between the host p2m lock (16) and the per-altp2m
 * lock (40), so host-p2m changes can be propagated to the alternate p2ms
 * while respecting the ordering discipline. */
273 | 0 | #define MM_LOCK_ORDER_altp2mlist 32
274 | | declare_mm_lock(altp2mlist)
275 | 1.06M | #define altp2m_list_lock(d) mm_lock(altp2mlist, &(d)->arch.altp2m_list_lock)
276 | 0 | #define altp2m_list_unlock(d) mm_unlock(&(d)->arch.altp2m_list_lock)
277 | | |
278 | | /* P2M lock (per-altp2m-table) |
279 | | * |
280 | | * This protects all queries and updates to the p2m table. |
281 | | * Queries may be made under the read lock but all modifications |
282 | | * need the main (write) lock. |
283 | | * |
284 | | * The write lock is recursive as it is common for a code path to look |
285 | | * up a gfn and later mutate it. |
286 | | */ |
287 | | |
/* Order 40: alternate p2ms rank above the host p2m (16) and the altp2m
 * list lock (32), so a host-p2m holder may still lock an individual altp2m. */
288 | 0 | #define MM_LOCK_ORDER_altp2m 40
289 | | declare_mm_rwlock(altp2m);
/* Write-lock a p2m, dispatching on its flavour: alternate p2ms use the
 * higher-ordered altp2m lock class, host p2ms the p2m class.  Taking the
 * write lock also bumps defer_flush, postponing any TLB flush until the
 * outermost p2m_unlock() (the write lock is recursive — see comment above). */
290 | | #define p2m_lock(p) \
291 | 510k | do { \
292 | 510k | if ( p2m_is_altp2m(p) ) \
293 | 510k | mm_write_lock(altp2m, &(p)->lock); \
294 | 510k | else \
295 | 510k | mm_write_lock(p2m, &(p)->lock); \
296 | 510k | (p)->defer_flush++; \
297 | 510k | } while (0)
/* Drop one level of the write lock; only the outermost unlock (defer_flush
 * reaching zero) performs the deferred TLB flush via p2m_unlock_and_tlb_flush().
 * NOTE(review): the 18.4E hit count on the else arm below looks like a
 * coverage-counter wrap artifact, not real executions — confirm. */
298 | | #define p2m_unlock(p) \
299 | 510k | do { \
300 | 510k | if ( --(p)->defer_flush == 0 ) \
301 | 510k | p2m_unlock_and_tlb_flush(p); \
302 | 510k | else \
303 | 18.4E | mm_write_unlock(&(p)->lock); \
304 | 510k | } while (0)
/* Per-gfn locking is currently implemented as whole-p2m locking; the gfn and
 * order arguments are accepted but ignored. */
305 | 507k | #define gfn_lock(p,g,o) p2m_lock(p)
306 | 507k | #define gfn_unlock(p,g,o) p2m_unlock(p)
/* Read side for queries (see comment above); not flavour-dispatched and does
 * not touch defer_flush, since readers make no modifications to flush. */
307 | 1.96M | #define p2m_read_lock(p) mm_read_lock(p2m, &(p)->lock)
308 | 1.96M | #define p2m_read_unlock(p) mm_read_unlock(&(p)->lock)
309 | | #define p2m_locked_by_me(p) mm_write_locked_by_me(&(p)->lock)
310 | | #define gfn_locked_by_me(p,g) p2m_locked_by_me(p)
311 | | |
312 | | /* PoD lock (per-p2m-table) |
313 | | * |
314 | | * Protects private PoD data structs: entry and cache |
315 | | * counts, page lists, sweep parameters. */ |
316 | | |
/* Order 48: taken after the p2m/altp2m locks (16/40), before page_alloc (56). */
317 | 0 | #define MM_LOCK_ORDER_pod 48
318 | | declare_mm_lock(pod)
319 | 0 | #define pod_lock(p) mm_lock(pod, &(p)->pod.lock)
320 | 0 | #define pod_unlock(p) mm_unlock(&(p)->pod.lock)
321 | | #define pod_locked_by_me(p) mm_locked_by_me(&(p)->pod.lock)
322 | | |
323 | | /* Page alloc lock (per-domain) |
324 | | * |
325 | | * This is an external lock, not represented by an mm_lock_t. However, |
326 | | * PoD code uses it in conjunction with the p2m lock, and expects
327 | | * the ordering which we enforce here.
328 | | * The lock is not recursive. */ |
329 | | |
/* Order 56.  Like per_page_sharing, this is an order-only constraint for an
 * external lock; the recursion argument is NULL because the page-alloc lock
 * is not recursive (see comment above).
 * NOTE(review): post_lock passes the level by address (&(l)) while unlock
 * passes it by value — presumably so post_lock can store the saved level;
 * confirm against the mm_enforce_order_lock_*() definitions. */
330 | 0 | #define MM_LOCK_ORDER_page_alloc 56
331 | | declare_mm_order_constraint(page_alloc)
332 | 0 | #define page_alloc_mm_pre_lock() mm_enforce_order_lock_pre_page_alloc()
333 | 0 | #define page_alloc_mm_post_lock(l) mm_enforce_order_lock_post_page_alloc(&(l), NULL)
334 | 0 | #define page_alloc_mm_unlock(l) mm_enforce_order_unlock((l), NULL)
335 | | |
336 | | /* Paging lock (per-domain) |
337 | | * |
338 | | * For shadow pagetables, this lock protects |
339 | | * - all changes to shadow page table pages |
340 | | * - the shadow hash table |
341 | | * - the shadow page allocator |
342 | | * - all changes to guest page table pages |
343 | | * - all changes to the page_info->tlbflush_timestamp |
344 | | * - the page_info->count fields on shadow pages |
345 | | * |
346 | | * For HAP, it protects the NPT/EPT tables and mode changes. |
347 | | * |
348 | | * It also protects the log-dirty bitmap from concurrent accesses (and |
349 | | * teardowns, etc). */ |
350 | | |
/* Order 64: the highest ordering value in this file, so the paging lock may
 * be taken while holding any of the locks above.  A recursive variant is
 * provided for code paths that may re-enter with the lock already held. */
351 | 35.9k | #define MM_LOCK_ORDER_paging 64
352 | | declare_mm_lock(paging)
353 | 34.6k | #define paging_lock(d) mm_lock(paging, &(d)->arch.paging.lock)
354 | | #define paging_lock_recursive(d) \
355 | 1.28k | mm_lock_recursive(paging, &(d)->arch.paging.lock)
356 | 35.9k | #define paging_unlock(d) mm_unlock(&(d)->arch.paging.lock)
357 | | #define paging_locked_by_me(d) mm_locked_by_me(&(d)->arch.paging.lock)
358 | | |
359 | | #endif /* _MM_LOCKS_H */ |