/root/src/xen/xen/arch/x86/sysctl.c
Line | Count | Source (jump to first uncovered line) |
1 | | /****************************************************************************** |
2 | | * Arch-specific sysctl.c |
3 | | * |
4 | | * System management operations. For use by node control stack. |
5 | | * |
6 | | * Copyright (c) 2002-2006, K Fraser |
7 | | */ |
8 | | |
9 | | #include <xen/types.h> |
10 | | #include <xen/lib.h> |
11 | | #include <xen/mm.h> |
12 | | #include <xen/guest_access.h> |
13 | | #include <xen/hypercall.h> |
14 | | #include <public/sysctl.h> |
15 | | #include <xen/sched.h> |
16 | | #include <xen/event.h> |
17 | | #include <xen/domain_page.h> |
18 | | #include <asm/msr.h> |
19 | | #include <xen/trace.h> |
20 | | #include <xen/console.h> |
21 | | #include <xen/iocap.h> |
22 | | #include <asm/irq.h> |
23 | | #include <asm/hvm/hvm.h> |
24 | | #include <asm/hvm/support.h> |
25 | | #include <asm/processor.h> |
26 | | #include <asm/smp.h> |
27 | | #include <asm/numa.h> |
28 | | #include <xen/nodemask.h> |
29 | | #include <xen/cpu.h> |
30 | | #include <xsm/xsm.h> |
31 | | #include <asm/psr.h> |
32 | | #include <asm/cpuid.h> |
33 | | |
/* Result container for l3_cache_get(), which may run via IPI on a remote CPU. */
struct l3_cache_info {
    int ret;            /* 0 on success, else error from cpuid4_cache_lookup() */
    unsigned long size; /* L3 cache size in KB; valid only when ret == 0 */
};
38 | | |
39 | | static void l3_cache_get(void *arg) |
40 | 0 | { |
41 | 0 | struct cpuid4_info info; |
42 | 0 | struct l3_cache_info *l3_info = arg; |
43 | 0 |
|
44 | 0 | l3_info->ret = cpuid4_cache_lookup(3, &info); |
45 | 0 | if ( !l3_info->ret ) |
46 | 0 | l3_info->size = info.size / 1024; /* in KB unit */ |
47 | 0 | } |
48 | | |
49 | | long cpu_up_helper(void *data) |
50 | 0 | { |
51 | 0 | int cpu = (unsigned long)data; |
52 | 0 | int ret = cpu_up(cpu); |
53 | 0 | if ( ret == -EBUSY ) |
54 | 0 | { |
55 | 0 | /* On EBUSY, flush RCU work and have one more go. */ |
56 | 0 | rcu_barrier(); |
57 | 0 | ret = cpu_up(cpu); |
58 | 0 | } |
59 | 0 | return ret; |
60 | 0 | } |
61 | | |
62 | | long cpu_down_helper(void *data) |
63 | 0 | { |
64 | 0 | int cpu = (unsigned long)data; |
65 | 0 | int ret = cpu_down(cpu); |
66 | 0 | if ( ret == -EBUSY ) |
67 | 0 | { |
68 | 0 | /* On EBUSY, flush RCU work and have one more go. */ |
69 | 0 | rcu_barrier(); |
70 | 0 | ret = cpu_down(cpu); |
71 | 0 | } |
72 | 0 | return ret; |
73 | 0 | } |
74 | | |
75 | | void arch_do_physinfo(struct xen_sysctl_physinfo *pi) |
76 | 0 | { |
77 | 0 | memcpy(pi->hw_cap, boot_cpu_data.x86_capability, |
78 | 0 | min(sizeof(pi->hw_cap), sizeof(boot_cpu_data.x86_capability))); |
79 | 0 | if ( hvm_enabled ) |
80 | 0 | pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm; |
81 | 0 | if ( iommu_enabled ) |
82 | 0 | pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm_directio; |
83 | 0 | } |
84 | | |
/*
 * Handle the x86-specific subset of XEN_SYSCTL_* operations, called from
 * common do_sysctl() for commands the common code does not recognise.
 *
 * sysctl   - in/out copy of the guest's hypercall argument structure.
 * u_sysctl - guest handle to the original structure, used to copy results
 *            back for operations that return data.
 *
 * Returns 0 on success, a negative errno value on failure, or the result
 * of continue_hypercall_on_cpu() for CPU hotplug requests.
 */
long arch_do_sysctl(
    struct xen_sysctl *sysctl, XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
{
    long ret = 0;

    switch ( sysctl->cmd )
    {

    case XEN_SYSCTL_cpu_hotplug:
    {
        unsigned int cpu = sysctl->u.cpu_hotplug.cpu;

        switch ( sysctl->u.cpu_hotplug.op )
        {
        case XEN_SYSCTL_CPU_HOTPLUG_ONLINE:
            ret = xsm_resource_plug_core(XSM_HOOK);
            if ( ret )
                break;
            /* Defer the actual bring-up to CPU0 via a continuation. */
            ret = continue_hypercall_on_cpu(
                0, cpu_up_helper, (void *)(unsigned long)cpu);
            break;
        case XEN_SYSCTL_CPU_HOTPLUG_OFFLINE:
            ret = xsm_resource_unplug_core(XSM_HOOK);
            if ( ret )
                break;
            /* Defer the actual offlining to CPU0 via a continuation. */
            ret = continue_hypercall_on_cpu(
                0, cpu_down_helper, (void *)(unsigned long)cpu);
            break;
        default:
            ret = -EINVAL;
            break;
        }
    }
    break;

    case XEN_SYSCTL_psr_cmt_op:
        /* Cache Monitoring Technology queries. */
        if ( !psr_cmt_enabled() )
            return -ENODEV;

        /* No flags defined for this sub-op yet. */
        if ( sysctl->u.psr_cmt_op.flags != 0 )
            return -EINVAL;

        switch ( sysctl->u.psr_cmt_op.cmd )
        {
        case XEN_SYSCTL_PSR_CMT_enabled:
            /* "Enabled" means L3 occupancy monitoring is available. */
            sysctl->u.psr_cmt_op.u.data =
                (psr_cmt->features & PSR_RESOURCE_TYPE_L3) &&
                (psr_cmt->l3.features & PSR_CMT_L3_OCCUPANCY);
            break;
        case XEN_SYSCTL_PSR_CMT_get_total_rmid:
            sysctl->u.psr_cmt_op.u.data = psr_cmt->rmid_max;
            break;
        case XEN_SYSCTL_PSR_CMT_get_l3_upscaling_factor:
            sysctl->u.psr_cmt_op.u.data = psr_cmt->l3.upscaling_factor;
            break;
        case XEN_SYSCTL_PSR_CMT_get_l3_cache_size:
        {
            struct l3_cache_info info;
            unsigned int cpu = sysctl->u.psr_cmt_op.u.l3_cache.cpu;

            if ( (cpu >= nr_cpu_ids) || !cpu_online(cpu) )
            {
                ret = -ENODEV;
                sysctl->u.psr_cmt_op.u.data = 0;
                break;
            }
            /* Query locally when possible, else IPI the target CPU and wait. */
            if ( cpu == smp_processor_id() )
                l3_cache_get(&info);
            else
                on_selected_cpus(cpumask_of(cpu), l3_cache_get, &info, 1);

            ret = info.ret;
            sysctl->u.psr_cmt_op.u.data = (ret ? 0 : info.size);
            break;
        }
        case XEN_SYSCTL_PSR_CMT_get_l3_event_mask:
            sysctl->u.psr_cmt_op.u.data = psr_cmt->l3.features;
            break;
        default:
            sysctl->u.psr_cmt_op.u.data = 0;
            ret = -ENOSYS;
            break;
        }

        /* Copy the (possibly updated) structure back to the guest. */
        if ( __copy_to_guest(u_sysctl, sysctl, 1) )
            ret = -EFAULT;

        break;

    case XEN_SYSCTL_psr_cat_op:
        /* Cache Allocation Technology info queries. */
        switch ( sysctl->u.psr_cat_op.cmd )
        {
        /* Scratch buffer shared by the sub-op cases below. */
        uint32_t data[PSR_INFO_ARRAY_SIZE];

        case XEN_SYSCTL_PSR_CAT_get_l3_info:
        {
            ret = psr_get_info(sysctl->u.psr_cat_op.target,
                               PSR_CBM_TYPE_L3, data, ARRAY_SIZE(data));
            if ( ret )
                break;

            sysctl->u.psr_cat_op.u.cat_info.cos_max =
                data[PSR_INFO_IDX_COS_MAX];
            sysctl->u.psr_cat_op.u.cat_info.cbm_len =
                data[PSR_INFO_IDX_CAT_CBM_LEN];
            sysctl->u.psr_cat_op.u.cat_info.flags =
                data[PSR_INFO_IDX_CAT_FLAG];

            if ( __copy_field_to_guest(u_sysctl, sysctl, u.psr_cat_op) )
                ret = -EFAULT;
            break;
        }

        case XEN_SYSCTL_PSR_CAT_get_l2_info:
        {
            ret = psr_get_info(sysctl->u.psr_cat_op.target,
                               PSR_CBM_TYPE_L2, data, ARRAY_SIZE(data));
            if ( ret )
                break;

            sysctl->u.psr_cat_op.u.cat_info.cos_max =
                data[PSR_INFO_IDX_COS_MAX];
            sysctl->u.psr_cat_op.u.cat_info.cbm_len =
                data[PSR_INFO_IDX_CAT_CBM_LEN];
            sysctl->u.psr_cat_op.u.cat_info.flags =
                data[PSR_INFO_IDX_CAT_FLAG];

            if ( __copy_field_to_guest(u_sysctl, sysctl, u.psr_cat_op) )
                ret = -EFAULT;
            break;
        }

        default:
            ret = -EOPNOTSUPP;
            break;
        }
        break;

    case XEN_SYSCTL_get_cpu_levelling_caps:
        /* Report which MSR-based feature-levelling facilities exist. */
        sysctl->u.cpu_levelling_caps.caps = levelling_caps;
        if ( __copy_field_to_guest(u_sysctl, sysctl, u.cpu_levelling_caps.caps) )
            ret = -EFAULT;
        break;

    case XEN_SYSCTL_get_cpu_featureset:
    {
        /* Map the requested featureset index to its CPUID policy. */
        static const struct cpuid_policy *const policy_table[] = {
            [XEN_SYSCTL_cpu_featureset_raw] = &raw_cpuid_policy,
            [XEN_SYSCTL_cpu_featureset_host] = &host_cpuid_policy,
            [XEN_SYSCTL_cpu_featureset_pv] = &pv_max_cpuid_policy,
            [XEN_SYSCTL_cpu_featureset_hvm] = &hvm_max_cpuid_policy,
        };
        const struct cpuid_policy *p = NULL;
        uint32_t featureset[FSCAPINTS];
        unsigned int nr;

        /* Request for maximum number of features? */
        if ( guest_handle_is_null(sysctl->u.cpu_featureset.features) )
        {
            sysctl->u.cpu_featureset.nr_features = FSCAPINTS;
            if ( __copy_field_to_guest(u_sysctl, sysctl,
                                       u.cpu_featureset.nr_features) )
                ret = -EFAULT;
            break;
        }

        /* Clip the number of entries. */
        nr = min_t(unsigned int, sysctl->u.cpu_featureset.nr_features,
                   FSCAPINTS);

        /* Look up requested featureset. */
        if ( sysctl->u.cpu_featureset.index < ARRAY_SIZE(policy_table) )
            p = policy_table[sysctl->u.cpu_featureset.index];

        /* Bad featureset index? */
        if ( !p )
            ret = -EINVAL;
        else
            cpuid_policy_to_featureset(p, featureset);

        /* Copy the requested featureset into place. */
        if ( !ret && copy_to_guest(sysctl->u.cpu_featureset.features,
                                   featureset, nr) )
            ret = -EFAULT;

        /* Inform the caller of how many features we wrote. */
        sysctl->u.cpu_featureset.nr_features = nr;
        if ( !ret && __copy_field_to_guest(u_sysctl, sysctl,
                                           u.cpu_featureset.nr_features) )
            ret = -EFAULT;

        /* Inform the caller if there was more data to provide. */
        if ( !ret && nr < FSCAPINTS )
            ret = -ENOBUFS;

        break;
    }

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}
290 | | |
291 | | /* |
292 | | * Local variables: |
293 | | * mode: C |
294 | | * c-file-style: "BSD" |
295 | | * c-basic-offset: 4 |
296 | | * tab-width: 4 |
297 | | * indent-tabs-mode: nil |
298 | | * End: |
299 | | */ |