debuggers.hg

view xen/common/sysctl.c @ 22855:1d1eec7e1fb4

xl: Perform minimal validation of virtual disk file while parsing config file

This patch performs some very basic validation of the virtual disk
file passed through the config file. This validation ensures that we
do not get too far into initialization (e.g. spawning qemu) while
there are potentially fundamental issues with the configuration.

[ Patch fixed up to work with PHYSTYPE_EMPTY 22808:6ec61438713a -iwj ]

Signed-off-by: Kamala Narasimhan <kamala.narasimhan@citrix.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
Committed-by: Ian Jackson <ian.jackson@eu.citrix.com>
author Kamala Narasimhan <kamala.narasimhan@gmail.com>
date Tue Jan 25 18:09:49 2011 +0000 (2011-01-25)
parents 7d07116efc25
children
line source
1 /******************************************************************************
2 * sysctl.c
3 *
4 * System management operations. For use by node control stack.
5 *
6 * Copyright (c) 2002-2006, K Fraser
7 */
9 #include <xen/config.h>
10 #include <xen/types.h>
11 #include <xen/lib.h>
12 #include <xen/mm.h>
13 #include <xen/sched.h>
14 #include <xen/domain.h>
15 #include <xen/event.h>
16 #include <xen/domain_page.h>
17 #include <xen/trace.h>
18 #include <xen/console.h>
19 #include <xen/iocap.h>
20 #include <xen/guest_access.h>
21 #include <xen/keyhandler.h>
22 #include <asm/current.h>
23 #include <xen/hypercall.h>
24 #include <public/sysctl.h>
25 #include <asm/numa.h>
26 #include <xen/nodemask.h>
27 #include <xsm/xsm.h>
28 #include <xen/pmstat.h>
/* Architecture-specific sysctl handler: fallback for commands not handled
 * in the common switch below. Defined in per-arch code. */
30 extern long arch_do_sysctl(
31 struct xen_sysctl *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl);
32 #ifdef LOCK_PROFILE
/* Spinlock profiling control; only built when LOCK_PROFILE is enabled. */
33 extern int spinlock_profile_control(xen_sysctl_lockprof_op_t *pc);
34 #endif
36 long do_sysctl(XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
37 {
38 long ret = 0;
39 struct xen_sysctl curop, *op = &curop;
40 static DEFINE_SPINLOCK(sysctl_lock);
42 if ( !IS_PRIV(current->domain) )
43 return -EPERM;
45 if ( copy_from_guest(op, u_sysctl, 1) )
46 return -EFAULT;
48 if ( op->interface_version != XEN_SYSCTL_INTERFACE_VERSION )
49 return -EACCES;
51 /*
52 * Trylock here avoids deadlock with an existing sysctl critical section
53 * which might (for some current or future reason) want to synchronise
54 * with this vcpu.
55 */
56 while ( !spin_trylock(&sysctl_lock) )
57 if ( hypercall_preempt_check() )
58 return hypercall_create_continuation(
59 __HYPERVISOR_sysctl, "h", u_sysctl);
61 switch ( op->cmd )
62 {
63 case XEN_SYSCTL_readconsole:
64 {
65 ret = xsm_readconsole(op->u.readconsole.clear);
66 if ( ret )
67 break;
69 ret = read_console_ring(&op->u.readconsole);
70 if ( copy_to_guest(u_sysctl, op, 1) )
71 ret = -EFAULT;
72 }
73 break;
75 case XEN_SYSCTL_tbuf_op:
76 {
77 ret = xsm_tbufcontrol();
78 if ( ret )
79 break;
81 ret = tb_control(&op->u.tbuf_op);
82 if ( copy_to_guest(u_sysctl, op, 1) )
83 ret = -EFAULT;
84 }
85 break;
87 case XEN_SYSCTL_sched_id:
88 {
89 ret = xsm_sched_id();
90 if ( ret )
91 break;
93 op->u.sched_id.sched_id = sched_id();
94 if ( copy_to_guest(u_sysctl, op, 1) )
95 ret = -EFAULT;
96 else
97 ret = 0;
98 }
99 break;
101 case XEN_SYSCTL_getdomaininfolist:
102 {
103 struct domain *d;
104 struct xen_domctl_getdomaininfo info;
105 u32 num_domains = 0;
107 rcu_read_lock(&domlist_read_lock);
109 for_each_domain ( d )
110 {
111 if ( d->domain_id < op->u.getdomaininfolist.first_domain )
112 continue;
113 if ( num_domains == op->u.getdomaininfolist.max_domains )
114 break;
116 ret = xsm_getdomaininfo(d);
117 if ( ret )
118 continue;
120 getdomaininfo(d, &info);
122 if ( copy_to_guest_offset(op->u.getdomaininfolist.buffer,
123 num_domains, &info, 1) )
124 {
125 ret = -EFAULT;
126 break;
127 }
129 num_domains++;
130 }
132 rcu_read_unlock(&domlist_read_lock);
134 if ( ret != 0 )
135 break;
137 op->u.getdomaininfolist.num_domains = num_domains;
139 if ( copy_to_guest(u_sysctl, op, 1) )
140 ret = -EFAULT;
141 }
142 break;
144 #ifdef PERF_COUNTERS
145 case XEN_SYSCTL_perfc_op:
146 {
147 ret = xsm_perfcontrol();
148 if ( ret )
149 break;
151 ret = perfc_control(&op->u.perfc_op);
152 if ( copy_to_guest(u_sysctl, op, 1) )
153 ret = -EFAULT;
154 }
155 break;
156 #endif
158 #ifdef LOCK_PROFILE
159 case XEN_SYSCTL_lockprof_op:
160 {
161 ret = spinlock_profile_control(&op->u.lockprof_op);
162 if ( copy_to_guest(u_sysctl, op, 1) )
163 ret = -EFAULT;
164 }
165 break;
166 #endif
167 case XEN_SYSCTL_debug_keys:
168 {
169 char c;
170 uint32_t i;
172 ret = xsm_debug_keys();
173 if ( ret )
174 break;
176 ret = -EFAULT;
177 for ( i = 0; i < op->u.debug_keys.nr_keys; i++ )
178 {
179 if ( copy_from_guest_offset(&c, op->u.debug_keys.keys, i, 1) )
180 goto out;
181 handle_keypress(c, guest_cpu_user_regs());
182 }
183 ret = 0;
184 }
185 break;
187 case XEN_SYSCTL_getcpuinfo:
188 {
189 uint32_t i, nr_cpus;
190 struct xen_sysctl_cpuinfo cpuinfo;
192 nr_cpus = min_t(uint32_t, op->u.getcpuinfo.max_cpus, NR_CPUS);
194 ret = xsm_getcpuinfo();
195 if ( ret )
196 break;
198 for ( i = 0; i < nr_cpus; i++ )
199 {
200 cpuinfo.idletime = get_cpu_idle_time(i);
202 ret = -EFAULT;
203 if ( copy_to_guest_offset(op->u.getcpuinfo.info, i, &cpuinfo, 1) )
204 goto out;
205 }
207 op->u.getcpuinfo.nr_cpus = i;
208 ret = copy_to_guest(u_sysctl, op, 1) ? -EFAULT : 0;
209 }
210 break;
212 case XEN_SYSCTL_availheap:
213 {
214 ret = xsm_availheap();
215 if ( ret )
216 break;
218 op->u.availheap.avail_bytes = avail_domheap_pages_region(
219 op->u.availheap.node,
220 op->u.availheap.min_bitwidth,
221 op->u.availheap.max_bitwidth);
222 op->u.availheap.avail_bytes <<= PAGE_SHIFT;
224 ret = copy_to_guest(u_sysctl, op, 1) ? -EFAULT : 0;
225 }
226 break;
228 case XEN_SYSCTL_get_pmstat:
229 {
230 ret = xsm_get_pmstat();
231 if ( ret )
232 break;
234 ret = do_get_pm_info(&op->u.get_pmstat);
235 if ( ret )
236 break;
238 if ( copy_to_guest(u_sysctl, op, 1) )
239 {
240 ret = -EFAULT;
241 break;
242 }
243 }
244 break;
246 case XEN_SYSCTL_pm_op:
247 {
248 ret = xsm_pm_op();
249 if ( ret )
250 break;
252 ret = do_pm_op(&op->u.pm_op);
253 if ( ret && (ret != -EAGAIN) )
254 break;
256 if ( copy_to_guest(u_sysctl, op, 1) )
257 {
258 ret = -EFAULT;
259 break;
260 }
261 }
262 break;
264 case XEN_SYSCTL_page_offline_op:
265 {
266 uint32_t *status, *ptr;
267 unsigned long pfn;
269 ptr = status = xmalloc_bytes( sizeof(uint32_t) *
270 (op->u.page_offline.end -
271 op->u.page_offline.start + 1));
272 if ( !status )
273 {
274 dprintk(XENLOG_WARNING, "Out of memory for page offline op\n");
275 ret = -ENOMEM;
276 break;
277 }
279 memset(status, PG_OFFLINE_INVALID, sizeof(uint32_t) *
280 (op->u.page_offline.end - op->u.page_offline.start + 1));
282 for ( pfn = op->u.page_offline.start;
283 pfn <= op->u.page_offline.end;
284 pfn ++ )
285 {
286 switch ( op->u.page_offline.cmd )
287 {
288 /* Shall revert her if failed, or leave caller do it? */
289 case sysctl_page_offline:
290 ret = offline_page(pfn, 0, ptr++);
291 break;
292 case sysctl_page_online:
293 ret = online_page(pfn, ptr++);
294 break;
295 case sysctl_query_page_offline:
296 ret = query_page_offline(pfn, ptr++);
297 break;
298 default:
299 gdprintk(XENLOG_WARNING, "invalid page offline op %x\n",
300 op->u.page_offline.cmd);
301 ret = -EINVAL;
302 break;
303 }
305 if (ret)
306 break;
307 }
309 if ( copy_to_guest(
310 op->u.page_offline.status, status,
311 op->u.page_offline.end - op->u.page_offline.start + 1) )
312 {
313 ret = -EFAULT;
314 break;
315 }
317 xfree(status);
318 }
319 break;
321 case XEN_SYSCTL_cpupool_op:
322 {
323 ret = cpupool_do_sysctl(&op->u.cpupool_op);
324 if ( (ret == 0) && copy_to_guest(u_sysctl, op, 1) )
325 ret = -EFAULT;
326 }
327 break;
329 case XEN_SYSCTL_scheduler_op:
330 {
331 ret = sched_adjust_global(&op->u.scheduler_op);
332 if ( (ret == 0) && copy_to_guest(u_sysctl, op, 1) )
333 ret = -EFAULT;
334 }
335 break;
337 default:
338 ret = arch_do_sysctl(op, u_sysctl);
339 break;
340 }
342 out:
343 spin_unlock(&sysctl_lock);
345 return ret;
346 }
348 /*
349 * Local variables:
350 * mode: C
351 * c-set-style: "BSD"
352 * c-basic-offset: 4
353 * tab-width: 4
354 * indent-tabs-mode: nil
355 * End:
356 */